VirtualBox

source: vbox/trunk/src/VBox/VMM/include/CPUMInternal.mac @ 107922

Last change on this file since 107922 was 107700, checked in by vboxsync, 5 weeks ago

VMM/CPUM,++: Made the HostFeatures match the host when targeting x86 guests on arm64 hosts. Merged and deduplicated code targeting x86 & amd64. [build fix] jiraref:VBP-1470

; $Id: CPUMInternal.mac 107700 2025-01-10 20:02:30Z vboxsync $
;; @file
; CPUM - Internal header file (asm).
;

;
; Copyright (C) 2006-2024 Oracle and/or its affiliates.
;
; This file is part of VirtualBox base platform packages, as
; available from https://www.virtualbox.org.
;
; This program is free software; you can redistribute it and/or
; modify it under the terms of the GNU General Public License
; as published by the Free Software Foundation, in version 3 of the
; License.
;
; This program is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; You should have received a copy of the GNU General Public License
; along with this program; if not, see <https://www.gnu.org/licenses>.
;
; SPDX-License-Identifier: GPL-3.0-only
;

%include "VBox/asmdefs.mac"
%include "VBox/vmm/cpum.mac"

;; Check sanity.
%ifdef VBOX_WITH_KERNEL_USING_XMM
 %ifndef IN_RING0
  %error "What? We've got code assuming VBOX_WITH_KERNEL_USING_XMM is only defined in ring-0!"
 %endif
%endif

;; For numeric expressions
%ifdef RT_ARCH_AMD64
 %define CPUM_IS_AMD64 1
%else
 %define CPUM_IS_AMD64 0
%endif
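
%if 0 ; Usage sketch, not part of the original file: unlike a plain %ifdef,
      ; a 0/1 define such as CPUM_IS_AMD64 can be combined with other terms
      ; in a numeric %if, which is how the FPU macros below fold their
      ; "force AMD64" parameter into a single test:
 %if CPUM_IS_AMD64 || 1
        o64 fxsave [rsp]                ; 64-bit variant (operand hypothetical)
 %endif
%endif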


;;
; CPU info
struc CPUMINFO
    .cMsrRanges             resd 1                  ; uint32_t
    .fMsrMask               resd 1                  ; uint32_t
    .fMxCsrMask             resd 1                  ; uint32_t
    .cCpuIdLeaves           resd 1                  ; uint32_t
    .iFirstExtCpuIdLeaf     resd 1                  ; uint32_t
    .enmUnknownCpuIdMethod  resd 1                  ; CPUMUNKNOWNCPUID
    .DefCpuId               resb CPUMCPUID_size     ; CPUMCPUID
    .uScalableBusFreq       resq 1                  ; uint64_t
    .uMicrocodeRevision     resd 1
    alignb 8
    .paMsrRangesR3          RTR3PTR_RES 1           ; R3PTRTYPE(PCPUMMSRRANGE)
    .paCpuIdLeavesR3        RTR3PTR_RES 1           ; R3PTRTYPE(PCPUMCPUIDLEAF)
    .aCpuIdLeaves           resb 256*32
    .aMsrRanges             resb 8192*128
endstruc
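
%if 0 ; Usage sketch, not part of the original file: with NASM struc offsets a
      ; CPUID leaf can be addressed by index, assuming each entry in the
      ; .aCpuIdLeaves block reserved above is 32 bytes (rbx = CPUMINFO
      ; pointer, assumed):
        mov     ecx, [rbx + CPUMINFO.cCpuIdLeaves]  ; number of valid leaves
        shl     rcx, 5                              ; index * 32 bytes per leaf
        lea     rax, [rbx + CPUMINFO.aCpuIdLeaves + rcx - 32] ; address of last leaf
%endif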


%define CPUM_USED_FPU_HOST              RT_BIT(0)
%define CPUM_USED_FPU_GUEST             RT_BIT(10)
%define CPUM_USED_FPU_SINCE_REM         RT_BIT(1)
%define CPUM_USED_MANUAL_XMM_RESTORE    RT_BIT(2)
%define CPUM_USE_SYSENTER               RT_BIT(3)
%define CPUM_USE_SYSCALL                RT_BIT(4)
%define CPUM_USE_DEBUG_REGS_HOST        RT_BIT(5)
%define CPUM_USED_DEBUG_REGS_HOST       RT_BIT(6)
%define CPUM_USE_DEBUG_REGS_HYPER       RT_BIT(7)
%define CPUM_USED_DEBUG_REGS_HYPER      RT_BIT(8)
%define CPUM_USED_DEBUG_REGS_GUEST      RT_BIT(9)
%define CPUM_USE_FFXSR_LEAKY            RT_BIT(19)
%define CPUM_USE_SUPPORTS_LONGMODE      RT_BIT(20)
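
%if 0 ; Usage sketch, not part of the original file: these are bit flags for
      ; CPUMCPU.fUseFlags and are tested the same way CLEANFPU does further
      ; down (label hypothetical):
        test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_GUEST
        jz      .guest_drx_not_loaded
%endif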


struc CPUM
    .GuestFeatures          resb 64
    .HostFeatures           resb 64

    .u8PortableCpuIdLevel   resb 1
    .fPendingRestore        resb 1
    .fMtrrRead              resb 1
    .fMtrrWrite             resb 1

    .fHostUseFlags          resd 1

    alignb 8
    .fXStateGuestMask       resq 1
    .fXStateHostMask        resq 1
    .fReservedRFlagsCookie  resq 1

    alignb 8
    .GuestInfo              resb CPUMINFO_size

    ; Patch manager saved state compatibility CPUID leaf arrays
    .aGuestCpuIdPatmStd     resb 16*6
    .aGuestCpuIdPatmExt     resb 16*10
    .aGuestCpuIdPatmCentaur resb 16*4

    alignb 8
    .cMsrWrites             resq 1
    .cMsrWritesToIgnoredBits resq 1
    .cMsrWritesRaiseGp      resq 1
    .cMsrWritesUnknown      resq 1
    .cMsrReads              resq 1
    .cMsrReadsRaiseGp       resq 1
    .cMsrReadsUnknown       resq 1

    .fHostMxCsrMask         resd 1
    alignb 8
endstruc
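
%if 0 ; Usage sketch, not part of the original file: the MSR statistics fields
      ; are plain 64-bit counters, so an update is a single increment
      ; (rbx = CPUM pointer, assumed):
        inc     qword [rbx + CPUM.cMsrReads]
%endif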

struc CPUMCPU
    ;
    ; Guest context state
    ;
    .Guest                  resq 0
    .Guest.eax              resq 1
    .Guest.ecx              resq 1
    .Guest.edx              resq 1
    .Guest.ebx              resq 1
    .Guest.esp              resq 1
    .Guest.ebp              resq 1
    .Guest.esi              resq 1
    .Guest.edi              resq 1
    .Guest.r8               resq 1
    .Guest.r9               resq 1
    .Guest.r10              resq 1
    .Guest.r11              resq 1
    .Guest.r12              resq 1
    .Guest.r13              resq 1
    .Guest.r14              resq 1
    .Guest.r15              resq 1
    .Guest.es.Sel           resw 1
    .Guest.es.PaddingSel    resw 1
    .Guest.es.ValidSel      resw 1
    .Guest.es.fFlags        resw 1
    .Guest.es.u64Base       resq 1
    .Guest.es.u32Limit      resd 1
    .Guest.es.Attr          resd 1
    .Guest.cs.Sel           resw 1
    .Guest.cs.PaddingSel    resw 1
    .Guest.cs.ValidSel      resw 1
    .Guest.cs.fFlags        resw 1
    .Guest.cs.u64Base       resq 1
    .Guest.cs.u32Limit      resd 1
    .Guest.cs.Attr          resd 1
    .Guest.ss.Sel           resw 1
    .Guest.ss.PaddingSel    resw 1
    .Guest.ss.ValidSel      resw 1
    .Guest.ss.fFlags        resw 1
    .Guest.ss.u64Base       resq 1
    .Guest.ss.u32Limit      resd 1
    .Guest.ss.Attr          resd 1
    .Guest.ds.Sel           resw 1
    .Guest.ds.PaddingSel    resw 1
    .Guest.ds.ValidSel      resw 1
    .Guest.ds.fFlags        resw 1
    .Guest.ds.u64Base       resq 1
    .Guest.ds.u32Limit      resd 1
    .Guest.ds.Attr          resd 1
    .Guest.fs.Sel           resw 1
    .Guest.fs.PaddingSel    resw 1
    .Guest.fs.ValidSel      resw 1
    .Guest.fs.fFlags        resw 1
    .Guest.fs.u64Base       resq 1
    .Guest.fs.u32Limit      resd 1
    .Guest.fs.Attr          resd 1
    .Guest.gs.Sel           resw 1
    .Guest.gs.PaddingSel    resw 1
    .Guest.gs.ValidSel      resw 1
    .Guest.gs.fFlags        resw 1
    .Guest.gs.u64Base       resq 1
    .Guest.gs.u32Limit      resd 1
    .Guest.gs.Attr          resd 1
    .Guest.ldtr.Sel         resw 1
    .Guest.ldtr.PaddingSel  resw 1
    .Guest.ldtr.ValidSel    resw 1
    .Guest.ldtr.fFlags      resw 1
    .Guest.ldtr.u64Base     resq 1
    .Guest.ldtr.u32Limit    resd 1
    .Guest.ldtr.Attr        resd 1
    .Guest.tr.Sel           resw 1
    .Guest.tr.PaddingSel    resw 1
    .Guest.tr.ValidSel      resw 1
    .Guest.tr.fFlags        resw 1
    .Guest.tr.u64Base       resq 1
    .Guest.tr.u32Limit      resd 1
    .Guest.tr.Attr          resd 1
    alignb 8
    .Guest.eip              resq 1
    .Guest.eflags           resq 1
    .Guest.fExtrn           resq 1
    .Guest.uRipInhibitInt   resq 1
    .Guest.cr0              resq 1
    .Guest.cr2              resq 1
    .Guest.cr3              resq 1
    .Guest.cr4              resq 1
    .Guest.dr               resq 8
    .Guest.gdtrPadding      resw 3
    .Guest.gdtr             resw 0
    .Guest.gdtr.cbGdt       resw 1
    .Guest.gdtr.pGdt        resq 1
    .Guest.idtrPadding      resw 3
    .Guest.idtr             resw 0
    .Guest.idtr.cbIdt       resw 1
    .Guest.idtr.pIdt        resq 1
    .Guest.SysEnter.cs      resb 8
    .Guest.SysEnter.eip     resb 8
    .Guest.SysEnter.esp     resb 8
    .Guest.msrEFER          resb 8
    .Guest.msrSTAR          resb 8
    .Guest.msrPAT           resb 8
    .Guest.msrLSTAR         resb 8
    .Guest.msrCSTAR         resb 8
    .Guest.msrSFMASK        resb 8
    .Guest.msrKERNELGSBASE  resb 8

    alignb 32
    .Guest.aPaePdpes        resq 4

    alignb 8
    .Guest.aXcr             resq 2
    .Guest.fXStateMask      resq 1
    .Guest.fUsedFpuGuest    resb 1
    alignb 8
    .Guest.aoffXState       resw 64
    alignb 256
    .Guest.abXState         resb 0x4000-0x300
    .Guest.XState           EQU .Guest.abXState

;;
    alignb 4096
    .Guest.hwvirt           resb 0
    .Guest.hwvirt.svm       resb 0
    .Guest.hwvirt.vmx       resb 0

    .Guest.hwvirt.svm.Vmcb                  EQU .Guest.hwvirt.svm
    .Guest.hwvirt.svm.abMsrBitmap           EQU (.Guest.hwvirt.svm.Vmcb + 0x1000)
    .Guest.hwvirt.svm.abIoBitmap            EQU (.Guest.hwvirt.svm.abMsrBitmap + 0x2000)
    .Guest.hwvirt.svm.uMsrHSavePa           EQU (.Guest.hwvirt.svm.abIoBitmap + 0x3000)       ; resq 1
    .Guest.hwvirt.svm.GCPhysVmcb            EQU (.Guest.hwvirt.svm.uMsrHSavePa + 8)           ; resq 1
    alignb 8
    .Guest.hwvirt.svm.HostState             EQU (.Guest.hwvirt.svm.GCPhysVmcb + 8)            ; resb 184
    .Guest.hwvirt.svm.uPrevPauseTick        EQU (.Guest.hwvirt.svm.HostState + 184)           ; resq 1
    .Guest.hwvirt.svm.cPauseFilter          EQU (.Guest.hwvirt.svm.uPrevPauseTick + 8)        ; resw 1
    .Guest.hwvirt.svm.cPauseFilterThreshold EQU (.Guest.hwvirt.svm.cPauseFilter + 2)          ; resw 1
    .Guest.hwvirt.svm.fInterceptEvents      EQU (.Guest.hwvirt.svm.cPauseFilterThreshold + 2) ; resb 1

    .Guest.hwvirt.vmx.Vmcs                  resb 0x1000
    .Guest.hwvirt.vmx.ShadowVmcs            resb 0x1000
    .Guest.hwvirt.vmx.abVmreadBitmap        resb 0x1000
    .Guest.hwvirt.vmx.abVmwriteBitmap       resb 0x1000
    .Guest.hwvirt.vmx.aEntryMsrLoadArea     resb 0x2000
    .Guest.hwvirt.vmx.aExitMsrStoreArea     resb 0x2000
    .Guest.hwvirt.vmx.aExitMsrLoadArea      resb 0x2000
    .Guest.hwvirt.vmx.abMsrBitmap           resb 0x1000
    .Guest.hwvirt.vmx.abIoBitmap            resb 0x1000+0x1000
    alignb 8
    .Guest.hwvirt.vmx.GCPhysVmxon           resq 1
    .Guest.hwvirt.vmx.GCPhysVmcs            resq 1
    .Guest.hwvirt.vmx.GCPhysShadowVmcs      resq 1
    .Guest.hwvirt.vmx.enmDiag               resd 1
    .Guest.hwvirt.vmx.enmAbort              resd 1
    .Guest.hwvirt.vmx.uDiagAux              resq 1
    .Guest.hwvirt.vmx.uAbortAux             resd 1
    .Guest.hwvirt.vmx.fInVmxRootMode        resb 1
    .Guest.hwvirt.vmx.fInVmxNonRootMode     resb 1
    .Guest.hwvirt.vmx.fInterceptEvents      resb 1
    .Guest.hwvirt.vmx.fNmiUnblockingIret    resb 1
    .Guest.hwvirt.vmx.uFirstPauseLoopTick   resq 1
    .Guest.hwvirt.vmx.uPrevPauseTick        resq 1
    .Guest.hwvirt.vmx.uEntryTick            resq 1
    .Guest.hwvirt.vmx.offVirtApicWrite      resw 1
    .Guest.hwvirt.vmx.fVirtNmiBlocking      resb 1
    alignb 8
    .Guest.hwvirt.vmx.Msrs                  resb 224

    alignb 8
    .Guest.hwvirt.enmHwvirt                 resd 1
    .Guest.hwvirt.fGif                      resb 1
    alignb 4
    .Guest.hwvirt.fSavedInhibit             resd 1
    alignb 64

    .GuestMsrs              resq 0
    .GuestMsrs.au64         resq 64

    ;
    ; Host context state
    ;
    alignb 64
    .Host                   resb 0
    .Host.abXState          resb 0x4000-0x300
    .Host.XState            EQU .Host.abXState
    ;.Host.rax              resq 1 - scratch
    .Host.rbx               resq 1
    ;.Host.rcx              resq 1 - scratch
    ;.Host.rdx              resq 1 - scratch
    .Host.rdi               resq 1
    .Host.rsi               resq 1
    .Host.rbp               resq 1
    .Host.rsp               resq 1
    ;.Host.r8               resq 1 - scratch
    ;.Host.r9               resq 1 - scratch
    .Host.r10               resq 1
    .Host.r11               resq 1
    .Host.r12               resq 1
    .Host.r13               resq 1
    .Host.r14               resq 1
    .Host.r15               resq 1
    ;.Host.rip              resd 1 - scratch
    .Host.rflags            resq 1
    .Host.ss                resw 1
    .Host.ssPadding         resw 1
    .Host.gs                resw 1
    .Host.gsPadding         resw 1
    .Host.fs                resw 1
    .Host.fsPadding         resw 1
    .Host.es                resw 1
    .Host.esPadding         resw 1
    .Host.ds                resw 1
    .Host.dsPadding         resw 1
    .Host.cs                resw 1
    .Host.csPadding         resw 1

    .Host.cr0Fpu:
    .Host.cr0               resq 1
    ;.Host.cr2              resq 1 - scratch
    .Host.cr3               resq 1
    .Host.cr4               resq 1
    .Host.cr8               resq 1

    .Host.dr0               resq 1
    .Host.dr1               resq 1
    .Host.dr2               resq 1
    .Host.dr3               resq 1
    .Host.dr6               resq 1
    .Host.dr7               resq 1

    .Host.gdtr              resb 10         ; GDT limit + linear address
    .Host.gdtrPadding       resw 1
    .Host.idtr              resb 10         ; IDT limit + linear address
    .Host.idtrPadding       resw 1
    .Host.ldtr              resw 1
    .Host.ldtrPadding       resw 1
    .Host.tr                resw 1
    .Host.trPadding         resw 1

    .Host.SysEnter.cs       resq 1
    .Host.SysEnter.eip      resq 1
    .Host.SysEnter.esp      resq 1
    .Host.FSbase            resq 1
    .Host.GSbase            resq 1
    .Host.efer              resq 1
    alignb 8
    .Host.xcr0              resq 1
    .Host.fXStateMask       resq 1
    alignb 64

    ;
    ; Other stuff.
    ;
    .fUseFlags              resd 1
    .fChanged               resd 1
    alignb 8
    .hNestedVmxPreemptTimer resq 1
    .fCpuIdApicFeatureVisible resb 1

    ;
    ; Hypervisor Context.
    ;
    alignb 16 ; Linux makes the whole struct 16-byte aligned, so we provide the necessary padding here to keep it simple.
    .Hyper                  resq 0
    .Hyper.dr               resq 8
    .Hyper.cr3              resq 1
    .Hyper.au64Padding      resq 7

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    .aMagic                 resb 56
    .uMagic                 resq 1
%endif
endstruc
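
%if 0 ; Usage sketch, not part of the original file: CPUMCPU fields are
      ; addressed through NASM struc offsets, exactly as the macros below do
      ; (rbp = CPUMCPU pointer, assumed):
        mov     rdx, [rbp + CPUMCPU.Guest.cr3]
        test    dword [rbp + CPUMCPU.fUseFlags], CPUM_USED_FPU_GUEST
%endif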



%if 0 ; Currently not used anywhere.
;;
; Macro for FXSAVE/FXRSTOR leaky behaviour on AMD CPUs, see cpumR3CheckLeakyFpu().
;
; Cleans the FPU state, if necessary, before restoring the FPU.
;
; This macro ASSUMES CR0.TS is not set!
;
; @param    xDX     Pointer to CPUMCPU.
; @uses     xAX, EFLAGS
;
; Changes here should also be reflected in CPUMRCA.asm's copy!
;
%macro CLEANFPU 0
        test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USE_FFXSR_LEAKY
        jz      .nothing_to_clean

        xor     eax, eax
        fnstsw  ax                      ; FSW -> AX.
        test    eax, RT_BIT(7)          ; If FSW.ES (bit 7) is set, clear it to not cause FPU exceptions
                                        ; while clearing & loading the FPU bits in 'clean_fpu' below.
        jz      .clean_fpu
        fnclex

.clean_fpu:
        ffree   st7                     ; Clear FPU stack register 7's tag entry to prevent overflow
                                        ; on the upcoming push (load) if a wraparound occurs.
        fild    dword [g_r32_Zero xWrtRIP] ; Explicit FPU load to overwrite FIP, FOP, FDP registers in the FPU.
.nothing_to_clean:
%endmacro
%endif ; Unused.


;;
; Makes sure we don't trap (#NM) accessing the FPU.
;
; In ring-0 this is a bit of work since we may have to try to convince the
; host kernel to do the work for us; we must also report any CR0 changes back
; to HMR0VMX via the VINF_CPUM_HOST_CR0_MODIFIED status code.
;
; If we end up clearing CR0.TS/EM ourselves in ring-0, we'll save the original
; value in CPUMCPU.Host.cr0Fpu. If we don't, we'll store zero there. (See also
; CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET.)
;
; In raw-mode we will always have to clear TS; it will be recalculated
; elsewhere and thus needs no saving.
;
; @param    %1      Register to return the return status code in.
; @param    %2      Temporary scratch register.
; @param    %3      Ring-0 only, register pointing to the CPUMCPU structure
;                   of the EMT we're on.
; @uses     EFLAGS, CR0, %1, %2
;
%macro CPUMRZ_TOUCH_FPU_CLEAR_CR0_FPU_TRAPS_SET_RC 3
        ;
        ; ring-0 - slightly more complicated than the old raw-mode.
        ;
        xor     %1, %1                  ; 0 / VINF_SUCCESS. Wishing for no CR0 changes.
        mov     [%3 + CPUMCPU.Host.cr0Fpu], %1

        mov     %2, cr0
        test    %2, X86_CR0_TS | X86_CR0_EM ; Make sure it's safe to access the FPU state.
        jz      %%no_cr0_change

 %ifdef VMM_R0_TOUCH_FPU
        ; Touch the state and check that the kernel updated CR0 for us.
        movdqa  xmm0, xmm0
        mov     %2, cr0
        test    %2, X86_CR0_TS | X86_CR0_EM
        jz      %%cr0_changed
 %endif

        ; Save CR0 and clear the flags ourselves.
        mov     [%3 + CPUMCPU.Host.cr0Fpu], %2
        and     %2, ~(X86_CR0_TS | X86_CR0_EM)
        mov     cr0, %2

%%cr0_changed:
        mov     %1, VINF_CPUM_HOST_CR0_MODIFIED
%%no_cr0_change:
%endmacro


;;
; Restore CR0 if CR0.TS or CR0.EM were non-zero in the original state.
;
; @param    %1      The original state to restore (or zero).
;
%macro CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET 1
        test    %1, X86_CR0_TS | X86_CR0_EM
        jz      %%skip_cr0_restore
        mov     cr0, %1
%%skip_cr0_restore:
%endmacro
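
%if 0 ; Usage sketch, not part of the original file: the two macros above
      ; bracket FPU access in ring-0; register choices here are hypothetical.
        CPUMRZ_TOUCH_FPU_CLEAR_CR0_FPU_TRAPS_SET_RC eax, rcx, rdi ; rdi = CPUMCPU
        ; ... safely access the FPU/SSE state here ...
        mov     rcx, [rdi + CPUMCPU.Host.cr0Fpu]    ; original CR0 value, or zero
        CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET rcx
        ; eax now holds VINF_SUCCESS or VINF_CPUM_HOST_CR0_MODIFIED for the caller.
%endif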


;;
; Saves the host state.
;
; @uses     rax, rdx
; @param    pCpumCpu    Define for the register containing the CPUMCPU pointer.
; @param    pXState     Define for the register containing the extended state pointer.
;
%macro CPUMR0_SAVE_HOST 0
        ;
        ; Load a couple of registers we'll use later in all branches.
        ;
        lea     pXState, [pCpumCpu + CPUMCPU.Host.XState]
        mov     eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]

        ;
        ; XSAVE or FXSAVE?
        ;
        or      eax, eax
        jz      %%host_fxsave

        ; XSAVE
        mov     edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
 %ifdef RT_ARCH_AMD64
        o64 xsave [pXState]
 %else
        xsave   [pXState]
 %endif
        jmp     %%host_done

        ; FXSAVE
%%host_fxsave:
 %ifdef RT_ARCH_AMD64
        o64 fxsave [pXState]            ; Use explicit REX prefix. See @bugref{6398}.
 %else
        fxsave  [pXState]
 %endif

%%host_done:
%endmacro ; CPUMR0_SAVE_HOST
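
%if 0 ; Usage sketch, not part of the original file: pCpumCpu and pXState are
      ; defines the caller points at registers before invoking the macro
      ; (register choices hypothetical):
 %define pCpumCpu   rdi
 %define pXState    rsi
        CPUMR0_SAVE_HOST
 %undef pCpumCpu
 %undef pXState
%endif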


;;
; Loads the host state.
;
; @uses     rax, rdx
; @param    pCpumCpu    Define for the register containing the CPUMCPU pointer.
; @param    pXState     Define for the register containing the extended state pointer.
;
%macro CPUMR0_LOAD_HOST 0
        ;
        ; Load a couple of registers we'll use later in all branches.
        ;
        lea     pXState, [pCpumCpu + CPUMCPU.Host.XState]
        mov     eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]

        ;
        ; XRSTOR or FXRSTOR?
        ;
        or      eax, eax
        jz      %%host_fxrstor

        ; XRSTOR
        mov     edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
 %ifdef RT_ARCH_AMD64
        o64 xrstor [pXState]
 %else
        xrstor  [pXState]
 %endif
        jmp     %%host_done

        ; FXRSTOR
%%host_fxrstor:
 %ifdef RT_ARCH_AMD64
        o64 fxrstor [pXState]           ; Use explicit REX prefix. See @bugref{6398}.
 %else
        fxrstor [pXState]
 %endif

%%host_done:
%endmacro ; CPUMR0_LOAD_HOST



;;
; Macro for XSAVE/FXSAVE for the guest FPU, which tries to figure out whether
; to save the 32-bit or the 64-bit FPU state.
;
; @param    %1      Pointer to CPUMCPU.
; @param    %2      Pointer to XState.
; @param    %3      Force AMD64.
; @param    %4      The instruction to use (xsave or fxsave).
; @uses     xAX, xDX, EFLAGS, 20h of stack.
;
%macro SAVE_32_OR_64_FPU 4
%if CPUM_IS_AMD64 || %3
        ; Save the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
        jnz     short %%save_long_mode_guest
%endif
        %4      [pXState]
%if CPUM_IS_AMD64 || %3
        jmp     %%save_done_32bit_cs_ds

%%save_long_mode_guest:
        o64 %4  [pXState]

        xor     edx, edx
        cmp     dword [pXState + X86FXSTATE.FPUCS], 0
        jne     short %%save_done

        sub     rsp, 20h                ; Only need 1ch bytes but keep the stack aligned, otherwise we #GP(0).
        fnstenv [rsp]
        movzx   eax, word [rsp + 10h]
        mov     [pXState + X86FXSTATE.FPUCS], eax
        movzx   eax, word [rsp + 18h]
        add     rsp, 20h
        mov     [pXState + X86FXSTATE.FPUDS], eax
%endif
%%save_done_32bit_cs_ds:
        mov     edx, X86_FXSTATE_RSVD_32BIT_MAGIC
%%save_done:
        mov     dword [pXState + X86_OFF_FXSTATE_RSVD], edx
%endmacro ; SAVE_32_OR_64_FPU
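
%if 0 ; Illustration, not part of the original file: the magic dword written
      ; above marks a 32-bit save so that RESTORE_32_OR_64_FPU below knows
      ; which variant to undo; forcing the AMD64 path looks like this:
        SAVE_32_OR_64_FPU pCpumCpu, pXState, 1, fxsave
%endif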


;;
; Saves the guest state.
;
; @uses     rax, rdx
; @param    pCpumCpu    Define for the register containing the CPUMCPU pointer.
; @param    pXState     Define for the register containing the extended state pointer.
;
%macro CPUMR0_SAVE_GUEST 0
        ;
        ; Load a couple of registers we'll use later in all branches.
        ;
 %ifdef IN_RING0
        lea     pXState, [pCpumCpu + CPUMCPU.Guest.XState]
 %else
  %error "Unsupported context!"
 %endif
        mov     eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]

        ;
        ; XSAVE or FXSAVE?
        ;
        or      eax, eax
        jz      %%guest_fxsave

        ; XSAVE
        mov     edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
 %ifdef VBOX_WITH_KERNEL_USING_XMM
        and     eax, ~CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS ; Already saved in HMR0A.asm.
 %endif
        SAVE_32_OR_64_FPU pCpumCpu, pXState, 0, xsave
        jmp     %%guest_done

        ; FXSAVE
%%guest_fxsave:
        SAVE_32_OR_64_FPU pCpumCpu, pXState, 0, fxsave

%%guest_done:
%endmacro ; CPUMR0_SAVE_GUEST


;;
; Wrapper for selecting 32-bit or 64-bit XRSTOR/FXRSTOR according to what SAVE_32_OR_64_FPU did.
;
; @param    %1      Pointer to CPUMCPU.
; @param    %2      Pointer to XState.
; @param    %3      Force AMD64.
; @param    %4      The instruction to use (xrstor or fxrstor).
; @uses     xAX, xDX, EFLAGS
;
%macro RESTORE_32_OR_64_FPU 4
%if CPUM_IS_AMD64 || %3
        ; Restore the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
        jz      %%restore_32bit_fpu
        cmp     dword [pXState + X86_OFF_FXSTATE_RSVD], X86_FXSTATE_RSVD_32BIT_MAGIC
        jne     short %%restore_64bit_fpu
%%restore_32bit_fpu:
%endif
        %4      [pXState]
%if CPUM_IS_AMD64 || %3
        ; TODO: Restore XMM8-XMM15!
        jmp     short %%restore_fpu_done
%%restore_64bit_fpu:
        o64 %4  [pXState]
%%restore_fpu_done:
%endif
%endmacro ; RESTORE_32_OR_64_FPU


;;
; Loads the guest state.
;
; @uses     rax, rdx
; @param    pCpumCpu    Define for the register containing the CPUMCPU pointer.
; @param    pXState     Define for the register containing the extended state pointer.
;
%macro CPUMR0_LOAD_GUEST 0
        ;
        ; Load a couple of registers we'll use later in all branches.
        ;
        lea     pXState, [pCpumCpu + CPUMCPU.Guest.XState]
        mov     eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]

        ;
        ; XRSTOR or FXRSTOR?
        ;
        or      eax, eax
        jz      %%guest_fxrstor

        ; XRSTOR
        mov     edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
 %ifdef VBOX_WITH_KERNEL_USING_XMM
        and     eax, ~CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS ; Will be loaded by HMR0A.asm.
 %endif
        RESTORE_32_OR_64_FPU pCpumCpu, pXState, 0, xrstor
        jmp     %%guest_done

        ; FXRSTOR
%%guest_fxrstor:
        RESTORE_32_OR_64_FPU pCpumCpu, pXState, 0, fxrstor

%%guest_done:
%endmacro ; CPUMR0_LOAD_GUEST
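
%if 0 ; Usage sketch, not part of the original file: a ring-0 FPU context
      ; switch combines the macros above roughly like this, assuming the
      ; pCpumCpu/pXState defines are set up and CR0 FPU traps are cleared:
        CPUMR0_SAVE_HOST                ; stash the host FPU/SSE/AVX state
        CPUMR0_LOAD_GUEST               ; bring in the guest state
        ; ... run guest code ...
        CPUMR0_SAVE_GUEST               ; swap back on the way out
        CPUMR0_LOAD_HOST
%endif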