VirtualBox

source: vbox/trunk/src/VBox/VMM/include/CPUMInternal.mac@ 61290

Last change on this file since 61290 was 61162, checked in by vboxsync, 9 years ago

CPUMInternal.mac: Apply FPUCS/DS hack to xsave just like we already do for fxsave. Kudos to Quentin Buathier for this fix.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 27.6 KB
Line 
1; $Id: CPUMInternal.mac 61162 2016-05-24 12:49:54Z vboxsync $
2;; @file
3; CPUM - Internal header file (asm).
4;
5
6;
7; Copyright (C) 2006-2015 Oracle Corporation
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17
%include "VBox/asmdefs.mac"
%include "VBox/vmm/cpum.mac"

;; Check sanity.
; VBOX_WITH_KERNEL_USING_XMM is a ring-0-only concept; other contexts must not see it.
%ifdef VBOX_WITH_KERNEL_USING_XMM
 %ifndef IN_RING0
  %error "What? We've got code assuming VBOX_WITH_KERNEL_USING_XMM is only defined in ring-0!"
 %endif
%endif

;; For numeric expressions
; Defined to 1 or 0 (instead of defined/undefined) so it can be used inside
; %if arithmetic such as "%if CPUM_IS_AMD64 || %3" below.
%ifdef RT_ARCH_AMD64
 %define CPUM_IS_AMD64 1
%else
 %define CPUM_IS_AMD64 0
%endif

;; @def CPUM_CAN_USE_FPU_IN_R0
; Indicates that we can use the FPU directly in ring-0.
; Only defined in ring-0.
%ifdef VBOX_WITH_KERNEL_USING_XMM
 ; Systems using XMM registers as part of their kernel calling convention must
 ; support saving and restoring the state while in ring-0. 64-bit Windows will
 ; always switch the FPU state when context switching.
 %define CPUM_CAN_USE_FPU_IN_R0 1
%endif
%ifdef RT_OS_WINDOWS
 ; 32-bit Windows will load the FPU context of the current thread (user land).
 %define CPUM_CAN_USE_FPU_IN_R0 1
%endif
%ifdef RT_OS_DARWIN
 ; Intel Darwin kernels will load the FPU context of the current thread (user land).
 %define CPUM_CAN_USE_FPU_IN_R0 1
%endif
%ifdef RT_OS_LINUX
 ; Intel Linux kernels will load the FPU context of the current thread (user land),
 ; at least that's what my LXR research on 2.6.18+ indicates. It's possible this was
 ; done differently at some point; I seem to recall issues with it ages and ages ago.
; %define CPUM_CAN_USE_FPU_IN_R0 1 - test me first
%endif
%ifndef IN_RING0
 ; Outside ring-0 the define must never be visible, whatever the OS checks said.
 %undef CPUM_CAN_USE_FPU_IN_R0
%endif
61
62
63
;;
; CPU info
; NOTE(review): this assembly view must mirror the C CPUMINFO structure
; field-for-field (same order, sizes and padding) -- verify against the C
; header whenever either side changes.
struc CPUMINFO
    .cMsrRanges             resd 1                  ; uint32_t - number of entries in the MSR range table.
    .fMsrMask               resd 1                  ; uint32_t - mask applied to MSR indexes.
    .cCpuIdLeaves           resd 1                  ; uint32_t - number of entries in the CPUID leaf table.
    .iFirstExtCpuIdLeaf     resd 1                  ; uint32_t - index of the first extended CPUID leaf.
    .uPadding               resd 1                  ; uint32_t - explicit alignment padding.
    .enmUnknownCpuIdMethod  resd 1                  ; CPUMUNKNOWNCPUID
    .DefCpuId               resb CPUMCPUID_size     ; CPUMCPUID - default CPUID response.
    .uScalableBusFreq       resq 1                  ; uint64_t
    .paMsrRangesR0          RTR0PTR_RES 1           ; R0PTRTYPE(PCPUMMSRRANGE)
    .paCpuIdLeavesR0        RTR0PTR_RES 1           ; R0PTRTYPE(PCPUMCPUIDLEAF)
    .paMsrRangesR3          RTR3PTR_RES 1           ; R3PTRTYPE(PCPUMMSRRANGE)
    .paCpuIdLeavesR3        RTR3PTR_RES 1           ; R3PTRTYPE(PCPUMCPUIDLEAF)
    .paMsrRangesRC          RTRCPTR_RES 1           ; RCPTRTYPE(PCPUMMSRRANGE)
    .paCpuIdLeavesRC        RTRCPTR_RES 1           ; RCPTRTYPE(PCPUMCPUIDLEAF)
endstruc
82
83
;; CPUM use/used flag bits (CPUMCPU.fUseFlags / CPUM.fHostUseFlags).
; NOTE(review): these values are shared with the C side - keep them in sync
; with the corresponding C defines; only the bit layout is visible here.
%define CPUM_USED_FPU_HOST              RT_BIT(0)
%define CPUM_USED_FPU_GUEST             RT_BIT(10)  ; Deliberately out of numeric sequence - do not "fix".
%define CPUM_USED_FPU_SINCE_REM         RT_BIT(1)
%define CPUM_USED_MANUAL_XMM_RESTORE    RT_BIT(2)
%define CPUM_USE_SYSENTER               RT_BIT(3)
%define CPUM_USE_SYSCALL                RT_BIT(4)
%define CPUM_USE_DEBUG_REGS_HOST        RT_BIT(5)
%define CPUM_USED_DEBUG_REGS_HOST       RT_BIT(6)
%define CPUM_USE_DEBUG_REGS_HYPER       RT_BIT(7)
%define CPUM_USED_DEBUG_REGS_HYPER      RT_BIT(8)
%define CPUM_USED_DEBUG_REGS_GUEST      RT_BIT(9)
%define CPUM_SYNC_FPU_STATE             RT_BIT(16)
%define CPUM_SYNC_DEBUG_REGS_GUEST      RT_BIT(17)
%define CPUM_SYNC_DEBUG_REGS_HYPER      RT_BIT(18)
%define CPUM_USE_FFXSR_LEAKY            RT_BIT(19)  ; AMD FXSAVE/FXRSTOR leak; tested by CLEANFPU below.
%define CPUM_USE_SUPPORTS_LONGMODE      RT_BIT(20)  ; Tested by SAVE_32_OR_64_FPU / RESTORE_32_OR_64_FPU below.

;; CPUM handler values - low byte is the type, high bits are flags.
; NOTE(review): exact consumer semantics are defined on the C/raw-mode side;
; only the encoding (type in bits 0-7, flag in bit 31) is evident here.
%define CPUM_HANDLER_DS                 1
%define CPUM_HANDLER_ES                 2
%define CPUM_HANDLER_FS                 3
%define CPUM_HANDLER_GS                 4
%define CPUM_HANDLER_IRET               5
%define CPUM_HANDLER_TYPEMASK           0ffh        ; Isolates the handler type from the flag bits.
%define CPUM_HANDLER_CTXCORE_IN_EBP     RT_BIT(31)
108
109
;; The shared (all-VCPU) CPUM instance data.
; NOTE(review): the leading ";..." marks that the C structure has additional
; members before these; only the fields the assembly code needs are declared,
; so offsets here are relative to where the C side says this view begins.
struc CPUM
    ;...
    .offCPUMCPU0            resd 1      ; Byte offset to VCPU 0's CPUMCPU; consumed by CPUMCPU_FROM_CPUM.
    .fHostUseFlags          resd 1      ; CPUM_USE_*/CPUM_USED_* bits for the host.

    ; CR4 masks
    .CR4.AndMask            resd 1
    .CR4.OrMask             resd 1
    ; entered rawmode?
    .u8PortableCpuIdLevel   resb 1      ; uint8_t
    .fPendingRestore        resb 1      ; bool

    alignb 8
    .fXStateGuestMask       resq 1      ; XSAVE component mask for the guest state.
    .fXStateHostMask        resq 1      ; XSAVE component mask for the host state.

    alignb 64
    .HostFeatures           resb 32     ; 32 bytes reserved - presumably a CPUMFEATURES blob; confirm on the C side.
    .GuestFeatures          resb 32     ; Ditto for the guest.
    .GuestInfo              resb RTHCPTR_CB*4 + RTRCPTR_CB*2 + 4*12 ; Sized to match CPUMINFO above.

    ; Patch manager saved state compatibility CPUID leaf arrays
    .aGuestCpuIdPatmStd     resb 16*6
    .aGuestCpuIdPatmExt     resb 16*10
    .aGuestCpuIdPatmCentaur resb 16*4

    alignb 8
    ; MSR access statistics counters (uint64_t each).
    .cMsrWrites             resq 1
    .cMsrWritesToIgnoredBits resq 1
    .cMsrWritesRaiseGp      resq 1
    .cMsrWritesUnknown      resq 1
    .cMsrReads              resq 1
    .cMsrReadsRaiseGp       resq 1
    .cMsrReadsUnknown       resq 1
endstruc
145
;; Per-VCPU CPUM data.
; NOTE(review): this is a binary-layout mirror of the C CPUMCPU structure;
; field order, reservation sizes and the alignb directives are load-bearing.
struc CPUMCPU
    ;
    ; Guest context state
    ; (Identical to the .Hyper chunk below - keep the two in sync.)
    ;
    .Guest                  resq 0      ; Zero-size marker for the start of the guest context.
    ; General purpose registers (stored as 64-bit even for 32-bit guests).
    .Guest.eax              resq 1
    .Guest.ecx              resq 1
    .Guest.edx              resq 1
    .Guest.ebx              resq 1
    .Guest.esp              resq 1
    .Guest.ebp              resq 1
    .Guest.esi              resq 1
    .Guest.edi              resq 1
    .Guest.r8               resq 1
    .Guest.r9               resq 1
    .Guest.r10              resq 1
    .Guest.r11              resq 1
    .Guest.r12              resq 1
    .Guest.r13              resq 1
    .Guest.r14              resq 1
    .Guest.r15              resq 1
    ; Segment registers: selector, padding, validated selector, flags,
    ; base, limit and attributes (hidden register cache).
    .Guest.es.Sel           resw 1
    .Guest.es.PaddingSel    resw 1
    .Guest.es.ValidSel      resw 1
    .Guest.es.fFlags        resw 1
    .Guest.es.u64Base       resq 1
    .Guest.es.u32Limit      resd 1
    .Guest.es.Attr          resd 1
    .Guest.cs.Sel           resw 1
    .Guest.cs.PaddingSel    resw 1
    .Guest.cs.ValidSel      resw 1
    .Guest.cs.fFlags        resw 1
    .Guest.cs.u64Base       resq 1
    .Guest.cs.u32Limit      resd 1
    .Guest.cs.Attr          resd 1
    .Guest.ss.Sel           resw 1
    .Guest.ss.PaddingSel    resw 1
    .Guest.ss.ValidSel      resw 1
    .Guest.ss.fFlags        resw 1
    .Guest.ss.u64Base       resq 1
    .Guest.ss.u32Limit      resd 1
    .Guest.ss.Attr          resd 1
    .Guest.ds.Sel           resw 1
    .Guest.ds.PaddingSel    resw 1
    .Guest.ds.ValidSel      resw 1
    .Guest.ds.fFlags        resw 1
    .Guest.ds.u64Base       resq 1
    .Guest.ds.u32Limit      resd 1
    .Guest.ds.Attr          resd 1
    .Guest.fs.Sel           resw 1
    .Guest.fs.PaddingSel    resw 1
    .Guest.fs.ValidSel      resw 1
    .Guest.fs.fFlags        resw 1
    .Guest.fs.u64Base       resq 1
    .Guest.fs.u32Limit      resd 1
    .Guest.fs.Attr          resd 1
    .Guest.gs.Sel           resw 1
    .Guest.gs.PaddingSel    resw 1
    .Guest.gs.ValidSel      resw 1
    .Guest.gs.fFlags        resw 1
    .Guest.gs.u64Base       resq 1
    .Guest.gs.u32Limit      resd 1
    .Guest.gs.Attr          resd 1
    .Guest.eip              resq 1
    .Guest.eflags           resq 1
    ; Control registers (CR1 doesn't exist; CR8/TPR lives elsewhere).
    .Guest.cr0              resq 1
    .Guest.cr2              resq 1
    .Guest.cr3              resq 1
    .Guest.cr4              resq 1
    .Guest.dr               resq 8      ; DR0-DR7 array.
    ; GDTR/IDTR: padding words first so the 10-byte descriptor image is
    ; misaligned the way the C side expects.
    .Guest.gdtrPadding      resw 3
    .Guest.gdtr             resw 0      ; Zero-size marker; limit+base follow.
    .Guest.gdtr.cbGdt       resw 1
    .Guest.gdtr.pGdt        resq 1
    .Guest.idtrPadding      resw 3
    .Guest.idtr             resw 0
    .Guest.idtr.cbIdt       resw 1
    .Guest.idtr.pIdt        resq 1
    .Guest.ldtr.Sel         resw 1
    .Guest.ldtr.PaddingSel  resw 1
    .Guest.ldtr.ValidSel    resw 1
    .Guest.ldtr.fFlags      resw 1
    .Guest.ldtr.u64Base     resq 1
    .Guest.ldtr.u32Limit    resd 1
    .Guest.ldtr.Attr        resd 1
    .Guest.tr.Sel           resw 1
    .Guest.tr.PaddingSel    resw 1
    .Guest.tr.ValidSel      resw 1
    .Guest.tr.fFlags        resw 1
    .Guest.tr.u64Base       resq 1
    .Guest.tr.u32Limit      resd 1
    .Guest.tr.Attr          resd 1
    ; SYSENTER and assorted MSR values (each an 8-byte blob).
    .Guest.SysEnter.cs      resb 8
    .Guest.SysEnter.eip     resb 8
    .Guest.SysEnter.esp     resb 8
    .Guest.msrEFER          resb 8
    .Guest.msrSTAR          resb 8
    .Guest.msrPAT           resb 8
    .Guest.msrLSTAR         resb 8
    .Guest.msrCSTAR         resb 8
    .Guest.msrSFMASK        resb 8
    .Guest.msrKERNELGSBASE  resb 8
    .Guest.msrApicBase      resb 8
    ; Extended state: XCR0/XCR1 values, active component mask and the
    ; per-context pointers to the XSAVE/FXSAVE area.
    .Guest.aXcr             resq 2
    .Guest.fXStateMask      resq 1
    .Guest.pXStateR0        RTR0PTR_RES 1
    .Guest.pXStateR3        RTR3PTR_RES 1
    .Guest.pXStateRC        RTRCPTR_RES 1
    .Guest.aoffXState       resw 64     ; Offsets of the 64 possible XSAVE components.

    alignb 64
    .GuestMsrs              resq 0      ; Zero-size marker for the MSR array union.
    .GuestMsrs.au64         resq 64

    ;
    ; Other stuff.
    ;
    .fUseFlags              resd 1      ; CPUM_USE_*/CPUM_USED_* bits (see defines above).
    .fChanged               resd 1
    .offCPUM                resd 1      ; Byte offset back to CPUM; consumed by CPUM_FROM_CPUMCPU.
    .u32RetCode             resd 1

%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    .pvApicBase             RTR0PTR_RES 1
    .fApicDisVectors        resd 1
    .fX2Apic                resb 1
%else
    ; Same total size as the three fields above so later offsets don't move.
    .abPadding3             resb (RTR0PTR_CB + 4 + 1)
%endif

    .fRawEntered            resb 1
    .fRemEntered            resb 1

    ; Pad out to the 64-byte boundary preceding the host state.
    .abPadding2             resb (64 - 16 - RTR0PTR_CB - 4 - 1 - 2)

    ;
    ; Host context state
    ;
    alignb 64
    .Host                   resb 0      ; Zero-size marker for the start of the host context.
%if HC_ARCH_BITS == 64
    ; Registers marked "scratch" are intentionally not saved.
    ;.Host.rax              resq 1 - scratch
    .Host.rbx               resq 1
    ;.Host.rcx              resq 1 - scratch
    ;.Host.rdx              resq 1 - scratch
    .Host.rdi               resq 1
    .Host.rsi               resq 1
    .Host.rbp               resq 1
    .Host.rsp               resq 1
    ;.Host.r8               resq 1 - scratch
    ;.Host.r9               resq 1 - scratch
    .Host.r10               resq 1
    .Host.r11               resq 1
    .Host.r12               resq 1
    .Host.r13               resq 1
    .Host.r14               resq 1
    .Host.r15               resq 1
    ;.Host.rip              resd 1 - scratch
    .Host.rflags            resq 1
%endif
%if HC_ARCH_BITS == 32
    ;.Host.eax              resd 1 - scratch
    .Host.ebx               resd 1
    ;.Host.edx              resd 1 - scratch
    ;.Host.ecx              resd 1 - scratch
    .Host.edi               resd 1
    .Host.esi               resd 1
    .Host.ebp               resd 1
    .Host.eflags            resd 1
    ;.Host.eip              resd 1 - scratch
    ; lss pair!  (esp must immediately precede ss below for lss loading.)
    .Host.esp               resd 1
%endif
    .Host.ss                resw 1
    .Host.ssPadding         resw 1
    .Host.gs                resw 1
    .Host.gsPadding         resw 1
    .Host.fs                resw 1
    .Host.fsPadding         resw 1
    .Host.es                resw 1
    .Host.esPadding         resw 1
    .Host.ds                resw 1
    .Host.dsPadding         resw 1
    .Host.cs                resw 1
    .Host.csPadding         resw 1

%if HC_ARCH_BITS == 32
    .Host.cr0               resd 1
    ;.Host.cr2              resd 1 - scratch
    .Host.cr3               resd 1
    .Host.cr4               resd 1
    .Host.cr0Fpu            resd 1      ; Separate field on 32-bit hosts (aliased to cr0 on 64-bit, see below).

    .Host.dr0               resd 1
    .Host.dr1               resd 1
    .Host.dr2               resd 1
    .Host.dr3               resd 1
    .Host.dr6               resd 1
    .Host.dr7               resd 1

    .Host.gdtr              resb 6      ; GDT limit + linear address
    .Host.gdtrPadding       resw 1
    .Host.idtr              resb 6      ; IDT limit + linear address
    .Host.idtrPadding       resw 1
    .Host.ldtr              resw 1
    .Host.ldtrPadding       resw 1
    .Host.tr                resw 1
    .Host.trPadding         resw 1

    alignb 8
    .Host.SysEnter.cs       resq 1
    .Host.SysEnter.eip      resq 1
    .Host.SysEnter.esp      resq 1
    .Host.efer              resq 1
    .Host.auPadding         resb (20)

%else ; 64-bit

    .Host.cr0Fpu:                       ; Label alias: on 64-bit hosts cr0Fpu shares storage with cr0.
    .Host.cr0               resq 1
    ;.Host.cr2              resq 1 - scratch
    .Host.cr3               resq 1
    .Host.cr4               resq 1
    .Host.cr8               resq 1

    .Host.dr0               resq 1
    .Host.dr1               resq 1
    .Host.dr2               resq 1
    .Host.dr3               resq 1
    .Host.dr6               resq 1
    .Host.dr7               resq 1

    .Host.gdtr              resb 10     ; GDT limit + linear address
    .Host.gdtrPadding       resw 1
    .Host.idtr              resb 10     ; IDT limit + linear address
    .Host.idtrPadding       resw 1
    .Host.ldtr              resw 1
    .Host.ldtrPadding       resw 1
    .Host.tr                resw 1
    .Host.trPadding         resw 1

    .Host.SysEnter.cs       resq 1
    .Host.SysEnter.eip      resq 1
    .Host.SysEnter.esp      resq 1
    .Host.FSbase            resq 1
    .Host.GSbase            resq 1
    .Host.efer              resq 1
    .Host.auPadding         resb 4
%endif ; 64-bit
    .Host.pXStateRC         RTRCPTR_RES 1
    alignb RTR0PTR_CB
    .Host.pXStateR0         RTR0PTR_RES 1
    .Host.pXStateR3         RTR3PTR_RES 1
    alignb 8
    .Host.xcr0              resq 1
    .Host.fXStateMask       resq 1

    ;
    ; Hypervisor Context (same as .Guest above).
    ;
    alignb 64
    .Hyper                  resq 0
    .Hyper.eax              resq 1
    .Hyper.ecx              resq 1
    .Hyper.edx              resq 1
    .Hyper.ebx              resq 1
    .Hyper.esp              resq 1
    .Hyper.ebp              resq 1
    .Hyper.esi              resq 1
    .Hyper.edi              resq 1
    .Hyper.r8               resq 1
    .Hyper.r9               resq 1
    .Hyper.r10              resq 1
    .Hyper.r11              resq 1
    .Hyper.r12              resq 1
    .Hyper.r13              resq 1
    .Hyper.r14              resq 1
    .Hyper.r15              resq 1
    .Hyper.es.Sel           resw 1
    .Hyper.es.PaddingSel    resw 1
    .Hyper.es.ValidSel      resw 1
    .Hyper.es.fFlags        resw 1
    .Hyper.es.u64Base       resq 1
    .Hyper.es.u32Limit      resd 1
    .Hyper.es.Attr          resd 1
    .Hyper.cs.Sel           resw 1
    .Hyper.cs.PaddingSel    resw 1
    .Hyper.cs.ValidSel      resw 1
    .Hyper.cs.fFlags        resw 1
    .Hyper.cs.u64Base       resq 1
    .Hyper.cs.u32Limit      resd 1
    .Hyper.cs.Attr          resd 1
    .Hyper.ss.Sel           resw 1
    .Hyper.ss.PaddingSel    resw 1
    .Hyper.ss.ValidSel      resw 1
    .Hyper.ss.fFlags        resw 1
    .Hyper.ss.u64Base       resq 1
    .Hyper.ss.u32Limit      resd 1
    .Hyper.ss.Attr          resd 1
    .Hyper.ds.Sel           resw 1
    .Hyper.ds.PaddingSel    resw 1
    .Hyper.ds.ValidSel      resw 1
    .Hyper.ds.fFlags        resw 1
    .Hyper.ds.u64Base       resq 1
    .Hyper.ds.u32Limit      resd 1
    .Hyper.ds.Attr          resd 1
    .Hyper.fs.Sel           resw 1
    .Hyper.fs.PaddingSel    resw 1
    .Hyper.fs.ValidSel      resw 1
    .Hyper.fs.fFlags        resw 1
    .Hyper.fs.u64Base       resq 1
    .Hyper.fs.u32Limit      resd 1
    .Hyper.fs.Attr          resd 1
    .Hyper.gs.Sel           resw 1
    .Hyper.gs.PaddingSel    resw 1
    .Hyper.gs.ValidSel      resw 1
    .Hyper.gs.fFlags        resw 1
    .Hyper.gs.u64Base       resq 1
    .Hyper.gs.u32Limit      resd 1
    .Hyper.gs.Attr          resd 1
    .Hyper.eip              resq 1
    .Hyper.eflags           resq 1
    .Hyper.cr0              resq 1
    .Hyper.cr2              resq 1
    .Hyper.cr3              resq 1
    .Hyper.cr4              resq 1
    .Hyper.dr               resq 8
    .Hyper.gdtrPadding      resw 3
    .Hyper.gdtr             resw 0
    .Hyper.gdtr.cbGdt       resw 1
    .Hyper.gdtr.pGdt        resq 1
    .Hyper.idtrPadding      resw 3
    .Hyper.idtr             resw 0
    .Hyper.idtr.cbIdt       resw 1
    .Hyper.idtr.pIdt        resq 1
    .Hyper.ldtr.Sel         resw 1
    .Hyper.ldtr.PaddingSel  resw 1
    .Hyper.ldtr.ValidSel    resw 1
    .Hyper.ldtr.fFlags      resw 1
    .Hyper.ldtr.u64Base     resq 1
    .Hyper.ldtr.u32Limit    resd 1
    .Hyper.ldtr.Attr        resd 1
    .Hyper.tr.Sel           resw 1
    .Hyper.tr.PaddingSel    resw 1
    .Hyper.tr.ValidSel      resw 1
    .Hyper.tr.fFlags        resw 1
    .Hyper.tr.u64Base       resq 1
    .Hyper.tr.u32Limit      resd 1
    .Hyper.tr.Attr          resd 1
    .Hyper.SysEnter.cs      resb 8
    .Hyper.SysEnter.eip     resb 8
    .Hyper.SysEnter.esp     resb 8
    .Hyper.msrEFER          resb 8
    .Hyper.msrSTAR          resb 8
    .Hyper.msrPAT           resb 8
    .Hyper.msrLSTAR         resb 8
    .Hyper.msrCSTAR         resb 8
    .Hyper.msrSFMASK        resb 8
    .Hyper.msrKERNELGSBASE  resb 8
    .Hyper.msrApicBase      resb 8
    .Hyper.aXcr             resq 2
    .Hyper.fXStateMask      resq 1
    .Hyper.pXStateR0        RTR0PTR_RES 1
    .Hyper.pXStateR3        RTR3PTR_RES 1
    .Hyper.pXStateRC        RTRCPTR_RES 1
    .Hyper.aoffXState       resw 64
    alignb 64

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    .aMagic                 resb 56
    .uMagic                 resq 1
%endif
endstruc
520
521
;;
; Converts the CPUM pointer to CPUMCPU (for VCPU 0), using the byte offset
; cached in CPUM.offCPUMCPU0.
; @param %1 register name holding the CPUM pointer on entry, the CPUMCPU
;           pointer on exit.
; @uses EFLAGS (add)
%macro CPUMCPU_FROM_CPUM 1
    add     %1, dword [%1 + CPUM.offCPUMCPU0]
%endmacro
528
;;
; Converts the CPUM pointer to CPUMCPU when the caller already has the offset
; in a register (avoids the memory access of CPUMCPU_FROM_CPUM).
; @param %1 register name (CPUM pointer, becomes the CPUMCPU pointer)
; @param %2 register name (CPUMCPU offset)
; @uses EFLAGS (add)
%macro CPUMCPU_FROM_CPUM_WITH_OFFSET 2
    add     %1, %2
%endmacro
536
;;
; Converts the CPUMCPU pointer back to CPUM, using the byte offset cached in
; CPUMCPU.offCPUM (the reverse of CPUMCPU_FROM_CPUM).
; @param %1 register name holding the CPUMCPU pointer on entry, the CPUM
;           pointer on exit.
; @uses EFLAGS (sub)
%macro CPUM_FROM_CPUMCPU 1
    sub     %1, dword [%1 + CPUMCPU.offCPUM]
%endmacro
543
;;
; Converts the CPUMCPU pointer back to CPUM when the caller already has the
; offset in a register.
; @param %1 register name (CPUMCPU pointer, becomes the CPUM pointer)
; @param %2 register name (CPUMCPU offset)
; @uses EFLAGS (sub)
%macro CPUM_FROM_CPUMCPU_WITH_OFFSET 2
    sub     %1, %2
%endmacro
551
552
553
%if 0 ; Currently not used anywhere.
;;
; Macro for FXSAVE/FXRSTOR leaky behaviour on AMD CPUs, see cpumR3CheckLeakyFpu().
;
; Cleans the FPU state, if necessary, before restoring the FPU.
;
; This macro ASSUMES CR0.TS is not set!
;
; @param xDX Pointer to CPUMCPU.
; @uses xAX, EFLAGS
;
; Changes here should also be reflected in CPUMRCA.asm's copy!
;
; NOTE(review): uses plain local labels (.clean_fpu etc.) rather than
; macro-local %%-labels - expanding this macro twice under one global label
; would collide.  Harmless while the whole thing is %if 0'd out.
;
%macro CLEANFPU 0
    test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USE_FFXSR_LEAKY
    jz      .nothing_to_clean           ; Not a leaky-FXSR CPU - nothing to do.

    xor     eax, eax
    fnstsw  ax                          ; FSW -> AX.
    test    eax, RT_BIT(7)              ; If FSW.ES (bit 7) is set, clear it to not cause FPU exceptions
                                        ; while clearing & loading the FPU bits in 'clean_fpu' below.
    jz      .clean_fpu
    fnclex

.clean_fpu:
    ffree   st7                         ; Clear FPU stack register(7)'s tag entry to prevent overflow if a wraparound occurs.
                                        ; for the upcoming push (load)
    fild    dword [g_r32_Zero xWrtRIP]  ; Explicit FPU load to overwrite FIP, FOP, FDP registers in the FPU.
.nothing_to_clean:
%endmacro
%endif ; Unused.
585
586
;;
; Clears CR0.TS and CR0.EM if necessary, saving the previous result.
;
; This is used to avoid FPU exceptions (a set TS or EM would trap the
; FPU/SSE instructions used below) when touching the FPU state.
;
; @param %1 Register to save the old CR0 in (pass to RESTORE_CR0).
;           Left as zero when CR0 needed no change, which tells RESTORE_CR0
;           to do nothing.
; @param %2 Temporary scratch register.
; @uses EFLAGS, CR0
;
; NOTE(review): the xor must stay before the CR0 read - %1 doubles as the
; "unchanged" indicator and must be zeroed regardless of the branch taken.
;
%macro SAVE_CR0_CLEAR_FPU_TRAPS 2
    xor     %1, %1
    mov     %2, cr0
    test    %2, X86_CR0_TS | X86_CR0_EM ; Make sure it's safe to access the FPU state.
    jz      %%skip_cr0_write            ; Neither bit set - nothing to clear.
    mov     %1, %2                      ; Save old CR0
    and     %2, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, %2
%%skip_cr0_write:
%endmacro
606
;;
; Restores CR0 if SAVE_CR0_CLEAR_FPU_TRAPS had to modify it.
;
; A zero saved value is the sentinel meaning CR0.TS and CR0.EM were already
; clear and nothing was written, so nothing is undone here either.
;
; @param %1 The register that SAVE_CR0_CLEAR_FPU_TRAPS saved the old CR0 in.
; @uses EFLAGS
;
%macro RESTORE_CR0 1
    test    %1, %1                      ; Zero = CR0 was left untouched.
    jz      %%cr0_unchanged
    mov     cr0, %1                     ; Put the original TS/EM bits back.
%%cr0_unchanged:
%endmacro
618
619
;;
; Saves the host FPU/extended state.
;
; Uses XSAVE when the host extended-state component mask is non-zero and
; falls back to the legacy FXSAVE image otherwise.
;
; @uses rax, rdx
; @param pCpumCpu Define for the register containing the CPUMCPU pointer.
; @param pXState  Define for the register containing the extended state pointer.
;
%macro CPUMR0_SAVE_HOST 0
    ; Pick up the state area pointer for this context and the low dword of
    ; the host component mask.
 %ifdef IN_RING0
    mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
 %elifdef IN_RC
    mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateRC]
 %else
  %error "Unsupported context!"
 %endif
    mov     eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]

    ; Zero mask => XSAVE not in use for the host; take the FXSAVE path.
    test    eax, eax
    jz      %%via_fxsave

    ; XSAVE with the full mask in edx:eax.
    mov     edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
 %ifdef RT_ARCH_AMD64
    o64 xsave [pXState]
 %else
    xsave   [pXState]
 %endif
    jmp     %%done

%%via_fxsave:
 %ifdef RT_ARCH_AMD64
    o64 fxsave [pXState]                ; Use explicit REX prefix. See @bugref{6398}.
 %else
    fxsave  [pXState]
 %endif

%%done:
%endmacro ; CPUMR0_SAVE_HOST
665
666
;;
; Loads (restores) the host FPU/extended state.
;
; Mirror of CPUMR0_SAVE_HOST: XRSTOR when the host extended-state component
; mask is non-zero, legacy FXRSTOR otherwise.
;
; @uses rax, rdx
; @param pCpumCpu Define for the register containing the CPUMCPU pointer.
; @param pXState  Define for the register containing the extended state pointer.
;
%macro CPUMR0_LOAD_HOST 0
    ; Pick up the state area pointer for this context and the low dword of
    ; the host component mask.
 %ifdef IN_RING0
    mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
 %elifdef IN_RC
    mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateRC]
 %else
  %error "Unsupported context!"
 %endif
    mov     eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]

    ; Zero mask => XSAVE not in use for the host; take the FXRSTOR path.
    test    eax, eax
    jz      %%via_fxrstor

    ; XRSTOR with the full mask in edx:eax.
    mov     edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
 %ifdef RT_ARCH_AMD64
    o64 xrstor [pXState]
 %else
    xrstor  [pXState]
 %endif
    jmp     %%done

%%via_fxrstor:
 %ifdef RT_ARCH_AMD64
    o64 fxrstor [pXState]               ; Use explicit REX prefix. See @bugref{6398}.
 %else
    fxrstor [pXState]
 %endif

%%done:
%endmacro ; CPUMR0_LOAD_HOST
712
713
714
;; Macro for XSAVE/FXSAVE for the guest FPU but tries to figure out whether to
; save the 32-bit FPU state or 64-bit FPU state.
;
; The 64-bit (o64/REX.W) image stores the full 64-bit FIP/FDP but no
; FPUCS/FPUDS selectors.  When the saved FPUCS turns out to be zero, the
; selectors are recovered via FNSTENV and patched into the image, and the
; reserved dword is stamped with X86_FXSTATE_RSVD_32BIT_MAGIC to mark the
; CS/DS fields as valid; a zero reserved dword marks a plain 64-bit image.
; RESTORE_32_OR_64_FPU uses that marker to pick the restore form.
;
; @param %1 Pointer to CPUMCPU.
; @param %2 Pointer to XState.
; @param %3 Force AMD64
; @param %4 The instruction to use (xsave or fxsave)
; @uses xAX, xDX, EFLAGS, 20h of stack.
;
; NOTE(review): the body references the pCpumCpu/pXState defines directly;
; %1 and %2 merely document what the caller passes in them.
;
%macro SAVE_32_OR_64_FPU 4
%if CPUM_IS_AMD64 || %3
    ; Save the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
    test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
    jnz     short %%save_long_mode_guest
%endif
    %4      [pXState]                   ; 32-bit format save.
%if CPUM_IS_AMD64 || %3
    jmp     %%save_done_32bit_cs_ds

%%save_long_mode_guest:
    o64 %4  [pXState]                   ; 64-bit format save (REX.W).

    xor     edx, edx                    ; RSVD = 0 marks a valid 64-bit image.
    cmp     dword [pXState + X86FXSTATE.FPUCS], 0
    jne     short %%save_done           ; Non-zero FPUCS - image usable as-is.

    ; FPUCS came out zero: fetch CS/DS via FNSTENV and patch the image.
    sub     rsp, 20h                    ; Only need 1ch bytes but keep stack aligned otherwise we #GP(0).
    fnstenv [rsp]                       ; Stores the 32-bit protected-mode environment image.
    movzx   eax, word [rsp + 10h]       ; FPUCS is at +10h in that image.
    mov     [pXState + X86FXSTATE.FPUCS], eax
    movzx   eax, word [rsp + 18h]       ; FPUDS is at +18h.
    add     rsp, 20h
    mov     [pXState + X86FXSTATE.FPUDS], eax
%endif
%%save_done_32bit_cs_ds:
    mov     edx, X86_FXSTATE_RSVD_32BIT_MAGIC ; Mark the image as carrying valid CS/DS fields.
%%save_done:
    mov     dword [pXState + X86_OFF_FXSTATE_RSVD], edx
%endmacro ; SAVE_32_OR_64_FPU
754
755
;;
; Save the guest state.
;
; Selects XSAVE vs FXSAVE by the guest's extended-state component mask and
; delegates the 32-/64-bit image format decision to SAVE_32_OR_64_FPU.
;
; @uses rax, rdx
; @param pCpumCpu Define for the register containing the CPUMCPU pointer.
; @param pXState  Define for the register containing the extended state pointer.
;
%macro CPUMR0_SAVE_GUEST 0
    ;
    ; Load a couple of registers we'll use later in all branches.
    ;
 %ifdef IN_RING0
    mov     pXState, [pCpumCpu + CPUMCPU.Guest.pXStateR0]
 %elifdef IN_RC
    mov     pXState, [pCpumCpu + CPUMCPU.Guest.pXStateRC]
 %else
  %error "Unsupported context!"
 %endif
    mov     eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]

    ;
    ; XSAVE or FXSAVE?  (A zero mask means no XSAVE for this guest.)
    ;
    or      eax, eax
    jz      %%guest_fxsave

    ; XSAVE - component mask goes in edx:eax.
    mov     edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
 %ifdef VBOX_WITH_KERNEL_USING_XMM
    and     eax, ~CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS ; Already saved in HMR0A.asm.
 %endif
    SAVE_32_OR_64_FPU pCpumCpu, pXState, 0, xsave
    jmp     %%guest_done

    ; FXSAVE
%%guest_fxsave:
    SAVE_32_OR_64_FPU pCpumCpu, pXState, 0, fxsave

%%guest_done:
%endmacro ; CPUMR0_SAVE_GUEST
796
797
;;
; Wrapper for selecting 32-bit or 64-bit XRSTOR/FXRSTOR according to what SAVE_32_OR_64_FPU did.
;
; If the reserved dword holds X86_FXSTATE_RSVD_32BIT_MAGIC the image carries
; 32-bit style FPUCS/FPUDS fields and is restored without REX; otherwise a
; long-mode-capable guest gets the 64-bit (o64) restore.
;
; @param %1 Pointer to CPUMCPU.
; @param %2 Pointer to XState.
; @param %3 Force AMD64.
; @param %4 The instruction to use (xrstor or fxrstor).
; @uses xAX, xDX, EFLAGS
;
; NOTE(review): like SAVE_32_OR_64_FPU, the body uses the pCpumCpu/pXState
; defines directly; %1 and %2 only document them.
;
%macro RESTORE_32_OR_64_FPU 4
%if CPUM_IS_AMD64 || %3
    ; Restore the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
    test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
    jz      %%restore_32bit_fpu         ; Guest can't do long mode - always 32-bit restore.
    cmp     dword [pXState + X86_OFF_FXSTATE_RSVD], X86_FXSTATE_RSVD_32BIT_MAGIC
    jne     short %%restore_64bit_fpu   ; No magic - the image is in 64-bit format.
%%restore_32bit_fpu:
%endif
    %4      [pXState]
%if CPUM_IS_AMD64 || %3
    ; TODO: Restore XMM8-XMM15!
    jmp     short %%restore_fpu_done
%%restore_64bit_fpu:
    o64 %4  [pXState]
%%restore_fpu_done:
%endif
%endmacro ; RESTORE_32_OR_64_FPU
825
826
;;
; Loads the guest state.
;
; Selects XRSTOR vs FXRSTOR by the guest's extended-state component mask and
; delegates the 32-/64-bit image format decision to RESTORE_32_OR_64_FPU.
;
; @uses rax, rdx
; @param pCpumCpu Define for the register containing the CPUMCPU pointer.
; @param pXState  Define for the register containing the extended state pointer.
;
%macro CPUMR0_LOAD_GUEST 0
    ;
    ; Load a couple of registers we'll use later in all branches.
    ;
 %ifdef IN_RING0
    mov     pXState, [pCpumCpu + CPUMCPU.Guest.pXStateR0]
 %elifdef IN_RC
    mov     pXState, [pCpumCpu + CPUMCPU.Guest.pXStateRC]
 %else
  %error "Unsupported context!"
 %endif
    mov     eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]

    ;
    ; XRSTOR or FXRSTOR?  (A zero mask means no XSAVE for this guest.)
    ;
    or      eax, eax
    jz      %%guest_fxrstor

    ; XRSTOR - component mask goes in edx:eax.
    mov     edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
 %ifdef VBOX_WITH_KERNEL_USING_XMM
    and     eax, ~CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS ; Will be loaded by HMR0A.asm.
 %endif
    RESTORE_32_OR_64_FPU pCpumCpu, pXState, 0, xrstor
    jmp     %%guest_done

    ; FXRSTOR
%%guest_fxrstor:
    RESTORE_32_OR_64_FPU pCpumCpu, pXState, 0, fxrstor

%%guest_done:
%endmacro ; CPUMR0_LOAD_GUEST
867
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette