source: vbox/trunk/src/VBox/VMM/VMMRC/CPUMRCA.asm@ 54898

Last change on this file since 54898 was 54898, checked in by vboxsync, 10 years ago

CPUMCTX,CPUMHOST: Replaced the fpu (X86FXSAVE) member with an XState (X86XSAVEAREA) member.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 16.1 KB
; $Id: CPUMRCA.asm 54898 2015-03-22 23:47:07Z vboxsync $
;; @file
; CPUM - Raw-mode Context Assembly Routines.
;

;
; Copyright (C) 2006-2015 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

;*******************************************************************************
;*      Header Files                                                           *
;*******************************************************************************
%include "VMMRC.mac"
%include "VBox/vmm/vm.mac"
%include "VBox/err.mac"
%include "VBox/vmm/stam.mac"
%include "CPUMInternal.mac"
%include "iprt/x86.mac"
%include "VBox/vmm/cpum.mac"


;*******************************************************************************
;*      External Symbols                                                       *
;*******************************************************************************
extern IMPNAME(g_CPUM)                  ; VMM GC Builtin import
extern IMPNAME(g_VM)                    ; VMM GC Builtin import
extern NAME(cpumRCHandleNPAndGP)        ; CPUMGC.cpp
extern NAME(CPUMRCAssertPreExecutionSanity)


;
; Enables write protection of Hypervisor memory pages.
; !note! Must be commented out for Trap8 debug handler.
;
%define ENABLE_WRITE_PROTECTION 1

BEGINCODE

;; Macro for FXSAVE/FXRSTOR leaky behaviour on AMD CPUs, see cpumR3CheckLeakyFpu().
; Cleans the FPU state, if necessary, before restoring the FPU.
;
; This macro ASSUMES CR0.TS is not set!
; @remarks Trashes xAX!!
; Changes here should also be reflected in CPUMR0A.asm's copy!
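;
; Note: on AMD CPUs FXSAVE/FXRSTOR may not save/restore the FPU instruction and data
; pointers (FIP/FDP/FOP) unless an unmasked FPU exception is pending, so stale values
; could leak between contexts; the dummy FILD below overwrites them with harmless values.
; CPUM_USE_FFXSR_LEAKY is presumably set by cpumR3CheckLeakyFpu() on affected CPUs.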
%macro CLEANFPU 0
    test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USE_FFXSR_LEAKY
    jz      .nothing_to_clean

    xor     eax, eax
    fnstsw  ax                          ; Get FSW
    test    eax, RT_BIT(7)              ; If FSW.ES (bit 7) is set, clear it to not cause FPU exceptions
                                        ; while clearing & loading the FPU bits in '.clean_fpu'.
    jz      .clean_fpu
    fnclex

.clean_fpu:
    ffree   st7                         ; Clear FPU stack register(7)'s tag entry to prevent overflow if a wraparound occurs
                                        ; for the upcoming push (load).
    fild    dword [xDX + CPUMCPU.Guest.XState] ; Explicit FPU load to overwrite FIP, FOP, FDP registers in the FPU.

.nothing_to_clean:
%endmacro


;;
; Handles lazy FPU saving and restoring.
;
; This handler implements lazy FPU (SSE/MMX/etc.) saving. Two actions
; may be taken here since the guest OS may be doing lazy FPU switching:
; we either generate the trap which the guest CPU context calls for
; according to its CR0 flags, or, if no trap is due for the guest OS,
; we save the host context and restore the guest context.
;
; @returns 0 if the caller should continue execution.
; @returns VINF_EM_RAW_GUEST_TRAP if a guest trap should be generated.
; @param   pCPUMCPU   x86:[esp+4] gcc:rdi msc:rcx   CPUMCPU pointer.
;
align 16
BEGINPROC cpumHandleLazyFPUAsm
    ;
    ; Figure out what to do.
    ;
    ; There are two basic actions:
    ;   1. Save host fpu and restore guest fpu.
    ;   2. Generate guest trap.
    ;
    ; When entering the hypervisor we'll always enable MP (for proper wait
    ; trapping) and TS (for intercepting all fpu/mmx/sse stuff). The EM flag
    ; is taken from the guest OS in order to get proper SSE handling.
    ;
    ;
    ; Actions taken depending on the guest CR0 flags:
    ;
    ;    3    2    1
    ;   TS | EM | MP | FPUInstr | WAIT :: VMM Action
    ; ------------------------------------------------------------------------
    ;    0 |  0 |  0 | Exec     | Exec :: Clear TS & MP, Save HC, Load GC.
    ;    0 |  0 |  1 | Exec     | Exec :: Clear TS, Save HC, Load GC.
    ;    0 |  1 |  0 | #NM      | Exec :: Clear TS & MP, Save HC, Load GC.
    ;    0 |  1 |  1 | #NM      | Exec :: Clear TS, Save HC, Load GC.
    ;    1 |  0 |  0 | #NM      | Exec :: Clear MP, Save HC, Load GC. (EM is already cleared.)
    ;    1 |  0 |  1 | #NM      | #NM  :: Go to host taking trap there.
    ;    1 |  1 |  0 | #NM      | Exec :: Clear MP, Save HC, Load GC. (EM is already set.)
    ;    1 |  1 |  1 | #NM      | #NM  :: Go to host taking trap there.

    ;
    ; Before taking any of these actions we check whether the GC FPU state has
    ; already been loaded; if it has, this is a trap for the guest - raw ring-3.
    ;
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif
    test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
    jz      hlfpua_not_loaded
    jmp     hlfpua_to_host

    ;
    ; Take action.
    ;
align 16
hlfpua_not_loaded:
    mov     eax, [xDX + CPUMCPU.Guest.cr0]
    and     eax, X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
%ifdef RT_ARCH_AMD64
    lea     r8, [hlfpuajmp1 wrt rip]
    jmp     qword [rax*4 + r8]
%else
    jmp     dword [eax*2 + hlfpuajmp1]
%endif
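    ; Note: X86_CR0_MP, X86_CR0_EM and X86_CR0_TS are CR0 bits 1, 2 and 3, so the masked
    ; value in eax is an even number in the range 0..14, i.e. eight distinct cases. The
    ; scale factors above (*4 with 8-byte entries on AMD64, *2 with 4-byte entries on x86)
    ; turn that value into a byte offset into the two eight-entry tables below.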
align 16
;; jump table using fpu related cr0 flags as index.
hlfpuajmp1:
    RTCCPTR_DEF hlfpua_switch_fpu_ctx
    RTCCPTR_DEF hlfpua_switch_fpu_ctx
    RTCCPTR_DEF hlfpua_switch_fpu_ctx
    RTCCPTR_DEF hlfpua_switch_fpu_ctx
    RTCCPTR_DEF hlfpua_switch_fpu_ctx
    RTCCPTR_DEF hlfpua_to_host
    RTCCPTR_DEF hlfpua_switch_fpu_ctx
    RTCCPTR_DEF hlfpua_to_host
;; and mask for cr0.
hlfpu_afFlags:
    RTCCPTR_DEF ~(X86_CR0_TS | X86_CR0_MP)
    RTCCPTR_DEF ~(X86_CR0_TS)
    RTCCPTR_DEF ~(X86_CR0_TS | X86_CR0_MP)
    RTCCPTR_DEF ~(X86_CR0_TS)
    RTCCPTR_DEF ~(X86_CR0_MP)
    RTCCPTR_DEF 0
    RTCCPTR_DEF ~(X86_CR0_MP)
    RTCCPTR_DEF 0
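;; Note: entry N of hlfpu_afFlags is the CR0 AND-mask for the same MP/EM/TS combination
;  that sends entry N of hlfpuajmp1 to hlfpua_switch_fpu_ctx; it encodes the "Clear TS/MP"
;  part of the action table in the function header.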

    ;
    ; Action - switch FPU context and change cr0 flags.
    ;
align 16
hlfpua_switch_fpu_ctx:
    ; Paranoia: this function was previously also used in ring-0, but not any longer.
%ifdef IN_RING3
 %error "This function is not written for ring-3"
%endif
%ifdef IN_RING0
 %error "This function is not written for ring-0"
%endif

    mov     xCX, cr0
%ifdef RT_ARCH_AMD64
    lea     r8, [hlfpu_afFlags wrt rip]
    and     rcx, [rax*4 + r8]           ; calc the new cr0 flags.
%else
    and     ecx, [eax*2 + hlfpu_afFlags] ; calc the new cr0 flags.
%endif
    mov     xAX, cr0
    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, xAX                    ; clear flags so we don't trap here.
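    ; Note: xCX now holds the CR0 value the guest should run with (per the action table
    ; above); it is loaded into CR0 at hlfpua_finished_switch. TS and EM are cleared in the
    ; live CR0 first so the FXSAVE/FXRSTOR/FNSAVE/FRSTOR below won't raise #NM.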
%ifndef RT_ARCH_AMD64
    mov     eax, edx                    ; Calculate the PCPUM pointer
    sub     eax, [edx + CPUMCPU.offCPUM]
    test    dword [eax + CPUM.CPUFeatures.edx], X86_CPUID_FEATURE_EDX_FXSR
    jz      short hlfpua_no_fxsave
%endif

%ifdef RT_ARCH_AMD64
    ; Use explicit REX prefix. See @bugref{6398}.
    o64 fxsave  [xDX + CPUMCPU.Host.XState]
%else
    fxsave  [xDX + CPUMCPU.Host.XState]
%endif
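    ; Note: the o64 (REX.W) form above is FXSAVE64, which stores the FPU instruction and
    ; data pointers as full 64-bit values; the plain form only stores 32-bit offsets and
    ; selectors.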
    or      dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)
    fxrstor [xDX + CPUMCPU.Guest.XState] ; raw-mode guest is always 32-bit. See @bugref{7138}.

hlfpua_finished_switch:

    ; Load new CR0 value.
    ;; @todo Optimize the many unconditional CR0 writes.
    mov     cr0, xCX                    ; load the new cr0 flags.

    ; Return zero: the caller should continue execution.
    xor     eax, eax
    ret

%ifndef RT_ARCH_AMD64
; legacy support.
hlfpua_no_fxsave:
    fnsave  [xDX + CPUMCPU.Host.XState]
    or      dword [xDX + CPUMCPU.fUseFlags], dword (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM) ; yasm / nasm
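    ; Note: in the legacy FNSAVE/FRSTOR image the control word (FCW) is the first dword
    ; and the status word (FSW) the second. The code below checks whether any FSW exception
    ; flag is unmasked in FCW and, if so, clears the flags in the guest image so no
    ; unmasked exception is pending after FRSTOR.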
    mov     eax, [xDX + CPUMCPU.Guest.XState] ; control word
    not     eax                         ; 1 means exception ignored (6 LS bits)
    and     eax, byte 03Fh              ; 6 LS bits only
    test    eax, [xDX + CPUMCPU.Guest.XState + 4] ; status word
    jz      short hlfpua_no_exceptions_pending
    ; technically incorrect, but we certainly don't want any exceptions now!!
    and     dword [xDX + CPUMCPU.Guest.XState + 4], ~03Fh
hlfpua_no_exceptions_pending:
    frstor  [xDX + CPUMCPU.Guest.XState]
    jmp     near hlfpua_finished_switch
%endif ; !RT_ARCH_AMD64


    ;
    ; Action - Generate Guest trap.
    ;
hlfpua_action_4:
hlfpua_to_host:
    mov     eax, VINF_EM_RAW_GUEST_TRAP
    ret
ENDPROC cpumHandleLazyFPUAsm


;;
; Calls a guest trap/interrupt handler directly.
; Assumes a trap stack frame has already been set up on the guest's stack!
;
; @param pRegFrame   [esp + 4]    Original trap/interrupt context.
; @param selCS       [esp + 8]    Code selector of handler.
; @param pHandler    [esp + 12]   GC virtual address of handler.
; @param eflags      [esp + 16]   Callee's EFLAGS.
; @param selSS       [esp + 20]   Stack selector for handler.
; @param pEsp        [esp + 24]   Stack address for handler.
;
; @remark This call never returns!
;
; VMMRCDECL(void) CPUMGCCallGuestTrapHandler(PCPUMCTXCORE pRegFrame, uint32_t selCS, RTGCPTR pHandler, uint32_t eflags, uint32_t selSS, RTGCPTR pEsp);
align 16
BEGINPROC_EXPORTED CPUMGCCallGuestTrapHandler
    mov     ebp, esp

    ; construct iret stack frame
    push    dword [ebp + 20]            ; SS
    push    dword [ebp + 24]            ; ESP
    push    dword [ebp + 16]            ; EFLAGS
    push    dword [ebp + 8]             ; CS
    push    dword [ebp + 12]            ; EIP
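    ; Note: the IRET at the end of this function pops EIP, CS, EFLAGS, ESP and SS from the
    ; frame just built, transferring control to the guest handler at selCS:pHandler on the
    ; selSS:pEsp stack.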

    ;
    ; enable WP
    ;
%ifdef ENABLE_WRITE_PROTECTION
    mov     eax, cr0
    or      eax, X86_CR0_WRITE_PROTECT
    mov     cr0, eax
%endif
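    ; (Presumably CR0.WP is set here so that the write-protected hypervisor pages mentioned
    ; at the ENABLE_WRITE_PROTECTION define above also fault on supervisor-level writes
    ; from guest ring-1 code.)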

    ; restore CPU context (all except cs, eip, ss, esp & eflags, which are restored or overwritten by iret)
    mov     ebp, [ebp + 4]              ; pRegFrame
    mov     ebx, [ebp + CPUMCTXCORE.ebx]
    mov     ecx, [ebp + CPUMCTXCORE.ecx]
    mov     edx, [ebp + CPUMCTXCORE.edx]
    mov     esi, [ebp + CPUMCTXCORE.esi]
    mov     edi, [ebp + CPUMCTXCORE.edi]

    ;; @todo load segment registers *before* enabling WP.
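    ; (The TRPM_NP_GP_HANDLER macro appears to register NAME(cpumRCHandleNPAndGP) as the
    ; handler for a #NP/#GP raised by the instruction that follows it, so a bad guest
    ; selector is handled gracefully instead of faulting the hypervisor.)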
    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_GS | CPUM_HANDLER_CTXCORE_IN_EBP
    mov     gs, [ebp + CPUMCTXCORE.gs.Sel]
    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_FS | CPUM_HANDLER_CTXCORE_IN_EBP
    mov     fs, [ebp + CPUMCTXCORE.fs.Sel]
    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_ES | CPUM_HANDLER_CTXCORE_IN_EBP
    mov     es, [ebp + CPUMCTXCORE.es.Sel]
    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_DS | CPUM_HANDLER_CTXCORE_IN_EBP
    mov     ds, [ebp + CPUMCTXCORE.ds.Sel]

    mov     eax, [ebp + CPUMCTXCORE.eax]
    mov     ebp, [ebp + CPUMCTXCORE.ebp]

    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
    iret
ENDPROC CPUMGCCallGuestTrapHandler


;;
; Performs an iret to V86 code.
; Assumes a trap stack frame has already been set up on the guest's stack!
;
; @param pRegFrame   Original trap/interrupt context.
;
; This function does not return!
;
;VMMRCDECL(void) CPUMGCCallV86Code(PCPUMCTXCORE pRegFrame);
align 16
BEGINPROC CPUMGCCallV86Code
    mov     ebp, [esp + 4]              ; pRegFrame

    ; construct iret stack frame
    push    dword [ebp + CPUMCTXCORE.gs.Sel]
    push    dword [ebp + CPUMCTXCORE.fs.Sel]
    push    dword [ebp + CPUMCTXCORE.ds.Sel]
    push    dword [ebp + CPUMCTXCORE.es.Sel]
    push    dword [ebp + CPUMCTXCORE.ss.Sel]
    push    dword [ebp + CPUMCTXCORE.esp]
    push    dword [ebp + CPUMCTXCORE.eflags]
    push    dword [ebp + CPUMCTXCORE.cs.Sel]
    push    dword [ebp + CPUMCTXCORE.eip]
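    ; Note: the pushed EFLAGS image is expected to have the VM bit set, so the IRET below
    ; returns to virtual-8086 mode and therefore also pops ES, DS, FS and GS - which is
    ; why this frame has nine entries instead of five.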

    ;
    ; enable WP
    ;
%ifdef ENABLE_WRITE_PROTECTION
    mov     eax, cr0
    or      eax, X86_CR0_WRITE_PROTECT
    mov     cr0, eax
%endif

    ; restore CPU context (all except cs, eip, ss, esp, eflags, ds, es, fs & gs, which are restored or overwritten by iret)
    mov     eax, [ebp + CPUMCTXCORE.eax]
    mov     ebx, [ebp + CPUMCTXCORE.ebx]
    mov     ecx, [ebp + CPUMCTXCORE.ecx]
    mov     edx, [ebp + CPUMCTXCORE.edx]
    mov     esi, [ebp + CPUMCTXCORE.esi]
    mov     edi, [ebp + CPUMCTXCORE.edi]
    mov     ebp, [ebp + CPUMCTXCORE.ebp]

    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
    iret
ENDPROC CPUMGCCallV86Code


;;
; This is the main entry point for resuming (or starting) guest
; code execution.
;
; We get here directly from VMMSwitcher.asm (jmp at the end
; of VMMSwitcher_HostToGuest).
;
; This call never returns!
;
; @param edx   Pointer to CPUM structure.
;
align 16
BEGINPROC_EXPORTED CPUMGCResumeGuest
%ifdef VBOX_STRICT
    ; Call CPUM to check sanity.
    push    edx
    mov     edx, IMP(g_VM)
    push    edx
    call    NAME(CPUMRCAssertPreExecutionSanity)
    add     esp, 4
    pop     edx
%endif

    ; Convert to CPUMCPU pointer
    add     edx, [edx + CPUM.offCPUMCPU0]
    ;
    ; Set up the iretd frame.
    ;
    push    dword [edx + CPUMCPU.Guest.ss.Sel]
    push    dword [edx + CPUMCPU.Guest.esp]
    push    dword [edx + CPUMCPU.Guest.eflags]
    push    dword [edx + CPUMCPU.Guest.cs.Sel]
    push    dword [edx + CPUMCPU.Guest.eip]

    ;
    ; Restore registers.
    ;
    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_ES
    mov     es, [edx + CPUMCPU.Guest.es.Sel]
    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_FS
    mov     fs, [edx + CPUMCPU.Guest.fs.Sel]
    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_GS
    mov     gs, [edx + CPUMCPU.Guest.gs.Sel]

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Statistics.
    ;
    push    edx
    mov     edx, IMP(g_VM)
    lea     edx, [edx + VM.StatTotalQemuToGC]
    STAM_PROFILE_ADV_STOP edx

    mov     edx, IMP(g_VM)
    lea     edx, [edx + VM.StatTotalInGC]
    STAM_PROFILE_ADV_START edx
    pop     edx
%endif

    ;
    ; enable WP
    ;
%ifdef ENABLE_WRITE_PROTECTION
    mov     eax, cr0
    or      eax, X86_CR0_WRITE_PROTECT
    mov     cr0, eax
%endif

    ;
    ; Continue restore.
    ;
    mov     esi, [edx + CPUMCPU.Guest.esi]
    mov     edi, [edx + CPUMCPU.Guest.edi]
    mov     ebp, [edx + CPUMCPU.Guest.ebp]
    mov     ebx, [edx + CPUMCPU.Guest.ebx]
    mov     ecx, [edx + CPUMCPU.Guest.ecx]
    mov     eax, [edx + CPUMCPU.Guest.eax]
    push    dword [edx + CPUMCPU.Guest.ds.Sel]
    mov     edx, [edx + CPUMCPU.Guest.edx]
    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_DS
    pop     ds
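    ; Note: guest DS was pushed above because edx still held the CPUMCPU pointer until the
    ; very last load; popping it afterwards restores DS without needing a spare register.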

    ; restart execution.
    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
    iretd
ENDPROC CPUMGCResumeGuest


;;
; This is the main entry point for resuming (or starting) guest
; code execution in raw V86 mode.
;
; We get here directly from VMMSwitcher.asm (jmp at the end
; of VMMSwitcher_HostToGuest).
;
; This call never returns!
;
; @param edx   Pointer to CPUM structure.
;
align 16
BEGINPROC_EXPORTED CPUMGCResumeGuestV86
%ifdef VBOX_STRICT
    ; Call CPUM to check sanity.
    push    edx
    mov     edx, IMP(g_VM)
    push    edx
    call    NAME(CPUMRCAssertPreExecutionSanity)
    add     esp, 4
    pop     edx
%endif

    ; Convert to CPUMCPU pointer
    add     edx, [edx + CPUM.offCPUMCPU0]
    ;
    ; Set up the iretd frame (V86 mode: segment registers are part of the frame).
    ;
    push    dword [edx + CPUMCPU.Guest.gs.Sel]
    push    dword [edx + CPUMCPU.Guest.fs.Sel]
    push    dword [edx + CPUMCPU.Guest.ds.Sel]
    push    dword [edx + CPUMCPU.Guest.es.Sel]

    push    dword [edx + CPUMCPU.Guest.ss.Sel]
    push    dword [edx + CPUMCPU.Guest.esp]

    push    dword [edx + CPUMCPU.Guest.eflags]
    push    dword [edx + CPUMCPU.Guest.cs.Sel]
    push    dword [edx + CPUMCPU.Guest.eip]

    ;
    ; Restore registers.
    ;

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Statistics.
    ;
    push    edx
    mov     edx, IMP(g_VM)
    lea     edx, [edx + VM.StatTotalQemuToGC]
    STAM_PROFILE_ADV_STOP edx

    mov     edx, IMP(g_VM)
    lea     edx, [edx + VM.StatTotalInGC]
    STAM_PROFILE_ADV_START edx
    pop     edx
%endif

    ;
    ; enable WP
    ;
%ifdef ENABLE_WRITE_PROTECTION
    mov     eax, cr0
    or      eax, X86_CR0_WRITE_PROTECT
    mov     cr0, eax
%endif

    ;
    ; Continue restore.
    ;
    mov     esi, [edx + CPUMCPU.Guest.esi]
    mov     edi, [edx + CPUMCPU.Guest.edi]
    mov     ebp, [edx + CPUMCPU.Guest.ebp]
    mov     ecx, [edx + CPUMCPU.Guest.ecx]
    mov     ebx, [edx + CPUMCPU.Guest.ebx]
    mov     eax, [edx + CPUMCPU.Guest.eax]
    mov     edx, [edx + CPUMCPU.Guest.edx]

    ; restart execution.
    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
    iretd
ENDPROC CPUMGCResumeGuestV86
