VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMRC/CPUMRCA.asm@ 54977

Last change on this file since 54977 was 54899, checked in by vboxsync, 10 years ago

CPUMGCResumeGuest and CPUMGCResumeGuestV86 have been fed CPUMCPU pointers for a while now? Guess we never noticed because there was usually a zero dword at the start of CPUMCPU until I moved the Hyper (and the host) states to the end. Hyper never used the FPU part of its CPUMCTX structure.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 16.0 KB
Line 
1; $Id: CPUMRCA.asm 54899 2015-03-23 00:21:30Z vboxsync $
2;; @file
3; CPUM - Raw-mode Context Assembly Routines.
4;
5
6;
7; Copyright (C) 2006-2015 Oracle Corporation
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17
18;*******************************************************************************
19;* Header Files *
20;*******************************************************************************
21%include "VMMRC.mac"
22%include "VBox/vmm/vm.mac"
23%include "VBox/err.mac"
24%include "VBox/vmm/stam.mac"
25%include "CPUMInternal.mac"
26%include "iprt/x86.mac"
27%include "VBox/vmm/cpum.mac"
28
29
30;*******************************************************************************
31;* External Symbols *
32;*******************************************************************************
33extern IMPNAME(g_CPUM) ; VMM GC Builtin import
34extern IMPNAME(g_VM) ; VMM GC Builtin import
35extern NAME(cpumRCHandleNPAndGP) ; CPUMGC.cpp
36extern NAME(CPUMRCAssertPreExecutionSanity)
37
38
39;
40; Enables write protection of Hypervisor memory pages.
41; !note! Must be commented out for Trap8 debug handler.
42;
43%define ENABLE_WRITE_PROTECTION 1
44
45BEGINCODE
46
47;; Macro for FXSAVE/FXRSTOR leaky behaviour on AMD CPUs, see cpumR3CheckLeakyFpu().
48; Cleans the FPU state, if necessary, before restoring the FPU.
49;
50; This macro ASSUMES CR0.TS is not set!
51; @remarks Trashes xAX!!
52; Changes here should also be reflected in CPUMR0A.asm's copy!
53%macro CLEANFPU 0
54 test dword [xDX + CPUMCPU.fUseFlags], CPUM_USE_FFXSR_LEAKY
55 jz .nothing_to_clean
56
57 xor eax, eax
58 fnstsw ax ; Get FSW
59 test eax, RT_BIT(7) ; If FSW.ES (bit 7) is set, clear it to not cause FPU exceptions
60 ; while clearing & loading the FPU bits in 'clean_fpu'
61 jz clean_fpu
62 fnclex
63
64.clean_fpu:
65 ffree st7 ; Clear FPU stack register(7)'s tag entry to prevent overflow if a wraparound occurs
66 ; for the upcoming push (load)
67 fild dword [xDX + CPUMCPU.Guest.XState] ; Explicit FPU load to overwrite FIP, FOP, FDP registers in the FPU.
68
69.nothing_to_clean:
70%endmacro
71
72
73;;
74; Handles lazy FPU saving and restoring.
75;
76; This handler will implement lazy fpu (sse/mmx/stuff) saving.
77; Two actions may be taken in this handler since the Guest OS may
78; be doing lazy fpu switching. So, we'll have to generate those
79; traps which the Guest CPU CTX shall have according to the
80; its CR0 flags. If no traps for the Guest OS, we'll save the host
81; context and restore the guest context.
82;
83; @returns 0 if caller should continue execution.
84; @returns VINF_EM_RAW_GUEST_TRAP if a guest trap should be generated.
85; @param pCPUMCPU x86:[esp+4] gcc:rdi msc:rcx CPUMCPU pointer
86;
87align 16
88BEGINPROC cpumHandleLazyFPUAsm
89 ;
90 ; Figure out what to do.
91 ;
92 ; There are two basic actions:
93 ; 1. Save host fpu and restore guest fpu.
94 ; 2. Generate guest trap.
95 ;
96 ; When entering the hypervisor we'll always enable MP (for proper wait
97 ; trapping) and TS (for intercepting all fpu/mmx/sse stuff). The EM flag
98 ; is taken from the guest OS in order to get proper SSE handling.
99 ;
100 ;
101 ; Actions taken depending on the guest CR0 flags:
102 ;
103 ; 3 2 1
104 ; TS | EM | MP | FPUInstr | WAIT :: VMM Action
105 ; ------------------------------------------------------------------------
106 ; 0 | 0 | 0 | Exec | Exec :: Clear TS & MP, Save HC, Load GC.
107 ; 0 | 0 | 1 | Exec | Exec :: Clear TS, Save HC, Load GC.
108 ; 0 | 1 | 0 | #NM | Exec :: Clear TS & MP, Save HC, Load GC;
109 ; 0 | 1 | 1 | #NM | Exec :: Clear TS, Save HC, Load GC.
110 ; 1 | 0 | 0 | #NM | Exec :: Clear MP, Save HC, Load GC. (EM is already cleared.)
111 ; 1 | 0 | 1 | #NM | #NM :: Go to host taking trap there.
112 ; 1 | 1 | 0 | #NM | Exec :: Clear MP, Save HC, Load GC. (EM is already set.)
113 ; 1 | 1 | 1 | #NM | #NM :: Go to host taking trap there.
114
115 ;
116 ; Before taking any of these actions we're checking if we have already
117 ; loaded the GC FPU. Because if we have, this is an trap for the guest - raw ring-3.
118 ;
119%ifdef RT_ARCH_AMD64
120 %ifdef RT_OS_WINDOWS
121 mov xDX, rcx
122 %else
123 mov xDX, rdi
124 %endif
125%else
126 mov xDX, dword [esp + 4]
127%endif
128 test dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
129 jz hlfpua_not_loaded
130 jmp hlfpua_to_host
131
132 ;
133 ; Take action.
134 ;
135align 16
136hlfpua_not_loaded:
137 mov eax, [xDX + CPUMCPU.Guest.cr0]
138 and eax, X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
139%ifdef RT_ARCH_AMD64
140 lea r8, [hlfpuajmp1 wrt rip]
141 jmp qword [rax*4 + r8]
142%else
143 jmp dword [eax*2 + hlfpuajmp1]
144%endif
145align 16
146;; jump table using fpu related cr0 flags as index.
147hlfpuajmp1:
148 RTCCPTR_DEF hlfpua_switch_fpu_ctx
149 RTCCPTR_DEF hlfpua_switch_fpu_ctx
150 RTCCPTR_DEF hlfpua_switch_fpu_ctx
151 RTCCPTR_DEF hlfpua_switch_fpu_ctx
152 RTCCPTR_DEF hlfpua_switch_fpu_ctx
153 RTCCPTR_DEF hlfpua_to_host
154 RTCCPTR_DEF hlfpua_switch_fpu_ctx
155 RTCCPTR_DEF hlfpua_to_host
156;; and mask for cr0.
157hlfpu_afFlags:
158 RTCCPTR_DEF ~(X86_CR0_TS | X86_CR0_MP)
159 RTCCPTR_DEF ~(X86_CR0_TS)
160 RTCCPTR_DEF ~(X86_CR0_TS | X86_CR0_MP)
161 RTCCPTR_DEF ~(X86_CR0_TS)
162 RTCCPTR_DEF ~(X86_CR0_MP)
163 RTCCPTR_DEF 0
164 RTCCPTR_DEF ~(X86_CR0_MP)
165 RTCCPTR_DEF 0
166
167 ;
168 ; Action - switch FPU context and change cr0 flags.
169 ;
170align 16
171hlfpua_switch_fpu_ctx:
172 ; Paranoia. This function was previously used in ring-0, not any longer.
173%ifdef IN_RING3
174%error "This function is not written for ring-3"
175%endif
176%ifdef IN_RING0
177%error "This function is not written for ring-0"
178%endif
179
180 mov xCX, cr0
181%ifdef RT_ARCH_AMD64
182 lea r8, [hlfpu_afFlags wrt rip]
183 and rcx, [rax*4 + r8] ; calc the new cr0 flags.
184%else
185 and ecx, [eax*2 + hlfpu_afFlags] ; calc the new cr0 flags.
186%endif
187 mov xAX, cr0
188 and xAX, ~(X86_CR0_TS | X86_CR0_EM)
189 mov cr0, xAX ; clear flags so we don't trap here.
190%ifndef RT_ARCH_AMD64
191 mov eax, edx ; Calculate the PCPUM pointer
192 sub eax, [edx + CPUMCPU.offCPUM]
193 test dword [eax + CPUM.CPUFeatures.edx], X86_CPUID_FEATURE_EDX_FXSR
194 jz short hlfpua_no_fxsave
195%endif
196
197%ifdef RT_ARCH_AMD64
198 ; Use explicit REX prefix. See @bugref{6398}.
199 o64 fxsave [xDX + CPUMCPU.Host.XState]
200%else
201 fxsave [xDX + CPUMCPU.Host.XState]
202%endif
203 or dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)
204 fxrstor [xDX + CPUMCPU.Guest.XState] ; raw-mode guest is always 32-bit. See @bugref{7138}.
205
206hlfpua_finished_switch:
207
208 ; Load new CR0 value.
209 ;; @todo Optimize the many unconditional CR0 writes.
210 mov cr0, xCX ; load the new cr0 flags.
211
212 ; return continue execution.
213 xor eax, eax
214 ret
215
216%ifndef RT_ARCH_AMD64
217; legacy support.
218hlfpua_no_fxsave:
219 fnsave [xDX + CPUMCPU.Host.XState]
220 or dword [xDX + CPUMCPU.fUseFlags], dword (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM) ; yasm / nasm
221 mov eax, [xDX + CPUMCPU.Guest.XState] ; control word
222 not eax ; 1 means exception ignored (6 LS bits)
223 and eax, byte 03Fh ; 6 LS bits only
224 test eax, [xDX + CPUMCPU.Guest.XState + 4] ; status word
225 jz short hlfpua_no_exceptions_pending
226 ; technically incorrect, but we certainly don't want any exceptions now!!
227 and dword [xDX + CPUMCPU.Guest.XState + 4], ~03Fh
228hlfpua_no_exceptions_pending:
229 frstor [xDX + CPUMCPU.Guest.XState]
230 jmp near hlfpua_finished_switch
231%endif ; !RT_ARCH_AMD64
232
233
234 ;
235 ; Action - Generate Guest trap.
236 ;
237hlfpua_action_4:
238hlfpua_to_host:
239 mov eax, VINF_EM_RAW_GUEST_TRAP
240 ret
241ENDPROC cpumHandleLazyFPUAsm
242
243
244;;
245; Calls a guest trap/interrupt handler directly
246; Assumes a trap stack frame has already been setup on the guest's stack!
247;
248; @param pRegFrame [esp + 4] Original trap/interrupt context
249; @param selCS [esp + 8] Code selector of handler
250; @param pHandler [esp + 12] GC virtual address of handler
251; @param eflags [esp + 16] Callee's EFLAGS
252; @param selSS [esp + 20] Stack selector for handler
253; @param pEsp [esp + 24] Stack address for handler
254;
255; @remark This call never returns!
256;
257; VMMRCDECL(void) CPUMGCCallGuestTrapHandler(PCPUMCTXCORE pRegFrame, uint32_t selCS, RTGCPTR pHandler, uint32_t eflags, uint32_t selSS, RTGCPTR pEsp);
258align 16
259BEGINPROC_EXPORTED CPUMGCCallGuestTrapHandler
260 mov ebp, esp
261
262 ; construct iret stack frame
263 push dword [ebp + 20] ; SS
264 push dword [ebp + 24] ; ESP
265 push dword [ebp + 16] ; EFLAGS
266 push dword [ebp + 8] ; CS
267 push dword [ebp + 12] ; EIP
268
269 ;
270 ; enable WP
271 ;
272%ifdef ENABLE_WRITE_PROTECTION
273 mov eax, cr0
274 or eax, X86_CR0_WRITE_PROTECT
275 mov cr0, eax
276%endif
277
278 ; restore CPU context (all except cs, eip, ss, esp & eflags; which are restored or overwritten by iret)
279 mov ebp, [ebp + 4] ; pRegFrame
280 mov ebx, [ebp + CPUMCTXCORE.ebx]
281 mov ecx, [ebp + CPUMCTXCORE.ecx]
282 mov edx, [ebp + CPUMCTXCORE.edx]
283 mov esi, [ebp + CPUMCTXCORE.esi]
284 mov edi, [ebp + CPUMCTXCORE.edi]
285
286 ;; @todo load segment registers *before* enabling WP.
287 TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_GS | CPUM_HANDLER_CTXCORE_IN_EBP
288 mov gs, [ebp + CPUMCTXCORE.gs.Sel]
289 TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_FS | CPUM_HANDLER_CTXCORE_IN_EBP
290 mov fs, [ebp + CPUMCTXCORE.fs.Sel]
291 TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_ES | CPUM_HANDLER_CTXCORE_IN_EBP
292 mov es, [ebp + CPUMCTXCORE.es.Sel]
293 TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_DS | CPUM_HANDLER_CTXCORE_IN_EBP
294 mov ds, [ebp + CPUMCTXCORE.ds.Sel]
295
296 mov eax, [ebp + CPUMCTXCORE.eax]
297 mov ebp, [ebp + CPUMCTXCORE.ebp]
298
299 TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
300 iret
301ENDPROC CPUMGCCallGuestTrapHandler
302
303
304;;
305; Performs an iret to V86 code
306; Assumes a trap stack frame has already been setup on the guest's stack!
307;
308; @param pRegFrame Original trap/interrupt context
309;
310; This function does not return!
311;
312;VMMRCDECL(void) CPUMGCCallV86Code(PCPUMCTXCORE pRegFrame);
313align 16
314BEGINPROC CPUMGCCallV86Code
315 mov ebp, [esp + 4] ; pRegFrame
316
317 ; construct iret stack frame
318 push dword [ebp + CPUMCTXCORE.gs.Sel]
319 push dword [ebp + CPUMCTXCORE.fs.Sel]
320 push dword [ebp + CPUMCTXCORE.ds.Sel]
321 push dword [ebp + CPUMCTXCORE.es.Sel]
322 push dword [ebp + CPUMCTXCORE.ss.Sel]
323 push dword [ebp + CPUMCTXCORE.esp]
324 push dword [ebp + CPUMCTXCORE.eflags]
325 push dword [ebp + CPUMCTXCORE.cs.Sel]
326 push dword [ebp + CPUMCTXCORE.eip]
327
328 ;
329 ; enable WP
330 ;
331%ifdef ENABLE_WRITE_PROTECTION
332 mov eax, cr0
333 or eax, X86_CR0_WRITE_PROTECT
334 mov cr0, eax
335%endif
336
337 ; restore CPU context (all except cs, eip, ss, esp, eflags, ds, es, fs & gs; which are restored or overwritten by iret)
338 mov eax, [ebp + CPUMCTXCORE.eax]
339 mov ebx, [ebp + CPUMCTXCORE.ebx]
340 mov ecx, [ebp + CPUMCTXCORE.ecx]
341 mov edx, [ebp + CPUMCTXCORE.edx]
342 mov esi, [ebp + CPUMCTXCORE.esi]
343 mov edi, [ebp + CPUMCTXCORE.edi]
344 mov ebp, [ebp + CPUMCTXCORE.ebp]
345
346 TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
347 iret
348ENDPROC CPUMGCCallV86Code
349
350
351;;
352; This is a main entry point for resuming (or starting) guest
353; code execution.
354;
355; We get here directly from VMMSwitcher.asm (jmp at the end
356; of VMMSwitcher_HostToGuest).
357;
358; This call never returns!
359;
360; @param edx Pointer to CPUMCPU structure.
361;
362align 16
363BEGINPROC_EXPORTED CPUMGCResumeGuest
364%ifdef VBOX_STRICT
365 ; Call CPUM to check sanity.
366 push edx
367 mov edx, IMP(g_VM)
368 push edx
369 call NAME(CPUMRCAssertPreExecutionSanity)
370 add esp, 4
371 pop edx
372%endif
373
374 ;
375 ; Setup iretd
376 ;
377 push dword [edx + CPUMCPU.Guest.ss.Sel]
378 push dword [edx + CPUMCPU.Guest.esp]
379 push dword [edx + CPUMCPU.Guest.eflags]
380 push dword [edx + CPUMCPU.Guest.cs.Sel]
381 push dword [edx + CPUMCPU.Guest.eip]
382
383 ;
384 ; Restore registers.
385 ;
386 TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_ES
387 mov es, [edx + CPUMCPU.Guest.es.Sel]
388 TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_FS
389 mov fs, [edx + CPUMCPU.Guest.fs.Sel]
390 TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_GS
391 mov gs, [edx + CPUMCPU.Guest.gs.Sel]
392
393%ifdef VBOX_WITH_STATISTICS
394 ;
395 ; Statistics.
396 ;
397 push edx
398 mov edx, IMP(g_VM)
399 lea edx, [edx + VM.StatTotalQemuToGC]
400 STAM_PROFILE_ADV_STOP edx
401
402 mov edx, IMP(g_VM)
403 lea edx, [edx + VM.StatTotalInGC]
404 STAM_PROFILE_ADV_START edx
405 pop edx
406%endif
407
408 ;
409 ; enable WP
410 ;
411%ifdef ENABLE_WRITE_PROTECTION
412 mov eax, cr0
413 or eax, X86_CR0_WRITE_PROTECT
414 mov cr0, eax
415%endif
416
417 ;
418 ; Continue restore.
419 ;
420 mov esi, [edx + CPUMCPU.Guest.esi]
421 mov edi, [edx + CPUMCPU.Guest.edi]
422 mov ebp, [edx + CPUMCPU.Guest.ebp]
423 mov ebx, [edx + CPUMCPU.Guest.ebx]
424 mov ecx, [edx + CPUMCPU.Guest.ecx]
425 mov eax, [edx + CPUMCPU.Guest.eax]
426 push dword [edx + CPUMCPU.Guest.ds.Sel]
427 mov edx, [edx + CPUMCPU.Guest.edx]
428 TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_DS
429 pop ds
430
431 ; restart execution.
432 TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
433 iretd
434ENDPROC CPUMGCResumeGuest
435
436
437;;
438; This is a main entry point for resuming (or starting) guest
439; code execution for raw V86 mode
440;
441; We get here directly from VMMSwitcher.asm (jmp at the end
442; of VMMSwitcher_HostToGuest).
443;
444; This call never returns!
445;
446; @param edx Pointer to CPUMCPU structure.
447;
448align 16
449BEGINPROC_EXPORTED CPUMGCResumeGuestV86
450%ifdef VBOX_STRICT
451 ; Call CPUM to check sanity.
452 push edx
453 mov edx, IMP(g_VM)
454 push edx
455 call NAME(CPUMRCAssertPreExecutionSanity)
456 add esp, 4
457 pop edx
458%endif
459
460 ;
461 ; Setup iretd
462 ;
463 push dword [edx + CPUMCPU.Guest.gs.Sel]
464 push dword [edx + CPUMCPU.Guest.fs.Sel]
465 push dword [edx + CPUMCPU.Guest.ds.Sel]
466 push dword [edx + CPUMCPU.Guest.es.Sel]
467
468 push dword [edx + CPUMCPU.Guest.ss.Sel]
469 push dword [edx + CPUMCPU.Guest.esp]
470
471 push dword [edx + CPUMCPU.Guest.eflags]
472 push dword [edx + CPUMCPU.Guest.cs.Sel]
473 push dword [edx + CPUMCPU.Guest.eip]
474
475 ;
476 ; Restore registers.
477 ;
478
479%ifdef VBOX_WITH_STATISTICS
480 ;
481 ; Statistics.
482 ;
483 push edx
484 mov edx, IMP(g_VM)
485 lea edx, [edx + VM.StatTotalQemuToGC]
486 STAM_PROFILE_ADV_STOP edx
487
488 mov edx, IMP(g_VM)
489 lea edx, [edx + VM.StatTotalInGC]
490 STAM_PROFILE_ADV_START edx
491 pop edx
492%endif
493
494 ;
495 ; enable WP
496 ;
497%ifdef ENABLE_WRITE_PROTECTION
498 mov eax, cr0
499 or eax, X86_CR0_WRITE_PROTECT
500 mov cr0, eax
501%endif
502
503 ;
504 ; Continue restore.
505 ;
506 mov esi, [edx + CPUMCPU.Guest.esi]
507 mov edi, [edx + CPUMCPU.Guest.edi]
508 mov ebp, [edx + CPUMCPU.Guest.ebp]
509 mov ecx, [edx + CPUMCPU.Guest.ecx]
510 mov ebx, [edx + CPUMCPU.Guest.ebx]
511 mov eax, [edx + CPUMCPU.Guest.eax]
512 mov edx, [edx + CPUMCPU.Guest.edx]
513
514 ; restart execution.
515 TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
516 iretd
517ENDPROC CPUMGCResumeGuestV86
518
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette