VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMRC/CPUMRCA.asm@49966

Last change on this file since 49966 was 49021, checked in by vboxsync, 11 years ago

VMM/CPUMRCA.asm: Only IN_RC, don't leave legacy ifndef IN_RING3 lying around.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 16.1 KB
; $Id: CPUMRCA.asm 49021 2013-10-10 09:02:04Z vboxsync $
;; @file
; CPUM - Raw-mode Context Assembly Routines.
;

; Copyright (C) 2006-2012 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

;*******************************************************************************
;*  Header Files                                                               *
;*******************************************************************************
%include "VMMRC.mac"
%include "VBox/vmm/vm.mac"
%include "VBox/err.mac"
%include "VBox/vmm/stam.mac"
%include "CPUMInternal.mac"
%include "iprt/x86.mac"
%include "VBox/vmm/cpum.mac"


;*******************************************************************************
;*  External Symbols                                                           *
;*******************************************************************************
extern IMPNAME(g_CPUM)                  ; VMM GC Builtin import
extern IMPNAME(g_VM)                    ; VMM GC Builtin import
extern NAME(cpumRCHandleNPAndGP)        ; CPUMGC.cpp
extern NAME(CPUMRCAssertPreExecutionSanity)


;
; Enables write protection of Hypervisor memory pages.
; !note! Must be commented out for Trap8 debug handler.
;
%define ENABLE_WRITE_PROTECTION 1

BEGINCODE

;; Macro for FXSAVE/FXRSTOR leaky behaviour on AMD CPUs, see cpumR3CheckLeakyFpu().
; Cleans the FPU state, if necessary, before restoring the FPU.
;
; This macro ASSUMES CR0.TS is not set!
; @remarks Trashes xAX!!
; Changes here should also be reflected in CPUMR0A.asm's copy!
%macro CLEANFPU 0
        test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USE_FFXSR_LEAKY
        jz      .nothing_to_clean

        xor     eax, eax
        fnstsw  ax                      ; Get FSW.
        test    eax, RT_BIT(7)          ; If FSW.ES (bit 7) is set, clear it so we don't cause FPU exceptions
                                        ; while clearing & loading the FPU bits in '.clean_fpu' below.
        jz      .clean_fpu
        fnclex

.clean_fpu:
        ffree   st7                     ; Clear FPU stack register 7's tag entry to prevent overflow
                                        ; if a wraparound occurs for the upcoming push (load).
        fild    dword [xDX + CPUMCPU.Guest.fpu] ; Explicit FPU load to overwrite FIP, FOP, FDP registers in the FPU.

.nothing_to_clean:
%endmacro
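; Background on the "leaky" behaviour handled above: AMD CPUs document that
; FXSAVE/FXRSTOR only save/restore the FOP/FIP/FDP pointer registers when an
; unmasked x87 exception is pending, so stale instruction/data pointers can
; leak from one context into the next. The FILD forces those registers to be
; overwritten with harmless values before the guest state is restored.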


;;
; Handles lazy FPU saving and restoring.
;
; This handler implements lazy saving of the FPU/SSE/MMX state. Two actions
; may be taken here, since the guest OS may itself be doing lazy FPU
; switching: either we generate the trap that the guest CPU context calls
; for according to its CR0 flags, or, if no guest trap is due, we save the
; host context and restore the guest context.
;
; @returns 0 if the caller should continue execution.
; @returns VINF_EM_RAW_GUEST_TRAP if a guest trap should be generated.
; @param   pCPUMCPU  x86:[esp+4] GCC:rdi MSC:rcx  CPUMCPU pointer
;
align 16
BEGINPROC cpumHandleLazyFPUAsm
    ;
    ; Figure out what to do.
    ;
    ; There are two basic actions:
    ;   1. Save host fpu and restore guest fpu.
    ;   2. Generate guest trap.
    ;
    ; When entering the hypervisor we'll always enable MP (for proper wait
    ; trapping) and TS (for intercepting all fpu/mmx/sse stuff). The EM flag
    ; is taken from the guest OS in order to get proper SSE handling.
    ;
    ;
    ; Actions taken depending on the guest CR0 flags:
    ;
    ;    3 |  2 |  1
    ;   TS | EM | MP | FPUInstr | WAIT :: VMM Action
    ;   ------------------------------------------------------------------------
    ;    0 |  0 |  0 | Exec     | Exec :: Clear TS & MP, Save HC, Load GC.
    ;    0 |  0 |  1 | Exec     | Exec :: Clear TS, Save HC, Load GC.
    ;    0 |  1 |  0 | #NM      | Exec :: Clear TS & MP, Save HC, Load GC.
    ;    0 |  1 |  1 | #NM      | Exec :: Clear TS, Save HC, Load GC.
    ;    1 |  0 |  0 | #NM      | Exec :: Clear MP, Save HC, Load GC. (EM is already cleared.)
    ;    1 |  0 |  1 | #NM      | #NM  :: Go to host taking trap there.
    ;    1 |  1 |  0 | #NM      | Exec :: Clear MP, Save HC, Load GC. (EM is already set.)
    ;    1 |  1 |  1 | #NM      | #NM  :: Go to host taking trap there.
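    ;
    ; Worked example (from the tables below): TS=1, EM=0, MP=0 leaves eax = 8
    ; after the CR0 masking; the dispatch turns that into jump table index 4
    ; (TS*4 + EM*2 + MP), selecting hlfpua_switch_fpu_ctx with a CR0 AND mask
    ; of ~MP, which matches this row's "Clear MP, Save HC, Load GC" action.
    ;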

    ;
    ; Before taking any of these actions we check whether we have already
    ; loaded the GC FPU. If we have, this is a trap for the guest and we
    ; must go back to the host (raw ring-3).
    ;
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
        mov     xDX, rcx
 %else
        mov     xDX, rdi
 %endif
%else
        mov     xDX, dword [esp + 4]
%endif
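    ; Whichever the convention, xDX now holds the pCPUMCPU argument: the
    ; Microsoft x64 ABI passes it in rcx, the SysV AMD64 ABI in rdi, and
    ; 32-bit x86 on the stack at [esp + 4].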
        test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
        jz      hlfpua_not_loaded
        jmp     hlfpua_to_host

    ;
    ; Take action.
    ;
align 16
hlfpua_not_loaded:
        mov     eax, [xDX + CPUMCPU.Guest.cr0]
        and     eax, X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
%ifdef RT_ARCH_AMD64
        lea     r8, [hlfpuajmp1 wrt rip]
        jmp     qword [rax*4 + r8]
%else
        jmp     dword [eax*2 + hlfpuajmp1]
%endif
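    ; Indexing note: after the AND above, eax = TS*8 + EM*4 + MP*2, i.e. twice
    ; the table index (MP is CR0 bit 1). The entries below are pointer sized,
    ; so the byte offset is index*4 on x86 (eax*2) and index*8 on AMD64
    ; (rax*4), which is exactly what the two addressing modes compute.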
align 16
;; jump table using fpu related cr0 flags as index.
hlfpuajmp1:
        RTCCPTR_DEF hlfpua_switch_fpu_ctx
        RTCCPTR_DEF hlfpua_switch_fpu_ctx
        RTCCPTR_DEF hlfpua_switch_fpu_ctx
        RTCCPTR_DEF hlfpua_switch_fpu_ctx
        RTCCPTR_DEF hlfpua_switch_fpu_ctx
        RTCCPTR_DEF hlfpua_to_host
        RTCCPTR_DEF hlfpua_switch_fpu_ctx
        RTCCPTR_DEF hlfpua_to_host
;; and mask for cr0.
hlfpu_afFlags:
        RTCCPTR_DEF ~(X86_CR0_TS | X86_CR0_MP)
        RTCCPTR_DEF ~(X86_CR0_TS)
        RTCCPTR_DEF ~(X86_CR0_TS | X86_CR0_MP)
        RTCCPTR_DEF ~(X86_CR0_TS)
        RTCCPTR_DEF ~(X86_CR0_MP)
        RTCCPTR_DEF 0
        RTCCPTR_DEF ~(X86_CR0_MP)
        RTCCPTR_DEF 0
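;; Note: hlfpu_afFlags pairs 1:1 with hlfpuajmp1 by index; each entry is the
; AND mask applied to the host CR0 value in hlfpua_switch_fpu_ctx, clearing
; exactly the bits the action table above says to clear. The two zero entries
; belong to the go-to-host cases, which never use their mask.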

    ;
    ; Action - switch FPU context and change cr0 flags.
    ;
align 16
hlfpua_switch_fpu_ctx:
    ; Paranoia. This function was previously used in ring-0, but is no longer.
%ifdef IN_RING3
%error "This function is not written for ring-3"
%endif
%ifdef IN_RING0
%error "This function is not written for ring-0"
%endif

        mov     xCX, cr0
%ifdef RT_ARCH_AMD64
        lea     r8, [hlfpu_afFlags wrt rip]
        and     rcx, [rax*4 + r8]               ; Calc the new cr0 flags.
%else
        and     ecx, [eax*2 + hlfpu_afFlags]    ; Calc the new cr0 flags.
%endif
        mov     xAX, cr0
        and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
        mov     cr0, xAX                        ; Clear flags so we don't trap here.
%ifndef RT_ARCH_AMD64
        mov     eax, edx                        ; Calculate the PCPUM pointer.
        sub     eax, [edx + CPUMCPU.offCPUM]
        test    dword [eax + CPUM.CPUFeatures.edx], X86_CPUID_FEATURE_EDX_FXSR
        jz      short hlfpua_no_fxsave
%endif

%ifdef RT_ARCH_AMD64
        ; Use explicit REX prefix. See @bugref{6398}.
        o64 fxsave [xDX + CPUMCPU.Host.fpu]
%else
        fxsave  [xDX + CPUMCPU.Host.fpu]
%endif
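    ; Why the o64 matters: with a REX.W prefix FXSAVE/FXRSTOR use the 64-bit
    ; image format and save/restore the full 64-bit FPU instruction and data
    ; pointers (FIP/FDP); without it only 32-bit pointers plus selectors are kept.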
        or      dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)
%ifdef RT_ARCH_AMD64
        o64 fxrstor [xDX + CPUMCPU.Guest.fpu]
%else
        fxrstor [xDX + CPUMCPU.Guest.fpu]
%endif
hlfpua_finished_switch:

        ; Load new CR0 value.
        ;; @todo Optimize the many unconditional CR0 writes.
        mov     cr0, xCX                        ; Load the new cr0 flags.

        ; Return, indicating that the caller should continue execution.
        xor     eax, eax
        ret

%ifndef RT_ARCH_AMD64
; legacy support.
hlfpua_no_fxsave:
        fnsave  [xDX + CPUMCPU.Host.fpu]
        or      dword [xDX + CPUMCPU.fUseFlags], dword (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM) ; yasm / nasm
        mov     eax, [xDX + CPUMCPU.Guest.fpu]      ; control word
        not     eax                                 ; 1 means exception ignored (6 LS bits)
        and     eax, byte 03Fh                      ; 6 LS bits only
        test    eax, [xDX + CPUMCPU.Guest.fpu + 4]  ; status word
        jz      short hlfpua_no_exceptions_pending
        ; Technically incorrect, but we certainly don't want any exceptions now!!
        and     dword [xDX + CPUMCPU.Guest.fpu + 4], ~03Fh
hlfpua_no_exceptions_pending:
        frstor  [xDX + CPUMCPU.Guest.fpu]
        jmp     near hlfpua_finished_switch
%endif ; !RT_ARCH_AMD64
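; Note on the bit fiddling in the FNSAVE path above: FCW bits 0-5 are the x87
; exception mask bits (1 = masked) and FSW bits 0-5 the matching pending
; exception flags, so NOT + AND leaves only exceptions that are pending but
; unmasked. Clearing those status bits before FRSTOR keeps the restored
; context from raising #MF at the next waiting FPU instruction.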


    ;
    ; Action - Generate guest trap.
    ;
hlfpua_action_4:
hlfpua_to_host:
        mov     eax, VINF_EM_RAW_GUEST_TRAP
        ret
ENDPROC cpumHandleLazyFPUAsm


;;
; Calls a guest trap/interrupt handler directly.
; Assumes a trap stack frame has already been set up on the guest's stack!
;
; @param pRegFrame  [esp + 4]   Original trap/interrupt context.
; @param selCS      [esp + 8]   Code selector of handler.
; @param pHandler   [esp + 12]  GC virtual address of handler.
; @param eflags     [esp + 16]  Callee's EFLAGS.
; @param selSS      [esp + 20]  Stack selector for handler.
; @param pEsp       [esp + 24]  Stack address for handler.
;
; @remark This call never returns!
;
; VMMRCDECL(void) CPUMGCCallGuestTrapHandler(PCPUMCTXCORE pRegFrame, uint32_t selCS, RTGCPTR pHandler, uint32_t eflags, uint32_t selSS, RTGCPTR pEsp);
align 16
BEGINPROC_EXPORTED CPUMGCCallGuestTrapHandler
        mov     ebp, esp

        ; Construct the iret stack frame.
        push    dword [ebp + 20]        ; SS
        push    dword [ebp + 24]        ; ESP
        push    dword [ebp + 16]        ; EFLAGS
        push    dword [ebp + 8]         ; CS
        push    dword [ebp + 12]        ; EIP
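        ; The stack now matches what a privilege-changing IRET consumes, from
        ; the top down: EIP, CS, EFLAGS, ESP, SS (hence the reversed push order).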

        ;
        ; Enable WP.
        ;
%ifdef ENABLE_WRITE_PROTECTION
        mov     eax, cr0
        or      eax, X86_CR0_WRITE_PROTECT
        mov     cr0, eax
%endif
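        ; Background: CR0.WP makes read-only pages fault on supervisor-mode
        ; writes too, which is how the hypervisor pages mapped into this
        ; context stay protected while guest code runs.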

        ; Restore CPU context (all except cs, eip, ss, esp & eflags, which are restored or overwritten by iret).
        mov     ebp, [ebp + 4]          ; pRegFrame
        mov     ebx, [ebp + CPUMCTXCORE.ebx]
        mov     ecx, [ebp + CPUMCTXCORE.ecx]
        mov     edx, [ebp + CPUMCTXCORE.edx]
        mov     esi, [ebp + CPUMCTXCORE.esi]
        mov     edi, [ebp + CPUMCTXCORE.edi]

        ;; @todo load segment registers *before* enabling WP.
        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_GS | CPUM_HANDLER_CTXCORE_IN_EBP
        mov     gs, [ebp + CPUMCTXCORE.gs.Sel]
        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_FS | CPUM_HANDLER_CTXCORE_IN_EBP
        mov     fs, [ebp + CPUMCTXCORE.fs.Sel]
        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_ES | CPUM_HANDLER_CTXCORE_IN_EBP
        mov     es, [ebp + CPUMCTXCORE.es.Sel]
        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_DS | CPUM_HANDLER_CTXCORE_IN_EBP
        mov     ds, [ebp + CPUMCTXCORE.ds.Sel]
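        ; Each guarded segment load above is preceded by TRPM_NP_GP_HANDLER,
        ; which presumably registers cpumRCHandleNPAndGP as the fixup for a
        ; #GP/#NP raised by the following instruction, so a stale guest
        ; selector is handled gracefully instead of faulting the hypervisor.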

        mov     eax, [ebp + CPUMCTXCORE.eax]
        mov     ebp, [ebp + CPUMCTXCORE.ebp]

        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
        iret
ENDPROC CPUMGCCallGuestTrapHandler


;;
; Performs an iret to V86 code.
; Assumes a trap stack frame has already been set up on the guest's stack!
;
; @param pRegFrame  Original trap/interrupt context.
;
; This function does not return!
;
; VMMRCDECL(void) CPUMGCCallV86Code(PCPUMCTXCORE pRegFrame);
align 16
BEGINPROC CPUMGCCallV86Code
        mov     ebp, [esp + 4]          ; pRegFrame

        ; Construct the iret stack frame.
        push    dword [ebp + CPUMCTXCORE.gs.Sel]
        push    dword [ebp + CPUMCTXCORE.fs.Sel]
        push    dword [ebp + CPUMCTXCORE.ds.Sel]
        push    dword [ebp + CPUMCTXCORE.es.Sel]
        push    dword [ebp + CPUMCTXCORE.ss.Sel]
        push    dword [ebp + CPUMCTXCORE.esp]
        push    dword [ebp + CPUMCTXCORE.eflags]
        push    dword [ebp + CPUMCTXCORE.cs.Sel]
        push    dword [ebp + CPUMCTXCORE.eip]
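        ; An IRET that restores EFLAGS.VM pops the extended V86 frame, from
        ; the top down: EIP, CS, EFLAGS, ESP, SS, ES, DS, FS, GS; hence the
        ; nine pushes above in reverse order.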

        ;
        ; Enable WP.
        ;
%ifdef ENABLE_WRITE_PROTECTION
        mov     eax, cr0
        or      eax, X86_CR0_WRITE_PROTECT
        mov     cr0, eax
%endif

        ; Restore CPU context (all except cs, eip, ss, esp, eflags, ds, es, fs & gs, which are restored or overwritten by iret).
        mov     eax, [ebp + CPUMCTXCORE.eax]
        mov     ebx, [ebp + CPUMCTXCORE.ebx]
        mov     ecx, [ebp + CPUMCTXCORE.ecx]
        mov     edx, [ebp + CPUMCTXCORE.edx]
        mov     esi, [ebp + CPUMCTXCORE.esi]
        mov     edi, [ebp + CPUMCTXCORE.edi]
        mov     ebp, [ebp + CPUMCTXCORE.ebp]

        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
        iret
ENDPROC CPUMGCCallV86Code


;;
; This is the main entry point for resuming (or starting) guest
; code execution.
;
; We get here directly from VMMSwitcher.asm (jmp at the end
; of VMMSwitcher_HostToGuest).
;
; This call never returns!
;
; @param edx    Pointer to CPUM structure.
;
align 16
BEGINPROC_EXPORTED CPUMGCResumeGuest
%ifdef VBOX_STRICT
        ; Call CPUM to check sanity.
        push    edx
        mov     edx, IMP(g_VM)
        push    edx
        call    NAME(CPUMRCAssertPreExecutionSanity)
        add     esp, 4
        pop     edx
%endif

        ; Convert to CPUMCPU pointer.
        add     edx, [edx + CPUM.offCPUMCPU0]
        ;
        ; Setup iretd
        ;
        push    dword [edx + CPUMCPU.Guest.ss.Sel]
        push    dword [edx + CPUMCPU.Guest.esp]
        push    dword [edx + CPUMCPU.Guest.eflags]
        push    dword [edx + CPUMCPU.Guest.cs.Sel]
        push    dword [edx + CPUMCPU.Guest.eip]

        ;
        ; Restore registers.
        ;
        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_ES
        mov     es, [edx + CPUMCPU.Guest.es.Sel]
        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_FS
        mov     fs, [edx + CPUMCPU.Guest.fs.Sel]
        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_GS
        mov     gs, [edx + CPUMCPU.Guest.gs.Sel]

%ifdef VBOX_WITH_STATISTICS
        ;
        ; Statistics.
        ;
        push    edx
        mov     edx, IMP(g_VM)
        lea     edx, [edx + VM.StatTotalQemuToGC]
        STAM_PROFILE_ADV_STOP edx

        mov     edx, IMP(g_VM)
        lea     edx, [edx + VM.StatTotalInGC]
        STAM_PROFILE_ADV_START edx
        pop     edx
%endif

        ;
        ; Enable WP.
        ;
%ifdef ENABLE_WRITE_PROTECTION
        mov     eax, cr0
        or      eax, X86_CR0_WRITE_PROTECT
        mov     cr0, eax
%endif

        ;
        ; Continue restore.
        ;
        mov     esi, [edx + CPUMCPU.Guest.esi]
        mov     edi, [edx + CPUMCPU.Guest.edi]
        mov     ebp, [edx + CPUMCPU.Guest.ebp]
        mov     ebx, [edx + CPUMCPU.Guest.ebx]
        mov     ecx, [edx + CPUMCPU.Guest.ecx]
        mov     eax, [edx + CPUMCPU.Guest.eax]
        push    dword [edx + CPUMCPU.Guest.ds.Sel]
        mov     edx, [edx + CPUMCPU.Guest.edx]
        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_DS
        pop     ds
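        ; Note the staging above: the guest DS selector was pushed while edx
        ; still pointed at the CPUMCPU structure, edx was then overwritten
        ; with the guest's EDX, and DS is popped last under the #GP/#NP guard.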

        ; Restart execution.
        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
        iretd
ENDPROC CPUMGCResumeGuest


;;
; This is the main entry point for resuming (or starting) guest
; code execution in raw V86 mode.
;
; We get here directly from VMMSwitcher.asm (jmp at the end
; of VMMSwitcher_HostToGuest).
;
; This call never returns!
;
; @param edx    Pointer to CPUM structure.
;
align 16
BEGINPROC_EXPORTED CPUMGCResumeGuestV86
%ifdef VBOX_STRICT
        ; Call CPUM to check sanity.
        push    edx
        mov     edx, IMP(g_VM)
        push    edx
        call    NAME(CPUMRCAssertPreExecutionSanity)
        add     esp, 4
        pop     edx
%endif

        ; Convert to CPUMCPU pointer.
        add     edx, [edx + CPUM.offCPUMCPU0]
        ;
        ; Setup iretd
        ;
        push    dword [edx + CPUMCPU.Guest.gs.Sel]
        push    dword [edx + CPUMCPU.Guest.fs.Sel]
        push    dword [edx + CPUMCPU.Guest.ds.Sel]
        push    dword [edx + CPUMCPU.Guest.es.Sel]

        push    dword [edx + CPUMCPU.Guest.ss.Sel]
        push    dword [edx + CPUMCPU.Guest.esp]

        push    dword [edx + CPUMCPU.Guest.eflags]
        push    dword [edx + CPUMCPU.Guest.cs.Sel]
        push    dword [edx + CPUMCPU.Guest.eip]

        ;
        ; Restore registers.
        ;

%ifdef VBOX_WITH_STATISTICS
        ;
        ; Statistics.
        ;
        push    edx
        mov     edx, IMP(g_VM)
        lea     edx, [edx + VM.StatTotalQemuToGC]
        STAM_PROFILE_ADV_STOP edx

        mov     edx, IMP(g_VM)
        lea     edx, [edx + VM.StatTotalInGC]
        STAM_PROFILE_ADV_START edx
        pop     edx
%endif

        ;
        ; Enable WP.
        ;
%ifdef ENABLE_WRITE_PROTECTION
        mov     eax, cr0
        or      eax, X86_CR0_WRITE_PROTECT
        mov     cr0, eax
%endif

        ;
        ; Continue restore.
        ;
        mov     esi, [edx + CPUMCPU.Guest.esi]
        mov     edi, [edx + CPUMCPU.Guest.edi]
        mov     ebp, [edx + CPUMCPU.Guest.ebp]
        mov     ecx, [edx + CPUMCPU.Guest.ecx]
        mov     ebx, [edx + CPUMCPU.Guest.ebx]
        mov     eax, [edx + CPUMCPU.Guest.eax]
        mov     edx, [edx + CPUMCPU.Guest.edx]

        ; Restart execution.
        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
        iretd
ENDPROC CPUMGCResumeGuestV86

