VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMRC/CPUMRCA.asm@ 53349

Last change on this file since 53349 was 52296, checked in by vboxsync, 10 years ago

VMM: Missed copyright header update in r95407.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 16.1 KB
; $Id: CPUMRCA.asm 52296 2014-08-06 13:54:49Z vboxsync $
;; @file
; CPUM - Raw-mode Context Assembly Routines.
;

; Copyright (C) 2006-2014 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

;*******************************************************************************
;*      Header Files                                                           *
;*******************************************************************************
%include "VMMRC.mac"
%include "VBox/vmm/vm.mac"
%include "VBox/err.mac"
%include "VBox/vmm/stam.mac"
%include "CPUMInternal.mac"
%include "iprt/x86.mac"
%include "VBox/vmm/cpum.mac"


;*******************************************************************************
;*      External Symbols                                                       *
;*******************************************************************************
extern IMPNAME(g_CPUM)                  ; VMM GC Builtin import
extern IMPNAME(g_VM)                    ; VMM GC Builtin import
extern NAME(cpumRCHandleNPAndGP)        ; CPUMGC.cpp
extern NAME(CPUMRCAssertPreExecutionSanity)


;
; Enables write protection of Hypervisor memory pages.
; !note! Must be commented out for Trap8 debug handler.
;
%define ENABLE_WRITE_PROTECTION 1
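; With CR0.WP set, read-only page protection also applies to supervisor-mode
; accesses, so hypervisor pages mapped read-only stay protected while guest
; code is executing.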

BEGINCODE

;; Macro for FXSAVE/FXRSTOR leaky behaviour on AMD CPUs, see cpumR3CheckLeakyFpu().
; Cleans the FPU state, if necessary, before restoring the FPU.
;
; This macro ASSUMES CR0.TS is not set!
; @remarks Trashes xAX!!
; Changes here should also be reflected in CPUMR0A.asm's copy!
%macro CLEANFPU 0
    test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USE_FFXSR_LEAKY
    jz      .nothing_to_clean

    xor     eax, eax
    fnstsw  ax                          ; Get FSW
    test    eax, RT_BIT(7)              ; If FSW.ES (bit 7) is set, clear it to not cause FPU exceptions
                                        ; while clearing & loading the FPU bits in 'clean_fpu'
    jz      .clean_fpu
    fnclex

.clean_fpu:
    ffree   st7                         ; Clear FPU stack register(7)'s tag entry to prevent overflow if a wraparound occurs
                                        ; for the upcoming push (load)
    fild    dword [xDX + CPUMCPU.Guest.fpu] ; Explicit FPU load to overwrite FIP, FOP, FDP registers in the FPU.

.nothing_to_clean:
%endmacro
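; Note: CLEANFPU is not invoked anywhere in this file; see the remark above
; about keeping it in sync with the CPUMR0A.asm copy.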


;;
; Handles lazy FPU saving and restoring.
;
; This handler implements lazy FPU (x87/MMX/SSE) saving. Two actions may be
; taken here, since the guest OS may itself be doing lazy FPU switching:
; either we raise the trap that the guest CPU context calls for according to
; its CR0 flags, or, if no trap is due for the guest, we save the host
; context and restore the guest context.
;
; @returns  0 if caller should continue execution.
; @returns  VINF_EM_RAW_GUEST_TRAP if a guest trap should be generated.
; @param    pCPUMCPU    x86:[esp+4] gcc:rdi msc:rcx     CPUMCPU pointer
;
align 16
BEGINPROC cpumHandleLazyFPUAsm
    ;
    ; Figure out what to do.
    ;
    ; There are two basic actions:
    ;   1. Save host fpu and restore guest fpu.
    ;   2. Generate guest trap.
    ;
    ; When entering the hypervisor we'll always enable MP (for proper wait
    ; trapping) and TS (for intercepting all fpu/mmx/sse stuff). The EM flag
    ; is taken from the guest OS in order to get proper SSE handling.
    ;
    ;
    ; Actions taken depending on the guest CR0 flags:
    ;
    ;    3    2    1
    ;   TS | EM | MP | FPUInstr | WAIT :: VMM Action
    ; ------------------------------------------------------------------------
    ;    0 |  0 |  0 | Exec     | Exec :: Clear TS & MP, Save HC, Load GC.
    ;    0 |  0 |  1 | Exec     | Exec :: Clear TS, Save HC, Load GC.
    ;    0 |  1 |  0 | #NM      | Exec :: Clear TS & MP, Save HC, Load GC;
    ;    0 |  1 |  1 | #NM      | Exec :: Clear TS, Save HC, Load GC.
    ;    1 |  0 |  0 | #NM      | Exec :: Clear MP, Save HC, Load GC. (EM is already cleared.)
    ;    1 |  0 |  1 | #NM      | #NM  :: Go to host taking trap there.
    ;    1 |  1 |  0 | #NM      | Exec :: Clear MP, Save HC, Load GC. (EM is already set.)
    ;    1 |  1 |  1 | #NM      | #NM  :: Go to host taking trap there.

    ;
    ; Before taking any of these actions we check whether we have already
    ; loaded the GC FPU. Because if we have, this is a trap for the guest - raw ring-3.
    ;
%ifdef RT_ARCH_AMD64
 %ifdef RT_OS_WINDOWS
    mov     xDX, rcx
 %else
    mov     xDX, rdi
 %endif
%else
    mov     xDX, dword [esp + 4]
%endif
    test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
    jz      hlfpua_not_loaded
    jmp     hlfpua_to_host

    ;
    ; Take action.
    ;
align 16
hlfpua_not_loaded:
    mov     eax, [xDX + CPUMCPU.Guest.cr0]
    and     eax, X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
%ifdef RT_ARCH_AMD64
    lea     r8, [hlfpuajmp1 wrt rip]
    jmp     qword [rax*4 + r8]
%else
    jmp     dword [eax*2 + hlfpuajmp1]
%endif
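; The masked CR0 value in eax/rax is always even (MP is bit 1, EM bit 2, TS
; bit 3), i.e. 0, 2, ..., 14.  Scaling it by 4 (64-bit, 8-byte entries) or by
; 2 (32-bit, 4-byte entries) turns it into a byte offset selecting one of the
; eight RTCCPTR entries in hlfpuajmp1 below.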
align 16
;; jump table using fpu related cr0 flags as index.
hlfpuajmp1:
    RTCCPTR_DEF hlfpua_switch_fpu_ctx
    RTCCPTR_DEF hlfpua_switch_fpu_ctx
    RTCCPTR_DEF hlfpua_switch_fpu_ctx
    RTCCPTR_DEF hlfpua_switch_fpu_ctx
    RTCCPTR_DEF hlfpua_switch_fpu_ctx
    RTCCPTR_DEF hlfpua_to_host
    RTCCPTR_DEF hlfpua_switch_fpu_ctx
    RTCCPTR_DEF hlfpua_to_host
;; and mask for cr0.
hlfpu_afFlags:
    RTCCPTR_DEF ~(X86_CR0_TS | X86_CR0_MP)
    RTCCPTR_DEF ~(X86_CR0_TS)
    RTCCPTR_DEF ~(X86_CR0_TS | X86_CR0_MP)
    RTCCPTR_DEF ~(X86_CR0_TS)
    RTCCPTR_DEF ~(X86_CR0_MP)
    RTCCPTR_DEF 0
    RTCCPTR_DEF ~(X86_CR0_MP)
    RTCCPTR_DEF 0
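; hlfpu_afFlags is indexed the same way as hlfpuajmp1 and supplies the CR0
; bits to clear for each case, matching the "Clear TS/MP" column of the
; action table above (the zero entries belong to the go-to-host cases and
; are never used).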

    ;
    ; Action - switch FPU context and change cr0 flags.
    ;
align 16
hlfpua_switch_fpu_ctx:
    ; Paranoia. This function was previously used in ring-0, but no longer is.
%ifdef IN_RING3
 %error "This function is not written for ring-3"
%endif
%ifdef IN_RING0
 %error "This function is not written for ring-0"
%endif

    mov     xCX, cr0
%ifdef RT_ARCH_AMD64
    lea     r8, [hlfpu_afFlags wrt rip]
    and     rcx, [rax*4 + r8]           ; calc the new cr0 flags.
%else
    and     ecx, [eax*2 + hlfpu_afFlags] ; calc the new cr0 flags.
%endif
    mov     xAX, cr0
    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, xAX                    ; clear flags so we don't trap here.
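    ; With CR0.TS set, the fxsave/fxrstor below would raise #NM, and with
    ; CR0.EM set they would raise #UD, so both must be clear while we
    ; save/restore the FPU state here.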
%ifndef RT_ARCH_AMD64
    mov     eax, edx                    ; Calculate the PCPUM pointer
    sub     eax, [edx + CPUMCPU.offCPUM]
    test    dword [eax + CPUM.CPUFeatures.edx], X86_CPUID_FEATURE_EDX_FXSR
    jz      short hlfpua_no_fxsave
%endif

%ifdef RT_ARCH_AMD64
    ; Use explicit REX prefix. See @bugref{6398}.
    o64 fxsave  [xDX + CPUMCPU.Host.fpu]
%else
    fxsave  [xDX + CPUMCPU.Host.fpu]
%endif
    or      dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)
    fxrstor [xDX + CPUMCPU.Guest.fpu]   ; raw-mode guest is always 32-bit. See @bugref{7138}.

hlfpua_finished_switch:

    ; Load new CR0 value.
    ;; @todo Optimize the many unconditional CR0 writes.
    mov     cr0, xCX                    ; load the new cr0 flags.

    ; Return: caller should continue execution.
    xor     eax, eax
    ret

%ifndef RT_ARCH_AMD64
; legacy support.
hlfpua_no_fxsave:
    fnsave  [xDX + CPUMCPU.Host.fpu]
    or      dword [xDX + CPUMCPU.fUseFlags], dword (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM) ; yasm / nasm
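    ; The next few instructions check the FNSAVE image for unmasked pending
    ; exceptions: the low 6 bits of the control word are the exception mask
    ; bits (1 = masked), so inverting them and testing against the status
    ; word's exception flags yields non-zero iff an unmasked exception is
    ; pending, which would raise #MF once the state is back in use.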
    mov     eax, [xDX + CPUMCPU.Guest.fpu]      ; control word
    not     eax                                 ; 1 means exception ignored (6 LS bits)
    and     eax, byte 03Fh                      ; 6 LS bits only
    test    eax, [xDX + CPUMCPU.Guest.fpu + 4]  ; status word
    jz      short hlfpua_no_exceptions_pending
    ; technically incorrect, but we certainly don't want any exceptions now!!
    and     dword [xDX + CPUMCPU.Guest.fpu + 4], ~03Fh
hlfpua_no_exceptions_pending:
    frstor  [xDX + CPUMCPU.Guest.fpu]
    jmp     near hlfpua_finished_switch
%endif ; !RT_ARCH_AMD64


    ;
    ; Action - Generate Guest trap.
    ;
hlfpua_action_4:
hlfpua_to_host:
    mov     eax, VINF_EM_RAW_GUEST_TRAP
    ret
ENDPROC cpumHandleLazyFPUAsm


;;
; Calls a guest trap/interrupt handler directly.
; Assumes a trap stack frame has already been set up on the guest's stack!
;
; @param    pRegFrame   [esp + 4]   Original trap/interrupt context
; @param    selCS       [esp + 8]   Code selector of handler
; @param    pHandler    [esp + 12]  GC virtual address of handler
; @param    eflags      [esp + 16]  Callee's EFLAGS
; @param    selSS       [esp + 20]  Stack selector for handler
; @param    pEsp        [esp + 24]  Stack address for handler
;
; @remark This call never returns!
;
; VMMRCDECL(void) CPUMGCCallGuestTrapHandler(PCPUMCTXCORE pRegFrame, uint32_t selCS, RTGCPTR pHandler, uint32_t eflags, uint32_t selSS, RTGCPTR pEsp);
align 16
BEGINPROC_EXPORTED CPUMGCCallGuestTrapHandler
    mov     ebp, esp

    ; construct iret stack frame
    push    dword [ebp + 20]            ; SS
    push    dword [ebp + 24]            ; ESP
    push    dword [ebp + 16]            ; EFLAGS
    push    dword [ebp + 8]             ; CS
    push    dword [ebp + 12]            ; EIP
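    ; The frame now matches what a 32-bit inter-privilege iret expects, from
    ; the top of the stack down: EIP, CS, EFLAGS, ESP, SS.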

    ;
    ; enable WP
    ;
%ifdef ENABLE_WRITE_PROTECTION
    mov     eax, cr0
    or      eax, X86_CR0_WRITE_PROTECT
    mov     cr0, eax
%endif

    ; restore CPU context (all except cs, eip, ss, esp & eflags; which are restored or overwritten by iret)
    mov     ebp, [ebp + 4]              ; pRegFrame
    mov     ebx, [ebp + CPUMCTXCORE.ebx]
    mov     ecx, [ebp + CPUMCTXCORE.ecx]
    mov     edx, [ebp + CPUMCTXCORE.edx]
    mov     esi, [ebp + CPUMCTXCORE.esi]
    mov     edi, [ebp + CPUMCTXCORE.edi]

    ;; @todo load segment registers *before* enabling WP.
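    ; Each TRPM_NP_GP_HANDLER below arranges for a #NP or #GP raised by the
    ; following selector load to be handled by cpumRCHandleNPAndGP instead of
    ; being treated as an unexpected hypervisor fault.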
    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_GS | CPUM_HANDLER_CTXCORE_IN_EBP
    mov     gs, [ebp + CPUMCTXCORE.gs.Sel]
    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_FS | CPUM_HANDLER_CTXCORE_IN_EBP
    mov     fs, [ebp + CPUMCTXCORE.fs.Sel]
    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_ES | CPUM_HANDLER_CTXCORE_IN_EBP
    mov     es, [ebp + CPUMCTXCORE.es.Sel]
    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_DS | CPUM_HANDLER_CTXCORE_IN_EBP
    mov     ds, [ebp + CPUMCTXCORE.ds.Sel]

    mov     eax, [ebp + CPUMCTXCORE.eax]
    mov     ebp, [ebp + CPUMCTXCORE.ebp]

    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
    iret
ENDPROC CPUMGCCallGuestTrapHandler


;;
; Performs an iret to V86 code.
; Assumes a trap stack frame has already been set up on the guest's stack!
;
; @param    pRegFrame   Original trap/interrupt context
;
; This function does not return!
;
;VMMRCDECL(void) CPUMGCCallV86Code(PCPUMCTXCORE pRegFrame);
align 16
BEGINPROC CPUMGCCallV86Code
    mov     ebp, [esp + 4]              ; pRegFrame

    ; construct iret stack frame
    push    dword [ebp + CPUMCTXCORE.gs.Sel]
    push    dword [ebp + CPUMCTXCORE.fs.Sel]
    push    dword [ebp + CPUMCTXCORE.ds.Sel]
    push    dword [ebp + CPUMCTXCORE.es.Sel]
    push    dword [ebp + CPUMCTXCORE.ss.Sel]
    push    dword [ebp + CPUMCTXCORE.esp]
    push    dword [ebp + CPUMCTXCORE.eflags]
    push    dword [ebp + CPUMCTXCORE.cs.Sel]
    push    dword [ebp + CPUMCTXCORE.eip]
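    ; The EFLAGS image pushed here is expected to have VM set; an iret to V86
    ; mode pops EIP, CS, EFLAGS, ESP, SS and then ES, DS, FS, GS, which is
    ; why those selectors are pushed first (deepest on the stack).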

    ;
    ; enable WP
    ;
%ifdef ENABLE_WRITE_PROTECTION
    mov     eax, cr0
    or      eax, X86_CR0_WRITE_PROTECT
    mov     cr0, eax
%endif

    ; restore CPU context (all except cs, eip, ss, esp, eflags, ds, es, fs & gs; which are restored or overwritten by iret)
    mov     eax, [ebp + CPUMCTXCORE.eax]
    mov     ebx, [ebp + CPUMCTXCORE.ebx]
    mov     ecx, [ebp + CPUMCTXCORE.ecx]
    mov     edx, [ebp + CPUMCTXCORE.edx]
    mov     esi, [ebp + CPUMCTXCORE.esi]
    mov     edi, [ebp + CPUMCTXCORE.edi]
    mov     ebp, [ebp + CPUMCTXCORE.ebp]

    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
    iret
ENDPROC CPUMGCCallV86Code


;;
; This is a main entry point for resuming (or starting) guest
; code execution.
;
; We get here directly from VMMSwitcher.asm (jmp at the end
; of VMMSwitcher_HostToGuest).
;
; This call never returns!
;
; @param    edx     Pointer to CPUM structure.
;
align 16
BEGINPROC_EXPORTED CPUMGCResumeGuest
%ifdef VBOX_STRICT
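    ; edx (the CPUM pointer) is saved on the stack, g_VM is pushed as the
    ; argument for CPUMRCAssertPreExecutionSanity, the argument is dropped
    ; again with the add below, and the original edx is then restored.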
    ; Call CPUM to check sanity.
    push    edx
    mov     edx, IMP(g_VM)
    push    edx
    call    NAME(CPUMRCAssertPreExecutionSanity)
    add     esp, 4
    pop     edx
%endif

    ; Convert to CPUMCPU pointer
    add     edx, [edx + CPUM.offCPUMCPU0]
    ;
    ; Setup iretd
    ;
    push    dword [edx + CPUMCPU.Guest.ss.Sel]
    push    dword [edx + CPUMCPU.Guest.esp]
    push    dword [edx + CPUMCPU.Guest.eflags]
    push    dword [edx + CPUMCPU.Guest.cs.Sel]
    push    dword [edx + CPUMCPU.Guest.eip]

    ;
    ; Restore registers.
    ;
    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_ES
    mov     es, [edx + CPUMCPU.Guest.es.Sel]
    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_FS
    mov     fs, [edx + CPUMCPU.Guest.fs.Sel]
    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_GS
    mov     gs, [edx + CPUMCPU.Guest.gs.Sel]

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Statistics.
    ;
    push    edx
    mov     edx, IMP(g_VM)
    lea     edx, [edx + VM.StatTotalQemuToGC]
    STAM_PROFILE_ADV_STOP edx

    mov     edx, IMP(g_VM)
    lea     edx, [edx + VM.StatTotalInGC]
    STAM_PROFILE_ADV_START edx
    pop     edx
%endif

    ;
    ; enable WP
    ;
%ifdef ENABLE_WRITE_PROTECTION
    mov     eax, cr0
    or      eax, X86_CR0_WRITE_PROTECT
    mov     cr0, eax
%endif

    ;
    ; Continue restore.
    ;
    mov     esi, [edx + CPUMCPU.Guest.esi]
    mov     edi, [edx + CPUMCPU.Guest.edi]
    mov     ebp, [edx + CPUMCPU.Guest.ebp]
    mov     ebx, [edx + CPUMCPU.Guest.ebx]
    mov     ecx, [edx + CPUMCPU.Guest.ecx]
    mov     eax, [edx + CPUMCPU.Guest.eax]
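    ; edx still holds the CPUMCPU pointer, so the guest DS selector is staged
    ; on the stack before edx itself is overwritten with the guest value; it
    ; is then popped into ds under the #NP/#GP handler guard.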
    push    dword [edx + CPUMCPU.Guest.ds.Sel]
    mov     edx, [edx + CPUMCPU.Guest.edx]
    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_DS
    pop     ds

    ; restart execution.
    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
    iretd
ENDPROC CPUMGCResumeGuest



;;
; This is a main entry point for resuming (or starting) guest
; code execution for raw V86 mode
;
; We get here directly from VMMSwitcher.asm (jmp at the end
; of VMMSwitcher_HostToGuest).
;
; This call never returns!
;
; @param    edx     Pointer to CPUM structure.
;
align 16
BEGINPROC_EXPORTED CPUMGCResumeGuestV86
%ifdef VBOX_STRICT
    ; Call CPUM to check sanity.
    push    edx
    mov     edx, IMP(g_VM)
    push    edx
    call    NAME(CPUMRCAssertPreExecutionSanity)
    add     esp, 4
    pop     edx
%endif

    ; Convert to CPUMCPU pointer
    add     edx, [edx + CPUM.offCPUMCPU0]
    ;
    ; Setup iretd
    ;
    push    dword [edx + CPUMCPU.Guest.gs.Sel]
    push    dword [edx + CPUMCPU.Guest.fs.Sel]
    push    dword [edx + CPUMCPU.Guest.ds.Sel]
    push    dword [edx + CPUMCPU.Guest.es.Sel]

    push    dword [edx + CPUMCPU.Guest.ss.Sel]
    push    dword [edx + CPUMCPU.Guest.esp]

    push    dword [edx + CPUMCPU.Guest.eflags]
    push    dword [edx + CPUMCPU.Guest.cs.Sel]
    push    dword [edx + CPUMCPU.Guest.eip]
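    ; Same V86 iretd frame layout as in CPUMGCCallV86Code above: GS, FS, DS
    ; and ES lie deepest on the stack and are popped by iretd because the
    ; guest EFLAGS image has VM set.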

    ;
    ; Restore registers.
    ;

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Statistics.
    ;
    push    edx
    mov     edx, IMP(g_VM)
    lea     edx, [edx + VM.StatTotalQemuToGC]
    STAM_PROFILE_ADV_STOP edx

    mov     edx, IMP(g_VM)
    lea     edx, [edx + VM.StatTotalInGC]
    STAM_PROFILE_ADV_START edx
    pop     edx
%endif

    ;
    ; enable WP
    ;
%ifdef ENABLE_WRITE_PROTECTION
    mov     eax, cr0
    or      eax, X86_CR0_WRITE_PROTECT
    mov     cr0, eax
%endif

    ;
    ; Continue restore.
    ;
    mov     esi, [edx + CPUMCPU.Guest.esi]
    mov     edi, [edx + CPUMCPU.Guest.edi]
    mov     ebp, [edx + CPUMCPU.Guest.ebp]
    mov     ecx, [edx + CPUMCPU.Guest.ecx]
    mov     ebx, [edx + CPUMCPU.Guest.ebx]
    mov     eax, [edx + CPUMCPU.Guest.eax]
    mov     edx, [edx + CPUMCPU.Guest.edx]

    ; restart execution.
    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
    iretd
ENDPROC CPUMGCResumeGuestV86
