VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMRC/CPUMRCA.asm@ 55027

Last change on this file since 55027 was 55027, checked in by vboxsync, 10 years ago

CPUMRCA.asm: indent, remove unused+duplicated CLEANFPU macro.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 15.9 KB
Line 
1; $Id: CPUMRCA.asm 55027 2015-03-31 12:14:36Z vboxsync $
2;; @file
3; CPUM - Raw-mode Context Assembly Routines.
4;
5
6;
7; Copyright (C) 2006-2015 Oracle Corporation
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17
18;*******************************************************************************
19;* Header Files *
20;*******************************************************************************
21%include "VMMRC.mac"
22%include "VBox/vmm/vm.mac"
23%include "VBox/err.mac"
24%include "VBox/vmm/stam.mac"
25%include "CPUMInternal.mac"
26%include "iprt/x86.mac"
27%include "VBox/vmm/cpum.mac"
28
29
30;*******************************************************************************
31;* External Symbols *
32;*******************************************************************************
33extern IMPNAME(g_CPUM) ; VMM GC Builtin import
34extern IMPNAME(g_VM) ; VMM GC Builtin import
35extern NAME(cpumRCHandleNPAndGP) ; CPUMGC.cpp
36extern NAME(CPUMRCAssertPreExecutionSanity)
37
38
39;
40; Enables write protection of Hypervisor memory pages.
41; !note! Must be commented out for Trap8 debug handler.
42;
43%define ENABLE_WRITE_PROTECTION 1
44
45BEGINCODE
46
47
48;;
49; Handles lazy FPU saving and restoring.
50;
51; This handler will implement lazy fpu (sse/mmx/stuff) saving.
52; Two actions may be taken in this handler since the Guest OS may
53; be doing lazy fpu switching. So, we'll have to generate those
54; traps which the Guest CPU CTX shall have according to
55; its CR0 flags. If no traps for the Guest OS, we'll save the host
56; context and restore the guest context.
57;
58; @returns 0 if caller should continue execution.
59; @returns VINF_EM_RAW_GUEST_TRAP if a guest trap should be generated.
60; @param pCPUMCPU x86:[esp+4] gcc:rdi msc:rcx CPUMCPU pointer
61;
62align 16
63BEGINPROC cpumHandleLazyFPUAsm
64 ;
65 ; Figure out what to do.
66 ;
67 ; There are two basic actions:
68 ; 1. Save host fpu and restore guest fpu.
69 ; 2. Generate guest trap.
70 ;
71 ; When entering the hypervisor we'll always enable MP (for proper wait
72 ; trapping) and TS (for intercepting all fpu/mmx/sse stuff). The EM flag
73 ; is taken from the guest OS in order to get proper SSE handling.
74 ;
75 ;
76 ; Actions taken depending on the guest CR0 flags:
77 ;
78 ; 3 2 1
79 ; TS | EM | MP | FPUInstr | WAIT :: VMM Action
80 ; ------------------------------------------------------------------------
81 ; 0 | 0 | 0 | Exec | Exec :: Clear TS & MP, Save HC, Load GC.
82 ; 0 | 0 | 1 | Exec | Exec :: Clear TS, Save HC, Load GC.
83 ; 0 | 1 | 0 | #NM | Exec :: Clear TS & MP, Save HC, Load GC;
84 ; 0 | 1 | 1 | #NM | Exec :: Clear TS, Save HC, Load GC.
85 ; 1 | 0 | 0 | #NM | Exec :: Clear MP, Save HC, Load GC. (EM is already cleared.)
86 ; 1 | 0 | 1 | #NM | #NM :: Go to host taking trap there.
87 ; 1 | 1 | 0 | #NM | Exec :: Clear MP, Save HC, Load GC. (EM is already set.)
88 ; 1 | 1 | 1 | #NM | #NM :: Go to host taking trap there.
89
90 ;
91 ; Before taking any of these actions we're checking if we have already
92 ; loaded the GC FPU. Because if we have, this is a trap for the guest - raw ring-3.
93 ;
94%ifdef RT_ARCH_AMD64
95 %ifdef RT_OS_WINDOWS
96 mov xDX, rcx ; pCPUMCPU (MSC convention: 1st arg in rcx).
97 %else
98 mov xDX, rdi ; pCPUMCPU (SysV/GCC convention: 1st arg in rdi).
99 %endif
100%else
101 mov xDX, dword [esp + 4] ; pCPUMCPU (x86: 1st arg on the stack).
102%endif
103 test dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
104 jz hlfpua_not_loaded ; guest FPU state not loaded yet -> maybe switch contexts.
105 jmp hlfpua_to_host ; already loaded -> this #NM belongs to the guest.
106
107 ;
108 ; Take action.
109 ;
110align 16
111hlfpua_not_loaded:
112 mov eax, [xDX + CPUMCPU.Guest.cr0]
113 and eax, X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
; eax now holds CR0 bits 1..3 (MP=2, EM=4, TS=8), i.e. it is always even and
; equal to 2 * table-index.  Hence the *4 scale below addresses the 8-byte
; (RTCCPTR_DEF) entries on AMD64, and *2 the 4-byte entries on x86.
114%ifdef RT_ARCH_AMD64
115 lea r8, [hlfpuajmp1 wrt rip]
116 jmp qword [rax*4 + r8]
117%else
118 jmp dword [eax*2 + hlfpuajmp1]
119%endif
120align 16
121;; jump table using fpu related cr0 flags as index.
; Index bits: bit0=MP, bit1=EM, bit2=TS (matching the action table above).
122hlfpuajmp1:
123 RTCCPTR_DEF hlfpua_switch_fpu_ctx ; 0: MP=0 EM=0 TS=0
124 RTCCPTR_DEF hlfpua_switch_fpu_ctx ; 1: MP=1 EM=0 TS=0
125 RTCCPTR_DEF hlfpua_switch_fpu_ctx ; 2: MP=0 EM=1 TS=0
126 RTCCPTR_DEF hlfpua_switch_fpu_ctx ; 3: MP=1 EM=1 TS=0
127 RTCCPTR_DEF hlfpua_switch_fpu_ctx ; 4: MP=0 EM=0 TS=1
128 RTCCPTR_DEF hlfpua_to_host ; 5: MP=1 EM=0 TS=1 -> guest traps on WAIT too.
129 RTCCPTR_DEF hlfpua_switch_fpu_ctx ; 6: MP=0 EM=1 TS=1
130 RTCCPTR_DEF hlfpua_to_host ; 7: MP=1 EM=1 TS=1 -> guest traps on WAIT too.
131;; and mask for cr0.
; Indexed the same way; the mask is ANDed into the new CR0 value so the
; guest runs with the flags the action table prescribes.
132hlfpu_afFlags:
133 RTCCPTR_DEF ~(X86_CR0_TS | X86_CR0_MP) ; 0: Clear TS & MP.
134 RTCCPTR_DEF ~(X86_CR0_TS) ; 1: Clear TS.
135 RTCCPTR_DEF ~(X86_CR0_TS | X86_CR0_MP) ; 2: Clear TS & MP.
136 RTCCPTR_DEF ~(X86_CR0_TS) ; 3: Clear TS.
137 RTCCPTR_DEF ~(X86_CR0_MP) ; 4: Clear MP.
138 RTCCPTR_DEF 0 ; 5: unused (to-host case).
139 RTCCPTR_DEF ~(X86_CR0_MP) ; 6: Clear MP.
140 RTCCPTR_DEF 0 ; 7: unused (to-host case).
141
142 ;
143 ; Action - switch FPU context and change cr0 flags.
144 ;
145align 16
146hlfpua_switch_fpu_ctx:
147 ; Paranoia. This function was previously used in ring-0, not any longer.
148%ifdef IN_RING3
149%error "This function is not written for ring-3"
150%endif
151%ifdef IN_RING0
152%error "This function is not written for ring-0"
153%endif
154
155 mov xCX, cr0 ; xCX = future CR0 value (modified via mask below).
156%ifdef RT_ARCH_AMD64
157 lea r8, [hlfpu_afFlags wrt rip]
158 and rcx, [rax*4 + r8] ; calc the new cr0 flags.
159%else
160 and ecx, [eax*2 + hlfpu_afFlags] ; calc the new cr0 flags.
161%endif
162 mov xAX, cr0
163 and xAX, ~(X86_CR0_TS | X86_CR0_EM)
164 mov cr0, xAX ; clear flags so we don't trap here.
165%ifndef RT_ARCH_AMD64
166 mov eax, edx ; Calculate the PCPUM pointer
167 sub eax, [edx + CPUMCPU.offCPUM]
168 test dword [eax + CPUM.CPUFeatures.edx], X86_CPUID_FEATURE_EDX_FXSR
169 jz short hlfpua_no_fxsave ; no FXSR -> legacy fnsave/frstor path below.
170%endif
171
172%ifdef RT_ARCH_AMD64
173 ; Use explicit REX prefix. See @bugref{6398}.
174 o64 fxsave [xDX + CPUMCPU.Host.XState]
175%else
176 fxsave [xDX + CPUMCPU.Host.XState]
177%endif
178 or dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)
179 fxrstor [xDX + CPUMCPU.Guest.XState] ; raw-mode guest is always 32-bit. See @bugref{7138}.
180
181hlfpua_finished_switch:
182
183 ; Load new CR0 value.
184 ;; @todo Optimize the many unconditional CR0 writes.
185 mov cr0, xCX ; load the new cr0 flags.
186
187 ; return continue execution.
188 xor eax, eax
189 ret
190
191%ifndef RT_ARCH_AMD64
192; legacy support.
193hlfpua_no_fxsave:
194 fnsave [xDX + CPUMCPU.Host.XState]
195 or dword [xDX + CPUMCPU.fUseFlags], dword (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM) ; yasm / nasm
; Mask pending guest x87 exceptions before frstor, otherwise loading the
; status word would raise them on the next FPU instruction here.
196 mov eax, [xDX + CPUMCPU.Guest.XState] ; control word
197 not eax ; 1 means exception ignored (6 LS bits)
198 and eax, byte 03Fh ; 6 LS bits only
199 test eax, [xDX + CPUMCPU.Guest.XState + 4] ; status word
200 jz short hlfpua_no_exceptions_pending
201 ; technically incorrect, but we certainly don't want any exceptions now!!
202 and dword [xDX + CPUMCPU.Guest.XState + 4], ~03Fh
203hlfpua_no_exceptions_pending:
204 frstor [xDX + CPUMCPU.Guest.XState]
205 jmp near hlfpua_finished_switch
206%endif ; !RT_ARCH_AMD64
207
208
209 ;
210 ; Action - Generate Guest trap.
211 ;
212hlfpua_action_4:
213hlfpua_to_host:
214 mov eax, VINF_EM_RAW_GUEST_TRAP ; tell the caller to reflect the trap to the guest.
215 ret
216ENDPROC cpumHandleLazyFPUAsm
217
218
219;;
220; Calls a guest trap/interrupt handler directly
221; Assumes a trap stack frame has already been setup on the guest's stack!
222;
223; @param pRegFrame [esp + 4] Original trap/interrupt context
224; @param selCS [esp + 8] Code selector of handler
225; @param pHandler [esp + 12] GC virtual address of handler
226; @param eflags [esp + 16] Callee's EFLAGS
227; @param selSS [esp + 20] Stack selector for handler
228; @param pEsp [esp + 24] Stack address for handler
229;
230; @remark This call never returns!
231;
232; VMMRCDECL(void) CPUMGCCallGuestTrapHandler(PCPUMCTXCORE pRegFrame, uint32_t selCS, RTGCPTR pHandler, uint32_t eflags, uint32_t selSS, RTGCPTR pEsp);
233align 16
234BEGINPROC_EXPORTED CPUMGCCallGuestTrapHandler
235 mov ebp, esp ; no frame push, so [ebp + 4] is the 1st argument.
236
237 ; construct iret stack frame
238 push dword [ebp + 20] ; SS
239 push dword [ebp + 24] ; ESP
240 push dword [ebp + 16] ; EFLAGS
241 push dword [ebp + 8] ; CS
242 push dword [ebp + 12] ; EIP
243
244 ;
245 ; enable WP
246 ;
247%ifdef ENABLE_WRITE_PROTECTION
248 mov eax, cr0
249 or eax, X86_CR0_WRITE_PROTECT
250 mov cr0, eax
251%endif
252
253 ; restore CPU context (all except cs, eip, ss, esp & eflags; which are restored or overwritten by iret)
254 mov ebp, [ebp + 4] ; pRegFrame
255 mov ebx, [ebp + CPUMCTXCORE.ebx]
256 mov ecx, [ebp + CPUMCTXCORE.ecx]
257 mov edx, [ebp + CPUMCTXCORE.edx]
258 mov esi, [ebp + CPUMCTXCORE.esi]
259 mov edi, [ebp + CPUMCTXCORE.edi]
260
261 ;; @todo load segment registers *before* enabling WP.
; NOTE(review): TRPM_NP_GP_HANDLER presumably arms a #NP/#GP recovery handler
; for the instruction that follows (bad guest selectors must not kill the
; hypervisor) - confirm against the macro definition in TRPM.
262 TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_GS | CPUM_HANDLER_CTXCORE_IN_EBP
263 mov gs, [ebp + CPUMCTXCORE.gs.Sel]
264 TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_FS | CPUM_HANDLER_CTXCORE_IN_EBP
265 mov fs, [ebp + CPUMCTXCORE.fs.Sel]
266 TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_ES | CPUM_HANDLER_CTXCORE_IN_EBP
267 mov es, [ebp + CPUMCTXCORE.es.Sel]
268 TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_DS | CPUM_HANDLER_CTXCORE_IN_EBP
269 mov ds, [ebp + CPUMCTXCORE.ds.Sel]
270
; eax and ebp are restored last: ebp is the frame pointer used above and
; eax was scratch for the CR0 update.
271 mov eax, [ebp + CPUMCTXCORE.eax]
272 mov ebp, [ebp + CPUMCTXCORE.ebp]
273
274 TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
275 iret ; pops EIP, CS, EFLAGS, ESP, SS pushed above; never returns here.
276ENDPROC CPUMGCCallGuestTrapHandler
277
278
279;;
280; Performs an iret to V86 code
281; Assumes a trap stack frame has already been setup on the guest's stack!
282;
283; @param pRegFrame Original trap/interrupt context
284;
285; This function does not return!
286;
287;VMMRCDECL(void) CPUMGCCallV86Code(PCPUMCTXCORE pRegFrame);
288align 16
289BEGINPROC CPUMGCCallV86Code
290 mov ebp, [esp + 4] ; pRegFrame
291
292 ; construct iret stack frame
; An iret to virtual-8086 mode pops GS, FS, DS, ES, SS, ESP in addition to
; the EFLAGS/CS/EIP triplet, so push the full 9-dword frame here.
; NOTE(review): assumes pRegFrame->eflags has X86_EFL_VM set - established
; by the caller, not checked here.
293 push dword [ebp + CPUMCTXCORE.gs.Sel]
294 push dword [ebp + CPUMCTXCORE.fs.Sel]
295 push dword [ebp + CPUMCTXCORE.ds.Sel]
296 push dword [ebp + CPUMCTXCORE.es.Sel]
297 push dword [ebp + CPUMCTXCORE.ss.Sel]
298 push dword [ebp + CPUMCTXCORE.esp]
299 push dword [ebp + CPUMCTXCORE.eflags]
300 push dword [ebp + CPUMCTXCORE.cs.Sel]
301 push dword [ebp + CPUMCTXCORE.eip]
302
303 ;
304 ; enable WP
305 ;
306%ifdef ENABLE_WRITE_PROTECTION
307 mov eax, cr0
308 or eax, X86_CR0_WRITE_PROTECT
309 mov cr0, eax
310%endif
311
312 ; restore CPU context (all except cs, eip, ss, esp, eflags, ds, es, fs & gs; which are restored or overwritten by iret)
313 mov eax, [ebp + CPUMCTXCORE.eax]
314 mov ebx, [ebp + CPUMCTXCORE.ebx]
315 mov ecx, [ebp + CPUMCTXCORE.ecx]
316 mov edx, [ebp + CPUMCTXCORE.edx]
317 mov esi, [ebp + CPUMCTXCORE.esi]
318 mov edi, [ebp + CPUMCTXCORE.edi]
319 mov ebp, [ebp + CPUMCTXCORE.ebp] ; ebp last - it was the frame pointer above.
320
321 TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
322 iret ; consumes the 9-dword V86 frame; never returns here.
323ENDPROC CPUMGCCallV86Code
324
325
326;;
327; This is a main entry point for resuming (or starting) guest
328; code execution.
329;
330; We get here directly from VMMSwitcher.asm (jmp at the end
331; of VMMSwitcher_HostToGuest).
332;
333; This call never returns!
334;
335; @param edx Pointer to CPUMCPU structure.
336;
337align 16
338BEGINPROC_EXPORTED CPUMGCResumeGuest
339%ifdef VBOX_STRICT
340 ; Call CPUM to check sanity.
341 push edx ; preserve the CPUMCPU pointer across the call.
342 mov edx, IMP(g_VM)
343 push edx ; pVM argument.
344 call NAME(CPUMRCAssertPreExecutionSanity)
345 add esp, 4
346 pop edx
347%endif
348
349 ;
350 ; Setup iretd
351 ;
352 push dword [edx + CPUMCPU.Guest.ss.Sel]
353 push dword [edx + CPUMCPU.Guest.esp]
354 push dword [edx + CPUMCPU.Guest.eflags]
355 push dword [edx + CPUMCPU.Guest.cs.Sel]
356 push dword [edx + CPUMCPU.Guest.eip]
357
358 ;
359 ; Restore registers.
360 ;
; Each guest selector load is guarded so a bad selector raises a #NP/#GP
; that cpumRCHandleNPAndGP deals with instead of crashing the hypervisor.
361 TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_ES
362 mov es, [edx + CPUMCPU.Guest.es.Sel]
363 TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_FS
364 mov fs, [edx + CPUMCPU.Guest.fs.Sel]
365 TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_GS
366 mov gs, [edx + CPUMCPU.Guest.gs.Sel]
367
368%ifdef VBOX_WITH_STATISTICS
369 ;
370 ; Statistics.
371 ;
372 push edx ; preserve the CPUMCPU pointer across the stat updates.
373 mov edx, IMP(g_VM)
374 lea edx, [edx + VM.StatTotalQemuToGC]
375 STAM_PROFILE_ADV_STOP edx
376
377 mov edx, IMP(g_VM)
378 lea edx, [edx + VM.StatTotalInGC]
379 STAM_PROFILE_ADV_START edx
380 pop edx
381%endif
382
383 ;
384 ; enable WP
385 ;
386%ifdef ENABLE_WRITE_PROTECTION
387 mov eax, cr0
388 or eax, X86_CR0_WRITE_PROTECT
389 mov cr0, eax
390%endif
391
392 ;
393 ; Continue restore.
394 ;
395 mov esi, [edx + CPUMCPU.Guest.esi]
396 mov edi, [edx + CPUMCPU.Guest.edi]
397 mov ebp, [edx + CPUMCPU.Guest.ebp]
398 mov ebx, [edx + CPUMCPU.Guest.ebx]
399 mov ecx, [edx + CPUMCPU.Guest.ecx]
400 mov eax, [edx + CPUMCPU.Guest.eax]
401 push dword [edx + CPUMCPU.Guest.ds.Sel] ; stash guest ds before edx is clobbered...
402 mov edx, [edx + CPUMCPU.Guest.edx] ; ...by the guest edx value...
403 TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_DS
404 pop ds ; ...then load ds last (guarded).
405
406 ; restart execution.
407 TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
408 iretd ; pops EIP, CS, EFLAGS, ESP, SS pushed above; never returns here.
409ENDPROC CPUMGCResumeGuest
410
411
412;;
413; This is a main entry point for resuming (or starting) guest
414; code execution for raw V86 mode
415;
416; We get here directly from VMMSwitcher.asm (jmp at the end
417; of VMMSwitcher_HostToGuest).
418;
419; This call never returns!
420;
421; @param edx Pointer to CPUMCPU structure.
422;
423align 16
424BEGINPROC_EXPORTED CPUMGCResumeGuestV86
425%ifdef VBOX_STRICT
426 ; Call CPUM to check sanity.
427 push edx ; preserve the CPUMCPU pointer across the call.
428 mov edx, IMP(g_VM)
429 push edx ; pVM argument.
430 call NAME(CPUMRCAssertPreExecutionSanity)
431 add esp, 4
432 pop edx
433%endif
434
435 ;
436 ; Setup iretd
437 ;
; An iretd to virtual-8086 mode pops GS, FS, DS, ES, SS, ESP in addition to
; the EFLAGS/CS/EIP triplet, so the full 9-dword frame is built here and no
; explicit segment register loads are needed below.
438 push dword [edx + CPUMCPU.Guest.gs.Sel]
439 push dword [edx + CPUMCPU.Guest.fs.Sel]
440 push dword [edx + CPUMCPU.Guest.ds.Sel]
441 push dword [edx + CPUMCPU.Guest.es.Sel]
442
443 push dword [edx + CPUMCPU.Guest.ss.Sel]
444 push dword [edx + CPUMCPU.Guest.esp]
445
446 push dword [edx + CPUMCPU.Guest.eflags]
447 push dword [edx + CPUMCPU.Guest.cs.Sel]
448 push dword [edx + CPUMCPU.Guest.eip]
449
450 ;
451 ; Restore registers.
452 ;
453
454%ifdef VBOX_WITH_STATISTICS
455 ;
456 ; Statistics.
457 ;
458 push edx ; preserve the CPUMCPU pointer across the stat updates.
459 mov edx, IMP(g_VM)
460 lea edx, [edx + VM.StatTotalQemuToGC]
461 STAM_PROFILE_ADV_STOP edx
462
463 mov edx, IMP(g_VM)
464 lea edx, [edx + VM.StatTotalInGC]
465 STAM_PROFILE_ADV_START edx
466 pop edx
467%endif
468
469 ;
470 ; enable WP
471 ;
472%ifdef ENABLE_WRITE_PROTECTION
473 mov eax, cr0
474 or eax, X86_CR0_WRITE_PROTECT
475 mov cr0, eax
476%endif
477
478 ;
479 ; Continue restore.
480 ;
481 mov esi, [edx + CPUMCPU.Guest.esi]
482 mov edi, [edx + CPUMCPU.Guest.edi]
483 mov ebp, [edx + CPUMCPU.Guest.ebp]
484 mov ecx, [edx + CPUMCPU.Guest.ecx]
485 mov ebx, [edx + CPUMCPU.Guest.ebx]
486 mov eax, [edx + CPUMCPU.Guest.eax]
487 mov edx, [edx + CPUMCPU.Guest.edx] ; edx last - it held the CPUMCPU pointer.
488
489 ; restart execution.
490 TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
491 iretd ; consumes the 9-dword V86 frame; never returns here.
492ENDPROC CPUMGCResumeGuestV86
493
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette