VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMRC/CPUMRCA.asm@ 60189

Last change on this file since 60189 was 55106, checked in by vboxsync, 10 years ago

VMM: host+guest xsave/xrstor state handling - not enabled.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 15.0 KB
Line 
1; $Id: CPUMRCA.asm 55106 2015-04-06 19:58:37Z vboxsync $
2;; @file
3; CPUM - Raw-mode Context Assembly Routines.
4;
5
6;
7; Copyright (C) 2006-2015 Oracle Corporation
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17
18;*******************************************************************************
19;* Header Files *
20;*******************************************************************************
21%include "VMMRC.mac"
22%include "VBox/vmm/vm.mac"
23%include "VBox/err.mac"
24%include "VBox/vmm/stam.mac"
25%include "CPUMInternal.mac"
26%include "iprt/x86.mac"
27%include "VBox/vmm/cpum.mac"
28
29
30;*******************************************************************************
31;* External Symbols *
32;*******************************************************************************
33extern IMPNAME(g_CPUM) ; VMM GC Builtin import
34extern IMPNAME(g_VM) ; VMM GC Builtin import
35extern NAME(cpumRCHandleNPAndGP) ; CPUMGC.cpp
36extern NAME(CPUMRCAssertPreExecutionSanity)
37
38
39;
40; Enables write protection of Hypervisor memory pages.
41; !note! Must be commented out for Trap8 debug handler.
42;
43%define ENABLE_WRITE_PROTECTION 1
44
45BEGINCODE
46
47
;;
; Handles lazy FPU saving and restoring.
;
; This handler will implement lazy fpu (sse/mmx/stuff) saving.
; Two actions may be taken in this handler since the Guest OS may
; be doing lazy fpu switching. So, we'll have to generate those
; traps which the Guest CPU CTX shall have according to its
; CR0 flags. If no traps for the Guest OS, we'll save the host
; context and restore the guest context.
;
; @returns 0 if caller should continue execution.
; @returns VINF_EM_RAW_GUEST_TRAP if a guest trap should be generated.
; @param pCpumCpu [ebp+8] Pointer to the CPUMCPU.
;
align 16
BEGINPROC cpumHandleLazyFPUAsm
        push    ebp
        mov     ebp, esp
        push    ebx                     ; callee-saved; will hold pCpumCpu across the routine
        push    esi                     ; callee-saved; will hold the XState pointer
        mov     ebx, [ebp + 8]          ; ebx = pCpumCpu (the only argument)
%define pCpumCpu ebx
%define pXState esi

        ;
        ; Figure out what to do.
        ;
        ; There are two basic actions:
        ;   1. Save host fpu and restore guest fpu.
        ;   2. Generate guest trap.
        ;
        ; When entering the hypervisor we'll always enable MP (for proper wait
        ; trapping) and TS (for intercepting all fpu/mmx/sse stuff). The EM flag
        ; is taken from the guest OS in order to get proper SSE handling.
        ;
        ;
        ; Actions taken depending on the guest CR0 flags:
        ;
        ;   3    2    1
        ;  TS | EM | MP | FPUInstr | WAIT :: VMM Action
        ; ------------------------------------------------------------------------
        ;   0 |  0 |  0 | Exec     | Exec :: Clear TS & MP, Save HC, Load GC.
        ;   0 |  0 |  1 | Exec     | Exec :: Clear TS, Save HC, Load GC.
        ;   0 |  1 |  0 | #NM      | Exec :: Clear TS & MP, Save HC, Load GC;
        ;   0 |  1 |  1 | #NM      | Exec :: Clear TS, Save HC, Load GC.
        ;   1 |  0 |  0 | #NM      | Exec :: Clear MP, Save HC, Load GC. (EM is already cleared.)
        ;   1 |  0 |  1 | #NM      | #NM  :: Go to host taking trap there.
        ;   1 |  1 |  0 | #NM      | Exec :: Clear MP, Save HC, Load GC. (EM is already set.)
        ;   1 |  1 |  1 | #NM      | #NM  :: Go to host taking trap there.

        ;
        ; Before taking any of these actions we're checking if we have already
        ; loaded the GC FPU. Because if we have, this is a trap for the guest - raw ring-3.
        ;
        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USED_FPU
        jz      hlfpua_not_loaded
        jmp     hlfpua_guest_trap       ; guest FPU state already loaded -> the trap belongs to the guest

        ;
        ; Take action.
        ;
align 16
hlfpua_not_loaded:
        mov     eax, [pCpumCpu + CPUMCPU.Guest.cr0]
        and     eax, X86_CR0_MP | X86_CR0_EM | X86_CR0_TS       ; isolate bits 1-3; eax is now an even value 0..14
        jmp     dword [eax*2 + hlfpuajmp1]      ; eax*2 turns the 2-byte-stride bit pattern into a dword table index
align 16
;; jump table using fpu related cr0 flags as index.
hlfpuajmp1:
        RTCCPTR_DEF hlfpua_switch_fpu_ctx       ; TS=0 EM=0 MP=0
        RTCCPTR_DEF hlfpua_switch_fpu_ctx       ; TS=0 EM=0 MP=1
        RTCCPTR_DEF hlfpua_switch_fpu_ctx       ; TS=0 EM=1 MP=0
        RTCCPTR_DEF hlfpua_switch_fpu_ctx       ; TS=0 EM=1 MP=1
        RTCCPTR_DEF hlfpua_switch_fpu_ctx       ; TS=1 EM=0 MP=0
        RTCCPTR_DEF hlfpua_guest_trap           ; TS=1 EM=0 MP=1 - WAIT would trap in the guest too
        RTCCPTR_DEF hlfpua_switch_fpu_ctx       ; TS=1 EM=1 MP=0
        RTCCPTR_DEF hlfpua_guest_trap           ; TS=1 EM=1 MP=1 - WAIT would trap in the guest too
;; and mask for cr0.  Indexed exactly like hlfpuajmp1; selects which of TS/MP to clear in the real CR0.
hlfpu_afFlags:
        RTCCPTR_DEF ~(X86_CR0_TS | X86_CR0_MP)
        RTCCPTR_DEF ~(X86_CR0_TS)
        RTCCPTR_DEF ~(X86_CR0_TS | X86_CR0_MP)
        RTCCPTR_DEF ~(X86_CR0_TS)
        RTCCPTR_DEF ~(X86_CR0_MP)
        RTCCPTR_DEF 0                           ; unreachable (trap cases above)
        RTCCPTR_DEF ~(X86_CR0_MP)
        RTCCPTR_DEF 0                           ; unreachable (trap cases above)

        ;
        ; Action - switch FPU context and change cr0 flags.
        ;
align 16
hlfpua_switch_fpu_ctx:
        mov     ecx, cr0
        mov     edx, ecx
        and     ecx, [eax*2 + hlfpu_afFlags]    ; Calc the new cr0 flags. Do NOT use ECX until we restore it!
        and     edx, ~(X86_CR0_TS | X86_CR0_EM)
        mov     cr0, edx                        ; Clear flags so we don't trap here.

        ; Save the host state.  A zero XSAVE component mask means the legacy
        ; FXSAVE area is in use; otherwise XSAVE with the mask in edx:eax.
        mov     eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]
        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateRC]
        or      eax, eax
        jz      hlfpua_host_fxsave
        mov     edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]  ; high dword of the mask
        xsave   [pXState]
        jmp     hlfpua_host_done
hlfpua_host_fxsave:
        fxsave  [pXState]                       ; legacy path, no XSAVE components
hlfpua_host_done:

        ; Load the guest state, same mask convention as above.
        mov     eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]
        mov     pXState, [pCpumCpu + CPUMCPU.Guest.pXStateRC]
        or      eax, eax
        jz      hlfpua_guest_fxrstor
        mov     edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4] ; high dword of the mask
        xrstor  [pXState]
        jmp     hlfpua_guest_done
hlfpua_guest_fxrstor:
        fxrstor [pXState]                       ; legacy path, no XSAVE components
hlfpua_guest_done:

hlfpua_finished_switch:
        ; Mark the guest FPU state as resident (and dirty wrt REM).
        or      dword [pCpumCpu + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)

        ; Load new CR0 value.
        mov     cr0, ecx                        ; load the new cr0 flags (computed before the switch).

        ; return continue execution.
        pop     esi
        pop     ebx
        xor     eax, eax                        ; 0 = continue execution
        leave
        ret

        ;
        ; Action - Generate Guest trap.
        ;
hlfpua_action_4:
hlfpua_guest_trap:
        pop     esi
        pop     ebx
        mov     eax, VINF_EM_RAW_GUEST_TRAP
        leave
        ret
ENDPROC cpumHandleLazyFPUAsm
193
194
;;
; Calls a guest trap/interrupt handler directly
; Assumes a trap stack frame has already been setup on the guest's stack!
;
; @param pRegFrame [esp + 4] Original trap/interrupt context
; @param selCS [esp + 8] Code selector of handler
; @param pHandler [esp + 12] GC virtual address of handler
; @param eflags [esp + 16] Callee's EFLAGS
; @param selSS [esp + 20] Stack selector for handler
; @param pEsp [esp + 24] Stack address for handler
;
; @remark This call never returns!
;
; VMMRCDECL(void) CPUMGCCallGuestTrapHandler(PCPUMCTXCORE pRegFrame, uint32_t selCS, RTGCPTR pHandler, uint32_t eflags, uint32_t selSS, RTGCPTR pEsp);
align 16
BEGINPROC_EXPORTED CPUMGCCallGuestTrapHandler
        mov     ebp, esp                ; no frame push - we never return, [ebp+4] = first arg

        ; construct iret stack frame (pushed in reverse of the order iret pops: EIP, CS, EFLAGS, ESP, SS)
        push    dword [ebp + 20]        ; SS      = selSS
        push    dword [ebp + 24]        ; ESP     = pEsp
        push    dword [ebp + 16]        ; EFLAGS  = eflags
        push    dword [ebp + 8]         ; CS      = selCS
        push    dword [ebp + 12]        ; EIP     = pHandler

        ;
        ; enable WP
        ;
%ifdef ENABLE_WRITE_PROTECTION
        mov     eax, cr0
        or      eax, X86_CR0_WRITE_PROTECT
        mov     cr0, eax
%endif

        ; restore CPU context (all except cs, eip, ss, esp & eflags; which are restored or overwritten by iret)
        mov     ebp, [ebp + 4]          ; ebp = pRegFrame (ebp itself is restored from it at the very end)
        mov     ebx, [ebp + CPUMCTXCORE.ebx]
        mov     ecx, [ebp + CPUMCTXCORE.ecx]
        mov     edx, [ebp + CPUMCTXCORE.edx]
        mov     esi, [ebp + CPUMCTXCORE.esi]
        mov     edi, [ebp + CPUMCTXCORE.edi]

        ;; @todo load segment registers *before* enabling WP.
        ; Each TRPM_NP_GP_HANDLER arms a #NP/#GP recovery handler for the instruction
        ; that immediately follows it - do not separate or reorder these pairs.
        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_GS | CPUM_HANDLER_CTXCORE_IN_EBP
        mov     gs, [ebp + CPUMCTXCORE.gs.Sel]
        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_FS | CPUM_HANDLER_CTXCORE_IN_EBP
        mov     fs, [ebp + CPUMCTXCORE.fs.Sel]
        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_ES | CPUM_HANDLER_CTXCORE_IN_EBP
        mov     es, [ebp + CPUMCTXCORE.es.Sel]
        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_DS | CPUM_HANDLER_CTXCORE_IN_EBP
        mov     ds, [ebp + CPUMCTXCORE.ds.Sel]

        mov     eax, [ebp + CPUMCTXCORE.eax]
        mov     ebp, [ebp + CPUMCTXCORE.ebp]    ; last use of pRegFrame - ebp becomes the guest value

        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
        iret                            ; dispatch to the guest handler; never returns here
ENDPROC CPUMGCCallGuestTrapHandler
253
254
;;
; Performs an iret to V86 code
; Assumes a trap stack frame has already been setup on the guest's stack!
;
; @param pRegFrame Original trap/interrupt context
;
; This function does not return!
;
;VMMRCDECL(void) CPUMGCCallV86Code(PCPUMCTXCORE pRegFrame);
align 16
BEGINPROC CPUMGCCallV86Code
        mov     ebp, [esp + 4]          ; pRegFrame

        ; construct iret stack frame for a return to virtual-8086 mode;
        ; iret pops EIP, CS, EFLAGS, ESP, SS, ES, DS, FS, GS in that order,
        ; so push them in reverse.
        push    dword [ebp + CPUMCTXCORE.gs.Sel]
        push    dword [ebp + CPUMCTXCORE.fs.Sel]
        push    dword [ebp + CPUMCTXCORE.ds.Sel]
        push    dword [ebp + CPUMCTXCORE.es.Sel]
        push    dword [ebp + CPUMCTXCORE.ss.Sel]
        push    dword [ebp + CPUMCTXCORE.esp]
        push    dword [ebp + CPUMCTXCORE.eflags]        ; NOTE(review): presumably has VM (bit 17) set by the caller - iret needs it to enter V86 mode
        push    dword [ebp + CPUMCTXCORE.cs.Sel]
        push    dword [ebp + CPUMCTXCORE.eip]

        ;
        ; enable WP
        ;
%ifdef ENABLE_WRITE_PROTECTION
        mov     eax, cr0
        or      eax, X86_CR0_WRITE_PROTECT
        mov     cr0, eax
%endif

        ; restore CPU context (all except cs, eip, ss, esp, eflags, ds, es, fs & gs; which are restored or overwritten by iret)
        mov     eax, [ebp + CPUMCTXCORE.eax]
        mov     ebx, [ebp + CPUMCTXCORE.ebx]
        mov     ecx, [ebp + CPUMCTXCORE.ecx]
        mov     edx, [ebp + CPUMCTXCORE.edx]
        mov     esi, [ebp + CPUMCTXCORE.esi]
        mov     edi, [ebp + CPUMCTXCORE.edi]
        mov     ebp, [ebp + CPUMCTXCORE.ebp]    ; last use of pRegFrame - ebp becomes the guest value

        ; TRPM_NP_GP_HANDLER arms a fault recovery handler for the iret below.
        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
        iret                            ; enter V86 guest code; never returns here
ENDPROC CPUMGCCallV86Code
300
301
;;
; This is a main entry point for resuming (or starting) guest
; code execution.
;
; We get here directly from VMMSwitcher.asm (jmp at the end
; of VMMSwitcher_HostToGuest).
;
; This call never returns!
;
; @param edx Pointer to CPUMCPU structure.
;
align 16
BEGINPROC_EXPORTED CPUMGCResumeGuest
%ifdef VBOX_STRICT
        ; Call CPUM to check sanity.
        push    edx                     ; preserve pCpumCpu across the call
        mov     edx, IMP(g_VM)
        push    edx                     ; argument: pVM
        call    NAME(CPUMRCAssertPreExecutionSanity)
        add     esp, 4
        pop     edx
%endif

        ;
        ; Setup iretd (pushed in reverse of the order iretd pops: EIP, CS, EFLAGS, ESP, SS)
        ;
        push    dword [edx + CPUMCPU.Guest.ss.Sel]
        push    dword [edx + CPUMCPU.Guest.esp]
        push    dword [edx + CPUMCPU.Guest.eflags]
        push    dword [edx + CPUMCPU.Guest.cs.Sel]
        push    dword [edx + CPUMCPU.Guest.eip]

        ;
        ; Restore registers.
        ; Each TRPM_NP_GP_HANDLER arms a #NP/#GP recovery handler for the
        ; instruction that immediately follows it - keep the pairs together.
        ;
        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_ES
        mov     es, [edx + CPUMCPU.Guest.es.Sel]
        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_FS
        mov     fs, [edx + CPUMCPU.Guest.fs.Sel]
        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_GS
        mov     gs, [edx + CPUMCPU.Guest.gs.Sel]

%ifdef VBOX_WITH_STATISTICS
        ;
        ; Statistics.
        ;
        push    edx                     ; preserve pCpumCpu across the STAM macros
        mov     edx, IMP(g_VM)
        lea     edx, [edx + VM.StatTotalQemuToGC]
        STAM_PROFILE_ADV_STOP edx

        mov     edx, IMP(g_VM)
        lea     edx, [edx + VM.StatTotalInGC]
        STAM_PROFILE_ADV_START edx
        pop     edx
%endif

        ;
        ; enable WP
        ;
%ifdef ENABLE_WRITE_PROTECTION
        mov     eax, cr0
        or      eax, X86_CR0_WRITE_PROTECT
        mov     cr0, eax
%endif

        ;
        ; Continue restore.  The ds selector is parked on the stack so it can be
        ; loaded AFTER edx (pCpumCpu) is overwritten with the guest edx value.
        ;
        mov     esi, [edx + CPUMCPU.Guest.esi]
        mov     edi, [edx + CPUMCPU.Guest.edi]
        mov     ebp, [edx + CPUMCPU.Guest.ebp]
        mov     ebx, [edx + CPUMCPU.Guest.ebx]
        mov     ecx, [edx + CPUMCPU.Guest.ecx]
        mov     eax, [edx + CPUMCPU.Guest.eax]
        push    dword [edx + CPUMCPU.Guest.ds.Sel]
        mov     edx, [edx + CPUMCPU.Guest.edx]  ; last use of pCpumCpu
        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_DS
        pop     ds

        ; restart execution.
        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
        iretd                           ; jump into guest code; never returns here
ENDPROC CPUMGCResumeGuest
386
387
;;
; This is a main entry point for resuming (or starting) guest
; code execution for raw V86 mode
;
; We get here directly from VMMSwitcher.asm (jmp at the end
; of VMMSwitcher_HostToGuest).
;
; This call never returns!
;
; @param edx Pointer to CPUMCPU structure.
;
align 16
BEGINPROC_EXPORTED CPUMGCResumeGuestV86
%ifdef VBOX_STRICT
        ; Call CPUM to check sanity.
        push    edx                     ; preserve pCpumCpu across the call
        mov     edx, IMP(g_VM)
        push    edx                     ; argument: pVM
        call    NAME(CPUMRCAssertPreExecutionSanity)
        add     esp, 4
        pop     edx
%endif

        ;
        ; Setup iretd.  A V86-mode iretd pops EIP, CS, EFLAGS, ESP, SS, ES, DS,
        ; FS, GS - push them in reverse.  The data selectors go via the iretd
        ; frame (not mov sreg) because V86 segment values are loaded by iretd.
        ;
        push    dword [edx + CPUMCPU.Guest.gs.Sel]
        push    dword [edx + CPUMCPU.Guest.fs.Sel]
        push    dword [edx + CPUMCPU.Guest.ds.Sel]
        push    dword [edx + CPUMCPU.Guest.es.Sel]

        push    dword [edx + CPUMCPU.Guest.ss.Sel]
        push    dword [edx + CPUMCPU.Guest.esp]

        push    dword [edx + CPUMCPU.Guest.eflags]      ; NOTE(review): presumably has VM (bit 17) set - confirm against the caller
        push    dword [edx + CPUMCPU.Guest.cs.Sel]
        push    dword [edx + CPUMCPU.Guest.eip]

        ;
        ; Restore registers.
        ;

%ifdef VBOX_WITH_STATISTICS
        ;
        ; Statistics.
        ;
        push    edx                     ; preserve pCpumCpu across the STAM macros
        mov     edx, IMP(g_VM)
        lea     edx, [edx + VM.StatTotalQemuToGC]
        STAM_PROFILE_ADV_STOP edx

        mov     edx, IMP(g_VM)
        lea     edx, [edx + VM.StatTotalInGC]
        STAM_PROFILE_ADV_START edx
        pop     edx
%endif

        ;
        ; enable WP
        ;
%ifdef ENABLE_WRITE_PROTECTION
        mov     eax, cr0
        or      eax, X86_CR0_WRITE_PROTECT
        mov     cr0, eax
%endif

        ;
        ; Continue restore.  edx (pCpumCpu) must be overwritten last.
        ;
        mov     esi, [edx + CPUMCPU.Guest.esi]
        mov     edi, [edx + CPUMCPU.Guest.edi]
        mov     ebp, [edx + CPUMCPU.Guest.ebp]
        mov     ecx, [edx + CPUMCPU.Guest.ecx]
        mov     ebx, [edx + CPUMCPU.Guest.ebx]
        mov     eax, [edx + CPUMCPU.Guest.eax]
        mov     edx, [edx + CPUMCPU.Guest.edx]  ; last use of pCpumCpu

        ; restart execution.
        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
        iretd                           ; enter V86 guest code; never returns here
ENDPROC CPUMGCResumeGuestV86
469
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette