VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMGC/HWACCMGCA.asm@15331

Last change on this file was r15331, checked in by vboxsync, 16 years ago: "More fixes"

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 14.0 KB
; $Id: HWACCMGCA.asm 15331 2008-12-11 18:38:19Z vboxsync $
;; @file
; VMXM - GC vmx helpers
;

;
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;

;*******************************************************************************
;*   Header Files                                                              *
;*******************************************************************************
%undef RT_ARCH_X86
%define RT_ARCH_AMD64
%include "VBox/asmdefs.mac"
%include "VBox/err.mac"
%include "VBox/hwacc_vmx.mac"
%include "VBox/cpum.mac"
%include "VBox/x86.mac"
%include "../HWACCMInternal.mac"

%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
 %macro vmwrite 2,
    int3
 %endmacro
 %define vmlaunch int3
 %define vmresume int3
 %define vmsave int3
 %define vmload int3
 %define vmrun int3
 %define clgi int3
 %define stgi int3
 %macro invlpga 2,
    int3
 %endmacro
%endif

;; @def MYPUSHSEGS
; Macro saving the (es, ds) segment registers on the stack.
; @param 1  full width register name

;; @def MYPOPSEGS
; Macro restoring the (es, ds) segment registers from the stack.
; @param 1  full width register name

 ; Load the corresponding guest MSR (trashes rdx & rcx)
 %macro LOADGUESTMSR 2
    mov     rcx, %1
    mov     edx, dword [rsi + %2 + 4]
    mov     eax, dword [rsi + %2]
    wrmsr
 %endmacro

 ; Save a guest MSR (trashes rdx & rcx)
 ; Only really useful for gs kernel base as that one can be changed behind our back (swapgs)
 %macro SAVEGUESTMSR 2
    mov     rcx, %1
    rdmsr
    mov     dword [rsi + %2], eax
    mov     dword [rsi + %2 + 4], edx
 %endmacro
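
 ; Example (added for illustration): LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
 ; expands to code that loads the 64-bit value at [rsi + CPUMCTX.msrLSTAR] into
 ; edx:eax (high dword in edx, low dword in eax, as wrmsr expects) and writes
 ; it to the MSR selected by ecx; rsi must already point to the CPUMCTX.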

 %macro MYPUSHSEGS 1
    mov     %1, es
    push    %1
    mov     %1, ds
    push    %1
 %endmacro

 %macro MYPOPSEGS 1
    pop     %1
    mov     ds, %1
    pop     %1
    mov     es, %1
 %endmacro
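
 ; (Note, added: only es and ds are actually pushed/popped by these macros;
 ; cs and ss are restored from the VMCS host-state fields on VM-exit, and the
 ; fs/gs bases are presumably covered by the MSR handling and the switcher
 ; code on the way back to 32 bits mode.)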

BEGINCODE
BITS 64


;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (64 bits guest mode)
; *
; * @returns VBox status code
; * @param   pPageCpuPhys    VMXON physical address  [rsp+8]
; * @param   pVMCSPhys       VMCS physical address   [rsp+16]
; * @param   pCache          VMCS cache              [rsp+24]
; * @param   pCtx            Guest context (rsi)
; */
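;/* (Illustrative, assumed rather than taken from this file: the caller-side
;    prototype would look roughly like
;        DECLASM(int) VMXGCStartVM64(RTHCPHYS pPageCpuPhys, RTHCPHYS pVMCSPhys,
;                                    PVMCSCACHE pCache, PCPUMCTX pCtx);
;    with the first three parameters on the stack and pCtx in rsi, as the
;    offsets above indicate.) */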
BEGINPROC VMXGCStartVM64
    push    rbp
    mov     rbp, rsp

    ; Make sure VT-x instructions are allowed
    mov     rax, cr4
    or      rax, X86_CR4_VMXE
    mov     cr4, rax

    ;/* Enter VMX Root Mode */
    vmxon   [rbp + 8 + 8]
    jnc     .vmxon_success
    mov     rax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .vmstart64_vmxon_failed

.vmxon_success:
    ; Activate the VMCS pointer
    vmptrld [rbp + 16 + 8]
    jnc     .vmptrld_success
    mov     rax, VERR_VMX_INVALID_VMCS_PTR
    jmp     .vmstart64_vmoff_end

.vmptrld_success:

    ; Save the VMCS pointer on the stack
    push    qword [rbp + 16 + 8]

    ; Signal that we're in 64 bits mode now!
    mov     eax, VMX_VMCS_CTRL_EXIT_CONTROLS
    vmread  rdx, rax
    or      rdx, VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64
    vmwrite rax, rdx
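    ; (Note: VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64 is the "host address-space
    ; size" VM-exit control; setting it makes the CPU return to 64 bits mode
    ; on VM-exit, which this temporary 64 bits code relies on.)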

    ; Save the host state that's relevant in the temporary 64 bits mode
    mov     rdx, cr0
    mov     eax, VMX_VMCS_HOST_CR0
    vmwrite rax, rdx

    mov     rdx, cr3
    mov     eax, VMX_VMCS_HOST_CR3
    vmwrite rax, rdx

    mov     rdx, cr4
    mov     eax, VMX_VMCS_HOST_CR4
    vmwrite rax, rdx

    mov     rdx, cs
    mov     eax, VMX_VMCS_HOST_FIELD_CS
    vmwrite rax, rdx

    mov     rdx, ss
    mov     eax, VMX_VMCS_HOST_FIELD_SS
    vmwrite rax, rdx

    sub     rsp, 8*2
    sgdt    [rsp]
    mov     eax, VMX_VMCS_HOST_GDTR_BASE
    vmwrite rax, [rsp+2]
    add     rsp, 8*2
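    ; (Note: in 64 bits mode sgdt stores a 10 byte pseudo-descriptor, a 16-bit
    ; limit followed by the 64-bit base, so the base is read from [rsp+2].)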

    ; hopefully we can ignore TR (we restore it anyway on the way back to 32 bits mode)

    ;/* First we have to save some final CPU context registers. */
    lea     rdx, [.vmlaunch64_done wrt rip]
    mov     rax, VMX_VMCS_HOST_RIP              ;/* return address (too difficult to continue after VMLAUNCH?) */
    vmwrite rax, rdx
    ;/* Note: assumes success... */
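    ; (Note: on every VM-exit the CPU continues at VMX_VMCS_HOST_RIP with rsp
    ; reloaded from VMX_VMCS_HOST_RSP, so .vmlaunch64_done below is the common
    ; landing point for both VM-exits and VMLAUNCH failures.)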

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */

    ;/* Save segment registers */
    MYPUSHSEGS rax


%ifdef VMX_USE_CACHED_VMCS_ACCESSES
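    ; (Note: the VMCS write cache presumably collects vmwrites queued before
    ; switching here; the loop below flushes each queued field/value pair with
    ; vmwrite while the right VMCS is current, then clears the entry count.)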
    mov     rbx, [rbp + 24 + 8]                 ; pCache
    mov     ecx, [xBX + VMCSCACHE.Write.cValidEntries]
    cmp     ecx, 0
    je      .no_cached_writes
    mov     edx, ecx
    mov     ecx, 0
    jmp     .cached_write

ALIGN(16)
.cached_write:
    mov     eax, [xBX + VMCSCACHE.Write.aField + xCX*4]
    vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX*8]
    inc     xCX
    cmp     xCX, xDX
    jl      .cached_write

    mov     dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

    ; Save the pCache pointer
    push    xBX
%endif

    ; Load the guest LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    ;; @todo use the automatic load feature for MSRs
    LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
%if 0 ; not supported on Intel CPUs
    LOADGUESTMSR MSR_K8_CSTAR, CPUMCTX.msrCSTAR
%endif
    LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
    LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
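    ; (Note: these MSRs have no guest-state fields in the VMCS, hence the
    ; manual wrmsr loads; the @todo above refers to the VMX MSR-load/store
    ; areas which can do this automatically on entry and exit.)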

    ; Save the pCtx pointer
    push    rsi

    ; Restore CR2
    mov     rbx, qword [rsi + CPUMCTX.cr2]
    mov     cr2, rbx

    mov     eax, VMX_VMCS_HOST_RSP
    vmwrite rax, rsp
    ;/* Note: assumes success... */
    ;/* Don't mess with ESP anymore!! */
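    ; (Note: the rsp value just written is what the CPU reloads on VM-exit,
    ; so any further push/pop here would make the stack layout at
    ; .vmlaunch64_done disagree with the pops in the exit path below.)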

    ;/* Restore Guest's general purpose registers. */
    mov     rax, qword [rsi + CPUMCTX.eax]
    mov     rbx, qword [rsi + CPUMCTX.ebx]
    mov     rcx, qword [rsi + CPUMCTX.ecx]
    mov     rdx, qword [rsi + CPUMCTX.edx]
    mov     rbp, qword [rsi + CPUMCTX.ebp]
    mov     r8, qword [rsi + CPUMCTX.r8]
    mov     r9, qword [rsi + CPUMCTX.r9]
    mov     r10, qword [rsi + CPUMCTX.r10]
    mov     r11, qword [rsi + CPUMCTX.r11]
    mov     r12, qword [rsi + CPUMCTX.r12]
    mov     r13, qword [rsi + CPUMCTX.r13]
    mov     r14, qword [rsi + CPUMCTX.r14]
    mov     r15, qword [rsi + CPUMCTX.r15]

    ;/* Restore rdi & rsi. */
    mov     rdi, qword [rsi + CPUMCTX.edi]
    mov     rsi, qword [rsi + CPUMCTX.esi]

    vmlaunch
    jmp     .vmlaunch64_done                    ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16)
.vmlaunch64_done:
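    ; (Note: we get here either through a VM-exit or because VMLAUNCH failed;
    ; per the VMX conventions CF=1 means VMfailInvalid and ZF=1 means
    ; VMfailValid, with the error number in the VM-instruction error field.)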
    jc      near .vmstart64_invalid_vmxon_ptr
    jz      near .vmstart64_start_failed

    push    rdi
    mov     rdi, [rsp + 8]                      ; pCtx (pushed just before the HOST_RSP vmwrite, so directly above the saved rdi)

    mov     qword [rdi + CPUMCTX.eax], rax
    mov     qword [rdi + CPUMCTX.ebx], rbx
    mov     qword [rdi + CPUMCTX.ecx], rcx
    mov     qword [rdi + CPUMCTX.edx], rdx
    mov     qword [rdi + CPUMCTX.esi], rsi
    mov     qword [rdi + CPUMCTX.ebp], rbp
    mov     qword [rdi + CPUMCTX.r8], r8
    mov     qword [rdi + CPUMCTX.r9], r9
    mov     qword [rdi + CPUMCTX.r10], r10
    mov     qword [rdi + CPUMCTX.r11], r11
    mov     qword [rdi + CPUMCTX.r12], r12
    mov     qword [rdi + CPUMCTX.r13], r13
    mov     qword [rdi + CPUMCTX.r14], r14
    mov     qword [rdi + CPUMCTX.r15], r15

    pop     rax                                 ; the guest edi we pushed above
    mov     qword [rdi + CPUMCTX.edi], rax

    pop     rsi                                 ; pCtx (needed in rsi by the macros below)

    ;; @todo use the automatic load feature for MSRs
    SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
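    ; (Note: mirror of the write cache above; each queued field is read back
    ; with vmread into the cache, presumably for the 32 bits side to consume
    ; after the switch back.)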
    pop     xDX                                 ; saved pCache

    mov     ecx, [xDX + VMCSCACHE.Read.cValidEntries]
    cmp     ecx, 0                              ; can't happen
    je      .no_cached_reads
    jmp     .cached_read

ALIGN(16)
.cached_read:
    dec     xCX
    mov     eax, [xDX + VMCSCACHE.Read.aField + xCX*4]
    vmread  [xDX + VMCSCACHE.Read.aFieldVal + xCX*8], xAX
    cmp     xCX, 0
    jnz     .cached_read
.no_cached_reads:
%endif

    ; Restore segment registers
    MYPOPSEGS rax

    mov     eax, VINF_SUCCESS

.vmstart64_end:
    ; Signal that we're going back to 32 bits mode!
    ; (Use ecx for the field selector so the status code in eax is preserved.)
    mov     ecx, VMX_VMCS_CTRL_EXIT_CONTROLS
    vmread  rdx, rcx
    and     rdx, ~VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64
    vmwrite rcx, rdx

    ; Write back the data and disable the VMCS
    vmclear qword [rsp]                         ; Pushed pVMCS
    add     rsp, 8

.vmstart64_vmoff_end:
    ; Disable VMX root mode
    vmxoff
.vmstart64_vmxon_failed:
    pop     rbp
    ret


.vmstart64_invalid_vmxon_ptr:
    pop     rsi                                 ; pCtx (needed in rsi by the macros below)

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add     xSP, xS                             ; pCache
%endif

    ; Restore segment registers
    MYPOPSEGS rax

    ; Restore all general purpose host registers.
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .vmstart64_end

.vmstart64_start_failed:
    pop     rsi                                 ; pCtx (needed in rsi by the macros below)

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add     xSP, xS                             ; pCache
%endif

    ; Restore segment registers
    MYPOPSEGS rax

    ; Restore all general purpose host registers.
    mov     eax, VERR_VMX_UNABLE_TO_START_VM
    jmp     .vmstart64_end
ENDPROC VMXGCStartVM64


;/**
; * Prepares for and executes VMRUN (64 bits guests)
; *
; * @returns VBox status code
; * @param   pVMCBHostPhys   Physical address of host VMCB   (rsp+8)
; * @param   pVMCBPhys       Physical address of guest VMCB  (rsp+16)
; * @param   pCtx            Guest context (rsi)
; */
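;/* (Illustrative, assumed rather than taken from this file: the caller-side
;    prototype would look roughly like
;        DECLASM(int) SVMGCVMRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys,
;                                  PCPUMCTX pCtx);
;    with both physical addresses on the stack and pCtx in rsi.) */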
BEGINPROC SVMGCVMRun64
    push    rbp
    mov     rbp, rsp
    pushf

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; */

    ;/* Save the Guest CPU context pointer. */
    push    rsi                                 ; push for saving the state at the end

    ; save host fs, gs, sysenter msr etc
    mov     rax, [rbp + 8 + 8]                  ; pVMCBHostPhys (64 bits physical address)
    push    rax                                 ; save for the vmload after vmrun
    vmsave

    ; setup eax for VMLOAD
    mov     rax, [rbp + 8 + 8 + RTHCPHYS_CB]    ; pVMCBPhys (64 bits physical address)

    ;/* Restore Guest's general purpose registers. */
    ;/* RAX is loaded from the VMCB by VMRUN */
    mov     rbx, qword [rsi + CPUMCTX.ebx]
    mov     rcx, qword [rsi + CPUMCTX.ecx]
    mov     rdx, qword [rsi + CPUMCTX.edx]
    mov     rdi, qword [rsi + CPUMCTX.edi]
    mov     rbp, qword [rsi + CPUMCTX.ebp]
    mov     r8, qword [rsi + CPUMCTX.r8]
    mov     r9, qword [rsi + CPUMCTX.r9]
    mov     r10, qword [rsi + CPUMCTX.r10]
    mov     r11, qword [rsi + CPUMCTX.r11]
    mov     r12, qword [rsi + CPUMCTX.r12]
    mov     r13, qword [rsi + CPUMCTX.r13]
    mov     r14, qword [rsi + CPUMCTX.r14]
    mov     r15, qword [rsi + CPUMCTX.r15]
    mov     rsi, qword [rsi + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    clgi
    sti

    ; load guest fs, gs, sysenter msr etc
    vmload
    ; run the VM
    vmrun
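    ; (Note: vmrun loads the remaining guest state, including rax, from the
    ; VMCB addressed by rax, executes the guest, and falls through here on
    ; #VMEXIT with the guest state written back to that VMCB.)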

    ;/* RAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    vmsave

    ; load host fs, gs, sysenter msr etc
    pop     rax                                 ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop     rax                                 ; pCtx

    mov     qword [rax + CPUMCTX.ebx], rbx
    mov     qword [rax + CPUMCTX.ecx], rcx
    mov     qword [rax + CPUMCTX.edx], rdx
    mov     qword [rax + CPUMCTX.esi], rsi
    mov     qword [rax + CPUMCTX.edi], rdi
    mov     qword [rax + CPUMCTX.ebp], rbp
    mov     qword [rax + CPUMCTX.r8], r8
    mov     qword [rax + CPUMCTX.r9], r9
    mov     qword [rax + CPUMCTX.r10], r10
    mov     qword [rax + CPUMCTX.r11], r11
    mov     qword [rax + CPUMCTX.r12], r12
    mov     qword [rax + CPUMCTX.r13], r13
    mov     qword [rax + CPUMCTX.r14], r14
    mov     qword [rax + CPUMCTX.r15], r15

    mov     eax, VINF_SUCCESS

    popf
    pop     rbp
    ret
ENDPROC SVMGCVMRun64

;/**
; * Saves the guest FPU context
; *
; * @returns VBox status code
; * @param   pCtx    Guest context [rsi]
; */
BEGINPROC HWACCMSaveGuestFPU64
    mov     rax, cr0
    mov     rcx, rax                            ; save old CR0
    and     rax, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, rax
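    ; (Note: fxsave raises #NM if CR0.TS is set and #UD if CR0.EM is set, so
    ; both bits are cleared before saving the FPU state and the original CR0
    ; is restored afterwards.)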

    fxsave  [rsi + CPUMCTX.fpu]

    mov     cr0, rcx                            ; and restore old CR0 again

    mov     eax, VINF_SUCCESS
    ret
ENDPROC HWACCMSaveGuestFPU64

;/**
; * Saves the guest debug context (DR0-3, DR6)
; *
; * @returns VBox status code
; * @param   pCtx    Guest context [rsi]
; */
BEGINPROC HWACCMSaveGuestDebug64
    mov     rax, dr0
    mov     qword [rsi + CPUMCTX.dr + 0*8], rax
    mov     rax, dr1
    mov     qword [rsi + CPUMCTX.dr + 1*8], rax
    mov     rax, dr2
    mov     qword [rsi + CPUMCTX.dr + 2*8], rax
    mov     rax, dr3
    mov     qword [rsi + CPUMCTX.dr + 3*8], rax
    mov     rax, dr6
    mov     qword [rsi + CPUMCTX.dr + 6*8], rax
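    ; (Note: CPUMCTX.dr is presumably indexed by debug register number, so dr6
    ; lands in slot 6; slots 4 and 5 stay unused since DR4/DR5 merely alias
    ; DR6/DR7 when CR4.DE is clear, and DR7 is not saved here.)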
    mov     eax, VINF_SUCCESS
    ret
ENDPROC HWACCMSaveGuestDebug64

;/**
; * Dummy callback handler
; *
; * @returns VBox status code
; * @param   param1  Parameter 1 [rsp+8]
; * @param   param2  Parameter 2 [rsp+12]
; * @param   param3  Parameter 3 [rsp+16]
; * @param   param4  Parameter 4 [rsp+20]
; * @param   param5  Parameter 5 [rsp+24]
; * @param   pCtx    Guest context [rsi]
; */
BEGINPROC HWACCMTestSwitcher64
    mov     eax, [rsp+8]
    ret
ENDPROC HWACCMTestSwitcher64