VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMGC/HWACCMGCA.asm@15141

Last change on this file since 15141 was 15141, checked in by vboxsync, 16 years ago:

Cleaned up.
; $Id: HWACCMGCA.asm 15141 2008-12-09 09:19:20Z vboxsync $
;; @file
; VMXM - GC vmx helpers
;

;
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;

;*******************************************************************************
;* Header Files                                                                *
;*******************************************************************************
%undef  RT_ARCH_X86
%define RT_ARCH_AMD64
%include "VBox/asmdefs.mac"
%include "VBox/err.mac"
%include "VBox/hwacc_vmx.mac"
%include "VBox/cpum.mac"
%include "VBox/x86.mac"

%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
 %macro vmwrite 2
    int3
 %endmacro
 %define vmlaunch int3
 %define vmresume int3
 %define vmsave   int3
 %define vmload   int3
 %define vmrun    int3
 %define clgi     int3
 %define stgi     int3
 %macro invlpga 2
    int3
 %endmacro
%endif

;; @def MYPUSHSEGS
; Macro saving the relevant segment registers (ES and DS) on the stack.
; @param 1 full width register name

;; @def MYPOPSEGS
; Macro restoring those segment registers from the stack.
; @param 1 full width register name

 ; Load the corresponding guest MSR (trashes rdx & rcx)
 %macro LOADGUESTMSR 2
    mov     rcx, %1
    mov     edx, dword [rsi + %2 + 4]
    mov     eax, dword [rsi + %2]
    wrmsr
 %endmacro
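
 ; For illustration, LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR expands to:
 ;      mov     rcx, MSR_K8_LSTAR
 ;      mov     edx, dword [rsi + CPUMCTX.msrLSTAR + 4]
 ;      mov     eax, dword [rsi + CPUMCTX.msrLSTAR]
 ;      wrmsr
 ; i.e. the 64-bit MSR value is loaded as two 32-bit halves from the CPUMCTX
 ; structure pointed to by rsi (edx:eax is the format wrmsr expects).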

 ; Save a guest MSR (trashes rdx & rcx)
 ; Only really useful for gs kernel base as that one can be changed behind our back (swapgs)
 %macro SAVEGUESTMSR 2
    mov     rcx, %1
    rdmsr
    mov     dword [rsi + %2], eax
    mov     dword [rsi + %2 + 4], edx
 %endmacro

 %macro MYPUSHSEGS 1
    mov     %1, es
    push    %1
    mov     %1, ds
    push    %1
 %endmacro

 %macro MYPOPSEGS 1
    pop     %1
    mov     ds, %1
    pop     %1
    mov     es, %1
 %endmacro
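
 ; For illustration, MYPUSHSEGS rax ... MYPOPSEGS rax bracket a region like so:
 ;      mov     rax, es
 ;      push    rax
 ;      mov     rax, ds
 ;      push    rax
 ;      ...
 ;      pop     rax
 ;      mov     ds, rax
 ;      pop     rax
 ;      mov     es, rax
 ; The pops mirror the pushes in reverse order: DS comes off first, then ES.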

; trashes rax & rdx
 %macro VMCSWRITE 2
    mov     eax, %1
    vmwrite rax, %2
 %endmacro

; trashes rax & rdx
 %macro VMCSREAD 2
    mov     eax, %1
    vmread  %2, rax
 %endmacro
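
 ; For illustration, VMCSREAD VMX_VMCS_CTRL_EXIT_CONTROLS, rdx expands to:
 ;      mov     eax, VMX_VMCS_CTRL_EXIT_CONTROLS
 ;      vmread  rdx, rax
 ; reading the named VMCS field into rdx; VMCSWRITE is the mirror image using
 ; vmwrite (field index in rax, value in the second operand).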

BEGINCODE
BITS 64


;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (64 bits guest mode)
; *
; * @returns VBox status code
; * @param pPageCpuPhys  VMXON physical address [rsp+8]
; * @param pVMCSPhys     VMCS physical address [rsp+16]
; * @param pCtx          Guest context (rsi)
; */
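; Stack layout sketch after the "push rbp / mov rbp, rsp" prologue below,
; derived from the offsets the code actually uses:
;   [rbp]        saved rbp
;   [rbp + 8]    return address
;   [rbp + 16]   pPageCpuPhys  -> hence "vmxon   [rbp + 8 + 8]"
;   [rbp + 24]   pVMCSPhys     -> hence "vmptrld [rbp + 16 + 8]"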
BEGINPROC VMXGCStartVM64
    push    rbp
    mov     rbp, rsp

    ; Make sure VT-x instructions are allowed
    mov     rax, cr4
    or      rax, X86_CR4_VMXE
    mov     cr4, rax

    ;/* Enter VMX Root Mode */
    vmxon   [rbp + 8 + 8]
    jnc     .vmxon_success
    mov     rax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .vmstart64_vmxon_failed

.vmxon_success:
    ; Activate the VMCS pointer
    vmptrld [rbp + 16 + 8]
    jnc     .vmptrld_success
    mov     rax, VERR_VMX_INVALID_VMCS_PTR
    jmp     .vmstart64_vmoff_end

.vmptrld_success:
    ; Signal that we're in 64 bits mode now!
    VMCSREAD  VMX_VMCS_CTRL_EXIT_CONTROLS, rdx
    or        rdx, VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64
    VMCSWRITE VMX_VMCS_CTRL_EXIT_CONTROLS, rdx

    ; Have to sync half the guest state as we can't access most of the 64 bits state in 32 bits mode. Sigh.
    VMCSWRITE VMX_VMCS64_GUEST_CS_BASE,       [rsi + CPUMCTX.csHid.u64Base]
    VMCSWRITE VMX_VMCS64_GUEST_DS_BASE,       [rsi + CPUMCTX.dsHid.u64Base]
    VMCSWRITE VMX_VMCS64_GUEST_ES_BASE,       [rsi + CPUMCTX.esHid.u64Base]
    VMCSWRITE VMX_VMCS64_GUEST_FS_BASE,       [rsi + CPUMCTX.fsHid.u64Base]
    VMCSWRITE VMX_VMCS64_GUEST_GS_BASE,       [rsi + CPUMCTX.gsHid.u64Base]
    VMCSWRITE VMX_VMCS64_GUEST_SS_BASE,       [rsi + CPUMCTX.ssHid.u64Base]
    VMCSWRITE VMX_VMCS64_GUEST_GDTR_BASE,     [rsi + CPUMCTX.gdtr.pGdt]
    VMCSWRITE VMX_VMCS64_GUEST_IDTR_BASE,     [rsi + CPUMCTX.idtr.pIdt]
    VMCSWRITE VMX_VMCS64_GUEST_LDTR_BASE,     [rsi + CPUMCTX.ldtrHid.u64Base]
    VMCSWRITE VMX_VMCS64_GUEST_TR_BASE,       [rsi + CPUMCTX.trHid.u64Base]

    VMCSWRITE VMX_VMCS64_GUEST_SYSENTER_EIP,  [rsi + CPUMCTX.SysEnter.eip]
    VMCSWRITE VMX_VMCS64_GUEST_SYSENTER_ESP,  [rsi + CPUMCTX.SysEnter.esp]

    VMCSWRITE VMX_VMCS64_GUEST_RIP,           [rsi + CPUMCTX.eip]
    VMCSWRITE VMX_VMCS64_GUEST_RSP,           [rsi + CPUMCTX.esp]

    ; Save the host state that's relevant in the temporary 64 bits mode
    mov     rax, cr0
    VMCSWRITE VMX_VMCS_HOST_CR0, rax
    mov     rax, cr3
    VMCSWRITE VMX_VMCS_HOST_CR3, rax
    mov     rax, cr4
    VMCSWRITE VMX_VMCS_HOST_CR4, rax
    mov     rax, cs
    VMCSWRITE VMX_VMCS_HOST_FIELD_CS, rax
    mov     rax, ss
    VMCSWRITE VMX_VMCS_HOST_FIELD_SS, rax

    sub     rsp, 8*2
    sgdt    [rsp]
    mov     rax, [rsp + 2]
    VMCSWRITE VMX_VMCS_HOST_GDTR_BASE, rax
    add     rsp, 8*2
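    ; Note: in 64-bit mode sgdt stores a 10-byte descriptor-table image, a
    ; 16-bit limit at [rsp] followed by the 64-bit base at [rsp + 2], which is
    ; why the base is fetched from [rsp + 2] above.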

    ; hopefully we can ignore TR (we restore it anyway on the way back to 32 bits mode)

    ;/* First we have to save some final CPU context registers. */
    lea     rdx, [.vmlaunch64_done wrt rip]
    mov     rax, VMX_VMCS_HOST_RIP      ;/* return address (too difficult to continue after VMLAUNCH?) */
    vmwrite rax, rdx
    ;/* Note: assumes success... */

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */

    ;/* Save segment registers */
    MYPUSHSEGS rax

    ; Load the guest LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    ;; @todo use the automatic load feature for MSRs
    LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
%if 0 ; not supported on Intel CPUs
    LOADGUESTMSR MSR_K8_CSTAR, CPUMCTX.msrCSTAR
%endif
    LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
    LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE

    ; Save the pCtx pointer
    push    rsi

    ; Restore CR2
    mov     rbx, qword [rsi + CPUMCTX.cr2]
    mov     cr2, rbx

    mov     eax, VMX_VMCS_HOST_RSP
    vmwrite rax, rsp
    ;/* Note: assumes success... */
    ;/* Don't mess with ESP anymore!! */
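
    ; Stack layout at this point, i.e. the value written to VMX_VMCS_HOST_RSP
    ; (the CPU reloads rsp from that field on VM exit):
    ;   [rsp]        pCtx (the rsi pushed above)
    ;   [rsp + 8]    saved DS
    ;   [rsp + 16]   saved ES
    ;   [rsp + 24]   saved rbp
    ;   [rsp + 32]   return address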

    ;/* Restore Guest's general purpose registers. */
    mov     rax, qword [rsi + CPUMCTX.eax]
    mov     rbx, qword [rsi + CPUMCTX.ebx]
    mov     rcx, qword [rsi + CPUMCTX.ecx]
    mov     rdx, qword [rsi + CPUMCTX.edx]
    mov     rbp, qword [rsi + CPUMCTX.ebp]
    mov     r8,  qword [rsi + CPUMCTX.r8]
    mov     r9,  qword [rsi + CPUMCTX.r9]
    mov     r10, qword [rsi + CPUMCTX.r10]
    mov     r11, qword [rsi + CPUMCTX.r11]
    mov     r12, qword [rsi + CPUMCTX.r12]
    mov     r13, qword [rsi + CPUMCTX.r13]
    mov     r14, qword [rsi + CPUMCTX.r14]
    mov     r15, qword [rsi + CPUMCTX.r15]

    ;/* Restore rdi & rsi. */
    mov     rdi, qword [rsi + CPUMCTX.edi]
    mov     rsi, qword [rsi + CPUMCTX.esi]

    vmlaunch
    jmp     .vmlaunch64_done            ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16)
.vmlaunch64_done:
    jc      near .vmstart64_invalid_vmxon_ptr
    jz      near .vmstart64_start_failed
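    ; VMX convention: CF=1 after VMLAUNCH means VMfailInvalid (bad VMCS
    ; pointer), ZF=1 means VMfailValid (error number in the VM-instruction
    ; error VMCS field); otherwise we got here through the VMX_VMCS_HOST_RIP
    ; entry point on a real VM exit.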

    push    rdi
    mov     rdi, [rsp + 8]              ; pCtx (one slot below the rdi just pushed)

    mov     qword [rdi + CPUMCTX.eax], rax
    mov     qword [rdi + CPUMCTX.ebx], rbx
    mov     qword [rdi + CPUMCTX.ecx], rcx
    mov     qword [rdi + CPUMCTX.edx], rdx
    mov     qword [rdi + CPUMCTX.esi], rsi
    mov     qword [rdi + CPUMCTX.ebp], rbp
    mov     qword [rdi + CPUMCTX.r8],  r8
    mov     qword [rdi + CPUMCTX.r9],  r9
    mov     qword [rdi + CPUMCTX.r10], r10
    mov     qword [rdi + CPUMCTX.r11], r11
    mov     qword [rdi + CPUMCTX.r12], r12
    mov     qword [rdi + CPUMCTX.r13], r13
    mov     qword [rdi + CPUMCTX.r14], r14
    mov     qword [rdi + CPUMCTX.r15], r15

    pop     rax                         ; the guest edi we pushed above
    mov     qword [rdi + CPUMCTX.edi], rax

    pop     rsi                         ; pCtx (needed in rsi by the macros below)

    ;; @todo use the automatic load feature for MSRs
    SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE

    ; Restore segment registers
    MYPOPSEGS rax

    mov     eax, VINF_SUCCESS
.vmstart64_end:
    ; Signal that we're going back to 32 bits mode!
    VMCSREAD  VMX_VMCS_CTRL_EXIT_CONTROLS, rdx
    and       rdx, ~VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64
    VMCSWRITE VMX_VMCS_CTRL_EXIT_CONTROLS, rdx

    ; Write back the data and disable the VMCS
    vmclear [rbp + 16 + 8]              ; pVMCS

.vmstart64_vmoff_end:
    ; Disable VMX root mode
    vmxoff

.vmstart64_vmxon_failed:
    pop     rbp
    ret

.vmstart64_invalid_vmxon_ptr:
    pop     rsi                         ; pCtx (needed in rsi by the macros below)

    ; Save the guest KERNEL_GSBASE MSR (it can change behind our back via swapgs)
    ;; @todo use the automatic load feature for MSRs
    SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE

    ; Restore segment registers
    MYPOPSEGS rax

    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .vmstart64_end

.vmstart64_start_failed:
    pop     rsi                         ; pCtx (needed in rsi by the macros below)

    ; Save the guest KERNEL_GSBASE MSR (it can change behind our back via swapgs)
    ;; @todo use the automatic load feature for MSRs
    SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE

    ; Restore segment registers
    MYPOPSEGS rax

    mov     eax, VERR_VMX_UNABLE_TO_START_VM
    jmp     .vmstart64_end
ENDPROC VMXGCStartVM64


;/**
; * Prepares for and executes VMRUN (64 bits guests)
; *
; * @returns VBox status code
; * @param HCPhysVMCBHost  Physical address of host VMCB (rsp+8)
; * @param HCPhysVMCB      Physical address of guest VMCB (rsp+16)
; * @param pCtx            Guest context (rsi)
; */
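; Stack layout sketch after the "push rbp / mov rbp, rsp" prologue below,
; derived from the offsets the code actually uses (RTHCPHYS_CB is the size of
; an RTHCPHYS, i.e. 8 bytes):
;   [rbp]        saved rbp
;   [rbp + 8]    return address
;   [rbp + 16]   HCPhysVMCBHost -> hence "[rbp + 8 + 8]"
;   [rbp + 24]   HCPhysVMCB     -> hence "[rbp + 8 + 8 + RTHCPHYS_CB]"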
BEGINPROC SVMGCVMRun64
    push    rbp
    mov     rbp, rsp
    pushf

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; */

    ;/* Save the Guest CPU context pointer. */
    push    rsi                         ; push for saving the state at the end

    ; save host fs, gs, sysenter msr etc
    mov     rax, [rbp + 8 + 8]          ; pVMCBHostPhys (64 bits physical address)
    push    rax                         ; save for the vmload after vmrun
    vmsave

    ; setup eax for VMLOAD
    mov     rax, [rbp + 8 + 8 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address)

    ;/* Restore Guest's general purpose registers. */
    ;/* RAX is loaded from the VMCB by VMRUN */
    mov     rbx, qword [rsi + CPUMCTX.ebx]
    mov     rcx, qword [rsi + CPUMCTX.ecx]
    mov     rdx, qword [rsi + CPUMCTX.edx]
    mov     rdi, qword [rsi + CPUMCTX.edi]
    mov     rbp, qword [rsi + CPUMCTX.ebp]
    mov     r8,  qword [rsi + CPUMCTX.r8]
    mov     r9,  qword [rsi + CPUMCTX.r9]
    mov     r10, qword [rsi + CPUMCTX.r10]
    mov     r11, qword [rsi + CPUMCTX.r11]
    mov     r12, qword [rsi + CPUMCTX.r12]
    mov     r13, qword [rsi + CPUMCTX.r13]
    mov     r14, qword [rsi + CPUMCTX.r14]
    mov     r15, qword [rsi + CPUMCTX.r15]
    mov     rsi, qword [rsi + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    clgi
    sti

    ; load guest fs, gs, sysenter msr etc
    vmload
    ; run the VM
    vmrun

    ;/* RAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    vmsave

    ; load host fs, gs, sysenter msr etc
    pop     rax                         ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop     rax                         ; pCtx

    mov     qword [rax + CPUMCTX.ebx], rbx
    mov     qword [rax + CPUMCTX.ecx], rcx
    mov     qword [rax + CPUMCTX.edx], rdx
    mov     qword [rax + CPUMCTX.esi], rsi
    mov     qword [rax + CPUMCTX.edi], rdi
    mov     qword [rax + CPUMCTX.ebp], rbp
    mov     qword [rax + CPUMCTX.r8],  r8
    mov     qword [rax + CPUMCTX.r9],  r9
    mov     qword [rax + CPUMCTX.r10], r10
    mov     qword [rax + CPUMCTX.r11], r11
    mov     qword [rax + CPUMCTX.r12], r12
    mov     qword [rax + CPUMCTX.r13], r13
    mov     qword [rax + CPUMCTX.r14], r14
    mov     qword [rax + CPUMCTX.r15], r15

    mov     eax, VINF_SUCCESS

    popf
    pop     rbp
    ret
ENDPROC SVMGCVMRun64

;/**
; * Saves the guest FPU context
; *
; * @returns VBox status code
; * @param pCtx Guest context [rsi]
; */
BEGINPROC HWACCMSaveGuestFPU64
    mov     rax, cr0
    mov     rcx, rax                    ; save old CR0
    and     rax, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, rax

    fxsave  [rsi + CPUMCTX.fpu]
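
    ; Note: fxsave requires a 16-byte aligned operand, so CPUMCTX.fpu is
    ; assumed to be laid out with that alignment (fxsave raises #GP
    ; otherwise); clearing CR0.TS and CR0.EM above keeps it from raising #NM.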

    mov     cr0, rcx                    ; and restore old CR0 again

    mov     eax, VINF_SUCCESS
    ret
ENDPROC HWACCMSaveGuestFPU64

;/**
; * Saves the guest debug context (DR0-3, DR6)
; *
; * @returns VBox status code
; * @param pCtx Guest context [rsi]
; */
BEGINPROC HWACCMSaveGuestDebug64
    mov     rax, dr0
    mov     qword [rsi + CPUMCTX.dr + 0*8], rax
    mov     rax, dr1
    mov     qword [rsi + CPUMCTX.dr + 1*8], rax
    mov     rax, dr2
    mov     qword [rsi + CPUMCTX.dr + 2*8], rax
    mov     rax, dr3
    mov     qword [rsi + CPUMCTX.dr + 3*8], rax
    mov     rax, dr6
    mov     qword [rsi + CPUMCTX.dr + 6*8], rax
    mov     eax, VINF_SUCCESS
    ret
ENDPROC HWACCMSaveGuestDebug64


;/**
; * Dummy callback handler
; *
; * @returns VBox status code
; * @param pCtx Guest context [rsi]
; */
BEGINPROC HWACCMTestSwitcher64
    mov     eax, VINF_SUCCESS
    ret
ENDPROC HWACCMTestSwitcher64