source: vbox/trunk/src/VBox/VMM/VMMGC/HWACCMGCA.asm@ 14997

Last change on this file since 14997 was 14997, checked in by vboxsync, 16 years ago

Clearly mark possible 64 bits values

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 15.0 KB
; $Id: HWACCMGCA.asm 14997 2008-12-04 16:32:35Z vboxsync $
;; @file
; VMXM - GC vmx helpers
;

;
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;
;*******************************************************************************
;*  Header Files                                                               *
;*******************************************************************************
%undef RT_ARCH_X86
%define RT_ARCH_AMD64
%include "VBox/asmdefs.mac"
%include "VBox/err.mac"
%include "VBox/hwacc_vmx.mac"
%include "VBox/cpum.mac"
%include "VBox/x86.mac"

%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
 %macro vmwrite 2,
    int3
 %endmacro
 %define vmlaunch int3
 %define vmresume int3
 %define vmsave int3
 %define vmload int3
 %define vmrun int3
 %define clgi int3
 %define stgi int3
 %macro invlpga 2,
    int3
 %endmacro
%endif
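
; Note: yasm's OMF object writer apparently cannot emit these VMX/SVM
; instructions yet (see the @todo above), so on OS/2 they are stubbed out as
; int3 breakpoints; actually executing one of them there traps immediately
; instead of misbehaving silently.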

;; @def MYPUSHAD
; Macro generating an equivalent to pushad

;; @def MYPOPAD
; Macro generating an equivalent to popad

;; @def MYPUSHSEGS
; Macro saving all segment registers on the stack.
; @param 1 full width register name
; @param 2 16-bit register name for \a 1.

;; @def MYPOPSEGS
; Macro restoring all segment registers from the stack.
; @param 1 full width register name
; @param 2 16-bit register name for \a 1.

 ; Load the corresponding guest MSR (trashes rdx & rcx)
 %macro LOADGUESTMSR 2
    mov     rcx, %1                     ; wrmsr takes the MSR index in (e)cx
    mov     edx, dword [rsi + %2 + 4]
    mov     eax, dword [rsi + %2]
    wrmsr
 %endmacro

 ; Save a guest and load the corresponding host MSR (trashes rdx & rcx)
 ; Only really useful for gs kernel base as that one can be changed behind our back (swapgs)
 %macro LOADHOSTMSREX 2
    mov     rcx, %1
    rdmsr
    mov     dword [rsi + %2], eax
    mov     dword [rsi + %2 + 4], edx
 %endmacro
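
; Note: rdmsr/wrmsr address the MSR through ecx and move the 64-bit value in
; edx:eax (high:low), which is why both macros above split each context field
; into two 32-bit halves.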

 %ifdef ASM_CALL64_GCC
 %macro MYPUSHAD 0
    push    r15
    push    r14
    push    r13
    push    r12
    push    rbx
 %endmacro
 %macro MYPOPAD 0
    pop     rbx
    pop     r12
    pop     r13
    pop     r14
    pop     r15
 %endmacro

 %else ; ASM_CALL64_MSC
 %macro MYPUSHAD 0
    push    r15
    push    r14
    push    r13
    push    r12
    push    rbx
    push    rsi
    push    rdi
 %endmacro
 %macro MYPOPAD 0
    pop     rdi
    pop     rsi
    pop     rbx
    pop     r12
    pop     r13
    pop     r14
    pop     r15
 %endmacro
 %endif
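
; Note: the two variants mirror the callee-saved register sets of the calling
; conventions: System V AMD64 (GCC) only requires rbx, rbp and r12-r15 to be
; preserved, while the Microsoft x64 convention additionally treats rsi and
; rdi as callee-saved, so those must be pushed and popped as well.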

; trashes rax, rdx & rcx
 %macro MYPUSHSEGS 2
    mov     %2, es
    push    %1
    mov     %2, ds
    push    %1

    ; Special case for FS; Windows and Linux either don't use it or restore it when leaving kernel mode,
    ; Solaris OTOH doesn't and we must save it.
    mov     ecx, MSR_K8_FS_BASE
    rdmsr
    push    rdx
    push    rax
    push    fs

    ; Special case for GS; OSes typically use swapgs to reset the hidden base register for GS on entry
    ; into the kernel. The same happens on exit.
    mov     ecx, MSR_K8_GS_BASE
    rdmsr
    push    rdx
    push    rax
    push    gs
 %endmacro
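
; Note: in long mode the FS/GS selector values say nothing about the bases
; actually in use; those live in MSR_K8_FS_BASE (0xc0000100) and
; MSR_K8_GS_BASE (0xc0000101), so the bases must be saved via rdmsr in
; addition to pushing the selectors.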

; trashes rax, rdx & rcx
 %macro MYPOPSEGS 2
    ; Note: do not step through this code with a debugger!
    pop     gs
    pop     rax
    pop     rdx
    mov     ecx, MSR_K8_GS_BASE
    wrmsr

    pop     fs
    pop     rax
    pop     rdx
    mov     ecx, MSR_K8_FS_BASE
    wrmsr
    ; Now it's safe to step again

    pop     %1
    mov     ds, %2
    pop     %1
    mov     es, %2
 %endmacro
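
; Note: the "do not step" warning above exists because 'pop gs' reloads the
; hidden GS base from the descriptor table and only the subsequent wrmsr puts
; the real base back; a debug trap taken in between would presumably run on a
; bogus GS base.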



BEGINCODE
BITS 64


;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
; *
; * @returns VBox status code
; * @param pCtx Guest context (rsi)
; */
BEGINPROC VMXGCStartVM64
    push    rbp
    mov     rbp, rsp

    pushf
    cli

    ; Have to sync half the guest state as we can't access most of the 64 bits state. Sigh
;    VMCSWRITE VMX_VMCS64_GUEST_CS_BASE,   [rsi + CPUMCTX.csHid.u64Base]
;    VMCSWRITE VMX_VMCS64_GUEST_DS_BASE,   [rsi + CPUMCTX.dsHid.u64Base]
;    VMCSWRITE VMX_VMCS64_GUEST_ES_BASE,   [rsi + CPUMCTX.esHid.u64Base]
;    VMCSWRITE VMX_VMCS64_GUEST_FS_BASE,   [rsi + CPUMCTX.fsHid.u64Base]
;    VMCSWRITE VMX_VMCS64_GUEST_GS_BASE,   [rsi + CPUMCTX.gsHid.u64Base]
;    VMCSWRITE VMX_VMCS64_GUEST_SS_BASE,   [rsi + CPUMCTX.ssHid.u64Base]
;    VMCSWRITE VMX_VMCS64_GUEST_LDTR_BASE, [rsi + CPUMCTX.ldtrHid.u64Base]
;    VMCSWRITE VMX_VMCS64_GUEST_GDTR_BASE, [rsi + CPUMCTX.gdtrHid.u64Base]
;    VMCSWRITE VMX_VMCS64_GUEST_IDTR_BASE, [rsi + CPUMCTX.idtrHid.u64Base]
;    VMCSWRITE VMX_VMCS64_GUEST_TR_BASE,   [rsi + CPUMCTX.trHid.u64Base]
;
;    VMCSWRITE VMX_VMCS64_GUEST_SYSENTER_EIP, [rsi + CPUMCTX.SysEnter.eip]
;    VMCSWRITE VMX_VMCS64_GUEST_SYSENTER_ESP, [rsi + CPUMCTX.SysEnter.esp]
;
;    VMCSWRITE VMX_VMCS64_GUEST_RIP, [rsi + CPUMCTX.eip]
;    VMCSWRITE VMX_VMCS64_GUEST_RSP, [rsi + CPUMCTX.esp]


    ;/* First we have to save some final CPU context registers. */
    lea     rax, [.vmlaunch64_done wrt rip]
    push    rax
    mov     rax, VMX_VMCS_HOST_RIP      ;/* return address (too difficult to continue after VMLAUNCH?) */
    vmwrite rax, [rsp]
    ;/* Note: assumes success... */
    add     rsp, 8
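
    ; Note: the CPU loads rip from the VMCS host-state area on every VM exit,
    ; so writing .vmlaunch64_done into VMX_VMCS_HOST_RIP makes that label the
    ; single point where execution resumes once the guest stops running.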

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
    ; pCtx already in rsi

    ;/* Save segment registers */
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
    MYPUSHSEGS rax, ax

    ; Load the guest LSTAR, STAR, SFMASK & KERNEL_GSBASE MSRs
    ;; @todo use the automatic load feature for MSRs
    LOADGUESTMSR MSR_K8_LSTAR,          CPUMCTX.msrLSTAR
    LOADGUESTMSR MSR_K6_STAR,           CPUMCTX.msrSTAR
    LOADGUESTMSR MSR_K8_SF_MASK,        CPUMCTX.msrSFMASK
    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
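
    ; Note: these are the syscall/swapgs MSRs (syscall target rip, segment
    ; selectors, rflags mask and the swapgs kernel GS base); the VMCS has no
    ; guest-state fields for them, hence the manual load here and the @todo
    ; about using the MSR auto load/store area instead.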

    ; Save the pCtx pointer
    push    rsi

    ; Save LDTR
    xor     eax, eax
    sldt    ax
    push    rax

    ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub     rsp, 8*2
    sgdt    [rsp]

    sub     rsp, 8*2
    sidt    [rsp]
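
    ; Note: in 64-bit mode sgdt/sidt store a 10-byte pseudo-descriptor (2-byte
    ; limit + 8-byte base); 16 bytes are reserved for each to keep the stack
    ; qword aligned.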

    ; Restore CR2
    mov     rbx, qword [rsi + CPUMCTX.cr2]
    mov     cr2, rbx

    mov     eax, VMX_VMCS_HOST_RSP
    vmwrite rax, rsp
    ;/* Note: assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov     rax, qword [rsi + CPUMCTX.eax]
    mov     rbx, qword [rsi + CPUMCTX.ebx]
    mov     rcx, qword [rsi + CPUMCTX.ecx]
    mov     rdx, qword [rsi + CPUMCTX.edx]
    mov     rbp, qword [rsi + CPUMCTX.ebp]
    mov     r8,  qword [rsi + CPUMCTX.r8]
    mov     r9,  qword [rsi + CPUMCTX.r9]
    mov     r10, qword [rsi + CPUMCTX.r10]
    mov     r11, qword [rsi + CPUMCTX.r11]
    mov     r12, qword [rsi + CPUMCTX.r12]
    mov     r13, qword [rsi + CPUMCTX.r13]
    mov     r14, qword [rsi + CPUMCTX.r14]
    mov     r15, qword [rsi + CPUMCTX.r15]

    ;/* Restore rdi & rsi. */
    mov     rdi, qword [rsi + CPUMCTX.edi]
    mov     rsi, qword [rsi + CPUMCTX.esi]

    vmlaunch
    jmp     .vmlaunch64_done            ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16)
.vmlaunch64_done:
    jc      near .vmstart64_invalid_vmxon_ptr
    jz      near .vmstart64_start_failed
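
    ; Note: VMLAUNCH reports failure through rflags: CF=1 means VMfailInvalid
    ; (no valid current VMCS) and ZF=1 means VMfailValid (error number in the
    ; VM-instruction error field), which is what the jc/jz above distinguish.
    ; On success we only get back here through the VM exit path, via the host
    ; rip written earlier.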

    ; Restore base and limit of the IDTR & GDTR
    lidt    [rsp]
    add     rsp, 8*2
    lgdt    [rsp]
    add     rsp, 8*2

    push    rdi
    mov     rdi, [rsp + 8 * 2]          ; pCtx

    mov     qword [rdi + CPUMCTX.eax], rax
    mov     qword [rdi + CPUMCTX.ebx], rbx
    mov     qword [rdi + CPUMCTX.ecx], rcx
    mov     qword [rdi + CPUMCTX.edx], rdx
    mov     qword [rdi + CPUMCTX.esi], rsi
    mov     qword [rdi + CPUMCTX.ebp], rbp
    mov     qword [rdi + CPUMCTX.r8],  r8
    mov     qword [rdi + CPUMCTX.r9],  r9
    mov     qword [rdi + CPUMCTX.r10], r10
    mov     qword [rdi + CPUMCTX.r11], r11
    mov     qword [rdi + CPUMCTX.r12], r12
    mov     qword [rdi + CPUMCTX.r13], r13
    mov     qword [rdi + CPUMCTX.r14], r14
    mov     qword [rdi + CPUMCTX.r15], r15

    pop     rax                         ; the guest edi we pushed above
    mov     qword [rdi + CPUMCTX.edi], rax

    pop     rax                         ; saved LDTR
    lldt    ax

    pop     rsi                         ; pCtx (needed in rsi by the macros below)

    ; Save the guest KERNEL_GSBASE MSR (swapgs can change it behind our back)
    ;; @todo use the automatic load feature for MSRs
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE

    ; Restore segment registers
    MYPOPSEGS rax, ax

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

.vmstart64_end:
    popf
    pop     rbp
    ret


.vmstart64_invalid_vmxon_ptr:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [rsp]
    add     rsp, 8*2
    lgdt    [rsp]
    add     rsp, 8*2

    pop     rax                         ; saved LDTR
    lldt    ax

    pop     rsi                         ; pCtx (needed in rsi by the macros below)

    ; Save the guest KERNEL_GSBASE MSR (swapgs can change it behind our back)
    ;; @todo use the automatic load feature for MSRs
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE

    ; Restore segment registers
    MYPOPSEGS rax, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .vmstart64_end

.vmstart64_start_failed:
    ; Restore base and limit of the IDTR & GDTR
    lidt    [rsp]
    add     rsp, 8*2
    lgdt    [rsp]
    add     rsp, 8*2

    pop     rax                         ; saved LDTR
    lldt    ax

    pop     rsi                         ; pCtx (needed in rsi by the macros below)

    ; Save the guest KERNEL_GSBASE MSR (swapgs can change it behind our back)
    ;; @todo use the automatic load feature for MSRs
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE

    ; Restore segment registers
    MYPOPSEGS rax, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov     eax, VERR_VMX_UNABLE_TO_START_VM
    jmp     .vmstart64_end
ENDPROC VMXGCStartVM64


;/**
; * Prepares for and executes VMRUN (64-bit guests)
; *
; * @returns VBox status code
; * @param HCPhysVMCBHost Physical address of host VMCB (rsp+8)
; * @param HCPhysVMCB     Physical address of guest VMCB (rsp+16)
; * @param pCtx           Guest context (rsi)
; */
BEGINPROC SVMGCVMRun64
    push    rbp
    mov     rbp, rsp
    pushf

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
    push    rsi                         ; push for saving the state at the end

    ; Restore CR2
    mov     rbx, [rsi + CPUMCTX.cr2]
    mov     cr2, rbx

    ; save host fs, gs, sysenter msr etc
    mov     rax, [rbp + 8]              ; pVMCBHostPhys (64 bits physical address)
    push    rax                         ; save for the vmload after vmrun
    vmsave
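
    ; Note: vmsave/vmload take the VMCB physical address in rax and transfer
    ; the state VMRUN/#VMEXIT do not: FS, GS, TR and LDTR (including hidden
    ; state), KernelGSBase, STAR/LSTAR/CSTAR/SFMASK and the SYSENTER MSRs.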

    ; setup eax for VMLOAD
    mov     rax, [rbp + 8 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address)

    ;/* Restore Guest's general purpose registers. */
    ;/* RAX is loaded from the VMCB by VMRUN */
    mov     rbx, qword [rsi + CPUMCTX.ebx]
    mov     rcx, qword [rsi + CPUMCTX.ecx]
    mov     rdx, qword [rsi + CPUMCTX.edx]
    mov     rdi, qword [rsi + CPUMCTX.edi]
    mov     rbp, qword [rsi + CPUMCTX.ebp]
    mov     r8,  qword [rsi + CPUMCTX.r8]
    mov     r9,  qword [rsi + CPUMCTX.r9]
    mov     r10, qword [rsi + CPUMCTX.r10]
    mov     r11, qword [rsi + CPUMCTX.r11]
    mov     r12, qword [rsi + CPUMCTX.r12]
    mov     r13, qword [rsi + CPUMCTX.r13]
    mov     r14, qword [rsi + CPUMCTX.r14]
    mov     r15, qword [rsi + CPUMCTX.r15]
    mov     rsi, qword [rsi + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    clgi
    sti
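
    ; Note: clgi clears the SVM global interrupt flag, so the sti above cannot
    ; take effect here; interrupts are simply held pending while GIF=0. Host
    ; IF=1 matters once the guest runs: with V_INTR_MASKING (presumably set up
    ; in the VMCB elsewhere) physical interrupt masking still follows the host
    ; IF, so an external interrupt triggers the intercept and a world switch.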

    ; load guest fs, gs, sysenter msr etc
    vmload
    ; run the VM
    vmrun

    ;/* RAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    vmsave

    ; load host fs, gs, sysenter msr etc
    pop     rax                         ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop     rax                         ; pCtx

    mov     qword [rax + CPUMCTX.ebx], rbx
    mov     qword [rax + CPUMCTX.ecx], rcx
    mov     qword [rax + CPUMCTX.edx], rdx
    mov     qword [rax + CPUMCTX.esi], rsi
    mov     qword [rax + CPUMCTX.edi], rdi
    mov     qword [rax + CPUMCTX.ebp], rbp
    mov     qword [rax + CPUMCTX.r8],  r8
    mov     qword [rax + CPUMCTX.r9],  r9
    mov     qword [rax + CPUMCTX.r10], r10
    mov     qword [rax + CPUMCTX.r11], r11
    mov     qword [rax + CPUMCTX.r12], r12
    mov     qword [rax + CPUMCTX.r13], r13
    mov     qword [rax + CPUMCTX.r14], r14
    mov     qword [rax + CPUMCTX.r15], r15

    ; Restore general purpose registers
    MYPOPAD

    mov     eax, VINF_SUCCESS

    popf
    pop     rbp
    ret
ENDPROC SVMGCVMRun64

;/**
; * Saves the guest FPU context
; *
; * @returns VBox status code
; * @param pCtx Guest context [rsi]
; */
BEGINPROC HWACCMSaveGuestFPU64
    mov     rax, cr0
    mov     rcx, rax                    ; save old CR0
    and     rax, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, rax

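    ; Note: fxsave raises #NM when CR0.TS is set and #UD when CR0.EM is set,
    ; hence the masking above; the original CR0 is put back right after.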
    fxsave  [rsi + CPUMCTX.fpu]

    mov     cr0, rcx                    ; and restore old CR0 again

    mov     eax, VINF_SUCCESS
    ret
ENDPROC HWACCMSaveGuestFPU64

;/**
; * Saves the guest debug context (DR0-3, DR6)
; *
; * @returns VBox status code
; * @param pCtx Guest context [rsi]
; */
BEGINPROC HWACCMSaveGuestDebug64
    mov     rax, dr0
    mov     qword [rsi + CPUMCTX.dr + 0*8], rax
    mov     rax, dr1
    mov     qword [rsi + CPUMCTX.dr + 1*8], rax
    mov     rax, dr2
    mov     qword [rsi + CPUMCTX.dr + 2*8], rax
    mov     rax, dr3
    mov     qword [rsi + CPUMCTX.dr + 3*8], rax
    mov     rax, dr6
    mov     qword [rsi + CPUMCTX.dr + 6*8], rax
    mov     eax, VINF_SUCCESS
    ret
ENDPROC HWACCMSaveGuestDebug64
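
; Note: CPUMCTX.dr is indexed as an array of eight 64-bit slots, one per debug
; register number, which is why DR6 lands at offset 6*8; slots 4 and 5 stay
; unused since DR4/DR5 are merely aliases of DR6/DR7, and DR7 itself is
; handled elsewhere.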

;/**
; * Dummy callback handler
; *
; * @returns VBox status code
; * @param pCtx Guest context [rsi]
; */
BEGINPROC HWACCMTestSwitcher64
    mov     eax, VINF_SUCCESS
    ret
ENDPROC HWACCMTestSwitcher64