VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMGC/HWACCMGCA.asm@15657

Last change on this file was 15657, checked in by vboxsync, 16 years ago:

Extra error checks for vmxon & vmptrld.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 15.1 KB
; $Id: HWACCMGCA.asm 15657 2008-12-18 13:52:03Z vboxsync $
;; @file
; VMXM - GC vmx helpers
;

;
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;

;*******************************************************************************
;*   Header Files                                                              *
;*******************************************************************************
%undef RT_ARCH_X86
%define RT_ARCH_AMD64
%include "VBox/asmdefs.mac"
%include "VBox/err.mac"
%include "VBox/hwacc_vmx.mac"
%include "VBox/cpum.mac"
%include "VBox/x86.mac"
%include "../HWACCMInternal.mac"

%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
 %macro vmwrite 2,
    int3
 %endmacro
 %define vmlaunch int3
 %define vmresume int3
 %define vmsave int3
 %define vmload int3
 %define vmrun int3
 %define clgi int3
 %define stgi int3
 %macro invlpga 2,
    int3
 %endmacro
%endif

;; @def MYPUSHSEGS
; Macro saving all segment registers on the stack.
; @param 1  full width register name

;; @def MYPOPSEGS
; Macro restoring all segment registers from the stack.
; @param 1  full width register name

; Load the corresponding guest MSR (trashes rdx & rcx)
%macro LOADGUESTMSR 2
    mov     rcx, %1
    mov     edx, dword [rsi + %2 + 4]
    mov     eax, dword [rsi + %2]
    wrmsr
%endmacro

; Save a guest MSR (trashes rdx & rcx)
; Only really useful for gs kernel base as that one can be changed behind our back (swapgs)
%macro SAVEGUESTMSR 2
    mov     rcx, %1
    rdmsr
    mov     dword [rsi + %2], eax
    mov     dword [rsi + %2 + 4], edx
%endmacro

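; Note: wrmsr and rdmsr transfer the 64-bit MSR value through edx:eax (high
; dword in edx, low dword in eax) with the MSR index in ecx, which is why the
; macros above split each 64-bit CPUMCTX field into two dword halves. As a
; sketch, LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR expands to:
;
;   mov     rcx, MSR_K8_LSTAR                           ; MSR index
;   mov     edx, dword [rsi + CPUMCTX.msrLSTAR + 4]     ; high dword
;   mov     eax, dword [rsi + CPUMCTX.msrLSTAR]         ; low dword
;   wrmsr                                               ; write edx:eax to the MSR
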
%macro MYPUSHSEGS 1
    mov     %1, es
    push    %1
    mov     %1, ds
    push    %1
%endmacro

%macro MYPOPSEGS 1
    pop     %1
    mov     ds, %1
    pop     %1
    mov     es, %1
%endmacro

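; Note: the pops mirror the pushes (ds is pushed last, hence popped first).
; A minimal usage sketch with rax as the scratch register:
;
;   MYPUSHSEGS rax      ; mov rax, es / push rax / mov rax, ds / push rax
;   ; ... code that may reload ds/es ...
;   MYPOPSEGS  rax      ; pop rax / mov ds, rax / pop rax / mov es, rax
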
BEGINCODE
BITS 64


;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (64 bits guest mode)
; *
; * @returns VBox status code
; * @param   pPageCpuPhys   VMXON physical address   [rsp+8]
; * @param   pVMCSPhys      VMCS physical address    [rsp+16]
; * @param   pCache         VMCS cache               [rsp+24]
; * @param   pCtx           Guest context (rsi)
; */
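; Note on the error checks below: VMX instructions report failure through
; RFLAGS rather than a return value. CF=1 signals VMfailInvalid (e.g. a bad
; physical-address operand), ZF=1 signals VMfailValid (an error number is in
; the VM-instruction error field of the current VMCS), and CF=ZF=0 signals
; success. Hence each vmxon/vmptrld below is followed by a jnc check for the
; invalid-pointer case and a jnz check for the generic-failure case.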
BEGINPROC VMXGCStartVM64
    push    rbp
    mov     rbp, rsp

    ; Make sure VT-x instructions are allowed
    mov     rax, cr4
    or      rax, X86_CR4_VMXE
    mov     cr4, rax

    ;/* Enter VMX Root Mode */
    vmxon   [rbp + 8 + 8]
    jnc     .vmxon_success
    mov     rax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .vmstart64_vmxon_failed

.vmxon_success:
    jnz     .vmxon_success2
    mov     rax, VERR_VMX_GENERIC
    jmp     .vmstart64_vmxon_failed

.vmxon_success2:
    ; Activate the VMCS pointer
    vmptrld [rbp + 16 + 8]
    jnc     .vmptrld_success
    mov     rax, VERR_VMX_INVALID_VMCS_PTR
    jmp     .vmstart64_vmoff_end

.vmptrld_success:
    jnz     .vmptrld_success2
    mov     rax, VERR_VMX_GENERIC
    jmp     .vmstart64_vmoff_end

.vmptrld_success2:

    ; Save the VMCS pointer on the stack
    push    qword [rbp + 16 + 8]

    ;/* Save segment registers */
    MYPUSHSEGS rax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    ; Flush the VMCS write cache first (before any other vmreads/vmwrites!)
    mov     rbx, [rbp + 24 + 8]                     ; pCache

%ifdef DEBUG
    mov     rax, [rbp + 8 + 8]                      ; pPageCpuPhys
    mov     [rbx + VMCSCACHE.TestIn.pPageCpuPhys], rax
    mov     rax, [rbp + 16 + 8]                     ; pVMCSPhys
    mov     [rbx + VMCSCACHE.TestIn.pVMCSPhys], rax
    mov     [rbx + VMCSCACHE.TestIn.pCache], rbx
    mov     [rbx + VMCSCACHE.TestIn.pCtx], rsi
%endif

    mov     ecx, [xBX + VMCSCACHE.Write.cValidEntries]
    cmp     ecx, 0
    je      .no_cached_writes
    mov     edx, ecx
    mov     ecx, 0
    jmp     .cached_write

ALIGN(16)
.cached_write:
    mov     eax, [xBX + VMCSCACHE.Write.aField + xCX*4]
    vmwrite xAX, qword [xBX + VMCSCACHE.Write.aFieldVal + xCX*8]
    inc     xCX
    cmp     xCX, xDX
    jl      .cached_write

    mov     dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

    ; Save the pCache pointer
    push    xBX
%endif

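; The loop above is the write-cache flush; in pseudocode (using the same
; structure fields as above):
;
;   for (i = 0; i < pCache->Write.cValidEntries; i++)
;       vmwrite(pCache->Write.aField[i], pCache->Write.aFieldVal[i]);
;   pCache->Write.cValidEntries = 0;
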
    ; Save the host state that's relevant in the temporary 64 bits mode
    mov     rdx, cr0
    mov     eax, VMX_VMCS_HOST_CR0
    vmwrite rax, rdx

    mov     rdx, cr3
    mov     eax, VMX_VMCS_HOST_CR3
    vmwrite rax, rdx

    mov     rdx, cr4
    mov     eax, VMX_VMCS_HOST_CR4
    vmwrite rax, rdx

    mov     rdx, cs
    mov     eax, VMX_VMCS_HOST_FIELD_CS
    vmwrite rax, rdx

    mov     rdx, ss
    mov     eax, VMX_VMCS_HOST_FIELD_SS
    vmwrite rax, rdx

    sub     rsp, 8*2
    sgdt    [rsp]
    mov     eax, VMX_VMCS_HOST_GDTR_BASE
    vmwrite rax, [rsp+2]
    add     rsp, 8*2

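    ; (sgdt stores a 10-byte pseudo-descriptor: the 16-bit GDT limit at [rsp]
    ;  followed by the 64-bit base at [rsp+2], hence the vmwrite above reads
    ;  the base from [rsp+2].)
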
    ; hopefully we can ignore TR (we restore it anyway on the way back to 32 bits mode)

    ;/* First we have to save some final CPU context registers. */
    lea     rdx, [.vmlaunch64_done wrt rip]
    mov     rax, VMX_VMCS_HOST_RIP                  ;/* return address (too difficult to continue after VMLAUNCH?) */
    vmwrite rax, rdx
    ;/* Note: assumes success... */

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */

    ; Load the guest LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    ;; @todo use the automatic load feature for MSRs
    LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
%if 0 ; not supported on Intel CPUs
    LOADGUESTMSR MSR_K8_CSTAR, CPUMCTX.msrCSTAR
%endif
    LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
    LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE

    ; Save the pCtx pointer
    push    rsi

    ; Restore CR2
    mov     rbx, qword [rsi + CPUMCTX.cr2]
    mov     cr2, rbx

    mov     eax, VMX_VMCS_HOST_RSP
    vmwrite rax, rsp
    ;/* Note: assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov     rax, qword [rsi + CPUMCTX.eax]
    mov     rbx, qword [rsi + CPUMCTX.ebx]
    mov     rcx, qword [rsi + CPUMCTX.ecx]
    mov     rdx, qword [rsi + CPUMCTX.edx]
    mov     rbp, qword [rsi + CPUMCTX.ebp]
    mov     r8, qword [rsi + CPUMCTX.r8]
    mov     r9, qword [rsi + CPUMCTX.r9]
    mov     r10, qword [rsi + CPUMCTX.r10]
    mov     r11, qword [rsi + CPUMCTX.r11]
    mov     r12, qword [rsi + CPUMCTX.r12]
    mov     r13, qword [rsi + CPUMCTX.r13]
    mov     r14, qword [rsi + CPUMCTX.r14]
    mov     r15, qword [rsi + CPUMCTX.r15]

    ;/* Restore rdi & rsi. */
    mov     rdi, qword [rsi + CPUMCTX.edi]
    mov     rsi, qword [rsi + CPUMCTX.esi]

    vmlaunch
    jmp     .vmlaunch64_done                        ;/* here if vmlaunch detected a failure. */

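; Control-flow note: on success vmlaunch does not fall through. The guest runs
; until a VM exit, at which point the CPU loads the host state saved above
; (VMX_VMCS_HOST_RIP = .vmlaunch64_done, VMX_VMCS_HOST_RSP = rsp) and resumes
; at .vmlaunch64_done. Only a failed vmlaunch reaches the jmp, which is why
; .vmlaunch64_done starts by re-checking CF and ZF.
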
ALIGNCODE(16)
.vmlaunch64_done:
    jc      near .vmstart64_invalid_vmxon_ptr
    jz      near .vmstart64_start_failed

    push    rdi
    mov     rdi, [rsp + 8]                          ; pCtx

    mov     qword [rdi + CPUMCTX.eax], rax
    mov     qword [rdi + CPUMCTX.ebx], rbx
    mov     qword [rdi + CPUMCTX.ecx], rcx
    mov     qword [rdi + CPUMCTX.edx], rdx
    mov     qword [rdi + CPUMCTX.esi], rsi
    mov     qword [rdi + CPUMCTX.ebp], rbp
    mov     qword [rdi + CPUMCTX.r8], r8
    mov     qword [rdi + CPUMCTX.r9], r9
    mov     qword [rdi + CPUMCTX.r10], r10
    mov     qword [rdi + CPUMCTX.r11], r11
    mov     qword [rdi + CPUMCTX.r12], r12
    mov     qword [rdi + CPUMCTX.r13], r13
    mov     qword [rdi + CPUMCTX.r14], r14
    mov     qword [rdi + CPUMCTX.r15], r15

    pop     rax                                     ; the guest edi we pushed above
    mov     qword [rdi + CPUMCTX.edi], rax

    pop     rsi                                     ; pCtx (needed in rsi by the macros below)

    ;; @todo use the automatic load feature for MSRs
    SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop     rdi                                     ; saved pCache

%ifdef DEBUG
    mov     [rdi + VMCSCACHE.TestOut.pCache], rdi
    mov     [rdi + VMCSCACHE.TestOut.pCtx], rsi
%endif

    mov     ecx, [rdi + VMCSCACHE.Read.cValidEntries]
    cmp     ecx, 0                                  ; can't happen
    je      .no_cached_reads
    jmp     .cached_read

ALIGN(16)
.cached_read:
    dec     xCX
    mov     eax, [rdi + VMCSCACHE.Read.aField + xCX*4]
    vmread  qword [rdi + VMCSCACHE.Read.aFieldVal + xCX*8], xAX
    cmp     xCX, 0
    jnz     .cached_read
.no_cached_reads:

    ; Save CR2 for EPT
    mov     rax, cr2
    mov     [rdi + VMCSCACHE.cr2], rax
%endif

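; The read loop above walks the cache backwards; in pseudocode:
;
;   for (i = pCache->Read.cValidEntries; i-- > 0; )
;       pCache->Read.aFieldVal[i] = vmread(pCache->Read.aField[i]);
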
    ; Restore segment registers
    MYPOPSEGS rax

    mov     eax, VINF_SUCCESS

.vmstart64_end:

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
%ifdef DEBUG
    mov     rdx, [rsp]                              ; pVMCSPhys
    mov     [rdi + VMCSCACHE.TestOut.pVMCSPhys], rdx
%endif
%endif

    ; Write back the data and disable the VMCS
    vmclear qword [rsp]                             ; pushed pVMCS
    add     rsp, 8

.vmstart64_vmoff_end:
    ; Disable VMX root mode
    vmxoff
.vmstart64_vmxon_failed:
%ifdef VMX_USE_CACHED_VMCS_ACCESSES
%ifdef DEBUG
    cmp     eax, VINF_SUCCESS
    jne     .skip_flags_save

    pushf
    pop     rdx
    mov     [rdi + VMCSCACHE.TestOut.eflags], rdx
.skip_flags_save:
%endif
%endif
    pop     rbp
    ret


.vmstart64_invalid_vmxon_ptr:
    pop     rsi                                     ; pCtx (needed in rsi by the macros below)

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop     rdi                                     ; pCache

%ifdef DEBUG
    mov     [rdi + VMCSCACHE.TestOut.pCache], rdi
    mov     [rdi + VMCSCACHE.TestOut.pCtx], rsi
%endif

%endif

    ; Restore segment registers
    MYPOPSEGS rax

    ; Restore all general purpose host registers.
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .vmstart64_end

.vmstart64_start_failed:
    pop     rsi                                     ; pCtx (needed in rsi by the macros below)

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop     rdi                                     ; pCache

%ifdef DEBUG
    mov     [rdi + VMCSCACHE.TestOut.pCache], rdi
    mov     [rdi + VMCSCACHE.TestOut.pCtx], rsi
%endif

%endif

    ; Restore segment registers
    MYPOPSEGS rax

    ; Restore all general purpose host registers.
    mov     eax, VERR_VMX_UNABLE_TO_START_VM
    jmp     .vmstart64_end
ENDPROC VMXGCStartVM64


;/**
; * Prepares for and executes VMRUN (64 bits guests)
; *
; * @returns VBox status code
; * @param   HCPhysVMCBHost  Physical address of host VMCB   (rsp+8)
; * @param   HCPhysVMCB      Physical address of guest VMCB  (rsp+16)
; * @param   pCtx            Guest context                   (rsi)
; */
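; Note on the AMD-V convention used below: vmrun, vmsave and vmload all take
; the physical address of a VMCB in rax. The routine therefore saves host
; state into the host VMCB, points rax at the guest VMCB for vmload/vmrun,
; and swaps back to the host VMCB afterwards.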
BEGINPROC SVMGCVMRun64
    push    rbp
    mov     rbp, rsp
    pushf

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; */

    ;/* Save the Guest CPU context pointer. */
    push    rsi                                     ; push for saving the state at the end

    ; save host fs, gs, sysenter msr etc
    mov     rax, [rbp + 8 + 8]                      ; pVMCBHostPhys (64 bits physical address)
    push    rax                                     ; save for the vmload after vmrun
    vmsave

    ; setup rax for VMLOAD
    mov     rax, [rbp + 8 + 8 + RTHCPHYS_CB]        ; pVMCBPhys (64 bits physical address)

    ;/* Restore Guest's general purpose registers. */
    ;/* RAX is loaded from the VMCB by VMRUN */
    mov     rbx, qword [rsi + CPUMCTX.ebx]
    mov     rcx, qword [rsi + CPUMCTX.ecx]
    mov     rdx, qword [rsi + CPUMCTX.edx]
    mov     rdi, qword [rsi + CPUMCTX.edi]
    mov     rbp, qword [rsi + CPUMCTX.ebp]
    mov     r8, qword [rsi + CPUMCTX.r8]
    mov     r9, qword [rsi + CPUMCTX.r9]
    mov     r10, qword [rsi + CPUMCTX.r10]
    mov     r11, qword [rsi + CPUMCTX.r11]
    mov     r12, qword [rsi + CPUMCTX.r12]
    mov     r13, qword [rsi + CPUMCTX.r13]
    mov     r14, qword [rsi + CPUMCTX.r14]
    mov     r15, qword [rsi + CPUMCTX.r15]
    mov     rsi, qword [rsi + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    clgi
    sti
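    ; (clgi clears the global interrupt flag, so the sti above does not take
    ;  effect yet; pending external interrupts can then trigger a world
    ;  switch, i.e. a #VMEXIT, once the guest is running.)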

    ; load guest fs, gs, sysenter msr etc
    vmload
    ; run the VM
    vmrun

    ;/* RAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    vmsave

    ; load host fs, gs, sysenter msr etc
    pop     rax                                     ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

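    ; (mirror of the entry sequence: cli clears EFLAGS.IF before stgi
    ;  re-enables the global interrupt flag, so no interrupt can be taken
    ;  until the original flags are restored by the popf below.)
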
    pop     rax                                     ; pCtx

    mov     qword [rax + CPUMCTX.ebx], rbx
    mov     qword [rax + CPUMCTX.ecx], rcx
    mov     qword [rax + CPUMCTX.edx], rdx
    mov     qword [rax + CPUMCTX.esi], rsi
    mov     qword [rax + CPUMCTX.edi], rdi
    mov     qword [rax + CPUMCTX.ebp], rbp
    mov     qword [rax + CPUMCTX.r8], r8
    mov     qword [rax + CPUMCTX.r9], r9
    mov     qword [rax + CPUMCTX.r10], r10
    mov     qword [rax + CPUMCTX.r11], r11
    mov     qword [rax + CPUMCTX.r12], r12
    mov     qword [rax + CPUMCTX.r13], r13
    mov     qword [rax + CPUMCTX.r14], r14
    mov     qword [rax + CPUMCTX.r15], r15

    mov     eax, VINF_SUCCESS

    popf
    pop     rbp
    ret
ENDPROC SVMGCVMRun64

;/**
; * Saves the guest FPU context
; *
; * @returns VBox status code
; * @param   pCtx  Guest context [rsi]
; */
BEGINPROC HWACCMSaveGuestFPU64
    mov     rax, cr0
    mov     rcx, rax                                ; save old CR0
    and     rax, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, rax

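    ; (fxsave raises #NM if CR0.TS is set and #UD if CR0.EM is set, hence
    ;  both bits are cleared above before touching the FPU state.)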
    fxsave  [rsi + CPUMCTX.fpu]

    mov     cr0, rcx                                ; and restore old CR0 again

    mov     eax, VINF_SUCCESS
    ret
ENDPROC HWACCMSaveGuestFPU64

;/**
; * Saves the guest debug context (DR0-3, DR6)
; *
; * @returns VBox status code
; * @param   pCtx  Guest context [rsi]
; */
BEGINPROC HWACCMSaveGuestDebug64
    mov     rax, dr0
    mov     qword [rsi + CPUMCTX.dr + 0*8], rax
    mov     rax, dr1
    mov     qword [rsi + CPUMCTX.dr + 1*8], rax
    mov     rax, dr2
    mov     qword [rsi + CPUMCTX.dr + 2*8], rax
    mov     rax, dr3
    mov     qword [rsi + CPUMCTX.dr + 3*8], rax
    mov     rax, dr6
    mov     qword [rsi + CPUMCTX.dr + 6*8], rax
    mov     eax, VINF_SUCCESS
    ret
ENDPROC HWACCMSaveGuestDebug64

;/**
; * Dummy callback handler
; *
; * @returns VBox status code
; * @param   param1  Parameter 1 [rsp+8]
; * @param   param2  Parameter 2 [rsp+12]
; * @param   param3  Parameter 3 [rsp+16]
; * @param   param4  Parameter 4 [rsp+20]
; * @param   param5  Parameter 5 [rsp+24]
; * @param   pCtx    Guest context [rsi]
; */
BEGINPROC HWACCMTestSwitcher64
    mov     eax, [rsp+8]
    ret
ENDPROC HWACCMTestSwitcher64