VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMGC/HWACCMGCA.asm @ 15576

Last change on this file since 15576 was 15576, checked in by vboxsync, 16 years ago:

More paranoid checks

; $Id: HWACCMGCA.asm 15576 2008-12-16 12:01:10Z vboxsync $
;; @file
; VMXM - GC vmx helpers
;

;
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;

;*******************************************************************************
;*  Header Files                                                               *
;*******************************************************************************
%undef RT_ARCH_X86
%define RT_ARCH_AMD64
%include "VBox/asmdefs.mac"
%include "VBox/err.mac"
%include "VBox/hwacc_vmx.mac"
%include "VBox/cpum.mac"
%include "VBox/x86.mac"
%include "../HWACCMInternal.mac"

%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
 %macro vmwrite 2,
    int3
 %endmacro
 %define vmlaunch int3
 %define vmresume int3
 %define vmsave int3
 %define vmload int3
 %define vmrun int3
 %define clgi int3
 %define stgi int3
 %macro invlpga 2,
    int3
 %endmacro
%endif
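; Note: yasm's OMF output cannot assemble the VMX/SVM mnemonics, so for OS/2
; they are stubbed out as int3 breakpoints (this path is presumably never
; executed).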

;; @def MYPUSHSEGS
; Macro saving all segment registers on the stack.
; @param 1  full width register name

;; @def MYPOPSEGS
; Macro restoring all segment registers on the stack
; @param 1  full width register name

; Load the corresponding guest MSR (trashes rdx & rcx)
%macro LOADGUESTMSR 2
    mov     rcx, %1
    mov     edx, dword [rsi + %2 + 4]
    mov     eax, dword [rsi + %2]
    wrmsr
%endmacro

; Save a guest MSR (trashes rdx & rcx)
; Only really useful for gs kernel base as that one can be changed behind our back (swapgs)
%macro SAVEGUESTMSR 2
    mov     rcx, %1
    rdmsr
    mov     dword [rsi + %2], eax
    mov     dword [rsi + %2 + 4], edx
%endmacro
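; Note: wrmsr and rdmsr take the MSR index in ecx and the 64-bit value split
; across edx:eax, which is why the macros above move the two halves of the
; CPUMCTX field (addressed via rsi) separately.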

%macro MYPUSHSEGS 1
    mov     %1, es
    push    %1
    mov     %1, ds
    push    %1
%endmacro

%macro MYPOPSEGS 1
    pop     %1
    mov     ds, %1
    pop     %1
    mov     es, %1
%endmacro

BEGINCODE
BITS 64


;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
; *
; * @returns VBox status code
; * @param   pPageCpuPhys  VMXON physical address  [rsp+8]
; * @param   pVMCSPhys     VMCS physical address   [rsp+16]
; * @param   pCache        VMCS cache              [rsp+24]
; * @param   pCtx          Guest context (rsi)
; */
BEGINPROC VMXGCStartVM64
    push    rbp
    mov     rbp, rsp

    ; Make sure VT-x instructions are allowed
    mov     rax, cr4
    or      rax, X86_CR4_VMXE
    mov     cr4, rax
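    ; Note: executing vmxon with CR4.VMXE clear raises #UD, so the bit must be
    ; set first. (The original CR4 value is presumably restored by the caller
    ; on the way back.)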

    ;/* Enter VMX Root Mode */
    vmxon   [rbp + 8 + 8]
    jnc     .vmxon_success
    mov     rax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .vmstart64_vmxon_failed
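    ; Note: the VMX instructions report failure via eflags: CF=1 (VMfailInvalid)
    ; indicates a bad physical address, which is what the jnc above and the one
    ; after vmptrld below check for.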

.vmxon_success:
    ; Activate the VMCS pointer
    vmptrld [rbp + 16 + 8]
    jnc     .vmptrld_success
    mov     rax, VERR_VMX_INVALID_VMCS_PTR
    jmp     .vmstart64_vmoff_end

.vmptrld_success:

    ; Save the VMCS pointer on the stack
    push    qword [rbp + 16 + 8]

    ;/* Save segment registers */
    MYPUSHSEGS rax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    ; Flush the VMCS write cache first (before any other vmreads/vmwrites!)
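    ; (The write cache consists of cValidEntries pending writes stored as
    ; parallel arrays: aField[] holds the 32-bit VMCS field ids and
    ; aFieldVal[] the corresponding 64-bit values.)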
    mov     rbx, [rbp + 24 + 8]         ; pCache

%ifdef DEBUG
    mov     rax, [rbp + 8 + 8]          ; pPageCpuPhys
    mov     [rbx + VMCSCACHE.TestIn.pPageCpuPhys], rax
    mov     rax, [rbp + 16 + 8]         ; pVMCSPhys
    mov     [rbx + VMCSCACHE.TestIn.pVMCSPhys], rax
    mov     [rbx + VMCSCACHE.TestIn.pCache], rbx
    mov     [rbx + VMCSCACHE.TestIn.pCtx], rsi
%endif

    mov     ecx, [xBX + VMCSCACHE.Write.cValidEntries]
    cmp     ecx, 0
    je      .no_cached_writes
    mov     edx, ecx
    mov     ecx, 0
    jmp     .cached_write

ALIGN(16)
.cached_write:
    mov     eax, [xBX + VMCSCACHE.Write.aField + xCX*4]
    vmwrite xAX, qword [xBX + VMCSCACHE.Write.aFieldVal + xCX*8]
    inc     xCX
    cmp     xCX, xDX
    jl      .cached_write

    mov     dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

    ; Save the pCache pointer
    push    xBX
%endif

    ; Save the host state that's relevant in the temporary 64-bit mode
    mov     rdx, cr0
    mov     eax, VMX_VMCS_HOST_CR0
    vmwrite rax, rdx

    mov     rdx, cr3
    mov     eax, VMX_VMCS_HOST_CR3
    vmwrite rax, rdx

    mov     rdx, cr4
    mov     eax, VMX_VMCS_HOST_CR4
    vmwrite rax, rdx

    mov     rdx, cs
    mov     eax, VMX_VMCS_HOST_FIELD_CS
    vmwrite rax, rdx

    mov     rdx, ss
    mov     eax, VMX_VMCS_HOST_FIELD_SS
    vmwrite rax, rdx

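    ; Note: sgdt stores a 10-byte pseudo-descriptor (2-byte limit followed by
    ; the 8-byte base), hence the base is taken from [rsp+2] below.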
    sub     rsp, 8*2
    sgdt    [rsp]
    mov     eax, VMX_VMCS_HOST_GDTR_BASE
    vmwrite rax, [rsp+2]
    add     rsp, 8*2

    ; hopefully we can ignore TR (we restore it anyway on the way back to 32-bit mode)

    ;/* First we have to save some final CPU context registers. */
    lea     rdx, [.vmlaunch64_done wrt rip]
    mov     rax, VMX_VMCS_HOST_RIP      ;/* return address (too difficult to continue after VMLAUNCH?) */
    vmwrite rax, rdx
    ;/* Note: assumes success... */
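    ; (The host RIP/RSP fields are what the CPU loads on every VM-exit, so
    ; after a successful launch execution continues at .vmlaunch64_done below.)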

    ;/* Manual save and restore:
    ; *  - General purpose registers except RIP, RSP
    ; *
    ; *  Trashed:
    ; *  - CR2 (we don't care)
    ; *  - LDTR (reset to 0)
    ; *  - DRx (presumably not changed at all)
    ; *  - DR7 (reset to 0x400)
    ; *  - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */

    ; Load the guest LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    ;; @todo use the automatic load feature for MSRs
    LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
%if 0 ; not supported on Intel CPUs
    LOADGUESTMSR MSR_K8_CSTAR, CPUMCTX.msrCSTAR
%endif
    LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
    LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
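    ; (These are the syscall/swapgs related MSRs, which the VMCS guest state
    ; area does not switch automatically; hence the manual load here and the
    ; matching SAVEGUESTMSR for KERNEL_GS_BASE after the VM-exit.)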

    ; Save the pCtx pointer
    push    rsi

    ; Restore CR2
    mov     rbx, qword [rsi + CPUMCTX.cr2]
    mov     cr2, rbx

    mov     eax, VMX_VMCS_HOST_RSP
    vmwrite rax, rsp
    ;/* Note: assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov     rax, qword [rsi + CPUMCTX.eax]
    mov     rbx, qword [rsi + CPUMCTX.ebx]
    mov     rcx, qword [rsi + CPUMCTX.ecx]
    mov     rdx, qword [rsi + CPUMCTX.edx]
    mov     rbp, qword [rsi + CPUMCTX.ebp]
    mov     r8, qword [rsi + CPUMCTX.r8]
    mov     r9, qword [rsi + CPUMCTX.r9]
    mov     r10, qword [rsi + CPUMCTX.r10]
    mov     r11, qword [rsi + CPUMCTX.r11]
    mov     r12, qword [rsi + CPUMCTX.r12]
    mov     r13, qword [rsi + CPUMCTX.r13]
    mov     r14, qword [rsi + CPUMCTX.r14]
    mov     r15, qword [rsi + CPUMCTX.r15]

    ;/* Restore rdi & rsi. */
    mov     rdi, qword [rsi + CPUMCTX.edi]
    mov     rsi, qword [rsi + CPUMCTX.esi]

    vmlaunch
    jmp     .vmlaunch64_done            ;/* here if vmlaunch detected a failure. */
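    ; (On success vmlaunch never returns here; falling through to the jmp
    ; above means the instruction itself failed, with the cause in eflags.)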

ALIGNCODE(16)
.vmlaunch64_done:
    jc      near .vmstart64_invalid_vmxon_ptr
    jz      near .vmstart64_start_failed
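    ; (CF=1 means VMfailInvalid, ZF=1 means VMfailValid; in the latter case
    ; the VM-instruction error field of the VMCS holds the exact error number.)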

    push    rdi
    mov     rdi, [rsp + 8]              ; pCtx

    mov     qword [rdi + CPUMCTX.eax], rax
    mov     qword [rdi + CPUMCTX.ebx], rbx
    mov     qword [rdi + CPUMCTX.ecx], rcx
    mov     qword [rdi + CPUMCTX.edx], rdx
    mov     qword [rdi + CPUMCTX.esi], rsi
    mov     qword [rdi + CPUMCTX.ebp], rbp
    mov     qword [rdi + CPUMCTX.r8], r8
    mov     qword [rdi + CPUMCTX.r9], r9
    mov     qword [rdi + CPUMCTX.r10], r10
    mov     qword [rdi + CPUMCTX.r11], r11
    mov     qword [rdi + CPUMCTX.r12], r12
    mov     qword [rdi + CPUMCTX.r13], r13
    mov     qword [rdi + CPUMCTX.r14], r14
    mov     qword [rdi + CPUMCTX.r15], r15

    pop     rax                         ; the guest edi we pushed above
    mov     qword [rdi + CPUMCTX.edi], rax

    pop     rsi                         ; pCtx (needed in rsi by the macros below)

    ;; @todo use the automatic load feature for MSRs
    SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop     rdi                         ; saved pCache

%ifdef DEBUG
    mov     [rdi + VMCSCACHE.TestOut.pCache], rdi
    mov     [rdi + VMCSCACHE.TestOut.pCtx], rsi
%endif

    mov     ecx, [rdi + VMCSCACHE.Read.cValidEntries]
    cmp     ecx, 0                      ; can't happen
    je      .no_cached_reads
    jmp     .cached_read

ALIGN(16)
.cached_read:
    dec     xCX
    mov     eax, [rdi + VMCSCACHE.Read.aField + xCX*4]
    vmread  qword [rdi + VMCSCACHE.Read.aFieldVal + xCX*8], xAX
    cmp     xCX, 0
    jnz     .cached_read
.no_cached_reads:

    ; Save CR2 for EPT
    mov     rax, cr2
    mov     [rdi + VMCSCACHE.cr2], rax
%endif

    ; Restore segment registers
    MYPOPSEGS rax

    mov     eax, VINF_SUCCESS

.vmstart64_end:

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
%ifdef DEBUG
    mov     rdx, [rsp]                  ; pVMCSPhys
    mov     [rdi + VMCSCACHE.TestOut.pVMCSPhys], rdx
%endif
%endif

    ; Write back the data and disable the VMCS
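    ; (vmclear also clears the launched state of the current VMCS, making it
    ; safe to execute vmxoff below and to vmptrld/vmlaunch the VMCS later.)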
    vmclear qword [rsp]                 ; pushed pVMCSPhys
    add     rsp, 8

.vmstart64_vmoff_end:
    ; Disable VMX root mode
    vmxoff
.vmstart64_vmxon_failed:
%ifdef VMX_USE_CACHED_VMCS_ACCESSES
%ifdef DEBUG
    cmp     eax, VINF_SUCCESS
    jne     .skip_flags_save

    pushf
    pop     rdx
    mov     [rdi + VMCSCACHE.TestOut.eflags], rdx
.skip_flags_save:
%endif
%endif
    pop     rbp
    ret


.vmstart64_invalid_vmxon_ptr:
    pop     rsi                         ; pCtx (needed in rsi by the macros below)

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop     rdi                         ; pCache

%ifdef DEBUG
    mov     [rdi + VMCSCACHE.TestOut.pCache], rdi
    mov     [rdi + VMCSCACHE.TestOut.pCtx], rsi
%endif

%endif

    ; Restore segment registers
    MYPOPSEGS rax

    ; Restore all general purpose host registers.
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .vmstart64_end

.vmstart64_start_failed:
    pop     rsi                         ; pCtx (needed in rsi by the macros below)

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop     rdi                         ; pCache

%ifdef DEBUG
    mov     [rdi + VMCSCACHE.TestOut.pCache], rdi
    mov     [rdi + VMCSCACHE.TestOut.pCtx], rsi
%endif

%endif

    ; Restore segment registers
    MYPOPSEGS rax

    ; Restore all general purpose host registers.
    mov     eax, VERR_VMX_UNABLE_TO_START_VM
    jmp     .vmstart64_end
ENDPROC VMXGCStartVM64


;/**
; * Prepares for and executes VMRUN (64-bit guests)
; *
; * @returns VBox status code
; * @param   HCPhysVMCBHost  Physical address of host VMCB   (rsp+8)
; * @param   HCPhysVMCB      Physical address of guest VMCB  (rsp+16)
; * @param   pCtx            Guest context (rsi)
; */
BEGINPROC SVMGCVMRun64
    push    rbp
    mov     rbp, rsp
    pushf

    ;/* Manual save and restore:
    ; *  - General purpose registers except RIP, RSP, RAX
    ; *
    ; *  Trashed:
    ; *  - CR2 (we don't care)
    ; *  - LDTR (reset to 0)
    ; *  - DRx (presumably not changed at all)
    ; *  - DR7 (reset to 0x400)
    ; */

    ;/* Save the Guest CPU context pointer. */
    push    rsi                         ; push for saving the state at the end

    ; save host fs, gs, sysenter msr etc
    mov     rax, [rbp + 8 + 8]          ; pVMCBHostPhys (64-bit physical address)
    push    rax                         ; save for the vmload after vmrun
    vmsave

    ; setup eax for VMLOAD
    mov     rax, [rbp + 8 + 8 + RTHCPHYS_CB] ; pVMCBPhys (64-bit physical address)

    ;/* Restore Guest's general purpose registers. */
    ;/* RAX is loaded from the VMCB by VMRUN */
    mov     rbx, qword [rsi + CPUMCTX.ebx]
    mov     rcx, qword [rsi + CPUMCTX.ecx]
    mov     rdx, qword [rsi + CPUMCTX.edx]
    mov     rdi, qword [rsi + CPUMCTX.edi]
    mov     rbp, qword [rsi + CPUMCTX.ebp]
    mov     r8, qword [rsi + CPUMCTX.r8]
    mov     r9, qword [rsi + CPUMCTX.r9]
    mov     r10, qword [rsi + CPUMCTX.r10]
    mov     r11, qword [rsi + CPUMCTX.r11]
    mov     r12, qword [rsi + CPUMCTX.r12]
    mov     r13, qword [rsi + CPUMCTX.r13]
    mov     r14, qword [rsi + CPUMCTX.r14]
    mov     r15, qword [rsi + CPUMCTX.r15]
    mov     rsi, qword [rsi + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    clgi
    sti
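    ; (While GIF=0 interrupts are held pending, so sti is safe here; IF=1 is
    ; needed so that once inside the guest a physical interrupt forces a
    ; #VMEXIT back to this code rather than being handled by the host.)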

    ; load guest fs, gs, sysenter msr etc
    vmload
    ; run the VM
    vmrun

    ;/* RAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    vmsave

    ; load host fs, gs, sysenter msr etc
    pop     rax                         ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop     rax                         ; pCtx

    mov     qword [rax + CPUMCTX.ebx], rbx
    mov     qword [rax + CPUMCTX.ecx], rcx
    mov     qword [rax + CPUMCTX.edx], rdx
    mov     qword [rax + CPUMCTX.esi], rsi
    mov     qword [rax + CPUMCTX.edi], rdi
    mov     qword [rax + CPUMCTX.ebp], rbp
    mov     qword [rax + CPUMCTX.r8], r8
    mov     qword [rax + CPUMCTX.r9], r9
    mov     qword [rax + CPUMCTX.r10], r10
    mov     qword [rax + CPUMCTX.r11], r11
    mov     qword [rax + CPUMCTX.r12], r12
    mov     qword [rax + CPUMCTX.r13], r13
    mov     qword [rax + CPUMCTX.r14], r14
    mov     qword [rax + CPUMCTX.r15], r15

    mov     eax, VINF_SUCCESS

    popf
    pop     rbp
    ret
ENDPROC SVMGCVMRun64

;/**
; * Saves the guest FPU context
; *
; * @returns VBox status code
; * @param   pCtx  Guest context [rsi]
; */
BEGINPROC HWACCMSaveGuestFPU64
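    ; Note: CR0.TS and CR0.EM are cleared first because fxsave would fault
    ; otherwise (#NM if TS=1, #UD if EM=1); the original CR0 is restored below.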
    mov     rax, cr0
    mov     rcx, rax                    ; save old CR0
    and     rax, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, rax

    fxsave  [rsi + CPUMCTX.fpu]

    mov     cr0, rcx                    ; and restore old CR0 again

    mov     eax, VINF_SUCCESS
    ret
ENDPROC HWACCMSaveGuestFPU64

;/**
; * Saves the guest debug context (DR0-3, DR6)
; *
; * @returns VBox status code
; * @param   pCtx  Guest context [rsi]
; */
BEGINPROC HWACCMSaveGuestDebug64
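    ; Note: DR4/DR5 merely alias DR6/DR7, so only DR0-3 and DR6 are read here;
    ; DR7 is not saved by this helper (see the doc comment above).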
    mov     rax, dr0
    mov     qword [rsi + CPUMCTX.dr + 0*8], rax
    mov     rax, dr1
    mov     qword [rsi + CPUMCTX.dr + 1*8], rax
    mov     rax, dr2
    mov     qword [rsi + CPUMCTX.dr + 2*8], rax
    mov     rax, dr3
    mov     qword [rsi + CPUMCTX.dr + 3*8], rax
    mov     rax, dr6
    mov     qword [rsi + CPUMCTX.dr + 6*8], rax
    mov     eax, VINF_SUCCESS
    ret
ENDPROC HWACCMSaveGuestDebug64

;/**
; * Dummy callback handler
; *
; * @returns VBox status code
; * @param   param1  Parameter 1 [rsp+8]
; * @param   param2  Parameter 2 [rsp+12]
; * @param   param3  Parameter 3 [rsp+16]
; * @param   param4  Parameter 4 [rsp+20]
; * @param   param5  Parameter 5 [rsp+24]
; * @param   pCtx    Guest context [rsi]
; */
BEGINPROC HWACCMTestSwitcher64
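    ; Just returns the first (dword) parameter, presumably so callers can
    ; verify that arguments survive the world switch.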
    mov     eax, [rsp+8]
    ret
ENDPROC HWACCMTestSwitcher64