VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMRC/HMRCA.asm@45741

Last change on this file since 45741 was 45739, checked in by vboxsync, 12 years ago

VMM: First part of HMIsEnabled() and PGMMap*.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 16.3 KB
; $Id: HMRCA.asm 45739 2013-04-25 19:44:05Z vboxsync $
;; @file
; VMXM - GC vmx helpers
;

;
; Copyright (C) 2006-2012 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

;*******************************************************************************
;*  Header Files                                                               *
;*******************************************************************************
%undef RT_ARCH_X86
%define RT_ARCH_AMD64
%include "VBox/asmdefs.mac"
%include "VBox/err.mac"
%include "VBox/vmm/hm_vmx.mac"
%include "VBox/vmm/cpum.mac"
%include "iprt/x86.mac"
%include "HMInternal.mac"

%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
 %macro vmwrite 2,
    int3
 %endmacro
 %define vmlaunch int3
 %define vmresume int3
 %define vmsave int3
 %define vmload int3
 %define vmrun int3
 %define clgi int3
 %define stgi int3
 %macro invlpga 2,
    int3
 %endmacro
%endif

;; @def MYPUSHSEGS
; Macro saving the es and ds segment registers on the stack.
; @param 1 full width register name

;; @def MYPOPSEGS
; Macro restoring the es and ds segment registers from the stack.
; @param 1 full width register name

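; Note: rdmsr and wrmsr take the MSR index in ecx and pass the 64-bit value
; through edx:eax (high dword:low dword), which is why the macros below access
; the context field in two dword halves.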
; Load the corresponding guest MSR (trashes rdx & rcx).
%macro LOADGUESTMSR 2
    mov     rcx, %1
    mov     edx, dword [rsi + %2 + 4]
    mov     eax, dword [rsi + %2]
    wrmsr
%endmacro

; Save a guest MSR (trashes rdx & rcx).
; Only really useful for gs kernel base as that one can be changed behind our back (swapgs).
%macro SAVEGUESTMSR 2
    mov     rcx, %1
    rdmsr
    mov     dword [rsi + %2], eax
    mov     dword [rsi + %2 + 4], edx
%endmacro

%macro MYPUSHSEGS 1
    mov     %1, es
    push    %1
    mov     %1, ds
    push    %1
%endmacro

%macro MYPOPSEGS 1
    pop     %1
    mov     ds, %1
    pop     %1
    mov     es, %1
%endmacro

BEGINCODE
BITS 64


;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode).
; *
; * @returns VBox status code
; * @param HCPhysCpuPage VMXON physical address [rsp+8]
; * @param HCPhysVmcs VMCS physical address [rsp+16]
; * @param pCache VMCS cache [rsp+24]
; * @param pCtx Guest context (rsi)
; */
BEGINPROC VMXGCStartVM64
    push    rbp
    mov     rbp, rsp

    ; Make sure VT-x instructions are allowed.
    mov     rax, cr4
    or      rax, X86_CR4_VMXE
    mov     cr4, rax

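    ; VMX instructions report failure through RFLAGS: CF=1 means VMfailInvalid
    ; (bad physical address) and ZF=1 means VMfailValid (error code in the
    ; VM-instruction error field), hence the jnc/jnz checks below.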
    ;/* Enter VMX Root Mode */
    vmxon   [rbp + 8 + 8]
    jnc     .vmxon_success
    mov     rax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .vmstart64_vmxon_failed

.vmxon_success:
    jnz     .vmxon_success2
    mov     rax, VERR_VMX_VMXON_FAILED
    jmp     .vmstart64_vmxon_failed

.vmxon_success2:
    ; Activate the VMCS pointer.
    vmptrld [rbp + 16 + 8]
    jnc     .vmptrld_success
    mov     rax, VERR_VMX_INVALID_VMCS_PTR
    jmp     .vmstart64_vmxoff_end

.vmptrld_success:
    jnz     .vmptrld_success2
    mov     rax, VERR_VMX_VMPTRLD_FAILED
    jmp     .vmstart64_vmxoff_end

.vmptrld_success2:

    ; Save the VMCS pointer on the stack.
    push    qword [rbp + 16 + 8]

    ;/* Save segment registers */
    MYPUSHSEGS rax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    ; Flush the VMCS write cache first (before any other vmreads/vmwrites!).
    mov     rbx, [rbp + 24 + 8] ; pCache

 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     qword [rbx + VMCSCACHE.uPos], 2
 %endif

 %ifdef DEBUG
    mov     rax, [rbp + 8 + 8] ; HCPhysCpuPage
    mov     [rbx + VMCSCACHE.TestIn.HCPhysCpuPage], rax
    mov     rax, [rbp + 16 + 8] ; HCPhysVmcs
    mov     [rbx + VMCSCACHE.TestIn.HCPhysVmcs], rax
    mov     [rbx + VMCSCACHE.TestIn.pCache], rbx
    mov     [rbx + VMCSCACHE.TestIn.pCtx], rsi
 %endif

    mov     ecx, [rbx + VMCSCACHE.Write.cValidEntries]
    cmp     ecx, 0
    je      .no_cached_writes
    mov     rdx, rcx
    mov     rcx, 0
    jmp     .cached_write

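; vmwrite takes the VMCS field encoding in its first (register) operand and the
; value to store in the second, so each cached entry below is replayed into the
; VMCS field by field.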
ALIGN(16)
.cached_write:
    mov     eax, [rbx + VMCSCACHE.Write.aField + rcx*4]
    vmwrite rax, qword [rbx + VMCSCACHE.Write.aFieldVal + rcx*8]
    inc     rcx
    cmp     rcx, rdx
    jl      .cached_write

    mov     dword [rbx + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     qword [rbx + VMCSCACHE.uPos], 3
 %endif
    ; Save the pCache pointer.
    push    xBX
%endif

    ; Save the host state that's relevant in the temporary 64-bit mode.
    mov     rdx, cr0
    mov     eax, VMX_VMCS_HOST_CR0
    vmwrite rax, rdx

    mov     rdx, cr3
    mov     eax, VMX_VMCS_HOST_CR3
    vmwrite rax, rdx

    mov     rdx, cr4
    mov     eax, VMX_VMCS_HOST_CR4
    vmwrite rax, rdx

    mov     rdx, cs
    mov     eax, VMX_VMCS_HOST_FIELD_CS
    vmwrite rax, rdx

    mov     rdx, ss
    mov     eax, VMX_VMCS_HOST_FIELD_SS
    vmwrite rax, rdx

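    ; sgdt stores a 10-byte image (16-bit limit followed by the 64-bit base),
    ; which is why the base address is read back at [rsp+2].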
    sub     rsp, 8*2
    sgdt    [rsp]
    mov     eax, VMX_VMCS_HOST_GDTR_BASE
    vmwrite rax, [rsp+2]
    add     rsp, 8*2

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     qword [rbx + VMCSCACHE.uPos], 4
%endif

    ; Hopefully we can ignore TR (we restore it anyway on the way back to 32-bit mode).

    ;/* First we have to save some final CPU context registers. */
    lea     rdx, [.vmlaunch64_done wrt rip]
    mov     rax, VMX_VMCS_HOST_RIP ;/* return address (too difficult to continue after VMLAUNCH?) */
    vmwrite rax, rdx
    ;/* Note: assumes success... */

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */

%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    ; Load the guest LSTAR, STAR and SFMASK MSRs.
    LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
    LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
    LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
%endif
    ; Kernel GS base is special, we need to manually load/store it, see @bugref{6208}.
    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     qword [rbx + VMCSCACHE.uPos], 5
%endif

    ; Save the pCtx pointer.
    push    rsi

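    ; A mov to a control register is serializing, so only write CR2 when the
    ; guest value actually differs from the current one.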
    ; Restore CR2.
    mov     rbx, qword [rsi + CPUMCTX.cr2]
    mov     rdx, cr2
    cmp     rdx, rbx
    je      .skipcr2write64
    mov     cr2, rbx

.skipcr2write64:
    mov     eax, VMX_VMCS_HOST_RSP
    vmwrite rax, rsp
    ;/* Note: assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov     rax, qword [rsi + CPUMCTX.eax]
    mov     rbx, qword [rsi + CPUMCTX.ebx]
    mov     rcx, qword [rsi + CPUMCTX.ecx]
    mov     rdx, qword [rsi + CPUMCTX.edx]
    mov     rbp, qword [rsi + CPUMCTX.ebp]
    mov     r8, qword [rsi + CPUMCTX.r8]
    mov     r9, qword [rsi + CPUMCTX.r9]
    mov     r10, qword [rsi + CPUMCTX.r10]
    mov     r11, qword [rsi + CPUMCTX.r11]
    mov     r12, qword [rsi + CPUMCTX.r12]
    mov     r13, qword [rsi + CPUMCTX.r13]
    mov     r14, qword [rsi + CPUMCTX.r14]
    mov     r15, qword [rsi + CPUMCTX.r15]

    ;/* Restore rdi & rsi. */
    mov     rdi, qword [rsi + CPUMCTX.edi]
    mov     rsi, qword [rsi + CPUMCTX.esi]

    vmlaunch
    jmp     .vmlaunch64_done ;/* here if vmlaunch detected a failure. */

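; On success we never fall through: the next VM-exit resumes at .vmlaunch64_done
; because VMX_VMCS_HOST_RIP was pointed at it above. Arriving here with CF or ZF
; set therefore means the vmlaunch instruction itself failed.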
ALIGNCODE(16)
.vmlaunch64_done:
    jc      near .vmstart64_invalid_vmxon_ptr
    jz      near .vmstart64_start_failed

    push    rdi
    mov     rdi, [rsp + 8] ; pCtx

    mov     qword [rdi + CPUMCTX.eax], rax
    mov     qword [rdi + CPUMCTX.ebx], rbx
    mov     qword [rdi + CPUMCTX.ecx], rcx
    mov     qword [rdi + CPUMCTX.edx], rdx
    mov     qword [rdi + CPUMCTX.esi], rsi
    mov     qword [rdi + CPUMCTX.ebp], rbp
    mov     qword [rdi + CPUMCTX.r8], r8
    mov     qword [rdi + CPUMCTX.r9], r9
    mov     qword [rdi + CPUMCTX.r10], r10
    mov     qword [rdi + CPUMCTX.r11], r11
    mov     qword [rdi + CPUMCTX.r12], r12
    mov     qword [rdi + CPUMCTX.r13], r13
    mov     qword [rdi + CPUMCTX.r14], r14
    mov     qword [rdi + CPUMCTX.r15], r15
%ifndef VBOX_WITH_OLD_VTX_CODE
    mov     rax, cr2
    mov     qword [rdi + CPUMCTX.cr2], rax
%endif

    pop     rax ; the guest edi we pushed above
    mov     qword [rdi + CPUMCTX.edi], rax

    pop     rsi ; pCtx (needed in rsi by the macros below)

%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    SAVEGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
    SAVEGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
    SAVEGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
%endif
    ; Kernel GS base is special, we need to manually load/store it, see @bugref{6208}.
    SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop     rdi ; saved pCache

 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdi + VMCSCACHE.uPos], 7
 %endif
 %ifdef DEBUG
    mov     [rdi + VMCSCACHE.TestOut.pCache], rdi
    mov     [rdi + VMCSCACHE.TestOut.pCtx], rsi
    mov     rax, cr8
    mov     [rdi + VMCSCACHE.TestOut.cr8], rax
 %endif

    mov     ecx, [rdi + VMCSCACHE.Read.cValidEntries]
    cmp     ecx, 0 ; can't happen
    je      .no_cached_reads
    jmp     .cached_read

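; vmread mirrors vmwrite: the field encoding goes in rax and the field's value
; is stored to the memory operand, walking the read cache back to front.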
ALIGN(16)
.cached_read:
    dec     rcx
    mov     eax, [rdi + VMCSCACHE.Read.aField + rcx*4]
    vmread  qword [rdi + VMCSCACHE.Read.aFieldVal + rcx*8], rax
    cmp     rcx, 0
    jnz     .cached_read
.no_cached_reads:

 %ifdef VBOX_WITH_OLD_VTX_CODE
    ; Save CR2 for EPT.
    mov     rax, cr2
    mov     [rdi + VMCSCACHE.cr2], rax
 %endif
 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdi + VMCSCACHE.uPos], 8
 %endif
%endif

    ; Restore segment registers.
    MYPOPSEGS rax

    mov     eax, VINF_SUCCESS

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdi + VMCSCACHE.uPos], 9
%endif
.vmstart64_end:

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
 %ifdef DEBUG
    mov     rdx, [rsp] ; HCPhysVmcs
    mov     [rdi + VMCSCACHE.TestOut.HCPhysVmcs], rdx
 %endif
%endif

    ; Write back the data and disable the VMCS.
    vmclear qword [rsp] ; pushed pVMCS
    add     rsp, 8

.vmstart64_vmxoff_end:
    ; Disable VMX root mode.
    vmxoff
.vmstart64_vmxon_failed:
%ifdef VMX_USE_CACHED_VMCS_ACCESSES
 %ifdef DEBUG
    cmp     eax, VINF_SUCCESS
    jne     .skip_flags_save

    pushf
    pop     rdx
    mov     [rdi + VMCSCACHE.TestOut.eflags], rdx
  %ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdi + VMCSCACHE.uPos], 12
  %endif
.skip_flags_save:
 %endif
%endif
    pop     rbp
    ret


.vmstart64_invalid_vmxon_ptr:
    pop     rsi ; pCtx (needed in rsi by the macros below)

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop     rdi ; pCache
 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdi + VMCSCACHE.uPos], 10
 %endif

 %ifdef DEBUG
    mov     [rdi + VMCSCACHE.TestOut.pCache], rdi
    mov     [rdi + VMCSCACHE.TestOut.pCtx], rsi
 %endif
%endif

    ; Restore segment registers.
    MYPOPSEGS rax

    ; Restore all general purpose host registers.
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .vmstart64_end

.vmstart64_start_failed:
    pop     rsi ; pCtx (needed in rsi by the macros below)

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop     rdi ; pCache

 %ifdef DEBUG
    mov     [rdi + VMCSCACHE.TestOut.pCache], rdi
    mov     [rdi + VMCSCACHE.TestOut.pCtx], rsi
 %endif
 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdi + VMCSCACHE.uPos], 11
 %endif
%endif

    ; Restore segment registers.
    MYPOPSEGS rax

    ; Restore all general purpose host registers.
    mov     eax, VERR_VMX_UNABLE_TO_START_VM
    jmp     .vmstart64_end
ENDPROC VMXGCStartVM64


;/**
; * Prepares for and executes VMRUN (64-bit guests).
; *
; * @returns VBox status code
; * @param pVMCBHostPhys Physical address of the host VMCB [rsp+8]
; * @param pVMCBPhys Physical address of the guest VMCB [rsp+16]
; * @param pCtx Guest context (rsi)
; */
BEGINPROC SVMGCVMRun64
    push    rbp
    mov     rbp, rsp
    pushf

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; */

    ;/* Save the Guest CPU context pointer. */
    push    rsi ; push for saving the state at the end

    ; Save host fs, gs, sysenter msr etc.
    mov     rax, [rbp + 8 + 8] ; pVMCBHostPhys (64-bit physical address)
    push    rax ; save for the vmload after vmrun
    vmsave

    ; Set up rax for VMLOAD.
    mov     rax, [rbp + 8 + 8 + RTHCPHYS_CB] ; pVMCBPhys (64-bit physical address)

    ;/* Restore Guest's general purpose registers. */
    ;/* RAX is loaded from the VMCB by VMRUN. */
    mov     rbx, qword [rsi + CPUMCTX.ebx]
    mov     rcx, qword [rsi + CPUMCTX.ecx]
    mov     rdx, qword [rsi + CPUMCTX.edx]
    mov     rdi, qword [rsi + CPUMCTX.edi]
    mov     rbp, qword [rsi + CPUMCTX.ebp]
    mov     r8, qword [rsi + CPUMCTX.r8]
    mov     r9, qword [rsi + CPUMCTX.r9]
    mov     r10, qword [rsi + CPUMCTX.r10]
    mov     r11, qword [rsi + CPUMCTX.r11]
    mov     r12, qword [rsi + CPUMCTX.r12]
    mov     r13, qword [rsi + CPUMCTX.r13]
    mov     r14, qword [rsi + CPUMCTX.r14]
    mov     r15, qword [rsi + CPUMCTX.r15]
    mov     rsi, qword [rsi + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
    clgi
    sti

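    ; vmload, vmrun and vmsave all take the physical address of a VMCB
    ; implicitly in rax; guest rax itself is loaded from and stored back to
    ; the guest VMCB by vmrun.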
    ; Load guest fs, gs, sysenter msr etc.
    vmload
    ; Run the VM.
    vmrun

    ;/* RAX is in the VMCB already; we can use it here. */

    ; Save guest fs, gs, sysenter msr etc.
    vmsave

    ; Load host fs, gs, sysenter msr etc.
    pop     rax ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop     rax ; pCtx

    mov     qword [rax + CPUMCTX.ebx], rbx
    mov     qword [rax + CPUMCTX.ecx], rcx
    mov     qword [rax + CPUMCTX.edx], rdx
    mov     qword [rax + CPUMCTX.esi], rsi
    mov     qword [rax + CPUMCTX.edi], rdi
    mov     qword [rax + CPUMCTX.ebp], rbp
    mov     qword [rax + CPUMCTX.r8], r8
    mov     qword [rax + CPUMCTX.r9], r9
    mov     qword [rax + CPUMCTX.r10], r10
    mov     qword [rax + CPUMCTX.r11], r11
    mov     qword [rax + CPUMCTX.r12], r12
    mov     qword [rax + CPUMCTX.r13], r13
    mov     qword [rax + CPUMCTX.r14], r14
    mov     qword [rax + CPUMCTX.r15], r15

    mov     eax, VINF_SUCCESS

    popf
    pop     rbp
    ret
ENDPROC SVMGCVMRun64

;/**
; * Saves the guest FPU context.
; *
; * @returns VBox status code
; * @param pCtx Guest context [rsi]
; */
BEGINPROC HMSaveGuestFPU64
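    ; fxsave faults if CR0.TS (#NM) or CR0.EM (#UD) is set, so both bits are
    ; cleared up front and the original CR0 is restored afterwards.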
    mov     rax, cr0
    mov     rcx, rax ; save old CR0
    and     rax, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, rax

    fxsave  [rsi + CPUMCTX.fpu]

    mov     cr0, rcx ; and restore old CR0 again

    mov     eax, VINF_SUCCESS
    ret
ENDPROC HMSaveGuestFPU64

;/**
; * Saves the guest debug context (DR0-3, DR6).
; *
; * @returns VBox status code
; * @param pCtx Guest context [rsi]
; */
BEGINPROC HMSaveGuestDebug64
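    ; DR4 and DR5 are only aliases of DR6 and DR7 (when CR4.DE is clear), so
    ; just DR0-DR3 and DR6 are read here; DR7 is not saved by this helper.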
    mov     rax, dr0
    mov     qword [rsi + CPUMCTX.dr + 0*8], rax
    mov     rax, dr1
    mov     qword [rsi + CPUMCTX.dr + 1*8], rax
    mov     rax, dr2
    mov     qword [rsi + CPUMCTX.dr + 2*8], rax
    mov     rax, dr3
    mov     qword [rsi + CPUMCTX.dr + 3*8], rax
    mov     rax, dr6
    mov     qword [rsi + CPUMCTX.dr + 6*8], rax
    mov     eax, VINF_SUCCESS
    ret
ENDPROC HMSaveGuestDebug64

;/**
; * Dummy callback handler.
; *
; * @returns VBox status code
; * @param param1 Parameter 1 [rsp+8]
; * @param param2 Parameter 2 [rsp+12]
; * @param param3 Parameter 3 [rsp+16]
; * @param param4 Parameter 4 [rsp+20]
; * @param param5 Parameter 5 [rsp+24]
; * @param pCtx Guest context [rsi]
; */
BEGINPROC HMTestSwitcher64
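    ; Simply returns the first stack parameter as the VBox status code.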
    mov     eax, [rsp+8]
    ret
ENDPROC HMTestSwitcher64
