VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMR0Mixed.mac@49284

Last change on this file since 49284 was 49018, checked in by vboxsync, 11 years ago

VMM/HMR0Mixed.mac: Remove ugly jump label suffix passing when assembler can handle it with '%%'.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 21.8 KB
Line 
1; $Id: HMR0Mixed.mac 49018 2013-10-10 08:19:24Z vboxsync $
2;; @file
3; HM - Ring-0 Host 32/64, Guest 32/64 world-switch routines
4;
5; Darwin uses this to build two versions in the hybrid case.
6; Included by HMR0A.asm with RT_ARCH_AMD64 defined or undefined.
7;
8
9;
10; Copyright (C) 2006-2013 Oracle Corporation
11;
12; This file is part of VirtualBox Open Source Edition (OSE), as
13; available from http://www.virtualbox.org. This file is free software;
14; you can redistribute it and/or modify it under the terms of the GNU
15; General Public License (GPL) as published by the Free Software
16; Foundation, in version 2 as it comes in the "COPYING" file of the
17; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
18; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
19;
20
21%ifdef RT_ARCH_AMD64
22 ;;
23 ; Keep these macro definitions in this file as it gets included and compiled
24 ; with RT_ARCH_AMD64 once and RT_ARCH_X86 once.
25 %define VMX_SKIP_GDTR
26 %ifdef RT_OS_DARWIN
27 ; Darwin (Mavericks) uses the IDTR limit to store the CPU ID, so we always need to restore it.
28 ; See @bugref{6875}.
29 %elifdef RT_OS_WINDOWS
30 ; Windows 8.1 RTM also seems to be using the IDTR limit for something. See @bugref{6956}.
31 ;; @todo figure out what exactly it does and try to restrict it further.
32 %else
33 %define VMX_SKIP_IDTR
34 %endif
35 %define VMX_SKIP_TR
36%endif
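; Net effect for 64-bit builds: the manual GDTR limit and TR restores are always
; skipped, while the IDTR is restored only on Darwin and Windows (which repurpose
; its limit); 32-bit builds define none of these and restore all three.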
37
38;; @def RESTORE_STATE_VM32
39; Macro restoring essential host state and updating guest state
40; for a common (32-bit or 64-bit) host and a 32-bit guest, for VT-x.
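; On entry the stack (as set up by VMXR0StartVM32) is expected to hold, from the
; top down: the saved IDTR and GDTR images (two stack slots each, unless
; VMX_SKIP_IDTR/VMX_SKIP_GDTR), the saved TR (unless VMX_SKIP_TR), the saved
; LDTR, pCtx and, when VMX_USE_CACHED_VMCS_ACCESSES is defined, pCache, followed
; by the segment registers and general purpose registers pushed by
; MYPUSHSEGS/MYPUSHAD.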
41%macro RESTORE_STATE_VM32 0
42 ; Restore base and limit of the IDTR & GDTR.
43 %ifndef VMX_SKIP_IDTR
44 lidt [xSP]
45 add xSP, xCB * 2
46 %endif
47 %ifndef VMX_SKIP_GDTR
48 lgdt [xSP]
49 add xSP, xCB * 2
50 %endif
51
52 push xDI
53 %ifndef VMX_SKIP_TR
54 mov xDI, [xSP + xCB * 3] ; pCtx (*3 to skip the saved xDI, TR, LDTR).
55 %else
56 mov xDI, [xSP + xCB * 2] ; pCtx (*2 to skip the saved xDI, LDTR).
57 %endif
58
59 mov [ss:xDI + CPUMCTX.eax], eax
60 mov [ss:xDI + CPUMCTX.ebx], ebx
61 mov [ss:xDI + CPUMCTX.ecx], ecx
62 mov [ss:xDI + CPUMCTX.edx], edx
63 mov [ss:xDI + CPUMCTX.esi], esi
64 mov [ss:xDI + CPUMCTX.ebp], ebp
65 mov xAX, cr2
66 mov [ss:xDI + CPUMCTX.cr2], xAX
67
68 %ifdef RT_ARCH_AMD64
69 pop xAX ; The guest edi we pushed above.
70 mov dword [ss:xDI + CPUMCTX.edi], eax
71 %else
72 pop dword [ss:xDI + CPUMCTX.edi] ; The guest edi we pushed above.
73 %endif
74
75 %ifndef VMX_SKIP_TR
76 ; Restore TSS selector; must mark it as not busy before using ltr (!)
77 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
78 ; @todo get rid of sgdt
79 pop xBX ; Saved TR
80 sub xSP, xCB * 2
81 sgdt [xSP]
82 mov xAX, xBX
83 and eax, X86_SEL_MASK_OFF_RPL ; Mask away TI and RPL bits leaving only the descriptor offset.
84 add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
85 and dword [ss:xAX + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
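; (sgdt stores the 16-bit limit at [xSP] and the base at [xSP + 2], so xAX now
;  points at the TR descriptor inside the GDT. Bits 8-11 of its second dword are
;  the type field; bit 9 is the busy bit of the 32-bit TSS type, and ltr faults
;  on a descriptor that is still marked busy.)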
86 ltr bx
87 add xSP, xCB * 2
88 %endif
89
90 pop xAX ; Saved LDTR
91 %ifdef RT_ARCH_AMD64
92 cmp eax, 0
93 je %%skip_ldt_write32
94 %endif
95 lldt ax
96
97%%skip_ldt_write32:
98 add xSP, xCB ; pCtx
99
100 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
101 pop xDX ; Saved pCache
102
103 ; Note! If we get here as a result of an invalid VMCS pointer, all the following
104 ; vmreads will fail (only eflags.cf=1 will be set), but that shouldn't cause any
105 ; trouble; it is merely less efficient.
106 mov ecx, [ss:xDX + VMCSCACHE.Read.cValidEntries]
107 cmp ecx, 0 ; Can't happen
108 je %%no_cached_read32
109 jmp %%cached_read32
110
111ALIGN(16)
112%%cached_read32:
113 dec xCX
114 mov eax, [ss:xDX + VMCSCACHE.Read.aField + xCX * 4]
115 vmread [ss:xDX + VMCSCACHE.Read.aFieldVal + xCX * 8], xAX
116 cmp xCX, 0
117 jnz %%cached_read32
118%%no_cached_read32:
119 %endif
120
121 ; Restore segment registers.
122 MYPOPSEGS xAX, ax
123
124 ; Restore general purpose registers.
125 MYPOPAD
126%endmacro
127
128
129;/**
130; * Prepares for and executes VMLAUNCH/VMRESUME (32-bit guest mode)
131; *
132; * @returns VBox status code
133; * @param fResume x86:[ebp+8], msc:rcx,gcc:rdi Whether to use vmlaunch/vmresume.
134; * @param pCtx x86:[ebp+c], msc:rdx,gcc:rsi Pointer to the guest-CPU context.
135; * @param pCache x86:[esp+10],msc:r8, gcc:rdx Pointer to the VMCS cache.
136; */
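; For reference, a plausible C-side declaration (the return and parameter types
; are assumptions inferred from the parameter list above, not copied from the
; headers):
;   DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache);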
137ALIGNCODE(16)
138BEGINPROC MY_NAME(VMXR0StartVM32)
139 push xBP
140 mov xBP, xSP
141
142 pushf
143 cli
144
145 ; Save all general purpose host registers.
146 MYPUSHAD
147
148 ; First we have to save some final CPU context registers.
149 mov eax, VMX_VMCS_HOST_RIP
150%ifdef RT_ARCH_AMD64
151 lea r10, [.vmlaunch_done wrt rip]
152 vmwrite rax, r10
153%else
154 mov ecx, .vmlaunch_done
155 vmwrite eax, ecx
156%endif
157 ; Note: assumes success!
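; (VMX_VMCS_HOST_RIP is pointed at .vmlaunch_done so that every VM-exit resumes
;  host execution there, right after the vmlaunch/vmresume below.)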
158
159 ; Save guest-CPU context pointer.
160%ifdef RT_ARCH_AMD64
161 %ifdef ASM_CALL64_GCC
162 ; fResume already in rdi
163 ; pCtx already in rsi
164 mov rbx, rdx ; pCache
165 %else
166 mov rdi, rcx ; fResume
167 mov rsi, rdx ; pCtx
168 mov rbx, r8 ; pCache
169 %endif
170%else
171 mov edi, [ebp + 8] ; fResume
172 mov esi, [ebp + 12] ; pCtx
173 mov ebx, [ebp + 16] ; pCache
174%endif
175
176 ; Save segment registers.
177 ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case).
178 MYPUSHSEGS xAX, ax
179
180%ifdef VMX_USE_CACHED_VMCS_ACCESSES
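; Flush the VMCS write cache: vmwrite each of the cValidEntries
; (aField[i], aFieldVal[i]) pairs queued in pCache->Write, then mark the cache
; empty again.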
181 mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
182 cmp ecx, 0
183 je .no_cached_writes
184 mov edx, ecx
185 mov ecx, 0
186 jmp .cached_write
187
188ALIGN(16)
189.cached_write:
190 mov eax, [xBX + VMCSCACHE.Write.aField + xCX * 4]
191 vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX * 8]
192 inc xCX
193 cmp xCX, xDX
194 jl .cached_write
195
196 mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
197.no_cached_writes:
198
199 ; Save the pCache pointer.
200 push xBX
201%endif
202
203 ; Save the pCtx pointer.
204 push xSI
205
206 ; Save host LDTR.
207 xor eax, eax
208 sldt ax
209 push xAX
210
211%ifndef VMX_SKIP_TR
212 ; The host TR limit is reset to 0x67; save & restore it manually.
213 str eax
214 push xAX
215%endif
216
217%ifndef VMX_SKIP_GDTR
218 ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
219 sub xSP, xCB * 2
220 sgdt [xSP]
221%endif
222%ifndef VMX_SKIP_IDTR
223 sub xSP, xCB * 2
224 sidt [xSP]
225%endif
226
227 ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
228 mov xBX, [xSI + CPUMCTX.cr2]
229 mov xDX, cr2
230 cmp xBX, xDX
231 je .skip_cr2_write32
232 mov cr2, xBX
233
234.skip_cr2_write32:
235 mov eax, VMX_VMCS_HOST_RSP
236 vmwrite xAX, xSP
237 ; Note: assumes success!
238 ; Don't mess with ESP anymore!!!
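; (The value just written to VMX_VMCS_HOST_RSP must match the real RSP at
;  VM-entry; any further stack adjustment would leave the VMCS copy stale and
;  the host state restoration after the VM-exit would find the wrong stack.)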
239
240 ; Load guest general purpose registers.
241 mov eax, [xSI + CPUMCTX.eax]
242 mov ebx, [xSI + CPUMCTX.ebx]
243 mov ecx, [xSI + CPUMCTX.ecx]
244 mov edx, [xSI + CPUMCTX.edx]
245 mov ebp, [xSI + CPUMCTX.ebp]
246
247 ; Resume or start VM?
248 cmp xDI, 0 ; fResume
249 je .vmlaunch_launch
250
251 ; Load guest edi & esi.
252 mov edi, [xSI + CPUMCTX.edi]
253 mov esi, [xSI + CPUMCTX.esi]
254
255 vmresume
256 jmp .vmlaunch_done; ; Here if vmresume detected a failure.
257
258.vmlaunch_launch:
259 ; Load guest edi & esi.
260 mov edi, [xSI + CPUMCTX.edi]
261 mov esi, [xSI + CPUMCTX.esi]
262
263 vmlaunch
264 jmp .vmlaunch_done; ; Here if vmlaunch detected a failure.
265
266ALIGNCODE(16) ;; @todo YASM BUG - this alignment is wrong on darwin, it's 1 byte off.
267.vmlaunch_done:
268 jc near .vmxstart_invalid_vmcs_ptr
269 jz near .vmxstart_start_failed
270
271 RESTORE_STATE_VM32
272 mov eax, VINF_SUCCESS
273
274.vmstart_end:
275 popf
276 pop xBP
277 ret
278
279.vmxstart_invalid_vmcs_ptr:
280 RESTORE_STATE_VM32
281 mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
282 jmp .vmstart_end
283
284.vmxstart_start_failed:
285 RESTORE_STATE_VM32
286 mov eax, VERR_VMX_UNABLE_TO_START_VM
287 jmp .vmstart_end
288
289ENDPROC MY_NAME(VMXR0StartVM32)
290
291
292%ifdef RT_ARCH_AMD64
293;; @def RESTORE_STATE_VM64
294; Macro restoring essential host state and updating guest state
295; for a 64-bit host and a 64-bit guest, for VT-x.
296;
297%macro RESTORE_STATE_VM64 0
298 ; Restore base and limit of the IDTR & GDTR
299 %ifndef VMX_SKIP_IDTR
300 lidt [xSP]
301 add xSP, xCB * 2
302 %endif
303 %ifndef VMX_SKIP_GDTR
304 lgdt [xSP]
305 add xSP, xCB * 2
306 %endif
307
308 push xDI
309 %ifndef VMX_SKIP_TR
310 mov xDI, [xSP + xCB * 3] ; pCtx (*3 to skip the saved xDI, TR, LDTR)
311 %else
312 mov xDI, [xSP + xCB * 2] ; pCtx (*2 to skip the saved xDI, LDTR)
313 %endif
314
315 mov qword [xDI + CPUMCTX.eax], rax
316 mov qword [xDI + CPUMCTX.ebx], rbx
317 mov qword [xDI + CPUMCTX.ecx], rcx
318 mov qword [xDI + CPUMCTX.edx], rdx
319 mov qword [xDI + CPUMCTX.esi], rsi
320 mov qword [xDI + CPUMCTX.ebp], rbp
321 mov qword [xDI + CPUMCTX.r8], r8
322 mov qword [xDI + CPUMCTX.r9], r9
323 mov qword [xDI + CPUMCTX.r10], r10
324 mov qword [xDI + CPUMCTX.r11], r11
325 mov qword [xDI + CPUMCTX.r12], r12
326 mov qword [xDI + CPUMCTX.r13], r13
327 mov qword [xDI + CPUMCTX.r14], r14
328 mov qword [xDI + CPUMCTX.r15], r15
329 mov rax, cr2
330 mov qword [xDI + CPUMCTX.cr2], rax
331
332 pop xAX ; The guest rdi we pushed above
333 mov qword [xDI + CPUMCTX.edi], rax
334
335 %ifndef VMX_SKIP_TR
336 ; Restore TSS selector; must mark it as not busy before using ltr (!)
337 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
338 ; @todo get rid of sgdt
339 pop xBX ; Saved TR
340 sub xSP, xCB * 2
341 sgdt [xSP]
342 mov xAX, xBX
343 and eax, X86_SEL_MASK_OFF_RPL ; Mask away TI and RPL bits leaving only the descriptor offset.
344 add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
345 and dword [xAX + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
346 ltr bx
347 add xSP, xCB * 2
348 %endif
349
350 pop xAX ; Saved LDTR
351 cmp eax, 0
352 je %%skip_ldt_write64
353 lldt ax
354
355%%skip_ldt_write64:
356 pop xSI ; pCtx (needed in rsi by the macros below)
357
358 %ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
359 ; Save the guest MSRs and load the host MSRs.
360 LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
361 LOADHOSTMSREX MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
362 LOADHOSTMSREX MSR_K6_STAR, CPUMCTX.msrSTAR
363 LOADHOSTMSREX MSR_K8_LSTAR, CPUMCTX.msrLSTAR
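; (These run in the reverse order of the LOADGUESTMSR calls in VMXR0StartVM64;
;  presumably the macros park the host MSR values on the stack, so the restores
;  here have to unwind in the opposite order of the saves there.)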
364 %endif
365
366 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
367 pop xDX ; Saved pCache
368
369 ; Note! If we get here as a result of an invalid VMCS pointer, all the following
370 ; vmreads will fail (only eflags.cf=1 will be set), but that shouldn't cause any
371 ; trouble; it is merely less efficient.
372 mov ecx, [xDX + VMCSCACHE.Read.cValidEntries]
373 cmp ecx, 0 ; Can't happen
374 je %%no_cached_read64
375 jmp %%cached_read64
376
377ALIGN(16)
378%%cached_read64:
379 dec xCX
380 mov eax, [xDX + VMCSCACHE.Read.aField + xCX * 4]
381 vmread [xDX + VMCSCACHE.Read.aFieldVal + xCX * 8], xAX
382 cmp xCX, 0
383 jnz %%cached_read64
384%%no_cached_read64:
385 %endif
386
387 ; Restore segment registers.
388 MYPOPSEGS xAX, ax
389
390 ; Restore general purpose registers.
391 MYPOPAD
392%endmacro
393
394
395;/**
396; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
397; *
398; * @returns VBox status code
399; * @param fResume msc:rcx, gcc:rdi Whether to use vmlaunch/vmresume.
400; * @param pCtx msc:rdx, gcc:rsi Pointer to the guest-CPU context.
401; * @param pCache msc:r8, gcc:rdx Pointer to the VMCS cache.
402; */
403ALIGNCODE(16)
404BEGINPROC MY_NAME(VMXR0StartVM64)
405 push xBP
406 mov xBP, xSP
407
408 pushf
409 cli
410
411 ; Save all general purpose host registers.
412 MYPUSHAD
413
414 ; First we have to save some final CPU context registers.
415 lea r10, [.vmlaunch64_done wrt rip]
416 mov rax, VMX_VMCS_HOST_RIP ; Return address (too difficult to continue after VMLAUNCH?).
417 vmwrite rax, r10
418 ; Note: assumes success!
419
420 ; Save guest-CPU context pointer.
421%ifdef ASM_CALL64_GCC
422 ; fResume already in rdi
423 ; pCtx already in rsi
424 mov rbx, rdx ; pCache
425%else
426 mov rdi, rcx ; fResume
427 mov rsi, rdx ; pCtx
428 mov rbx, r8 ; pCache
429%endif
430
431 ; Save segment registers.
432 ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case).
433 MYPUSHSEGS xAX, ax
434
435%ifdef VMX_USE_CACHED_VMCS_ACCESSES
436 mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
437 cmp ecx, 0
438 je .no_cached_writes
439 mov edx, ecx
440 mov ecx, 0
441 jmp .cached_write
442
443ALIGN(16)
444.cached_write:
445 mov eax, [xBX + VMCSCACHE.Write.aField + xCX * 4]
446 vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX * 8]
447 inc xCX
448 cmp xCX, xDX
449 jl .cached_write
450
451 mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
452.no_cached_writes:
453
454 ; Save the pCache pointer.
455 push xBX
456%endif
457
458%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
459 ; Save the host MSRs and load the guest MSRs.
460 LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
461 LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
462 LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
463 LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
464%endif
465
466 ; Save the pCtx pointer.
467 push xSI
468
469 ; Save host LDTR.
470 xor eax, eax
471 sldt ax
472 push xAX
473
474%ifndef VMX_SKIP_TR
475 ; The host TR limit is reset to 0x67; save & restore it manually.
476 str eax
477 push xAX
478%endif
479
480%ifndef VMX_SKIP_GDTR
481 ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
482 sub xSP, xCB * 2
483 sgdt [xSP]
484%endif
485%ifndef VMX_SKIP_IDTR
486 sub xSP, xCB * 2
487 sidt [xSP]
488%endif
489
490 ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
491 mov rbx, qword [xSI + CPUMCTX.cr2]
492 mov rdx, cr2
493 cmp rbx, rdx
494 je .skip_cr2_write
495 mov cr2, rbx
496
497.skip_cr2_write:
498 mov eax, VMX_VMCS_HOST_RSP
499 vmwrite xAX, xSP
500 ; Note: assumes success!
501 ; Don't mess with ESP anymore!!!
502
503 ; Load guest general purpose registers.
504 mov rax, qword [xSI + CPUMCTX.eax]
505 mov rbx, qword [xSI + CPUMCTX.ebx]
506 mov rcx, qword [xSI + CPUMCTX.ecx]
507 mov rdx, qword [xSI + CPUMCTX.edx]
508 mov rbp, qword [xSI + CPUMCTX.ebp]
509 mov r8, qword [xSI + CPUMCTX.r8]
510 mov r9, qword [xSI + CPUMCTX.r9]
511 mov r10, qword [xSI + CPUMCTX.r10]
512 mov r11, qword [xSI + CPUMCTX.r11]
513 mov r12, qword [xSI + CPUMCTX.r12]
514 mov r13, qword [xSI + CPUMCTX.r13]
515 mov r14, qword [xSI + CPUMCTX.r14]
516 mov r15, qword [xSI + CPUMCTX.r15]
517
518 ; Resume or start VM?
519 cmp xDI, 0 ; fResume
520 je .vmlaunch64_launch
521
522 ; Load guest rdi & rsi.
523 mov rdi, qword [xSI + CPUMCTX.edi]
524 mov rsi, qword [xSI + CPUMCTX.esi]
525
526 vmresume
527 jmp .vmlaunch64_done; ; Here if vmresume detected a failure.
528
529.vmlaunch64_launch:
530 ; Load guest rdi & rsi.
531 mov rdi, qword [xSI + CPUMCTX.edi]
532 mov rsi, qword [xSI + CPUMCTX.esi]
533
534 vmlaunch
535 jmp .vmlaunch64_done; ; Here if vmlaunch detected a failure.
536
537ALIGNCODE(16)
538.vmlaunch64_done:
539 jc near .vmxstart64_invalid_vmcs_ptr
540 jz near .vmxstart64_start_failed
541
542 RESTORE_STATE_VM64
543 mov eax, VINF_SUCCESS
544
545.vmstart64_end:
546 popf
547 pop xBP
548 ret
549
550.vmxstart64_invalid_vmcs_ptr:
551 RESTORE_STATE_VM64
552 mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
553 jmp .vmstart64_end
554
555.vmxstart64_start_failed:
556 RESTORE_STATE_VM64
557 mov eax, VERR_VMX_UNABLE_TO_START_VM
558 jmp .vmstart64_end
559ENDPROC MY_NAME(VMXR0StartVM64)
560%endif ; RT_ARCH_AMD64
561
562
563;/**
564; * Prepares for and executes VMRUN (32-bit guests)
565; *
566; * @returns VBox status code
567; * @param pVMCBHostPhys Physical address of the host VMCB.
568; * @param pVMCBPhys Physical address of the guest VMCB.
569; * @param pCtx Pointer to the guest-CPU context.
570; */
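; For reference, a plausible C-side declaration (the types are assumptions; the
; parameter names follow the comments in the body below):
;   DECLASM(int) SVMR0VMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx);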
571ALIGNCODE(16)
572BEGINPROC MY_NAME(SVMR0VMRun)
573%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
574 %ifdef ASM_CALL64_GCC
575 push rdx
576 push rsi
577 push rdi
578 %else
579 push r8
580 push rdx
581 push rcx
582 %endif
583 push 0
584%endif
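; (The pushed zero stands in for the slot that holds the return address in the
;  32-bit build, so the argument loads below can use the same
;  [xBP + xCB * 2 + ...] offsets on both architectures.)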
585 push xBP
586 mov xBP, xSP
587 pushf
588
589 ; Save all general purpose host registers.
590 MYPUSHAD
591
592 ; Save guest CPU-context pointer.
593 mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
594 push xSI ; push for saving the state at the end
595
596 ; Save host fs, gs, sysenter msr etc.
597 mov xAX, [xBP + xCB * 2] ; pVMCBHostPhys (64-bit physical address; x86: take low dword only)
598 push xAX ; save for the vmload after vmrun
599 vmsave
600
601 ; Setup eax for VMLOAD.
602 mov xAX, [xBP + xCB * 2 + RTHCPHYS_CB] ; pVMCBPhys (64-bit physical address; take low dword only)
603
604 ; Load guest general purpose registers.
605 ; eax is loaded from the VMCB by VMRUN.
606 mov ebx, [xSI + CPUMCTX.ebx]
607 mov ecx, [xSI + CPUMCTX.ecx]
608 mov edx, [xSI + CPUMCTX.edx]
609 mov edi, [xSI + CPUMCTX.edi]
610 mov ebp, [xSI + CPUMCTX.ebp]
611 mov esi, [xSI + CPUMCTX.esi]
612
613 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
614 clgi
615 sti
616
617 ; Load guest fs, gs, sysenter msr etc.
618 vmload
619 ; Run the VM.
620 vmrun
621
622 ; eax is in the VMCB already; we can use it here.
623
624 ; Save guest fs, gs, sysenter msr etc.
625 vmsave
626
627 ; Load host fs, gs, sysenter msr etc.
628 pop xAX ; Pushed above
629 vmload
630
631 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
632 cli
633 stgi
634
635 pop xAX ; pCtx
636
637 mov [ss:xAX + CPUMCTX.ebx], ebx
638 mov [ss:xAX + CPUMCTX.ecx], ecx
639 mov [ss:xAX + CPUMCTX.edx], edx
640 mov [ss:xAX + CPUMCTX.esi], esi
641 mov [ss:xAX + CPUMCTX.edi], edi
642 mov [ss:xAX + CPUMCTX.ebp], ebp
643
644 ; Restore host general purpose registers.
645 MYPOPAD
646
647 mov eax, VINF_SUCCESS
648
649 popf
650 pop xBP
651%ifdef RT_ARCH_AMD64
652 add xSP, 4*xCB
653%endif
654 ret
655ENDPROC MY_NAME(SVMR0VMRun)
656
657%ifdef RT_ARCH_AMD64
658;/**
659; * Prepares for and executes VMRUN (64-bit guests)
660; *
661; * @returns VBox status code
662; * @param pVMCBHostPhys Physical address of the host VMCB.
663; * @param pVMCBPhys Physical address of the guest VMCB.
664; * @param pCtx Pointer to the guest-CPU context.
665; */
666ALIGNCODE(16)
667BEGINPROC MY_NAME(SVMR0VMRun64)
668 ; Fake a cdecl stack frame
669 %ifdef ASM_CALL64_GCC
670 push rdx
671 push rsi
672 push rdi
673 %else
674 push r8
675 push rdx
676 push rcx
677 %endif
678 push 0
679 push rbp
680 mov rbp, rsp
681 pushf
682
683 ; Manual save and restore:
684 ; - General purpose registers except RIP, RSP, RAX
685 ;
686 ; Trashed:
687 ; - CR2 (we don't care)
688 ; - LDTR (reset to 0)
689 ; - DRx (presumably not changed at all)
690 ; - DR7 (reset to 0x400)
691 ;
692
693 ; Save all general purpose host registers.
694 MYPUSHAD
695
696 ; Save guest CPU-context pointer.
697 mov rsi, [rbp + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
698 push rsi ; push for saving the state at the end
699
700 ; Save host fs, gs, sysenter msr etc.
701 mov rax, [rbp + xCB * 2] ; pVMCBHostPhys (64-bit physical address; x86: take low dword only)
702 push rax ; Save for the vmload after vmrun
703 vmsave
704
705 ; Setup eax for VMLOAD.
706 mov rax, [rbp + xCB * 2 + RTHCPHYS_CB] ; pVMCBPhys (64-bit physical address; take low dword only)
707
708 ; Load guest general purpose registers.
709 ; rax is loaded from the VMCB by VMRUN.
710 mov rbx, qword [xSI + CPUMCTX.ebx]
711 mov rcx, qword [xSI + CPUMCTX.ecx]
712 mov rdx, qword [xSI + CPUMCTX.edx]
713 mov rdi, qword [xSI + CPUMCTX.edi]
714 mov rbp, qword [xSI + CPUMCTX.ebp]
715 mov r8, qword [xSI + CPUMCTX.r8]
716 mov r9, qword [xSI + CPUMCTX.r9]
717 mov r10, qword [xSI + CPUMCTX.r10]
718 mov r11, qword [xSI + CPUMCTX.r11]
719 mov r12, qword [xSI + CPUMCTX.r12]
720 mov r13, qword [xSI + CPUMCTX.r13]
721 mov r14, qword [xSI + CPUMCTX.r14]
722 mov r15, qword [xSI + CPUMCTX.r15]
723 mov rsi, qword [xSI + CPUMCTX.esi]
724
725 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
726 clgi
727 sti
728
729 ; Load guest fs, gs, sysenter msr etc.
730 vmload
731 ; Run the VM.
732 vmrun
733
734 ; rax is in the VMCB already; we can use it here.
735
736 ; Save guest fs, gs, sysenter msr etc.
737 vmsave
738
739 ; Load host fs, gs, sysenter msr etc.
740 pop rax ; pushed above
741 vmload
742
743 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
744 cli
745 stgi
746
747 pop rax ; pCtx
748
749 mov qword [rax + CPUMCTX.ebx], rbx
750 mov qword [rax + CPUMCTX.ecx], rcx
751 mov qword [rax + CPUMCTX.edx], rdx
752 mov qword [rax + CPUMCTX.esi], rsi
753 mov qword [rax + CPUMCTX.edi], rdi
754 mov qword [rax + CPUMCTX.ebp], rbp
755 mov qword [rax + CPUMCTX.r8], r8
756 mov qword [rax + CPUMCTX.r9], r9
757 mov qword [rax + CPUMCTX.r10], r10
758 mov qword [rax + CPUMCTX.r11], r11
759 mov qword [rax + CPUMCTX.r12], r12
760 mov qword [rax + CPUMCTX.r13], r13
761 mov qword [rax + CPUMCTX.r14], r14
762 mov qword [rax + CPUMCTX.r15], r15
763
764 ; Restore host general purpose registers.
765 MYPOPAD
766
767 mov eax, VINF_SUCCESS
768
769 popf
770 pop rbp
771 add rsp, 4 * xCB
772 ret
773ENDPROC MY_NAME(SVMR0VMRun64)
774%endif ; RT_ARCH_AMD64
775