VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMR0Mixed.mac@55715

Last change on this file since 55715 was 55290, checked in by vboxsync, 10 years ago

HM: Save/Load/Restore XCR0 handling during world switching. Implemented XSETBV for VT-x.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
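
The change referenced above (r55290) brackets the VT-x and AMD-V world switches with an XCR0 swap. As a rough sketch of the pattern used repeatedly in the file below (register aliases such as xAX/xDX and the CPUMCTX.aXcr field are taken from the file itself; this is an illustration of the idea, not the exact code):

    xor ecx, ecx
    xgetbv                            ; edx:eax = host XCR0
    push xDX                          ; stash the host value on the stack
    push xAX
    mov eax, [xSI + CPUMCTX.aXcr]     ; load the guest XCR0 (ecx is still 0)
    mov edx, [xSI + CPUMCTX.aXcr + 4]
    xsetbv
    ; ... VMLAUNCH/VMRESUME or VMRUN ...
    pop xAX                           ; restore the host XCR0
    pop xDX
    xor ecx, ecx
    xsetbv
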
; $Id: HMR0Mixed.mac 55290 2015-04-15 15:04:30Z vboxsync $
;; @file
; HM - Ring-0 Host 32/64, Guest 32/64 world-switch routines
;
; Darwin uses this to build two versions in the hybrid case.
; Included by HMR0A.asm with RT_ARCH_AMD64 defined or undefined.
;

;
; Copyright (C) 2006-2013 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;


;;
; Keep these macro definitions in this file as it gets included and compiled
; with RT_ARCH_AMD64 once and RT_ARCH_X86 once.
%undef VMX_SKIP_GDTR
%undef VMX_SKIP_IDTR
%undef VMX_SKIP_TR

%ifdef RT_ARCH_AMD64
 %define VMX_SKIP_GDTR
 %ifdef RT_OS_DARWIN
  ; Darwin (Mavericks) uses IDTR limit to store the CPU Id so we need to restore it always.
  ; See @bugref{6875}.
 %else
  %define VMX_SKIP_IDTR
 %endif
 %define VMX_SKIP_TR
%endif

;; @def RESTORE_STATE_VM32
; Macro restoring essential host state and updating guest state
; for common host, 32-bit guest for VT-x.
%macro RESTORE_STATE_VM32 0
    ; Restore base and limit of the IDTR & GDTR.
 %ifndef VMX_SKIP_IDTR
    lidt [xSP]
    add xSP, xCB * 2
 %endif
 %ifndef VMX_SKIP_GDTR
    lgdt [xSP]
    add xSP, xCB * 2
 %endif

    push xDI
 %ifndef VMX_SKIP_TR
    mov xDI, [xSP + xCB * 3] ; pCtx (*3 to skip the saved xDI, TR, LDTR).
 %else
    mov xDI, [xSP + xCB * 2] ; pCtx (*2 to skip the saved xDI, LDTR).
 %endif

    mov [ss:xDI + CPUMCTX.eax], eax
    mov [ss:xDI + CPUMCTX.ebx], ebx
    mov [ss:xDI + CPUMCTX.ecx], ecx
    mov [ss:xDI + CPUMCTX.edx], edx
    mov [ss:xDI + CPUMCTX.esi], esi
    mov [ss:xDI + CPUMCTX.ebp], ebp
    mov xAX, cr2
    mov [ss:xDI + CPUMCTX.cr2], xAX

 %ifdef RT_ARCH_AMD64
    pop xAX ; The guest edi we pushed above.
    mov dword [ss:xDI + CPUMCTX.edi], eax
 %else
    pop dword [ss:xDI + CPUMCTX.edi] ; The guest edi we pushed above.
 %endif

 %ifndef VMX_SKIP_TR
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX ; Saved TR
    sub xSP, xCB * 2
    sgdt [xSP]
    mov xAX, xBX
    and eax, X86_SEL_MASK_OFF_RPL ; Mask away TI and RPL bits leaving only the descriptor offset.
    add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
    and dword [ss:xAX + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
    ltr bx
    add xSP, xCB * 2
 %endif

    pop xAX ; Saved LDTR
 %ifdef RT_ARCH_AMD64
    cmp eax, 0
    je %%skip_ldt_write32
 %endif
    lldt ax

%%skip_ldt_write32:
    add xSP, xCB ; pCtx

 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop xDX ; Saved pCache

    ; Note! If we get here as a result of an invalid VMCS pointer, all the following
    ; vmreads will fail (only eflags.cf=1 will be set), but that shouldn't cause any
    ; trouble, just make this path less efficient.
    mov ecx, [ss:xDX + VMCSCACHE.Read.cValidEntries]
    cmp ecx, 0 ; Can't happen
    je %%no_cached_read32
    jmp %%cached_read32

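    ; The loop below reads back each field ID queued in the read cache and stores its
    ; current VMCS value, walking from the last valid entry down to entry 0.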
ALIGN(16)
%%cached_read32:
    dec xCX
    mov eax, [ss:xDX + VMCSCACHE.Read.aField + xCX * 4]
    vmread [ss:xDX + VMCSCACHE.Read.aFieldVal + xCX * 8], xAX
    cmp xCX, 0
    jnz %%cached_read32
%%no_cached_read32:
 %endif

    ; Restore segment registers.
    MYPOPSEGS xAX, ax

    ; Restore the host XCR0 if necessary.
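    ; The dword popped here is the marker pushed before the world switch: 0 means the
    ; host XCR0 was saved and must be restored (the 0 also serves as the XCR0 index in
    ; ecx for xsetbv), while 3fh means no restore is needed.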
    pop xCX
    test ecx, ecx
    jnz %%xcr0_after_skip
    pop xAX
    pop xDX
    xsetbv ; ecx is already zero.
%%xcr0_after_skip:

    ; Restore general purpose registers.
    MYPOPAD
%endmacro


;;
; Prepares for and executes VMLAUNCH/VMRESUME (32-bit guest mode)
;
; @returns VBox status code
; @param fResume x86:[ebp+8], msc:rcx,gcc:rdi Whether to use vmlaunch/vmresume.
; @param pCtx x86:[ebp+c], msc:rdx,gcc:rsi Pointer to the guest-CPU context.
; @param pCache x86:[ebp+10],msc:r8, gcc:rdx Pointer to the VMCS cache.
; @param pVM x86:[ebp+14],msc:r9, gcc:rcx Pointer to the cross context VM structure.
; @param pVCpu x86:[ebp+18],msc:[ebp+30],gcc:r8 Pointer to the cross context VMCPU structure.
;
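; For reference, the matching C-side declaration should look roughly like the following
; (derived from the @param list above; the exact IPRT typedefs are an assumption):
;     DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);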
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM32)
    push xBP
    mov xBP, xSP

    pushf
    cli

    ;
    ; Save all general purpose host registers.
    ;
    MYPUSHAD

    ;
    ; First we have to write some final guest CPU context registers.
    ;
    mov eax, VMX_VMCS_HOST_RIP
%ifdef RT_ARCH_AMD64
    lea r10, [.vmlaunch_done wrt rip]
    vmwrite rax, r10
%else
    mov ecx, .vmlaunch_done
    vmwrite eax, ecx
%endif
    ; Note: assumes success!

    ;
    ; Unify input parameter registers.
    ;
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
    mov rbx, rdx ; pCache
 %else
    mov rdi, rcx ; fResume
    mov rsi, rdx ; pCtx
    mov rbx, r8 ; pCache
 %endif
%else
    mov edi, [ebp + 8] ; fResume
    mov esi, [ebp + 12] ; pCtx
    mov ebx, [ebp + 16] ; pCache
%endif

    ;
    ; Save the host XCR0 and load the guest one if necessary.
    ; Note! Trashes rdx and rcx.
    ;
%ifdef ASM_CALL64_MSC
    mov rax, [xBP + 30h] ; pVCpu
%elifdef ASM_CALL64_GCC
    mov rax, r8 ; pVCpu
%else
    mov eax, [xBP + 18h] ; pVCpu
%endif
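    ; Only swap XCR0 below when HM has flagged it via fLoadSaveGuestXcr0; otherwise skip ahead.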
    test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
    jz .xcr0_before_skip

    xor ecx, ecx
    xgetbv ; Save the host one on the stack.
    push xDX
    push xAX

    mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.
    mov edx, [xSI + CPUMCTX.aXcr + 4]
    xor ecx, ecx ; paranoia
    xsetbv

    push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
    jmp .xcr0_before_done

.xcr0_before_skip:
    push 3fh ; indicate that we need not.
.xcr0_before_done:
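    ; The top of the stack now holds the XCR0 marker dword (0 = host XCR0 must be
    ; restored, 3fh = nothing to restore); in the restore case the saved host XCR0
    ; (low dword, then high dword) sits directly below it.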

    ;
    ; Save segment registers.
    ; Note! Trashes rdx & rcx, so we moved it here (amd64 case).
    ;
    MYPUSHSEGS xAX, ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
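    ; Flush the dirty VMCS fields queued in the write cache, then mark the cache clean.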
    mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
    cmp ecx, 0
    je .no_cached_writes
    mov edx, ecx
    mov ecx, 0
    jmp .cached_write

ALIGN(16)
.cached_write:
    mov eax, [xBX + VMCSCACHE.Write.aField + xCX * 4]
    vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX * 8]
    inc xCX
    cmp xCX, xDX
    jl .cached_write

    mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

    ; Save the pCache pointer.
    push xBX
%endif

    ; Save the pCtx pointer.
    push xSI

    ; Save host LDTR.
    xor eax, eax
    sldt ax
    push xAX

%ifndef VMX_SKIP_TR
    ; The host TR limit is reset to 0x67; save & restore it manually.
    str eax
    push xAX
%endif

%ifndef VMX_SKIP_GDTR
    ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub xSP, xCB * 2
    sgdt [xSP]
%endif
%ifndef VMX_SKIP_IDTR
    sub xSP, xCB * 2
    sidt [xSP]
%endif

    ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
    mov xBX, [xSI + CPUMCTX.cr2]
    mov xDX, cr2
    cmp xBX, xDX
    je .skip_cr2_write32
    mov cr2, xBX

.skip_cr2_write32:
    mov eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ; Note: assumes success!
    ; Don't mess with ESP anymore!!!

    ; Load guest general purpose registers.
    mov eax, [xSI + CPUMCTX.eax]
    mov ebx, [xSI + CPUMCTX.ebx]
    mov ecx, [xSI + CPUMCTX.ecx]
    mov edx, [xSI + CPUMCTX.edx]
    mov ebp, [xSI + CPUMCTX.ebp]

    ; Resume or start VM?
    cmp xDI, 0 ; fResume
    je .vmlaunch_launch

    ; Load guest edi & esi.
    mov edi, [xSI + CPUMCTX.edi]
    mov esi, [xSI + CPUMCTX.esi]

    vmresume
    jmp .vmlaunch_done ; Here if vmresume detected a failure.

.vmlaunch_launch:
    ; Load guest edi & esi.
    mov edi, [xSI + CPUMCTX.edi]
    mov esi, [xSI + CPUMCTX.esi]

    vmlaunch
    jmp .vmlaunch_done ; Here if vmlaunch detected a failure.

ALIGNCODE(16) ;; @todo YASM BUG - this alignment is wrong on darwin, it's 1 byte off.
.vmlaunch_done:
    jc near .vmxstart_invalid_vmcs_ptr
    jz near .vmxstart_start_failed

    RESTORE_STATE_VM32
    mov eax, VINF_SUCCESS

.vmstart_end:
    popf
    pop xBP
    ret

.vmxstart_invalid_vmcs_ptr:
    RESTORE_STATE_VM32
    mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
    jmp .vmstart_end

.vmxstart_start_failed:
    RESTORE_STATE_VM32
    mov eax, VERR_VMX_UNABLE_TO_START_VM
    jmp .vmstart_end

ENDPROC MY_NAME(VMXR0StartVM32)


%ifdef RT_ARCH_AMD64
;; @def RESTORE_STATE_VM64
; Macro restoring essential host state and updating guest state
; for 64-bit host, 64-bit guest for VT-x.
;
%macro RESTORE_STATE_VM64 0
    ; Restore base and limit of the IDTR & GDTR
 %ifndef VMX_SKIP_IDTR
    lidt [xSP]
    add xSP, xCB * 2
 %endif
 %ifndef VMX_SKIP_GDTR
    lgdt [xSP]
    add xSP, xCB * 2
 %endif

    push xDI
 %ifndef VMX_SKIP_TR
    mov xDI, [xSP + xCB * 3] ; pCtx (*3 to skip the saved xDI, TR, LDTR)
 %else
    mov xDI, [xSP + xCB * 2] ; pCtx (*2 to skip the saved xDI, LDTR)
 %endif

    mov qword [xDI + CPUMCTX.eax], rax
    mov qword [xDI + CPUMCTX.ebx], rbx
    mov qword [xDI + CPUMCTX.ecx], rcx
    mov qword [xDI + CPUMCTX.edx], rdx
    mov qword [xDI + CPUMCTX.esi], rsi
    mov qword [xDI + CPUMCTX.ebp], rbp
    mov qword [xDI + CPUMCTX.r8], r8
    mov qword [xDI + CPUMCTX.r9], r9
    mov qword [xDI + CPUMCTX.r10], r10
    mov qword [xDI + CPUMCTX.r11], r11
    mov qword [xDI + CPUMCTX.r12], r12
    mov qword [xDI + CPUMCTX.r13], r13
    mov qword [xDI + CPUMCTX.r14], r14
    mov qword [xDI + CPUMCTX.r15], r15
    mov rax, cr2
    mov qword [xDI + CPUMCTX.cr2], rax

    pop xAX ; The guest rdi we pushed above
    mov qword [xDI + CPUMCTX.edi], rax

 %ifndef VMX_SKIP_TR
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
    ; @todo get rid of sgdt
    pop xBX ; Saved TR
    sub xSP, xCB * 2
    sgdt [xSP]
    mov xAX, xBX
    and eax, X86_SEL_MASK_OFF_RPL ; Mask away TI and RPL bits leaving only the descriptor offset.
    add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
    and dword [xAX + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
    ltr bx
    add xSP, xCB * 2
 %endif

    pop xAX ; Saved LDTR
    cmp eax, 0
    je %%skip_ldt_write64
    lldt ax

%%skip_ldt_write64:
    pop xSI ; pCtx (needed in rsi by the macros below)

 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop xDX ; Saved pCache

    ; Note! If we get here as a result of an invalid VMCS pointer, all the following
    ; vmreads will fail (only eflags.cf=1 will be set), but that shouldn't cause any
    ; trouble, just make this path less efficient.
    mov ecx, [xDX + VMCSCACHE.Read.cValidEntries]
    cmp ecx, 0 ; Can't happen
    je %%no_cached_read64
    jmp %%cached_read64

ALIGN(16)
%%cached_read64:
    dec xCX
    mov eax, [xDX + VMCSCACHE.Read.aField + xCX * 4]
    vmread [xDX + VMCSCACHE.Read.aFieldVal + xCX * 8], xAX
    cmp xCX, 0
    jnz %%cached_read64
%%no_cached_read64:
 %endif

    ; Restore segment registers.
    MYPOPSEGS xAX, ax

    ; Restore the host XCR0 if necessary.
    pop xCX
    test ecx, ecx
    jnz %%xcr0_after_skip
    pop xAX
    pop xDX
    xsetbv ; ecx is already zero.
%%xcr0_after_skip:

    ; Restore general purpose registers.
    MYPOPAD
%endmacro


;;
; Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
;
; @returns VBox status code
; @param fResume msc:rcx, gcc:rdi Whether to use vmlaunch/vmresume.
; @param pCtx msc:rdx, gcc:rsi Pointer to the guest-CPU context.
; @param pCache msc:r8, gcc:rdx Pointer to the VMCS cache.
; @param pVM msc:r9, gcc:rcx Pointer to the cross context VM structure.
; @param pVCpu msc:[ebp+30], gcc:r8 Pointer to the cross context VMCPU structure.
;
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM64)
    push xBP
    mov xBP, xSP

    pushf
    cli

    ; Save all general purpose host registers.
    MYPUSHAD

    ; First we have to save some final CPU context registers.
    lea r10, [.vmlaunch64_done wrt rip]
    mov rax, VMX_VMCS_HOST_RIP ; Return address (too difficult to continue after VMLAUNCH?).
    vmwrite rax, r10
    ; Note: assumes success!

    ;
    ; Unify the input parameter registers.
    ;
%ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
    mov rbx, rdx ; pCache
%else
    mov rdi, rcx ; fResume
    mov rsi, rdx ; pCtx
    mov rbx, r8 ; pCache
%endif

    ;
    ; Save the host XCR0 and load the guest one if necessary.
    ; Note! Trashes rdx and rcx.
    ;
%ifdef ASM_CALL64_MSC
    mov rax, [xBP + 30h] ; pVCpu
%else
    mov rax, r8 ; pVCpu
%endif
    test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
    jz .xcr0_before_skip

    xor ecx, ecx
    xgetbv ; Save the host one on the stack.
    push xDX
    push xAX

    mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.
    mov edx, [xSI + CPUMCTX.aXcr + 4]
    xor ecx, ecx ; paranoia
    xsetbv

    push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
    jmp .xcr0_before_done

.xcr0_before_skip:
    push 3fh ; indicate that we need not.
.xcr0_before_done:

    ;
    ; Save segment registers.
    ; Note! Trashes rdx & rcx, so we moved it here (amd64 case).
    ;
    MYPUSHSEGS xAX, ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
    cmp ecx, 0
    je .no_cached_writes
    mov edx, ecx
    mov ecx, 0
    jmp .cached_write

ALIGN(16)
.cached_write:
    mov eax, [xBX + VMCSCACHE.Write.aField + xCX * 4]
    vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX * 8]
    inc xCX
    cmp xCX, xDX
    jl .cached_write

    mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

    ; Save the pCache pointer.
    push xBX
%endif

    ; Save the pCtx pointer.
    push xSI

    ; Save host LDTR.
    xor eax, eax
    sldt ax
    push xAX

%ifndef VMX_SKIP_TR
    ; The host TR limit is reset to 0x67; save & restore it manually.
    str eax
    push xAX
%endif

%ifndef VMX_SKIP_GDTR
    ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub xSP, xCB * 2
    sgdt [xSP]
%endif
%ifndef VMX_SKIP_IDTR
    sub xSP, xCB * 2
    sidt [xSP]
%endif

    ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
    mov rbx, qword [xSI + CPUMCTX.cr2]
    mov rdx, cr2
    cmp rbx, rdx
    je .skip_cr2_write
    mov cr2, rbx

.skip_cr2_write:
    mov eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ; Note: assumes success!
    ; Don't mess with ESP anymore!!!

    ; Load guest general purpose registers.
    mov rax, qword [xSI + CPUMCTX.eax]
    mov rbx, qword [xSI + CPUMCTX.ebx]
    mov rcx, qword [xSI + CPUMCTX.ecx]
    mov rdx, qword [xSI + CPUMCTX.edx]
    mov rbp, qword [xSI + CPUMCTX.ebp]
    mov r8, qword [xSI + CPUMCTX.r8]
    mov r9, qword [xSI + CPUMCTX.r9]
    mov r10, qword [xSI + CPUMCTX.r10]
    mov r11, qword [xSI + CPUMCTX.r11]
    mov r12, qword [xSI + CPUMCTX.r12]
    mov r13, qword [xSI + CPUMCTX.r13]
    mov r14, qword [xSI + CPUMCTX.r14]
    mov r15, qword [xSI + CPUMCTX.r15]

    ; Resume or start VM?
    cmp xDI, 0 ; fResume
    je .vmlaunch64_launch

    ; Load guest rdi & rsi.
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rsi, qword [xSI + CPUMCTX.esi]

    vmresume
    jmp .vmlaunch64_done ; Here if vmresume detected a failure.

.vmlaunch64_launch:
    ; Load guest rdi & rsi.
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rsi, qword [xSI + CPUMCTX.esi]

    vmlaunch
    jmp .vmlaunch64_done ; Here if vmlaunch detected a failure.

ALIGNCODE(16)
.vmlaunch64_done:
    jc near .vmxstart64_invalid_vmcs_ptr
    jz near .vmxstart64_start_failed

    RESTORE_STATE_VM64
    mov eax, VINF_SUCCESS

.vmstart64_end:
    popf
    pop xBP
    ret

.vmxstart64_invalid_vmcs_ptr:
    RESTORE_STATE_VM64
    mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
    jmp .vmstart64_end

.vmxstart64_start_failed:
    RESTORE_STATE_VM64
    mov eax, VERR_VMX_UNABLE_TO_START_VM
    jmp .vmstart64_end
ENDPROC MY_NAME(VMXR0StartVM64)
%endif ; RT_ARCH_AMD64


;;
; Prepares for and executes VMRUN (32-bit guests)
;
; @returns VBox status code
; @param HCPhysVMCB Physical address of host VMCB.
; @param HCPhysVMCB Physical address of guest VMCB.
; @param pCtx Pointer to the guest-CPU context.
; @param pVM msc:r9, gcc:rcx Pointer to the cross context VM structure.
; @param pVCpu msc:[rsp+28],gcc:r8 Pointer to the cross context VMCPU structure.
;
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun)
%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push r8
    push rcx
    push rdx
    push rsi
    push rdi
 %else
    mov rax, [rsp + 28h]
    push rax ; pVCpu
    push r9 ; pVM
    push r8 ; pCtx
    push rdx ; HCPHYSGuestVMCB
    push rcx ; HCPhysHostVMCB
 %endif
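    ; Fake return address so [xBP + xCB * 2] lines up with the first parameter in both
    ; the 32-bit and 64-bit builds.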
    push 0
%endif
    push xBP
    mov xBP, xSP
    pushf

    ;
    ; Save all general purpose host registers.
    ;
    MYPUSHAD

    ;
    ; Load pCtx into xSI.
    ;
    mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx

    ;
    ; Save the host XCR0 and load the guest one if necessary.
    ;
    mov xAX, [xBP + xCB * 2 + RTHCPHYS_CB * 2 + xCB * 2] ; pVCpu
    test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
    jz .xcr0_before_skip

    xor ecx, ecx
    xgetbv ; Save the host one on the stack.
    push xDX
    push xAX

    mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
    mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.
    mov edx, [xSI + CPUMCTX.aXcr + 4]
    xor ecx, ecx ; paranoia
    xsetbv

    push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
    jmp .xcr0_before_done

.xcr0_before_skip:
    push 3fh ; indicate that we need not.
.xcr0_before_done:

    ;
    ; Save guest CPU-context pointer for simplifying saving of the GPRs afterwards.
    ;
    push xSI

    ; Save host fs, gs, sysenter msr etc.
    mov xAX, [xBP + xCB * 2] ; pVMCBHostPhys (64-bit physical address; x86: take low dword only)
    push xAX ; save for the vmload after vmrun
    vmsave

    ; Set up eax for VMLOAD.
    mov xAX, [xBP + xCB * 2 + RTHCPHYS_CB] ; pVMCBPhys (64-bit physical address; take low dword only)

    ; Load guest general purpose registers.
    ; eax is loaded from the VMCB by VMRUN.
    mov ebx, [xSI + CPUMCTX.ebx]
    mov ecx, [xSI + CPUMCTX.ecx]
    mov edx, [xSI + CPUMCTX.edx]
    mov edi, [xSI + CPUMCTX.edi]
    mov ebp, [xSI + CPUMCTX.ebp]
    mov esi, [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
    clgi
    sti

    ; Load guest fs, gs, sysenter msr etc.
    vmload
    ; Run the VM.
    vmrun

    ; eax is in the VMCB already; we can use it here.

    ; Save guest fs, gs, sysenter msr etc.
    vmsave

    ; Load host fs, gs, sysenter msr etc.
    pop xAX ; Pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    ;
    ; Pop the context pointer (pushed above) and save the guest GPRs (sans RSP and RAX).
    ;
    pop xAX

    mov [ss:xAX + CPUMCTX.ebx], ebx
    mov [ss:xAX + CPUMCTX.ecx], ecx
    mov [ss:xAX + CPUMCTX.edx], edx
    mov [ss:xAX + CPUMCTX.esi], esi
    mov [ss:xAX + CPUMCTX.edi], edi
    mov [ss:xAX + CPUMCTX.ebp], ebp

    ;
    ; Restore the host xcr0 if necessary.
    ;
    pop xCX
    test ecx, ecx
    jnz .xcr0_after_skip
    pop xAX
    pop xDX
    xsetbv ; ecx is already zero.
.xcr0_after_skip:

    ;
    ; Restore host general purpose registers.
    ;
    MYPOPAD

    mov eax, VINF_SUCCESS

    popf
    pop xBP
%ifdef RT_ARCH_AMD64
    add xSP, 6*xCB
%endif
    ret
ENDPROC MY_NAME(SVMR0VMRun)

%ifdef RT_ARCH_AMD64
;;
; Prepares for and executes VMRUN (64-bit guests)
;
; @returns VBox status code
; @param HCPhysVMCB Physical address of host VMCB.
; @param HCPhysVMCB Physical address of guest VMCB.
; @param pCtx Pointer to the guest-CPU context.
; @param pVM msc:r9, gcc:rcx Pointer to the cross context VM structure.
; @param pVCpu msc:[rsp+28],gcc:r8 Pointer to the cross context VMCPU structure.
;
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun64)
    ; Fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push r8
    push rcx
    push rdx
    push rsi
    push rdi
 %else
    mov rax, [rsp + 28h]
    push rax ; rbp + 30h pVCpu
    push r9 ; rbp + 28h pVM
    push r8 ; rbp + 20h pCtx
    push rdx ; rbp + 18h HCPHYSGuestVMCB
    push rcx ; rbp + 10h HCPhysHostVMCB
 %endif
    push 0 ; rbp + 08h "fake ret addr"
    push rbp ; rbp + 00h
    mov rbp, rsp
    pushf

    ; Manual save and restore:
    ; - General purpose registers except RIP, RSP, RAX
    ;
    ; Trashed:
    ; - CR2 (we don't care)
    ; - LDTR (reset to 0)
    ; - DRx (presumably not changed at all)
    ; - DR7 (reset to 0x400)
    ;

    ;
    ; Save all general purpose host registers.
    ;
    MYPUSHAD

    ;
    ; Load pCtx into xSI.
    ;
    mov xSI, [rbp + xCB * 2 + RTHCPHYS_CB * 2]

    ;
    ; Save the host XCR0 and load the guest one if necessary.
    ;
    mov rax, [xBP + 30h] ; pVCpu
    test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
    jz .xcr0_before_skip

    xor ecx, ecx
    xgetbv ; Save the host one on the stack.
    push xDX
    push xAX

    mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
    mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.
    mov edx, [xSI + CPUMCTX.aXcr + 4]
    xor ecx, ecx ; paranoia
    xsetbv

    push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
    jmp .xcr0_before_done

.xcr0_before_skip:
    push 3fh ; indicate that we need not.
.xcr0_before_done:

    ;
    ; Save guest CPU-context pointer for simplifying saving of the GPRs afterwards.
    ;
    push rsi

    ;
    ; Save host fs, gs, sysenter msr etc.
    ;
    mov rax, [rbp + xCB * 2] ; pVMCBHostPhys (64-bit physical address; x86: take low dword only)
    push rax ; Save for the vmload after vmrun
    vmsave

    ; Set up eax for VMLOAD.
    mov rax, [rbp + xCB * 2 + RTHCPHYS_CB] ; pVMCBPhys (64-bit physical address; take low dword only)

    ; Load guest general purpose registers.
    ; rax is loaded from the VMCB by VMRUN.
    mov rbx, qword [xSI + CPUMCTX.ebx]
    mov rcx, qword [xSI + CPUMCTX.ecx]
    mov rdx, qword [xSI + CPUMCTX.edx]
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rbp, qword [xSI + CPUMCTX.ebp]
    mov r8, qword [xSI + CPUMCTX.r8]
    mov r9, qword [xSI + CPUMCTX.r9]
    mov r10, qword [xSI + CPUMCTX.r10]
    mov r11, qword [xSI + CPUMCTX.r11]
    mov r12, qword [xSI + CPUMCTX.r12]
    mov r13, qword [xSI + CPUMCTX.r13]
    mov r14, qword [xSI + CPUMCTX.r14]
    mov r15, qword [xSI + CPUMCTX.r15]
    mov rsi, qword [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
    clgi
    sti

    ; Load guest fs, gs, sysenter msr etc.
    vmload
    ; Run the VM.
    vmrun

    ; rax is in the VMCB already; we can use it here.

    ; Save guest fs, gs, sysenter msr etc.
    vmsave

    ;
    ; Load host fs, gs, sysenter msr etc.
    ;
    pop rax ; pushed above
    vmload

    ;
    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    ;
    cli
    stgi

    ;
    ; Pop the context pointer (pushed above) and save the guest GPRs (sans RSP and RAX).
    ;
    pop rax

    mov qword [rax + CPUMCTX.ebx], rbx
    mov qword [rax + CPUMCTX.ecx], rcx
    mov qword [rax + CPUMCTX.edx], rdx
    mov qword [rax + CPUMCTX.esi], rsi
    mov qword [rax + CPUMCTX.edi], rdi
    mov qword [rax + CPUMCTX.ebp], rbp
    mov qword [rax + CPUMCTX.r8], r8
    mov qword [rax + CPUMCTX.r9], r9
    mov qword [rax + CPUMCTX.r10], r10
    mov qword [rax + CPUMCTX.r11], r11
    mov qword [rax + CPUMCTX.r12], r12
    mov qword [rax + CPUMCTX.r13], r13
    mov qword [rax + CPUMCTX.r14], r14
    mov qword [rax + CPUMCTX.r15], r15

    ;
    ; Restore the host xcr0 if necessary.
    ;
    pop xCX
    test ecx, ecx
    jnz .xcr0_after_skip
    pop xAX
    pop xDX
    xsetbv ; ecx is already zero.
.xcr0_after_skip:

    ;
    ; Restore host general purpose registers.
    ;
    MYPOPAD

    mov eax, VINF_SUCCESS

    popf
    pop rbp
    add rsp, 6 * xCB
    ret
ENDPROC MY_NAME(SVMR0VMRun64)
%endif ; RT_ARCH_AMD64
