; $Id: HMR0Mixed.mac 48580 2013-09-20 11:53:05Z vboxsync $
;; @file
; HM - Ring-0 Host 32/64, Guest 32/64 world-switch routines
;
; Darwin uses this to build two versions in the hybrid case.
; Included by HMR0A.asm with RT_ARCH_AMD64 defined or undefined.
;

;
; Copyright (C) 2006-2013 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

%ifdef RT_ARCH_AMD64
 ;;
 ; Keep these macro definitions in this file as it gets included and compiled
 ; with RT_ARCH_AMD64 once and RT_ARCH_X86 once.
 %define VMX_SKIP_GDTR
 %ifndef RT_OS_DARWIN
  ; Darwin (Mavericks) uses the IDTR limit to store the CPU ID, so we need to restore it always. See @bugref{6875}.
  %ifndef RT_OS_WINDOWS
   ; Windows 8.1 RTM experiment.
   %define VMX_SKIP_IDTR
  %endif
 %endif
 %define VMX_SKIP_TR
%endif
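; Note: the VMX_SKIP_* symbols above control whether the world-switch code
; below saves and reloads GDTR, IDTR and TR around VMLAUNCH/VMRESUME. When a
; symbol is defined the matching sgdt/lgdt, sidt/lidt or str/ltr pair is
; compiled out; presumably the host value either survives the VM-exit or is
; restored elsewhere, while the OS-specific %ifndef blocks above opt back into
; the full restore where that assumption does not hold.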

;; @def RESTORESTATEVM32
; Macro restoring essential host state and updating guest state
; for common host, 32-bit guest for VT-x.
;
; @param 1 Jump label suffix 1.
; @param 2 Jump label suffix 2.
; @param 3 Jump label suffix 3.
%macro RESTORESTATEVM32 3
    ; Restore base and limit of the IDTR & GDTR.
 %ifndef VMX_SKIP_IDTR
    lidt [xSP]
    add xSP, xCB * 2
 %endif
 %ifndef VMX_SKIP_GDTR
    lgdt [xSP]
    add xSP, xCB * 2
 %endif
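    ; Note: the memory operand of sidt/sgdt and lidt/lgdt is a pseudo-descriptor,
    ; roughly the following C sketch (hypothetical names, for illustration only):
    ;     #pragma pack(2)
    ;     struct XDTR { uint16_t cbLimit; uintptr_t uAddr; };
    ; i.e. 6 bytes on x86 and 10 bytes on amd64, so the xCB * 2 bytes reserved
    ; on the stack (8 or 16) are large enough in either build of this file.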

    push xDI
 %ifndef VMX_SKIP_TR
    mov xDI, [xSP + xCB * 3] ; pCtx (*3 to skip the saved xDI, TR, LDTR).
 %else
    mov xDI, [xSP + xCB * 2] ; pCtx (*2 to skip the saved xDI, LDTR).
 %endif

    mov [ss:xDI + CPUMCTX.eax], eax
    mov [ss:xDI + CPUMCTX.ebx], ebx
    mov [ss:xDI + CPUMCTX.ecx], ecx
    mov [ss:xDI + CPUMCTX.edx], edx
    mov [ss:xDI + CPUMCTX.esi], esi
    mov [ss:xDI + CPUMCTX.ebp], ebp
    mov xAX, cr2
    mov [ss:xDI + CPUMCTX.cr2], xAX

 %ifdef RT_ARCH_AMD64
    pop xAX ; The guest edi we pushed above.
    mov dword [ss:xDI + CPUMCTX.edi], eax
 %else
    pop dword [ss:xDI + CPUMCTX.edi] ; The guest edi we pushed above.
 %endif

 %ifndef VMX_SKIP_TR
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX ; Saved TR
    sub xSP, xCB * 2
    sgdt [xSP]
    mov xAX, xBX
    and eax, X86_SEL_MASK_OFF_RPL ; Mask away TI and RPL bits leaving only the descriptor offset.
    add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
    and dword [ss:xAX + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
    ltr bx
    add xSP, xCB * 2
 %endif
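    ; Note: a VM-exit restores only the TR selector and forces its limit to
    ; 0x67, so TR has to be reloaded from the GDT here. ltr raises #GP if the
    ; referenced TSS descriptor is marked busy, and the host's descriptor still
    ; carries the busy type bit (bit 9 of the second dword, i.e. bit 41 of the
    ; descriptor); hence the code above clears it first at GDTR.base + the
    ; selector's descriptor offset.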

    pop xAX ; Saved LDTR
 %ifdef RT_ARCH_AMD64
    cmp eax, 0
    je .skipldtwrite32%1
 %endif
    lldt ax

.skipldtwrite32%1:
    add xSP, xCB ; pCtx

 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop xDX ; Saved pCache

    ; Note! If we get here as a result of an invalid VMCS pointer, all the following
    ; vmreads will fail (only eflags.cf=1 will be set), but that shouldn't cause any
    ; trouble; it is just less efficient.
    mov ecx, [ss:xDX + VMCSCACHE.Read.cValidEntries]
    cmp ecx, 0 ; Can't happen
    je .no_cached_read32%2
    jmp .cached_read32%3

ALIGN(16)
.cached_read32%3:
    dec xCX
    mov eax, [ss:xDX + VMCSCACHE.Read.aField + xCX * 4]
    vmread [ss:xDX + VMCSCACHE.Read.aFieldVal + xCX * 8], xAX
    cmp xCX, 0
    jnz .cached_read32%3
.no_cached_read32%2:
 %endif
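    ; Note: the loop above walks VMCSCACHE.Read.aField[0..cValidEntries-1]
    ; backwards and vmreads each field into the matching aFieldVal slot, so
    ; callers can pick the values out of the cache afterwards instead of
    ; issuing their own VMREADs.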

    ; Restore segment registers.
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers.
    MYPOPAD
%endmacro


;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (32-bit guest mode)
; *
; * @returns VBox status code
; * @param fResume x86:[ebp+8], msc:rcx, gcc:rdi vmlaunch/vmresume
; * @param pCtx x86:[ebp+c], msc:rdx, gcc:rsi Guest context
; * @param pCache x86:[ebp+10], msc:r8, gcc:rdx VMCS cache
; */
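; A C-side declaration matching the parameter mapping above would look roughly
; like the following sketch (the exact types used by the VirtualBox headers may
; differ):
;     DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache);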
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM32)
    push xBP
    mov xBP, xSP

    pushf
    cli

    ; Save all general purpose host registers.
    MYPUSHAD

    ; First we have to save some final CPU context registers.
    mov eax, VMX_VMCS_HOST_RIP
%ifdef RT_ARCH_AMD64
    lea r10, [.vmlaunch_done wrt rip]
    vmwrite rax, r10
%else
    mov ecx, .vmlaunch_done
    vmwrite eax, ecx
%endif
    ; Note: assumes success!
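    ; Note: VMX_VMCS_HOST_RIP now points at .vmlaunch_done, so every VM-exit
    ; resumes host execution at that label with the host state taken from the
    ; VMCS host-state area.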

    ; Save the Guest CPU context pointer.
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
    mov rbx, rdx ; pCache
 %else
    mov rdi, rcx ; fResume
    mov rsi, rdx ; pCtx
    mov rbx, r8 ; pCache
 %endif
%else
    mov edi, [ebp + 8] ; fResume
    mov esi, [ebp + 12] ; pCtx
    mov ebx, [ebp + 16] ; pCache
%endif

    ; Save segment registers.
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case).
    MYPUSHSEGS xAX, ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
    cmp ecx, 0
    je .no_cached_writes
    mov edx, ecx
    mov ecx, 0
    jmp .cached_write

ALIGN(16)
.cached_write:
    mov eax, [xBX + VMCSCACHE.Write.aField + xCX * 4]
    vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX * 8]
    inc xCX
    cmp xCX, xDX
    jl .cached_write

    mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

    ; Save the pCache pointer.
    push xBX
%endif

    ; Save the pCtx pointer.
    push xSI

    ; Save LDTR.
    xor eax, eax
    sldt ax
    push xAX

%ifndef VMX_SKIP_TR
    ; The TR limit is reset to 0x67; restore it manually.
    str eax
    push xAX
%endif

%ifndef VMX_SKIP_GDTR
    ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub xSP, xCB * 2
    sgdt [xSP]
%endif
%ifndef VMX_SKIP_IDTR
    sub xSP, xCB * 2
    sidt [xSP]
%endif
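    ; Note: the push order above (pCache, pCtx, LDTR, TR, GDTR, IDTR) is the
    ; exact mirror of the pop/add order in RESTORESTATEVM32, which unwinds this
    ; stack after the VM-exit.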

    ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
    mov xBX, [xSI + CPUMCTX.cr2]
    mov xDX, cr2
    cmp xBX, xDX
    je .skipcr2write32
    mov cr2, xBX

.skipcr2write32:
    mov eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ; Note: assumes success!
    ; Don't mess with ESP anymore!!!
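    ; Note: VMX_VMCS_HOST_RSP was just loaded with the current stack pointer,
    ; so the host resumes at .vmlaunch_done with exactly this RSP; any further
    ; push/pop before the VM entry would leave the saved values at the wrong
    ; offsets when RESTORESTATEVM32 unwinds them.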

    ; Load Guest's general purpose registers.
    mov eax, [xSI + CPUMCTX.eax]
    mov ebx, [xSI + CPUMCTX.ebx]
    mov ecx, [xSI + CPUMCTX.ecx]
    mov edx, [xSI + CPUMCTX.edx]
    mov ebp, [xSI + CPUMCTX.ebp]

    ; Resume or start?
    cmp xDI, 0 ; fResume
    je .vmlaunch_launch

    ; Restore edi & esi.
    mov edi, [xSI + CPUMCTX.edi]
    mov esi, [xSI + CPUMCTX.esi]

    vmresume
    jmp .vmlaunch_done ; Here if vmresume detected a failure.

.vmlaunch_launch:
    ; Restore edi & esi.
    mov edi, [xSI + CPUMCTX.edi]
    mov esi, [xSI + CPUMCTX.esi]

    vmlaunch
    jmp .vmlaunch_done ; Here if vmlaunch detected a failure.

ALIGNCODE(16) ;; @todo YASM BUG - this alignment is wrong on darwin, it's 1 byte off.
.vmlaunch_done:
    jc near .vmxstart_invalid_vmcs_ptr
    jz near .vmxstart_start_failed
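    ; Note: on a failed VM entry the CPU reports VMfailInvalid by setting CF
    ; (no current/valid VMCS pointer) and VMfailValid by setting ZF (error code
    ; in the VM-instruction error field), which is what the jc/jz pair above
    ; distinguishes.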

    RESTORESTATEVM32 A, B, C
    mov eax, VINF_SUCCESS

.vmstart_end:
    popf
    pop xBP
    ret

.vmxstart_invalid_vmcs_ptr:
    RESTORESTATEVM32 D, E, F
    mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
    jmp .vmstart_end

.vmxstart_start_failed:
    RESTORESTATEVM32 G, H, I
    mov eax, VERR_VMX_UNABLE_TO_START_VM
    jmp .vmstart_end

ENDPROC MY_NAME(VMXR0StartVM32)


%ifdef RT_ARCH_AMD64
;; @def RESTORESTATEVM64
; Macro restoring essential host state and updating guest state
; for 64-bit host, 64-bit guest for VT-x.
;
; @param 1 Jump label suffix 1.
; @param 2 Jump label suffix 2.
; @param 3 Jump label suffix 3.
%macro RESTORESTATEVM64 3
    ; Restore base and limit of the IDTR & GDTR
 %ifndef VMX_SKIP_IDTR
    lidt [xSP]
    add xSP, xCB * 2
 %endif
 %ifndef VMX_SKIP_GDTR
    lgdt [xSP]
    add xSP, xCB * 2
 %endif

    push xDI
 %ifndef VMX_SKIP_TR
    mov xDI, [xSP + xCB * 3] ; pCtx (*3 to skip the saved xDI, TR, LDTR)
 %else
    mov xDI, [xSP + xCB * 2] ; pCtx (*2 to skip the saved xDI, LDTR)
 %endif

    mov qword [xDI + CPUMCTX.eax], rax
    mov qword [xDI + CPUMCTX.ebx], rbx
    mov qword [xDI + CPUMCTX.ecx], rcx
    mov qword [xDI + CPUMCTX.edx], rdx
    mov qword [xDI + CPUMCTX.esi], rsi
    mov qword [xDI + CPUMCTX.ebp], rbp
    mov qword [xDI + CPUMCTX.r8], r8
    mov qword [xDI + CPUMCTX.r9], r9
    mov qword [xDI + CPUMCTX.r10], r10
    mov qword [xDI + CPUMCTX.r11], r11
    mov qword [xDI + CPUMCTX.r12], r12
    mov qword [xDI + CPUMCTX.r13], r13
    mov qword [xDI + CPUMCTX.r14], r14
    mov qword [xDI + CPUMCTX.r15], r15
    mov rax, cr2
    mov qword [xDI + CPUMCTX.cr2], rax

    pop xAX ; The guest edi we pushed above
    mov qword [xDI + CPUMCTX.edi], rax

 %ifndef VMX_SKIP_TR
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
    ; @todo get rid of sgdt
    pop xBX ; Saved TR
    sub xSP, xCB * 2
    sgdt [xSP]
    mov xAX, xBX
    and eax, X86_SEL_MASK_OFF_RPL ; Mask away TI and RPL bits leaving only the descriptor offset.
    add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
    and dword [xAX + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
    ltr bx
    add xSP, xCB * 2
 %endif

    pop xAX ; Saved LDTR
    cmp eax, 0
    je .skipldtwrite64%1
    lldt ax

.skipldtwrite64%1:
    pop xSI ; pCtx (needed in rsi by the macros below)

 %ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    ; Save the guest MSRs and load the host MSRs.
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    LOADHOSTMSREX MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    LOADHOSTMSREX MSR_K6_STAR, CPUMCTX.msrSTAR
    LOADHOSTMSREX MSR_K8_LSTAR, CPUMCTX.msrLSTAR
 %endif
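    ; Note: KERNEL_GS_BASE, SFMASK, STAR and LSTAR are the SYSCALL/SWAPGS MSRs;
    ; they are not part of the VMCS guest-state area, so without the auto MSR
    ; load/store feature they have to be swapped by hand on every world switch
    ; (LOADGUESTMSR before the VM entry, LOADHOSTMSREX here after the exit).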

 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop xDX ; Saved pCache

    ; Note! If we get here as a result of an invalid VMCS pointer, all the following
    ; vmreads will fail (only eflags.cf=1 will be set), but that shouldn't cause any
    ; trouble; it is just less efficient.
    mov ecx, [xDX + VMCSCACHE.Read.cValidEntries]
    cmp ecx, 0 ; Can't happen
    je .no_cached_read64%2
    jmp .cached_read64%3

ALIGN(16)
.cached_read64%3:
    dec xCX
    mov eax, [xDX + VMCSCACHE.Read.aField + xCX * 4]
    vmread [xDX + VMCSCACHE.Read.aFieldVal + xCX * 8], xAX
    cmp xCX, 0
    jnz .cached_read64%3
.no_cached_read64%2:
 %endif

    ; Restore segment registers.
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers.
    MYPOPAD
%endmacro


;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
; *
; * @returns VBox status code
; * @param fResume msc:rcx, gcc:rdi vmlaunch/vmresume
; * @param pCtx msc:rdx, gcc:rsi Guest context
; * @param pCache msc:r8, gcc:rdx VMCS cache
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM64)
    push xBP
    mov xBP, xSP

    pushf
    cli

    ; Save all general purpose host registers.
    MYPUSHAD

    ; First we have to save some final CPU context registers.
    lea r10, [.vmlaunch64_done wrt rip]
    mov rax, VMX_VMCS_HOST_RIP ; Return address (too difficult to continue after VMLAUNCH?).
    vmwrite rax, r10
    ; Note: assumes success!

    ; Save the Guest CPU context pointer.
%ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
    mov rbx, rdx ; pCache
%else
    mov rdi, rcx ; fResume
    mov rsi, rdx ; pCtx
    mov rbx, r8 ; pCache
%endif

    ; Save segment registers.
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case).
    MYPUSHSEGS xAX, ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
    cmp ecx, 0
    je .no_cached_writes
    mov edx, ecx
    mov ecx, 0
    jmp .cached_write

ALIGN(16)
.cached_write:
    mov eax, [xBX + VMCSCACHE.Write.aField + xCX * 4]
    vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX * 8]
    inc xCX
    cmp xCX, xDX
    jl .cached_write

    mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

    ; Save the pCache pointer.
    push xBX
%endif

%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    ; Save the host MSRs and load the guest MSRs.
    LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
    LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
    LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
%endif

    ; Save the pCtx pointer.
    push xSI

    ; Save LDTR.
    xor eax, eax
    sldt ax
    push xAX

%ifndef VMX_SKIP_TR
    ; The TR limit is reset to 0x67; restore it manually.
    str eax
    push xAX
%endif

    ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
%ifndef VMX_SKIP_GDTR
    sub xSP, xCB * 2
    sgdt [xSP]
%endif
%ifndef VMX_SKIP_IDTR
    sub xSP, xCB * 2
    sidt [xSP]
%endif

    ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
    mov rbx, qword [xSI + CPUMCTX.cr2]
    mov rdx, cr2
    cmp rbx, rdx
    je .skipcr2write
    mov cr2, rbx

.skipcr2write:
    mov eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ; Note: assumes success!
    ; Don't mess with ESP anymore!!!

    ; Restore Guest's general purpose registers.
    mov rax, qword [xSI + CPUMCTX.eax]
    mov rbx, qword [xSI + CPUMCTX.ebx]
    mov rcx, qword [xSI + CPUMCTX.ecx]
    mov rdx, qword [xSI + CPUMCTX.edx]
    mov rbp, qword [xSI + CPUMCTX.ebp]
    mov r8, qword [xSI + CPUMCTX.r8]
    mov r9, qword [xSI + CPUMCTX.r9]
    mov r10, qword [xSI + CPUMCTX.r10]
    mov r11, qword [xSI + CPUMCTX.r11]
    mov r12, qword [xSI + CPUMCTX.r12]
    mov r13, qword [xSI + CPUMCTX.r13]
    mov r14, qword [xSI + CPUMCTX.r14]
    mov r15, qword [xSI + CPUMCTX.r15]

    ; Resume or start?
    cmp xDI, 0 ; fResume
    je .vmlaunch64_launch

    ; Restore rdi & rsi.
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rsi, qword [xSI + CPUMCTX.esi]

    vmresume
    jmp .vmlaunch64_done ; Here if vmresume detected a failure.

.vmlaunch64_launch:
    ; Restore rdi & rsi.
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rsi, qword [xSI + CPUMCTX.esi]

    vmlaunch
    jmp .vmlaunch64_done ; Here if vmlaunch detected a failure.

ALIGNCODE(16)
.vmlaunch64_done:
    jc near .vmxstart64_invalid_vmcs_ptr
    jz near .vmxstart64_start_failed

    RESTORESTATEVM64 a, b, c
    mov eax, VINF_SUCCESS

.vmstart64_end:
    popf
    pop xBP
    ret

.vmxstart64_invalid_vmcs_ptr:
    RESTORESTATEVM64 d, e, f
    mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
    jmp .vmstart64_end

.vmxstart64_start_failed:
    RESTORESTATEVM64 g, h, i
    mov eax, VERR_VMX_UNABLE_TO_START_VM
    jmp .vmstart64_end
ENDPROC MY_NAME(VMXR0StartVM64)
%endif ; RT_ARCH_AMD64


;/**
; * Prepares for and executes VMRUN (32-bit guests)
; *
; * @returns VBox status code
; * @param HCPhysVMCBHost Physical address of host VMCB
; * @param HCPhysVMCB Physical address of guest VMCB
; * @param pCtx Guest context
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun)
%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push rdx
    push rsi
    push rdi
 %else
    push r8
    push rdx
    push rcx
 %endif
    push 0
%endif
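    ; Note: on amd64 the register arguments are pushed last-to-first and the
    ; 'push 0' fills the slot a return address would occupy in a real cdecl
    ; frame, so after 'push xBP / mov xBP, xSP' the arguments sit at
    ; [xBP + xCB * 2] onwards in both the x86 and amd64 builds and the code
    ; below can address them identically.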
    push xBP
    mov xBP, xSP
    pushf

    ; Save all general purpose host registers.
    MYPUSHAD

    ; Save the Guest CPU context pointer.
    mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
    push xSI ; push for saving the state at the end

    ; Save host fs, gs, sysenter msr etc.
    mov xAX, [xBP + xCB * 2] ; pVMCBHostPhys (64-bit physical address; x86: take low dword only)
    push xAX ; save for the vmload after vmrun
    vmsave

    ; Set up eax for VMLOAD.
    mov xAX, [xBP + xCB * 2 + RTHCPHYS_CB] ; pVMCBPhys (64-bit physical address; take low dword only)

    ; Restore Guest's general purpose registers.
    ; eax is loaded from the VMCB by VMRUN.
    mov ebx, [xSI + CPUMCTX.ebx]
    mov ecx, [xSI + CPUMCTX.ecx]
    mov edx, [xSI + CPUMCTX.edx]
    mov edi, [xSI + CPUMCTX.edi]
    mov ebp, [xSI + CPUMCTX.ebp]
    mov esi, [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
    clgi
    sti

    ; Load guest fs, gs, sysenter msr etc.
    vmload
    ; Run the VM.
    vmrun

    ; eax is in the VMCB already; we can use it here.

    ; Save guest fs, gs, sysenter msr etc.
    vmsave

    ; Load host fs, gs, sysenter msr etc.
    pop xAX ; Pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop xAX ; pCtx

    mov [ss:xAX + CPUMCTX.ebx], ebx
    mov [ss:xAX + CPUMCTX.ecx], ecx
    mov [ss:xAX + CPUMCTX.edx], edx
    mov [ss:xAX + CPUMCTX.esi], esi
    mov [ss:xAX + CPUMCTX.edi], edi
    mov [ss:xAX + CPUMCTX.ebp], ebp

    ; Restore general purpose registers.
    MYPOPAD

    mov eax, VINF_SUCCESS

    popf
    pop xBP
%ifdef RT_ARCH_AMD64
    add xSP, 4*xCB
%endif
    ret
ENDPROC MY_NAME(SVMR0VMRun)

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMRUN (64-bit guests)
; *
; * @returns VBox status code
; * @param HCPhysVMCBHost Physical address of host VMCB
; * @param HCPhysVMCB Physical address of guest VMCB
; * @param pCtx Guest context
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun64)
    ; Fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push rdx
    push rsi
    push rdi
 %else
    push r8
    push rdx
    push rcx
 %endif
    push 0
    push rbp
    mov rbp, rsp
    pushf

    ; Manual save and restore:
    ; - General purpose registers except RIP, RSP, RAX
    ;
    ; Trashed:
    ; - CR2 (we don't care)
    ; - LDTR (reset to 0)
    ; - DRx (presumably not changed at all)
    ; - DR7 (reset to 0x400)
    ;

    ; Save all general purpose host registers.
    MYPUSHAD

    ; Save the Guest CPU context pointer.
    mov rsi, [rbp + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
    push rsi ; push for saving the state at the end

    ; Save host fs, gs, sysenter msr etc.
    mov rax, [rbp + xCB * 2] ; pVMCBHostPhys (64-bit physical address; x86: take low dword only)
    push rax ; Save for the vmload after vmrun
    vmsave

    ; Set up rax for VMLOAD.
    mov rax, [rbp + xCB * 2 + RTHCPHYS_CB] ; pVMCBPhys (64-bit physical address; take low dword only)

    ; Restore Guest's general purpose registers.
    ; rax is loaded from the VMCB by VMRUN.
    mov rbx, qword [xSI + CPUMCTX.ebx]
    mov rcx, qword [xSI + CPUMCTX.ecx]
    mov rdx, qword [xSI + CPUMCTX.edx]
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rbp, qword [xSI + CPUMCTX.ebp]
    mov r8, qword [xSI + CPUMCTX.r8]
    mov r9, qword [xSI + CPUMCTX.r9]
    mov r10, qword [xSI + CPUMCTX.r10]
    mov r11, qword [xSI + CPUMCTX.r11]
    mov r12, qword [xSI + CPUMCTX.r12]
    mov r13, qword [xSI + CPUMCTX.r13]
    mov r14, qword [xSI + CPUMCTX.r14]
    mov r15, qword [xSI + CPUMCTX.r15]
    mov rsi, qword [xSI + CPUMCTX.esi]
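    ; Note: rsi is loaded last on purpose: xSI still holds pCtx and is used as
    ; the base register for every load above, so overwriting it earlier would
    ; break the remaining accesses.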

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
    clgi
    sti

    ; Load guest fs, gs, sysenter msr etc.
    vmload
    ; Run the VM.
    vmrun

    ; rax is in the VMCB already; we can use it here.

    ; Save guest fs, gs, sysenter msr etc.
    vmsave

    ; Load host fs, gs, sysenter msr etc.
    pop rax ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop rax ; pCtx

    mov qword [rax + CPUMCTX.ebx], rbx
    mov qword [rax + CPUMCTX.ecx], rcx
    mov qword [rax + CPUMCTX.edx], rdx
    mov qword [rax + CPUMCTX.esi], rsi
    mov qword [rax + CPUMCTX.edi], rdi
    mov qword [rax + CPUMCTX.ebp], rbp
    mov qword [rax + CPUMCTX.r8], r8
    mov qword [rax + CPUMCTX.r9], r9
    mov qword [rax + CPUMCTX.r10], r10
    mov qword [rax + CPUMCTX.r11], r11
    mov qword [rax + CPUMCTX.r12], r12
    mov qword [rax + CPUMCTX.r13], r13
    mov qword [rax + CPUMCTX.r14], r14
    mov qword [rax + CPUMCTX.r15], r15

    ; Restore general purpose registers.
    MYPOPAD

    mov eax, VINF_SUCCESS

    popf
    pop rbp
    add rsp, 4 * xCB
    ret
ENDPROC MY_NAME(SVMR0VMRun64)
%endif ; RT_ARCH_AMD64