source: vbox/trunk/src/VBox/VMM/VMMR0/HMR0Mixed.mac @ 45892

Last change on this file since 45892 was 45875, checked in by vboxsync, 12 years ago

VMM/VMMR0: Distinguish better between invalid VMXON, VMCS pointers and VMCS pointers passed to VMLAUNCH/VMRESUME.

; $Id: HMR0Mixed.mac 45875 2013-05-02 12:52:33Z vboxsync $
;; @file
; HMR0Mixed.mac - Stuff that darwin needs to build two versions of.
;
; Included by HMR0A.asm with RT_ARCH_AMD64 defined or undefined.
;

;
; Copyright (C) 2006-2012 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;


;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (32-bit guest mode)
; *
; * @returns VBox status code
; * @param fResume    x86:[ebp+8],  msc:rcx, gcc:rdi    vmlaunch/vmresume
; * @param pCtx       x86:[ebp+c],  msc:rdx, gcc:rsi    Guest context
; * @param pCache     x86:[esp+10], msc:r8,  gcc:rdx    VMCS cache
; */
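; Rough C-side declaration, for reference only (an assumption; the exact IPRT/VMM
; types used by the real header may differ):
;   DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache);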
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM32)
    push xBP
    mov xBP, xSP

    pushf
    cli

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* First we have to save some final CPU context registers. */
    mov eax, VMX_VMCS_HOST_RIP
%ifdef RT_ARCH_AMD64
    lea r10, [.vmlaunch_done wrt rip]
    vmwrite rax, r10
%else
    mov ecx, .vmlaunch_done
    vmwrite eax, ecx
%endif
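    ; VMX_VMCS_HOST_RIP is the host-state RIP field of the current VMCS; the CPU loads
    ; it on every VM-exit, so control lands on .vmlaunch_done below once the guest exits.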
    ;/* Note: assumes success... */

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */

    ;/* Save the Guest CPU context pointer. */
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
    mov rbx, rdx        ; pCache
 %else
    mov rdi, rcx        ; fResume
    mov rsi, rdx        ; pCtx
    mov rbx, r8         ; pCache
 %endif
%else
    mov edi, [ebp + 8]  ; fResume
    mov esi, [ebp + 12] ; pCtx
    mov ebx, [ebp + 16] ; pCache
%endif

    ;/* Save segment registers */
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
    MYPUSHSEGS xAX, ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
    cmp ecx, 0
    je .no_cached_writes
    mov edx, ecx
    mov ecx, 0
    jmp .cached_write

ALIGN(16)
.cached_write:
    mov eax, [xBX + VMCSCACHE.Write.aField + xCX*4]
    vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX*8]
    inc xCX
    cmp xCX, xDX
    jl .cached_write

    mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

    ; Save the pCache pointer
    push xBX
%endif
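    ; The loop above flushes VMWRITEs queued in the VMCS cache: Write.aField holds the
    ; VMCS field encodings and Write.aFieldVal the matching values, so each iteration
    ; issues one VMWRITE into the currently loaded VMCS before the VM-entry below.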

    ; Save the pCtx pointer
    push xSI

    ; Save LDTR
    xor eax, eax
    sldt ax
    push xAX

    ; The TR limit is reset to 0x67; restore it manually
    str eax
    push xAX

    ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub xSP, xS*2
    sgdt [xSP]

    sub xSP, xS*2
    sidt [xSP]

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Restore DR6 - experiment, not safe!
    mov xBX, [xSI + CPUMCTX.dr6]
    mov dr6, xBX
%endif

    ; Restore CR2
    mov xBX, [xSI + CPUMCTX.cr2]
    mov xDX, cr2
    cmp xBX, xDX
    je .skipcr2write32
    mov cr2, xBX

.skipcr2write32:
    mov eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* Note: assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov eax, [xSI + CPUMCTX.eax]
    mov ebx, [xSI + CPUMCTX.ebx]
    mov ecx, [xSI + CPUMCTX.ecx]
    mov edx, [xSI + CPUMCTX.edx]
    mov ebp, [xSI + CPUMCTX.ebp]

    ; resume or start?
    cmp xDI, 0          ; fResume
    je .vmlauch_lauch

    ;/* Restore edi & esi. */
    mov edi, [xSI + CPUMCTX.edi]
    mov esi, [xSI + CPUMCTX.esi]

    vmresume
    jmp .vmlaunch_done;     ;/* here if vmresume detected a failure. */

.vmlauch_lauch:
    ;/* Restore edi & esi. */
    mov edi, [xSI + CPUMCTX.edi]
    mov esi, [xSI + CPUMCTX.esi]

    vmlaunch
    jmp .vmlaunch_done;     ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16) ;; @todo YASM BUG - this alignment is wrong on darwin, it's 1 byte off.
.vmlaunch_done:
    jc near .vmxstart_invalid_vmcs_ptr
    jz near .vmxstart_start_failed
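    ; VM-entry failure convention: CF=1 means VMfailInvalid (no current VMCS or an
    ; invalid VMCS pointer), ZF=1 means VMfailValid with the reason left in the
    ; VM-instruction error field of the VMCS; hence the two error branches above.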

    ; Restore base and limit of the IDTR & GDTR
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    push xDI
    mov xDI, [xSP + xS * 3]     ; pCtx (*3 to skip the saved LDTR + TR)

    mov [ss:xDI + CPUMCTX.eax], eax
    mov [ss:xDI + CPUMCTX.ebx], ebx
    mov [ss:xDI + CPUMCTX.ecx], ecx
    mov [ss:xDI + CPUMCTX.edx], edx
    mov [ss:xDI + CPUMCTX.esi], esi
    mov [ss:xDI + CPUMCTX.ebp], ebp
%ifndef VBOX_WITH_OLD_VTX_CODE
    mov xAX, cr2
    mov [ss:xDI + CPUMCTX.cr2], xAX
%endif

%ifdef RT_ARCH_AMD64
    pop xAX                     ; the guest edi we pushed above
    mov dword [ss:xDI + CPUMCTX.edi], eax
%else
    pop dword [ss:xDI + CPUMCTX.edi]    ; the guest edi we pushed above
%endif

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Save DR6 - experiment, not safe!
    mov xAX, dr6
    mov [ss:xDI + CPUMCTX.dr6], xAX
%endif

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
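    ; (LTR only accepts an available, i.e. non-busy, TSS descriptor and marks it busy
    ;  itself; the busy bit is bit 9 of the descriptor dword at offset 4, which is why
    ;  it is masked off in the GDT entry below before TR is reloaded.)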
    pop xBX             ; saved TR
    sub xSP, xS*2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h                        ; mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2]                  ; eax <- GDTR.address + descriptor offset.
    and dword [ss:xAX + 4], ~0200h      ; clear busy flag (2nd type2 bit)
    ltr bx
    add xSP, xS*2

    pop xAX             ; saved LDTR
    lldt ax

    add xSP, xS         ; pCtx

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop xDX             ; saved pCache

    mov ecx, [ss:xDX + VMCSCACHE.Read.cValidEntries]
    cmp ecx, 0          ; can't happen
    je .no_cached_reads
    jmp .cached_read

ALIGN(16)
.cached_read:
    dec xCX
    mov eax, [ss:xDX + VMCSCACHE.Read.aField + xCX*4]
    vmread [ss:xDX + VMCSCACHE.Read.aFieldVal + xCX*8], xAX
    cmp xCX, 0
    jnz .cached_read
.no_cached_reads:

%ifdef VBOX_WITH_OLD_VTX_CODE
    ; Save CR2 for EPT
    mov xAX, cr2
    mov [ss:xDX + VMCSCACHE.cr2], xAX
%endif
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov eax, VINF_SUCCESS

.vmstart_end:
    popf
    pop xBP
    ret


.vmxstart_invalid_vmcs_ptr:
    ; Restore base and limit of the IDTR & GDTR
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX             ; saved TR
    sub xSP, xS*2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h                        ; mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2]                  ; eax <- GDTR.address + descriptor offset.
    and dword [ss:xAX + 4], ~0200h      ; clear busy flag (2nd type2 bit)
    ltr bx
    add xSP, xS*2

    pop xAX             ; saved LDTR
    lldt ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add xSP, xS*2       ; pCtx + pCache
%else
    add xSP, xS         ; pCtx
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
    jmp .vmstart_end

.vmxstart_start_failed:
    ; Restore base and limit of the IDTR & GDTR
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX             ; saved TR
    sub xSP, xS*2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h                        ; mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2]                  ; eax <- GDTR.address + descriptor offset.
    and dword [ss:xAX + 4], ~0200h      ; clear busy flag (2nd type2 bit)
    ltr bx
    add xSP, xS*2

    pop xAX             ; saved LDTR
    lldt ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add xSP, xS*2       ; pCtx + pCache
%else
    add xSP, xS         ; pCtx
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_UNABLE_TO_START_VM
    jmp .vmstart_end

ENDPROC MY_NAME(VMXR0StartVM32)

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
; *
; * @returns VBox status code
; * @param fResume    msc:rcx, gcc:rdi    vmlaunch/vmresume
; * @param pCtx       msc:rdx, gcc:rsi    Guest context
; * @param pCache     msc:r8,  gcc:rdx    VMCS cache
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM64)
    push xBP
    mov xBP, xSP

    pushf
    cli

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* First we have to save some final CPU context registers. */
    lea r10, [.vmlaunch64_done wrt rip]
    mov rax, VMX_VMCS_HOST_RIP  ;/* return address (too difficult to continue after VMLAUNCH?) */
    vmwrite rax, r10
    ;/* Note: assumes success... */

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */

    ;/* Save the Guest CPU context pointer. */
%ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
    mov rbx, rdx        ; pCache
%else
    mov rdi, rcx        ; fResume
    mov rsi, rdx        ; pCtx
    mov rbx, r8         ; pCache
%endif

    ;/* Save segment registers */
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
    MYPUSHSEGS xAX, ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
    cmp ecx, 0
    je .no_cached_writes
    mov edx, ecx
    mov ecx, 0
    jmp .cached_write

ALIGN(16)
.cached_write:
    mov eax, [xBX + VMCSCACHE.Write.aField + xCX*4]
    vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX*8]
    inc xCX
    cmp xCX, xDX
    jl .cached_write

    mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

    ; Save the pCache pointer
    push xBX
%endif

%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    ; Save the host MSRs and load the guest MSRs
    LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
    LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
    LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
%else
%ifdef VBOX_WITH_OLD_VTX_CODE
    ; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
%endif
%endif
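    ; When the VMX auto MSR load/store area is not used, LOADGUESTMSR saves the host
    ; value of each syscall-related MSR (LSTAR, STAR, SF_MASK, KERNEL_GS_BASE) and
    ; loads the guest value from the given CPUMCTX field; the LOADHOSTMSREX
    ; invocations after the VM-exit below undo this.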

    ; Save the pCtx pointer
    push xSI

    ; Save LDTR
    xor eax, eax
    sldt ax
    push xAX

    ; The TR limit is reset to 0x67; restore it manually
    str eax
    push xAX

    ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub xSP, xS*2
    sgdt [xSP]

    sub xSP, xS*2
    sidt [xSP]

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Restore DR6 - experiment, not safe!
    mov xBX, [xSI + CPUMCTX.dr6]
    mov dr6, xBX
%endif

    ; Restore CR2
    mov rbx, qword [xSI + CPUMCTX.cr2]
    mov rdx, cr2
    cmp rbx, rdx
    je .skipcr2write
    mov cr2, rbx

.skipcr2write:
    mov eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* Note: assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov rax, qword [xSI + CPUMCTX.eax]
    mov rbx, qword [xSI + CPUMCTX.ebx]
    mov rcx, qword [xSI + CPUMCTX.ecx]
    mov rdx, qword [xSI + CPUMCTX.edx]
    mov rbp, qword [xSI + CPUMCTX.ebp]
    mov r8, qword [xSI + CPUMCTX.r8]
    mov r9, qword [xSI + CPUMCTX.r9]
    mov r10, qword [xSI + CPUMCTX.r10]
    mov r11, qword [xSI + CPUMCTX.r11]
    mov r12, qword [xSI + CPUMCTX.r12]
    mov r13, qword [xSI + CPUMCTX.r13]
    mov r14, qword [xSI + CPUMCTX.r14]
    mov r15, qword [xSI + CPUMCTX.r15]

    ; resume or start?
    cmp xDI, 0          ; fResume
    je .vmlauch64_lauch

    ;/* Restore edi & esi. */
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rsi, qword [xSI + CPUMCTX.esi]

    vmresume
    jmp .vmlaunch64_done;   ;/* here if vmresume detected a failure. */

.vmlauch64_lauch:
    ;/* Restore rdi & rsi. */
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rsi, qword [xSI + CPUMCTX.esi]

    vmlaunch
    jmp .vmlaunch64_done;   ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16)
.vmlaunch64_done:
    jc near .vmxstart64_invalid_vmcs_ptr
    jz near .vmxstart64_start_failed

    ; Restore base and limit of the IDTR & GDTR
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    push xDI
    mov xDI, [xSP + xS * 3]     ; pCtx (*3 to skip the saved LDTR + TR)

    mov qword [xDI + CPUMCTX.eax], rax
    mov qword [xDI + CPUMCTX.ebx], rbx
    mov qword [xDI + CPUMCTX.ecx], rcx
    mov qword [xDI + CPUMCTX.edx], rdx
    mov qword [xDI + CPUMCTX.esi], rsi
    mov qword [xDI + CPUMCTX.ebp], rbp
    mov qword [xDI + CPUMCTX.r8], r8
    mov qword [xDI + CPUMCTX.r9], r9
    mov qword [xDI + CPUMCTX.r10], r10
    mov qword [xDI + CPUMCTX.r11], r11
    mov qword [xDI + CPUMCTX.r12], r12
    mov qword [xDI + CPUMCTX.r13], r13
    mov qword [xDI + CPUMCTX.r14], r14
    mov qword [xDI + CPUMCTX.r15], r15
%ifndef VBOX_WITH_OLD_VTX_CODE
    mov rax, cr2
    mov qword [xDI + CPUMCTX.cr2], rax
%endif

    pop xAX             ; the guest edi we pushed above
    mov qword [xDI + CPUMCTX.edi], rax

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Save DR6 - experiment, not safe!
    mov xAX, dr6
    mov [xDI + CPUMCTX.dr6], xAX
%endif

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX             ; saved TR
    sub xSP, xS*2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h                        ; mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2]                  ; eax <- GDTR.address + descriptor offset.
    and dword [xAX + 4], ~0200h         ; clear busy flag (2nd type2 bit)
    ltr bx
    add xSP, xS*2

    pop xAX             ; saved LDTR
    lldt ax

    pop xSI             ; pCtx (needed in rsi by the macros below)

%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    ; Save the guest MSRs and load the host MSRs
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    LOADHOSTMSREX MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    LOADHOSTMSREX MSR_K6_STAR, CPUMCTX.msrSTAR
    LOADHOSTMSREX MSR_K8_LSTAR, CPUMCTX.msrLSTAR
%else
%ifdef VBOX_WITH_OLD_VTX_CODE
    ; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
%endif
%endif

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop xDX             ; saved pCache

    mov ecx, [xDX + VMCSCACHE.Read.cValidEntries]
    cmp ecx, 0          ; can't happen
    je .no_cached_reads
    jmp .cached_read

ALIGN(16)
.cached_read:
    dec xCX
    mov eax, [xDX + VMCSCACHE.Read.aField + xCX*4]
    vmread [xDX + VMCSCACHE.Read.aFieldVal + xCX*8], xAX
    cmp xCX, 0
    jnz .cached_read
.no_cached_reads:

%ifdef VBOX_WITH_OLD_VTX_CODE
    ; Save CR2 for EPT
    mov xAX, cr2
    mov [xDX + VMCSCACHE.cr2], xAX
%endif
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov eax, VINF_SUCCESS

.vmstart64_end:
    popf
    pop xBP
    ret


.vmxstart64_invalid_vmcs_ptr:
    ; Restore base and limit of the IDTR & GDTR
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX             ; saved TR
    sub xSP, xS*2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h                        ; mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2]                  ; eax <- GDTR.address + descriptor offset.
    and dword [xAX + 4], ~0200h         ; clear busy flag (2nd type2 bit)
    ltr bx
    add xSP, xS*2

    pop xAX             ; saved LDTR
    lldt ax

    pop xSI             ; pCtx (needed in rsi by the macros below)

%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    ; Load the host MSRs
    LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR
    LOADHOSTMSR MSR_K8_LSTAR
%else
%ifdef VBOX_WITH_OLD_VTX_CODE
    ; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
    LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
%endif
%endif

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add xSP, xS         ; pCache
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
    jmp .vmstart64_end

.vmxstart64_start_failed:
    ; Restore base and limit of the IDTR & GDTR
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX             ; saved TR
    sub xSP, xS*2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h                        ; mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2]                  ; eax <- GDTR.address + descriptor offset.
    and dword [xAX + 4], ~0200h         ; clear busy flag (2nd type2 bit)
    ltr bx
    add xSP, xS*2

    pop xAX             ; saved LDTR
    lldt ax

    pop xSI             ; pCtx (needed in rsi by the macros below)

%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    ; Load the host MSRs
    LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR
    LOADHOSTMSR MSR_K8_LSTAR
%else
%ifdef VBOX_WITH_OLD_VTX_CODE
    ; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
    LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
%endif
%endif

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add xSP, xS         ; pCache
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_UNABLE_TO_START_VM
    jmp .vmstart64_end
ENDPROC MY_NAME(VMXR0StartVM64)
%endif ; RT_ARCH_AMD64


;/**
; * Prepares for and executes VMRUN (32-bit guests)
; *
; * @returns VBox status code
; * @param HCPhysVMCBHost    Physical address of host VMCB
; * @param HCPhysVMCB        Physical address of guest VMCB
; * @param pCtx              Guest context
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun)
%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push rdx
    push rsi
    push rdi
 %else
    push r8
    push rdx
    push rcx
 %endif
    push 0
%endif
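    ; With the frame faked above, [xBP + xS*2] addresses the first argument in both the
    ; 32-bit and 64-bit builds: the register arguments were pushed in reverse order and
    ; the 'push 0' stands in for the return-address slot of a cdecl call.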
    push xBP
    mov xBP, xSP
    pushf

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
    mov xSI, [xBP + xS*2 + RTHCPHYS_CB*2]   ; pCtx
    push xSI            ; push for saving the state at the end

    ; save host fs, gs, sysenter msr etc
    mov xAX, [xBP + xS*2]       ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
    push xAX            ; save for the vmload after vmrun
    vmsave
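    ; VMSAVE with rAX = host VMCB physical address stores the host FS/GS/TR/LDTR state
    ; and the SYSENTER/syscall MSRs (STAR, LSTAR, SFMASK, KernelGSBase) into that VMCB,
    ; so the matching VMLOAD after VMRUN below can restore them.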

    ; setup eax for VMLOAD
    mov xAX, [xBP + xS*2 + RTHCPHYS_CB]     ; pVMCBPhys (64 bits physical address; take low dword only)

    ;/* Restore Guest's general purpose registers. */
    ;/* EAX is loaded from the VMCB by VMRUN */
    mov ebx, [xSI + CPUMCTX.ebx]
    mov ecx, [xSI + CPUMCTX.ecx]
    mov edx, [xSI + CPUMCTX.edx]
    mov edi, [xSI + CPUMCTX.edi]
    mov ebp, [xSI + CPUMCTX.ebp]
    mov esi, [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    clgi
    sti

    ; load guest fs, gs, sysenter msr etc
    vmload
    ; run the VM
    vmrun

    ;/* EAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    vmsave

    ; load host fs, gs, sysenter msr etc
    pop xAX             ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop xAX             ; pCtx

    mov [ss:xAX + CPUMCTX.ebx], ebx
    mov [ss:xAX + CPUMCTX.ecx], ecx
    mov [ss:xAX + CPUMCTX.edx], edx
    mov [ss:xAX + CPUMCTX.esi], esi
    mov [ss:xAX + CPUMCTX.edi], edi
    mov [ss:xAX + CPUMCTX.ebp], ebp

    ; Restore general purpose registers
    MYPOPAD

    mov eax, VINF_SUCCESS

    popf
    pop xBP
%ifdef RT_ARCH_AMD64
    add xSP, 4*xS
%endif
    ret
ENDPROC MY_NAME(SVMR0VMRun)

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMRUN (64-bit guests)
; *
; * @returns VBox status code
; * @param HCPhysVMCBHost    Physical address of host VMCB
; * @param HCPhysVMCB        Physical address of guest VMCB
; * @param pCtx              Guest context
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun64)
    ; fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push rdx
    push rsi
    push rdi
 %else
    push r8
    push rdx
    push rcx
 %endif
    push 0
    push rbp
    mov rbp, rsp
    pushf

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
    mov rsi, [rbp + xS*2 + RTHCPHYS_CB*2]   ; pCtx
    push rsi            ; push for saving the state at the end

    ; save host fs, gs, sysenter msr etc
    mov rax, [rbp + xS*2]       ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
    push rax            ; save for the vmload after vmrun
    vmsave

    ; setup eax for VMLOAD
    mov rax, [rbp + xS*2 + RTHCPHYS_CB]     ; pVMCBPhys (64 bits physical address; take low dword only)

    ;/* Restore Guest's general purpose registers. */
    ;/* RAX is loaded from the VMCB by VMRUN */
    mov rbx, qword [xSI + CPUMCTX.ebx]
    mov rcx, qword [xSI + CPUMCTX.ecx]
    mov rdx, qword [xSI + CPUMCTX.edx]
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rbp, qword [xSI + CPUMCTX.ebp]
    mov r8, qword [xSI + CPUMCTX.r8]
    mov r9, qword [xSI + CPUMCTX.r9]
    mov r10, qword [xSI + CPUMCTX.r10]
    mov r11, qword [xSI + CPUMCTX.r11]
    mov r12, qword [xSI + CPUMCTX.r12]
    mov r13, qword [xSI + CPUMCTX.r13]
    mov r14, qword [xSI + CPUMCTX.r14]
    mov r15, qword [xSI + CPUMCTX.r15]
    mov rsi, qword [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    clgi
    sti

    ; load guest fs, gs, sysenter msr etc
    vmload
    ; run the VM
    vmrun

    ;/* RAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    vmsave

    ; load host fs, gs, sysenter msr etc
    pop rax             ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop rax             ; pCtx

    mov qword [rax + CPUMCTX.ebx], rbx
    mov qword [rax + CPUMCTX.ecx], rcx
    mov qword [rax + CPUMCTX.edx], rdx
    mov qword [rax + CPUMCTX.esi], rsi
    mov qword [rax + CPUMCTX.edi], rdi
    mov qword [rax + CPUMCTX.ebp], rbp
    mov qword [rax + CPUMCTX.r8], r8
    mov qword [rax + CPUMCTX.r9], r9
    mov qword [rax + CPUMCTX.r10], r10
    mov qword [rax + CPUMCTX.r11], r11
    mov qword [rax + CPUMCTX.r12], r12
    mov qword [rax + CPUMCTX.r13], r13
    mov qword [rax + CPUMCTX.r14], r14
    mov qword [rax + CPUMCTX.r15], r15

    ; Restore general purpose registers
    MYPOPAD

    mov eax, VINF_SUCCESS

    popf
    pop rbp
    add rsp, 4*xS
    ret
ENDPROC MY_NAME(SVMR0VMRun64)
%endif ; RT_ARCH_AMD64
