source: vbox/trunk/src/VBox/VMM/VMMR0/HMR0Mixed.mac@ 46336

Last change on this file since 46336 was 46267, checked in by vboxsync, 12 years ago

VMM: Optimized 64-bit host VT-x world-switch.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 26.4 KB
1; $Id: HMR0Mixed.mac 46267 2013-05-26 11:29:24Z vboxsync $
2;; @file
3; HMR0Mixed.mac - Stuff that Darwin needs to build two versions of.
4;
5; Included by HMR0A.asm with RT_ARCH_AMD64 defined or undefined.
6;
7
8;
9; Copyright (C) 2006-2012 Oracle Corporation
10;
11; This file is part of VirtualBox Open Source Edition (OSE), as
12; available from http://www.virtualbox.org. This file is free software;
13; you can redistribute it and/or modify it under the terms of the GNU
14; General Public License (GPL) as published by the Free Software
15; Foundation, in version 2 as it comes in the "COPYING" file of the
16; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18;
19
20%ifndef VBOX_WITH_OLD_VTX_CODE
21 %ifdef RT_ARCH_AMD64
22 %define VMX_SKIP_GDTR_IDTR
23 %endif
24%endif
25
26;/**
27; * Prepares for and executes VMLAUNCH/VMRESUME (32-bit guest mode).
28; *
29; * @returns VBox status code
30; * @param fResume x86:[ebp+08h], msc:rcx, gcc:rdi Whether to use vmlaunch (0) or vmresume (non-zero).
31; * @param pCtx x86:[ebp+0Ch], msc:rdx, gcc:rsi Pointer to the guest context (CPUMCTX).
32; * @param pCache x86:[ebp+10h], msc:r8, gcc:rdx Pointer to the VMCS cache.
33; */
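
For the 32-bit (cdecl) case, the [ebp+...] offsets above assume the standard frame the routine sets up on entry. A minimal sketch of that argument pickup, for orientation only; it mirrors what the function body below does:

    ; After 'push ebp / mov ebp, esp' in a cdecl function:
    ;   [ebp + 04h]   return address
    ;   [ebp + 08h]   fResume
    ;   [ebp + 0Ch]   pCtx
    ;   [ebp + 10h]   pCache
    mov     edi, [ebp + 08h]            ; fResume
    mov     esi, [ebp + 0Ch]            ; pCtx
    mov     ebx, [ebp + 10h]            ; pCache
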
34ALIGNCODE(16)
35BEGINPROC MY_NAME(VMXR0StartVM32)
36 push xBP
37 mov xBP, xSP
38
39 pushf
40 cli
41
42 ; Save all general purpose host registers.
43 MYPUSHAD
44
45 ; First we have to save some final CPU context registers.
46 mov eax, VMX_VMCS_HOST_RIP
47%ifdef RT_ARCH_AMD64
48 lea r10, [.vmlaunch_done wrt rip]
49 vmwrite rax, r10
50%else
51 mov ecx, .vmlaunch_done
52 vmwrite eax, ecx
53%endif
54 ; Note: assumes success!
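
The "assumes success" note means the VMWRITE status flags are deliberately left unchecked on this hot path. For reference, a minimal sketch (64-bit flavour) of the check a non-optimized path could perform; the .vmwrite_failed label is illustrative and not part of this file:

    vmwrite rax, r10                    ; Write VMX_VMCS_HOST_RIP.
    jc      .vmwrite_failed             ; CF=1: VMfailInvalid (no current VMCS pointer).
    jz      .vmwrite_failed             ; ZF=1: VMfailValid (see the VM-instruction error field).
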
55
56 ; Manual save and restore:
57 ; - General purpose registers except RIP, RSP.
58 ;
59 ; Trashed:
60 ; - CR2 (we don't care).
61 ; - LDTR (reset to 0).
62 ; - DRx (presumably not changed at all).
63 ; - DR7 (reset to 0x400).
64 ; - EFLAGS (reset to RT_BIT(1); not relevant).
65
66 ; Save the Guest CPU context pointer.
67%ifdef RT_ARCH_AMD64
68 %ifdef ASM_CALL64_GCC
69 ; fResume already in rdi
70 ; pCtx already in rsi
71 mov rbx, rdx ; pCache
72 %else
73 mov rdi, rcx ; fResume
74 mov rsi, rdx ; pCtx
75 mov rbx, r8 ; pCache
76 %endif
77%else
78 mov edi, [ebp + 8] ; fResume
79 mov esi, [ebp + 12] ; pCtx
80 mov ebx, [ebp + 16] ; pCache
81%endif
82
83 ; Save segment registers.
84 ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case).
85 MYPUSHSEGS xAX, ax
86
87%ifdef VMX_USE_CACHED_VMCS_ACCESSES
88 mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
89 cmp ecx, 0
90 je .no_cached_writes
91 mov edx, ecx
92 mov ecx, 0
93 jmp .cached_write
94
95ALIGN(16)
96.cached_write:
97 mov eax, [xBX + VMCSCACHE.Write.aField + xCX*4]
98 vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX*8]
99 inc xCX
100 cmp xCX, xDX
101 jl .cached_write
102
103 mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
104.no_cached_writes:
105
106 ; Save the pCache pointer.
107 push xBX
108%endif
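
The flush loop above walks two parallel arrays in the write cache: 32-bit VMCS field encodings in Write.aField (hence the *4 scaling) and 64-bit values in Write.aFieldVal (hence the *8 scaling), with Write.cValidEntries giving the count. The real VMCSCACHE structure is defined elsewhere in the VMM headers; the following is only a rough sketch of the assumed shape, with placeholder array sizes:

    ; Assumed shape only; the real VMCSCACHE layout lives in the VMM headers.
    struc VMCSCACHEWRITE_SKETCH
        .cValidEntries  resd 1          ; Number of valid entries in the arrays below.
        .uPadding       resd 1
        .aField         resd 64         ; VMCS field encodings, 32 bits each.
        .aFieldVal      resq 64         ; Values to write, 64 bits each.
    endstruc
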
109
110 ; Save the pCtx pointer.
111 push xSI
112
113 ; Save LDTR.
114 xor eax, eax
115 sldt ax
116 push xAX
117
118 ; The TR limit is reset to 0x67; restore it manually.
119 str eax
120 push xAX
121
122 ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
123%ifdef VMX_SKIP_GDTR_IDTR
124 sub xSP, xS*2
125 sgdt [xSP]
126
127 sub xSP, xS*2
128 sidt [xSP]
129%endif
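
SGDT/SIDT store a small memory image (6 bytes on a 32-bit host, 10 bytes on a 64-bit one): a 16-bit limit followed by the linear base. That is why xS*2 bytes are reserved on the stack here and why later code reads the base at offset 2. A standalone sketch of that image:

    ; Memory image written by SGDT/SIDT and consumed by LGDT/LIDT:
    struc XDTR_SKETCH
        .cbLimit    resw 1              ; 16-bit table limit.
        .uBase      resq 1              ; Linear base address (4 bytes on x86, 8 on AMD64).
    endstruc
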
130
131%ifdef VBOX_WITH_DR6_EXPERIMENT
132 ; Load DR6 - experiment, not safe!
133 mov xBX, [xSI + CPUMCTX.dr6]
134 mov dr6, xBX
135%endif
136
137 ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
138 mov xBX, [xSI + CPUMCTX.cr2]
139 mov xDX, cr2
140 cmp xBX, xDX
141 je .skipcr2write32
142 mov cr2, xBX
143
144.skipcr2write32:
145 mov eax, VMX_VMCS_HOST_RSP
146 vmwrite xAX, xSP
147 ; Note: assumes success!
148 ; Don't mess with ESP anymore!!!
149
150 ; Load Guest's general purpose registers.
151 mov eax, [xSI + CPUMCTX.eax]
152 mov ebx, [xSI + CPUMCTX.ebx]
153 mov ecx, [xSI + CPUMCTX.ecx]
154 mov edx, [xSI + CPUMCTX.edx]
155 mov ebp, [xSI + CPUMCTX.ebp]
156
157 ; Resume or start?
158 cmp xDI, 0 ; fResume
159 je .vmlaunch_launch
160
161 ; Restore edi & esi.
162 mov edi, [xSI + CPUMCTX.edi]
163 mov esi, [xSI + CPUMCTX.esi]
164
165 vmresume
166 jmp .vmlaunch_done ; Here if vmresume detected a failure.
167
168.vmlaunch_launch:
169 ; Restore edi & esi.
170 mov edi, [xSI + CPUMCTX.edi]
171 mov esi, [xSI + CPUMCTX.esi]
172
173 vmlaunch
174 jmp .vmlaunch_done ; Here if vmlaunch detected a failure.
175
176ALIGNCODE(16) ;; @todo YASM BUG - this alignment is wrong on darwin, it's 1 byte off.
177.vmlaunch_done:
178 jc near .vmxstart_invalid_vmcs_ptr
179 jz near .vmxstart_start_failed
180
181 ; Restore base and limit of the IDTR & GDTR.
182%ifdef VMX_SKIP_GDTR_IDTR
183 lidt [xSP]
184 add xSP, xS*2
185 lgdt [xSP]
186 add xSP, xS*2
187%endif
188
189 push xDI
190 mov xDI, [xSP + xS * 3] ; pCtx (*3 to skip the saved LDTR + TR).
191
192 mov [ss:xDI + CPUMCTX.eax], eax
193 mov [ss:xDI + CPUMCTX.ebx], ebx
194 mov [ss:xDI + CPUMCTX.ecx], ecx
195 mov [ss:xDI + CPUMCTX.edx], edx
196 mov [ss:xDI + CPUMCTX.esi], esi
197 mov [ss:xDI + CPUMCTX.ebp], ebp
198%ifndef VBOX_WITH_OLD_VTX_CODE
199 mov xAX, cr2
200 mov [ss:xDI + CPUMCTX.cr2], xAX
201%endif
202
203%ifdef RT_ARCH_AMD64
204 pop xAX ; The guest edi we pushed above.
205 mov dword [ss:xDI + CPUMCTX.edi], eax
206%else
207 pop dword [ss:xDI + CPUMCTX.edi] ; The guest edi we pushed above.
208%endif
209
210%ifdef VBOX_WITH_DR6_EXPERIMENT
211 ; Restore DR6 - experiment, not safe!
212 mov xAX, dr6
213 mov [ss:xDI + CPUMCTX.dr6], xAX
214%endif
215
216 ; Restore TSS selector; must mark it as not busy before using ltr (!)
217 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
218 ; @todo get rid of sgdt
219 pop xBX ; Saved TR
220 sub xSP, xS*2
221 sgdt [xSP]
222 mov xAX, xBX
223 and al, 0F8h ; Mask away TI and RPL bits, get descriptor offset.
224 add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
225 and dword [ss:xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
226 ltr bx
227 add xSP, xS*2
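
Some background on the busy-bit dance above: TR indexes a TSS descriptor in the GDT, and LTR faults if that descriptor is already marked busy, which it is after the world switch. The type field sits in bits 8-11 of the descriptor's second dword (offset 4); for a TSS, 1001b means available and 1011b means busy, so clearing bit 9 (mask 0200h) makes the descriptor available again before LTR re-marks it busy. A condensed, 64-bit flavoured sketch of the same steps, with illustrative register use:

    xor     eax, eax
    str     ax                          ; ax = current TR selector.
    mov     ebx, eax
    and     bl, 0F8h                    ; Strip TI/RPL bits -> descriptor offset into the GDT.
    sub     rsp, 16
    sgdt    [rsp]                       ; [rsp] = 16-bit limit, [rsp + 2] = GDT base.
    add     rbx, [rsp + 2]              ; rbx = address of the TSS descriptor.
    add     rsp, 16
    and     dword [rbx + 4], ~0200h     ; Type lives in bits 8..11; bit 9 (0200h) is the busy bit.
    ltr     ax                          ; Reload TR; the CPU marks the descriptor busy again.
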
228
229 pop xAX ; Saved LDTR
230 lldt ax
231
232 add xSP, xS ; pCtx
233
234%ifdef VMX_USE_CACHED_VMCS_ACCESSES
235 pop xDX ; Saved pCache
236
237 mov ecx, [ss:xDX + VMCSCACHE.Read.cValidEntries]
238 cmp ecx, 0 ; Zero shouldn't happen in practice, but handle it anyway.
239 je .no_cached_reads
240 jmp .cached_read
241
242ALIGN(16)
243.cached_read:
244 dec xCX
245 mov eax, [ss:xDX + VMCSCACHE.Read.aField + xCX*4]
246 vmread [ss:xDX + VMCSCACHE.Read.aFieldVal + xCX*8], xAX
247 cmp xCX, 0
248 jnz .cached_read
249.no_cached_reads:
250
251%ifdef VBOX_WITH_OLD_VTX_CODE
252 ; Restore CR2 into VMCS-cache field (for EPT).
253 mov xAX, cr2
254 mov [ss:xDX + VMCSCACHE.cr2], xAX
255%endif
256%endif
257
258 ; Restore segment registers.
259 MYPOPSEGS xAX, ax
260
261 ; Restore general purpose registers.
262 MYPOPAD
263
264 mov eax, VINF_SUCCESS
265
266.vmstart_end:
267 popf
268 pop xBP
269 ret
270
271
272.vmxstart_invalid_vmcs_ptr:
273 ; Restore base and limit of the IDTR & GDTR
274%ifdef VMX_SKIP_GDTR_IDTR
275 lidt [xSP]
276 add xSP, xS*2
277 lgdt [xSP]
278 add xSP, xS*2
279%endif
280
281 ; Restore TSS selector; must mark it as not busy before using ltr (!)
282 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
283 ; @todo get rid of sgdt
284 pop xBX ; Saved TR
285 sub xSP, xS*2
286 sgdt [xSP]
287 mov xAX, xBX
288 and al, 0F8h ; Mask away TI and RPL bits, get descriptor offset.
289 add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
290 and dword [ss:xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
291 ltr bx
292 add xSP, xS*2
293
294 pop xAX ; Saved LDTR
295 lldt ax
296
297%ifdef VMX_USE_CACHED_VMCS_ACCESSES
298 add xSP, xS*2 ; pCtx + pCache
299%else
300 add xSP, xS ; pCtx
301%endif
302
303 ; Restore segment registers.
304 MYPOPSEGS xAX, ax
305
306 ; Restore all general purpose host registers.
307 MYPOPAD
308 mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
309 jmp .vmstart_end
310
311.vmxstart_start_failed:
312 ; Restore base and limit of the IDTR & GDTR.
313%ifdef VMX_SKIP_GDTR_IDTR
314 lidt [xSP]
315 add xSP, xS*2
316 lgdt [xSP]
317 add xSP, xS*2
318%endif
319
320 ; Restore TSS selector; must mark it as not busy before using ltr (!)
321 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
322 ; @todo get rid of sgdt
323 pop xBX ; Saved TR
324 sub xSP, xS*2
325 sgdt [xSP]
326 mov xAX, xBX
327 and al, 0F8h ; Mask away TI and RPL bits, get descriptor offset.
328 add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
329 and dword [ss:xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
330 ltr bx
331 add xSP, xS*2
332
333 pop xAX ; Saved LDTR
334 lldt ax
335
336%ifdef VMX_USE_CACHED_VMCS_ACCESSES
337 add xSP, xS*2 ; pCtx + pCache
338%else
339 add xSP, xS ; pCtx
340%endif
341
342 ; Restore segment registers.
343 MYPOPSEGS xAX, ax
344
345 ; Restore all general purpose host registers.
346 MYPOPAD
347 mov eax, VERR_VMX_UNABLE_TO_START_VM
348 jmp .vmstart_end
349
350ENDPROC MY_NAME(VMXR0StartVM32)
351
352%ifdef RT_ARCH_AMD64
353;/**
354; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode).
355; *
356; * @returns VBox status code
357; * @param fResume msc:rcx, gcc:rdi Whether to use vmlaunch (0) or vmresume (non-zero).
358; * @param pCtx msc:rdx, gcc:rsi Pointer to the guest context (CPUMCTX).
359; * @param pCache msc:r8, gcc:rdx Pointer to the VMCS cache.
360; */
361ALIGNCODE(16)
362BEGINPROC MY_NAME(VMXR0StartVM64)
363 push xBP
364 mov xBP, xSP
365
366 pushf
367 cli
368
369 ; Save all general purpose host registers.
370 MYPUSHAD
371
372 ; First we have to save some final CPU context registers.
373 lea r10, [.vmlaunch64_done wrt rip]
374 mov rax, VMX_VMCS_HOST_RIP ; Return address (too difficult to continue after VMLAUNCH?).
375 vmwrite rax, r10
376 ; Note: assumes success!
377
378 ; Manual save and restore:
379 ; - General purpose registers except RIP, RSP.
380 ;
381 ; Trashed:
382 ; - CR2 (we don't care).
383 ; - LDTR (reset to 0).
384 ; - DRx (presumably not changed at all).
385 ; - DR7 (reset to 0x400).
386 ; - EFLAGS (reset to RT_BIT(1); not relevant).
387
388 ; Save the Guest CPU context pointer.
389%ifdef ASM_CALL64_GCC
390 ; fResume already in rdi
391 ; pCtx already in rsi
392 mov rbx, rdx ; pCache
393%else
394 mov rdi, rcx ; fResume
395 mov rsi, rdx ; pCtx
396 mov rbx, r8 ; pCache
397%endif
398
399 ; Save segment registers.
400 ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case).
401 MYPUSHSEGS xAX, ax
402
403%ifdef VMX_USE_CACHED_VMCS_ACCESSES
404 mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
405 cmp ecx, 0
406 je .no_cached_writes
407 mov edx, ecx
408 mov ecx, 0
409 jmp .cached_write
410
411ALIGN(16)
412.cached_write:
413 mov eax, [xBX + VMCSCACHE.Write.aField + xCX*4]
414 vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX*8]
415 inc xCX
416 cmp xCX, xDX
417 jl .cached_write
418
419 mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
420.no_cached_writes:
421
422 ; Save the pCache pointer.
423 push xBX
424%endif
425
426%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
427 ; Save the host MSRs and load the guest MSRs.
428 LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
429 LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
430 LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
431 LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
432%else
433%ifdef VBOX_WITH_OLD_VTX_CODE
434 ; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
435 LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
436%endif
437%endif
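
LOADGUESTMSR and the matching LOADHOSTMSR(EX) macros are defined in HMR0A.asm, not here. As used above (with xSI still holding pCtx), the idea is to stash the host value of each MSR on the stack and then install the guest value from the corresponding CPUMCTX field. A hedged sketch of such a macro; the name and details are assumed rather than taken from HMR0A.asm:

    %macro LOADGUESTMSR_SKETCH 2        ; %1 = MSR index, %2 = CPUMCTX field offset.
        mov     rcx, %1
        rdmsr                           ; Host value -> edx:eax.
        push    rdx                     ; Park it on the stack for the later restore.
        push    rax
        mov     edx, dword [xSI + %2 + 4]
        mov     eax, dword [xSI + %2]   ; Guest value from the CPUMCTX.
        wrmsr                           ; Install the guest value.
    %endmacro
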
438
439 ; Save the pCtx pointer.
440 push xSI
441
442 ; Save LDTR.
443 xor eax, eax
444 sldt ax
445 push xAX
446
447 ; The TR limit is reset to 0x67; restore it manually.
448 str eax
449 push xAX
450
451 ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
452%ifdef VMX_SKIP_GDTR_IDTR
453 sub xSP, xS*2
454 sgdt [xSP]
455
456 sub xSP, xS*2
457 sidt [xSP]
458%endif
459
460%ifdef VBOX_WITH_DR6_EXPERIMENT
461 ; Load DR6 - experiment, not safe!
462 mov xBX, [xSI + CPUMCTX.dr6]
463 mov dr6, xBX
464%endif
465
466 ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
467 mov rbx, qword [xSI + CPUMCTX.cr2]
468 mov rdx, cr2
469 cmp rbx, rdx
470 je .skipcr2write
471 mov cr2, rbx
472
473.skipcr2write:
474 mov eax, VMX_VMCS_HOST_RSP
475 vmwrite xAX, xSP
476 ; Note: assumes success!
477 ; Don't mess with RSP anymore!!!
478
479 ; Restore Guest's general purpose registers.
480 mov rax, qword [xSI + CPUMCTX.eax]
481 mov rbx, qword [xSI + CPUMCTX.ebx]
482 mov rcx, qword [xSI + CPUMCTX.ecx]
483 mov rdx, qword [xSI + CPUMCTX.edx]
484 mov rbp, qword [xSI + CPUMCTX.ebp]
485 mov r8, qword [xSI + CPUMCTX.r8]
486 mov r9, qword [xSI + CPUMCTX.r9]
487 mov r10, qword [xSI + CPUMCTX.r10]
488 mov r11, qword [xSI + CPUMCTX.r11]
489 mov r12, qword [xSI + CPUMCTX.r12]
490 mov r13, qword [xSI + CPUMCTX.r13]
491 mov r14, qword [xSI + CPUMCTX.r14]
492 mov r15, qword [xSI + CPUMCTX.r15]
493
494 ; Resume or start?
495 cmp xDI, 0 ; fResume
496 je .vmlaunch64_launch
497
498 ; Restore rdi & rsi.
499 mov rdi, qword [xSI + CPUMCTX.edi]
500 mov rsi, qword [xSI + CPUMCTX.esi]
501
502 vmresume
503 jmp .vmlaunch64_done ; Here if vmresume detected a failure.
504
505.vmlaunch64_launch:
506 ; Restore rdi & rsi.
507 mov rdi, qword [xSI + CPUMCTX.edi]
508 mov rsi, qword [xSI + CPUMCTX.esi]
509
510 vmlaunch
511 jmp .vmlaunch64_done ; Here if vmlaunch detected a failure.
512
513ALIGNCODE(16)
514.vmlaunch64_done:
515 jc near .vmxstart64_invalid_vmcs_ptr
516 jz near .vmxstart64_start_failed
517
518 ; Restore base and limit of the IDTR & GDTR
519%ifdef VMX_SKIP_GDTR_IDTR
520 lidt [xSP]
521 add xSP, xS*2
522 lgdt [xSP]
523 add xSP, xS*2
524%endif
525
526 push xDI
527 mov xDI, [xSP + xS * 3] ; pCtx (*3 to skip the saved LDTR + TR)
528
529 mov qword [xDI + CPUMCTX.eax], rax
530 mov qword [xDI + CPUMCTX.ebx], rbx
531 mov qword [xDI + CPUMCTX.ecx], rcx
532 mov qword [xDI + CPUMCTX.edx], rdx
533 mov qword [xDI + CPUMCTX.esi], rsi
534 mov qword [xDI + CPUMCTX.ebp], rbp
535 mov qword [xDI + CPUMCTX.r8], r8
536 mov qword [xDI + CPUMCTX.r9], r9
537 mov qword [xDI + CPUMCTX.r10], r10
538 mov qword [xDI + CPUMCTX.r11], r11
539 mov qword [xDI + CPUMCTX.r12], r12
540 mov qword [xDI + CPUMCTX.r13], r13
541 mov qword [xDI + CPUMCTX.r14], r14
542 mov qword [xDI + CPUMCTX.r15], r15
543%ifndef VBOX_WITH_OLD_VTX_CODE
544 mov rax, cr2
545 mov qword [xDI + CPUMCTX.cr2], rax
546%endif
547
548 pop xAX ; The guest edi we pushed above
549 mov qword [xDI + CPUMCTX.edi], rax
550
551%ifdef VBOX_WITH_DR6_EXPERIMENT
552 ; Restore DR6 - experiment, not safe!
553 mov xAX, dr6
554 mov [xDI + CPUMCTX.dr6], xAX
555%endif
556
557 ; Restore TSS selector; must mark it as not busy before using ltr (!)
558 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
559 ; @todo get rid of sgdt
560 pop xBX ; Saved TR
561 sub xSP, xS*2
562 sgdt [xSP]
563 mov xAX, xBX
564 and al, 0F8h ; Mask away TI and RPL bits, get descriptor offset.
565 add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
566 and dword [xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
567 ltr bx
568 add xSP, xS*2
569
570 pop xAX ; Saved LDTR
571 lldt ax
572
573 pop xSI ; pCtx (needed in rsi by the macros below)
574
575%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
576 ; Save the guest MSRs and load the host MSRs.
577 LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
578 LOADHOSTMSREX MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
579 LOADHOSTMSREX MSR_K6_STAR, CPUMCTX.msrSTAR
580 LOADHOSTMSREX MSR_K8_LSTAR, CPUMCTX.msrLSTAR
581%else
582%ifdef VBOX_WITH_OLD_VTX_CODE
583 ; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
584 LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
585%endif
586%endif
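
LOADHOSTMSREX is the counterpart of LOADGUESTMSR above: it saves the current (guest) MSR value into the CPUMCTX field and then restores the host value that LOADGUESTMSR pushed. Note that the invocation order here is the exact reverse of the save order, matching the stack. A hedged sketch along the same lines as before; details are assumed, the real macro lives in HMR0A.asm:

    %macro LOADHOSTMSREX_SKETCH 2       ; %1 = MSR index, %2 = CPUMCTX field offset.
        mov     rcx, %1
        rdmsr                           ; Current (guest) value -> edx:eax.
        mov     dword [xSI + %2], eax   ; Save it back into the CPUMCTX.
        mov     dword [xSI + %2 + 4], edx
        pop     rax                     ; Host value pushed by the LOADGUESTMSR sketch...
        pop     rdx
        wrmsr                           ; ...goes back into the MSR.
    %endmacro
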
587
588%ifdef VMX_USE_CACHED_VMCS_ACCESSES
589 pop xDX ; Saved pCache
590
591 mov ecx, [xDX + VMCSCACHE.Read.cValidEntries]
592 cmp ecx, 0 ; Zero shouldn't happen in practice, but handle it anyway.
593 je .no_cached_reads
594 jmp .cached_read
595
596ALIGN(16)
597.cached_read:
598 dec xCX
599 mov eax, [xDX + VMCSCACHE.Read.aField + xCX*4]
600 vmread [xDX + VMCSCACHE.Read.aFieldVal + xCX*8], xAX
601 cmp xCX, 0
602 jnz .cached_read
603.no_cached_reads:
604
605%ifdef VBOX_WITH_OLD_VTX_CODE
606 ; Restore CR2 into VMCS-cache field (for EPT).
607 mov xAX, cr2
608 mov [xDX + VMCSCACHE.cr2], xAX
609%endif
610%endif
611
612 ; Restore segment registers.
613 MYPOPSEGS xAX, ax
614
615 ; Restore general purpose registers.
616 MYPOPAD
617
618 mov eax, VINF_SUCCESS
619
620.vmstart64_end:
621 popf
622 pop xBP
623 ret
624
625
626.vmxstart64_invalid_vmcs_ptr:
627 ; Restore base and limit of the IDTR & GDTR.
628%ifdef VMX_SKIP_GDTR_IDTR
629 lidt [xSP]
630 add xSP, xS*2
631 lgdt [xSP]
632 add xSP, xS*2
633%endif
634
635 ; Restore TSS selector; must mark it as not busy before using ltr (!)
636 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
637 ; @todo get rid of sgdt
638 pop xBX ; Saved TR
639 sub xSP, xS*2
640 sgdt [xSP]
641 mov xAX, xBX
642 and al, 0F8h ; Mask away TI and RPL bits, get descriptor offset.
643 add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
644 and dword [xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
645 ltr bx
646 add xSP, xS*2
647
648 pop xAX ; Saved LDTR
649 lldt ax
650
651 pop xSI ; pCtx (needed in rsi by the macros below)
652
653%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
654 ; Load the host MSRs. Don't bother saving the guest MSRs as vmlaunch/vmresume failed.
655 LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
656 LOADHOSTMSR MSR_K8_SF_MASK
657 LOADHOSTMSR MSR_K6_STAR
658 LOADHOSTMSR MSR_K8_LSTAR
659%else
660%ifdef VBOX_WITH_OLD_VTX_CODE
661 ; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
662 LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
663%endif
664%endif
665
666%ifdef VMX_USE_CACHED_VMCS_ACCESSES
667 add xSP, xS ; pCache
668%endif
669
670 ; Restore segment registers.
671 MYPOPSEGS xAX, ax
672
673 ; Restore all general purpose host registers.
674 MYPOPAD
675 mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
676 jmp .vmstart64_end
677
678.vmxstart64_start_failed:
679 ; Restore base and limit of the IDTR & GDTR.
680%ifdef VMX_SKIP_GDTR_IDTR
681 lidt [xSP]
682 add xSP, xS*2
683 lgdt [xSP]
684 add xSP, xS*2
685%endif
686
687 ; Restore TSS selector; must mark it as not busy before using ltr (!)
688 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
689 ; @todo get rid of sgdt
690 pop xBX ; Saved TR
691 sub xSP, xS*2
692 sgdt [xSP]
693 mov xAX, xBX
694 and al, 0F8h ; Mask away TI and RPL bits, get descriptor offset.
695 add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
696 and dword [xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
697 ltr bx
698 add xSP, xS*2
699
700 pop xAX ; Saved LDTR
701 lldt ax
702
703 pop xSI ; pCtx (needed in rsi by the macros below).
704
705%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
706 ; Load the host MSRs. Don't bother saving the guest MSRs as vmlaunch/vmresume failed.
707 LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
708 LOADHOSTMSR MSR_K8_SF_MASK
709 LOADHOSTMSR MSR_K6_STAR
710 LOADHOSTMSR MSR_K8_LSTAR
711%else
712%ifdef VBOX_WITH_OLD_VTX_CODE
713 ; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
714 LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
715%endif
716%endif
717
718%ifdef VMX_USE_CACHED_VMCS_ACCESSES
719 add xSP, xS ; pCache
720%endif
721
722 ; Restore segment registers.
723 MYPOPSEGS xAX, ax
724
725 ; Restore all general purpose host registers.
726 MYPOPAD
727 mov eax, VERR_VMX_UNABLE_TO_START_VM
728 jmp .vmstart64_end
729ENDPROC MY_NAME(VMXR0StartVM64)
730%endif ; RT_ARCH_AMD64
731
732
733;/**
734; * Prepares for and executes VMRUN (32-bit guests).
735; *
736; * @returns VBox status code
737; * @param HCPhysVMCBHost Physical address of the host VMCB.
738; * @param HCPhysVMCB Physical address of the guest VMCB.
739; * @param pCtx Pointer to the guest context (CPUMCTX).
740; */
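
Whether the frame comes from a real 32-bit cdecl call or from the fake frame built below for 64-bit hosts, the three arguments end up at fixed offsets from xBP, which is what the [xBP + xS*2 + ...] addressing later in the routine relies on (xS being the host word size and RTHCPHYS_CB the 8-byte size of a host physical address). A sketch of those offsets; the names are illustrative only:

    ; Argument offsets once 'push xBP / mov xBP, xSP' has executed (sketch):
    %define SVMRUN_OFF_VMCB_HOST_PHYS   (xS*2)                   ; HCPhysVMCBHost (RTHCPHYS, 8 bytes).
    %define SVMRUN_OFF_VMCB_PHYS        (xS*2 + RTHCPHYS_CB)     ; HCPhysVMCB (RTHCPHYS, 8 bytes).
    %define SVMRUN_OFF_PCTX             (xS*2 + RTHCPHYS_CB*2)   ; pCtx.
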
741ALIGNCODE(16)
742BEGINPROC MY_NAME(SVMR0VMRun)
743%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
744 %ifdef ASM_CALL64_GCC
745 push rdx
746 push rsi
747 push rdi
748 %else
749 push r8
750 push rdx
751 push rcx
752 %endif
753 push 0
754%endif
755 push xBP
756 mov xBP, xSP
757 pushf
758
759 ; Manual save and restore:
760 ; - General purpose registers except RIP, RSP, RAX
761 ;
762 ; Trashed:
763 ; - CR2 (we don't care)
764 ; - LDTR (reset to 0)
765 ; - DRx (presumably not changed at all)
766 ; - DR7 (reset to 0x400)
767
768 ; Save all general purpose host registers.
769 MYPUSHAD
770
771 ; Save the Guest CPU context pointer.
772 mov xSI, [xBP + xS*2 + RTHCPHYS_CB*2] ; pCtx
773 push xSI ; push for saving the state at the end
774
775 ; Save host fs, gs, sysenter msr etc.
776 mov xAX, [xBP + xS*2] ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
777 push xAX ; save for the vmload after vmrun
778 vmsave
779
780 ; Setup eax for VMLOAD.
781 mov xAX, [xBP + xS*2 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address; take low dword only)
782
783 ; Restore Guest's general purpose registers.
784 ; eax is loaded from the VMCB by VMRUN.
785 mov ebx, [xSI + CPUMCTX.ebx]
786 mov ecx, [xSI + CPUMCTX.ecx]
787 mov edx, [xSI + CPUMCTX.edx]
788 mov edi, [xSI + CPUMCTX.edi]
789 mov ebp, [xSI + CPUMCTX.ebp]
790 mov esi, [xSI + CPUMCTX.esi]
791
792 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
793 clgi
794 sti
795
796 ; Load guest fs, gs, sysenter msr etc.
797 vmload
798 ; Run the VM.
799 vmrun
800
801 ; eax is in the VMCB already; we can use it here.
802
803 ; Save guest fs, gs, sysenter msr etc.
804 vmsave
805
806 ; Load host fs, gs, sysenter msr etc.
807 pop xAX ; Pushed above
808 vmload
809
810 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
811 cli
812 stgi
813
814 pop xAX ; pCtx
815
816 mov [ss:xAX + CPUMCTX.ebx], ebx
817 mov [ss:xAX + CPUMCTX.ecx], ecx
818 mov [ss:xAX + CPUMCTX.edx], edx
819 mov [ss:xAX + CPUMCTX.esi], esi
820 mov [ss:xAX + CPUMCTX.edi], edi
821 mov [ss:xAX + CPUMCTX.ebp], ebp
822
823 ; Restore general purpose registers.
824 MYPOPAD
825
826 mov eax, VINF_SUCCESS
827
828 popf
829 pop xBP
830%ifdef RT_ARCH_AMD64
831 add xSP, 4*xS
832%endif
833 ret
834ENDPROC MY_NAME(SVMR0VMRun)
835
836%ifdef RT_ARCH_AMD64
837;/**
838; * Prepares for and executes VMRUN (64-bit guests).
839; *
840; * @returns VBox status code
841; * @param HCPhysVMCBHost Physical address of the host VMCB.
842; * @param HCPhysVMCB Physical address of the guest VMCB.
843; * @param pCtx Pointer to the guest context (CPUMCTX).
844; */
845ALIGNCODE(16)
846BEGINPROC MY_NAME(SVMR0VMRun64)
847 ; Fake a cdecl stack frame
848 %ifdef ASM_CALL64_GCC
849 push rdx
850 push rsi
851 push rdi
852 %else
853 push r8
854 push rdx
855 push rcx
856 %endif
857 push 0
858 push rbp
859 mov rbp, rsp
860 pushf
861
862 ; Manual save and restore:
863 ; - General purpose registers except RIP, RSP, RAX
864 ;
865 ; Trashed:
866 ; - CR2 (we don't care)
867 ; - LDTR (reset to 0)
868 ; - DRx (presumably not changed at all)
869 ; - DR7 (reset to 0x400)
870 ;
871
872 ; Save all general purpose host registers.
873 MYPUSHAD
874
875 ; Save the Guest CPU context pointer.
876 mov rsi, [rbp + xS*2 + RTHCPHYS_CB*2] ; pCtx
877 push rsi ; push for saving the state at the end
878
879 ; Save host fs, gs, sysenter msr etc.
880 mov rax, [rbp + xS*2] ; pVMCBHostPhys (64-bit physical address)
881 push rax ; Save for the vmload after vmrun
882 vmsave
883
884 ; Set up rax for VMLOAD.
885 mov rax, [rbp + xS*2 + RTHCPHYS_CB] ; pVMCBPhys (64-bit physical address)
886
887 ; Restore Guest's general purpose registers.
888 ; rax is loaded from the VMCB by VMRUN.
889 mov rbx, qword [xSI + CPUMCTX.ebx]
890 mov rcx, qword [xSI + CPUMCTX.ecx]
891 mov rdx, qword [xSI + CPUMCTX.edx]
892 mov rdi, qword [xSI + CPUMCTX.edi]
893 mov rbp, qword [xSI + CPUMCTX.ebp]
894 mov r8, qword [xSI + CPUMCTX.r8]
895 mov r9, qword [xSI + CPUMCTX.r9]
896 mov r10, qword [xSI + CPUMCTX.r10]
897 mov r11, qword [xSI + CPUMCTX.r11]
898 mov r12, qword [xSI + CPUMCTX.r12]
899 mov r13, qword [xSI + CPUMCTX.r13]
900 mov r14, qword [xSI + CPUMCTX.r14]
901 mov r15, qword [xSI + CPUMCTX.r15]
902 mov rsi, qword [xSI + CPUMCTX.esi]
903
904 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
905 clgi
906 sti
907
908 ; Load guest fs, gs, sysenter msr etc.
909 vmload
910 ; Run the VM.
911 vmrun
912
913 ; rax is in the VMCB already; we can use it here.
914
915 ; Save guest fs, gs, sysenter msr etc.
916 vmsave
917
918 ; Load host fs, gs, sysenter msr etc.
919 pop rax ; pushed above
920 vmload
921
922 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
923 cli
924 stgi
925
926 pop rax ; pCtx
927
928 mov qword [rax + CPUMCTX.ebx], rbx
929 mov qword [rax + CPUMCTX.ecx], rcx
930 mov qword [rax + CPUMCTX.edx], rdx
931 mov qword [rax + CPUMCTX.esi], rsi
932 mov qword [rax + CPUMCTX.edi], rdi
933 mov qword [rax + CPUMCTX.ebp], rbp
934 mov qword [rax + CPUMCTX.r8], r8
935 mov qword [rax + CPUMCTX.r9], r9
936 mov qword [rax + CPUMCTX.r10], r10
937 mov qword [rax + CPUMCTX.r11], r11
938 mov qword [rax + CPUMCTX.r12], r12
939 mov qword [rax + CPUMCTX.r13], r13
940 mov qword [rax + CPUMCTX.r14], r14
941 mov qword [rax + CPUMCTX.r15], r15
942
943 ; Restore general purpose registers.
944 MYPOPAD
945
946 mov eax, VINF_SUCCESS
947
948 popf
949 pop rbp
950 add rsp, 4*xS
951 ret
952ENDPROC MY_NAME(SVMR0VMRun64)
953%endif ; RT_ARCH_AMD64
954