VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMR0Mixed.mac@47776

Last change on this file since 47776 was 47652, checked in by vboxsync, 11 years ago

VMM: Removed all VBOX_WITH_OLD_[VTX|AMDV]_CODE bits.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 25.5 KB
; $Id: HMR0Mixed.mac 47652 2013-08-09 14:56:17Z vboxsync $
;; @file
; HM - Ring-0 Host 32/64, Guest 32/64 world-switch routines
;
; Darwin uses this to build two versions in the hybrid case.
; Included by HMR0A.asm with RT_ARCH_AMD64 defined or undefined.
;
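; Note: MY_NAME() is supplied by the including HMR0A.asm; in the Darwin hybrid
; case it presumably decorates the procedure names below so that the two
; inclusions (RT_ARCH_AMD64 defined/undefined) do not produce clashing symbols.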

;
; Copyright (C) 2006-2013 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

%ifdef RT_ARCH_AMD64
 ;;
 ; Keep these macro definitions in this file as it gets included and compiled
 ; with RT_ARCH_AMD64 once and RT_ARCH_X86 once.
 %define VMX_SKIP_GDTR
 %ifndef RT_OS_DARWIN
  ; Darwin (Mavericks) uses the IDTR limit to store the CPU ID, so we need to restore it always. See @bugref{6875}.
  %define VMX_SKIP_IDTR
 %endif
 %define VMX_SKIP_TR
%endif
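; On 32-bit (RT_ARCH_X86) builds none of the VMX_SKIP_* symbols are defined,
; so the full GDTR, IDTR and TR save/restore paths below are always taken.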

;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (32-bit guest mode)
; *
; * @returns VBox status code
; * @param fResume x86:[ebp+8], msc:rcx,gcc:rdi vmlaunch/vmresume
; * @param pCtx x86:[ebp+c], msc:rdx,gcc:rsi Guest context
; * @param pCache x86:[esp+10],msc:r8, gcc:rdx VMCS cache
; */
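; A hypothetical C-side declaration matching the parameter list above (the
; exact typedef lives in the C headers; names here are illustrative only):
;     DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache);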
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM32)
    push xBP
    mov xBP, xSP

    pushf
    cli

    ; Save all general purpose host registers.
    MYPUSHAD

    ; First we have to save some final CPU context registers.
    mov eax, VMX_VMCS_HOST_RIP
%ifdef RT_ARCH_AMD64
    lea r10, [.vmlaunch_done wrt rip]
    vmwrite rax, r10
%else
    mov ecx, .vmlaunch_done
    vmwrite eax, ecx
%endif
    ; Note: assumes success!

    ; Save the Guest CPU context pointer.
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
    mov rbx, rdx ; pCache
 %else
    mov rdi, rcx ; fResume
    mov rsi, rdx ; pCtx
    mov rbx, r8 ; pCache
 %endif
%else
    mov edi, [ebp + 8] ; fResume
    mov esi, [ebp + 12] ; pCtx
    mov ebx, [ebp + 16] ; pCache
%endif

    ; Save segment registers.
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case).
    MYPUSHSEGS xAX, ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
    cmp ecx, 0
    je .no_cached_writes
    mov edx, ecx
    mov ecx, 0
    jmp .cached_write

ALIGN(16)
.cached_write:
    mov eax, [xBX + VMCSCACHE.Write.aField + xCX * 4]
    vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX * 8]
    inc xCX
    cmp xCX, xDX
    jl .cached_write

    mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

    ; Save the pCache pointer.
    push xBX
%endif
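; The loop above flushes any VMCS writes queued in VMCSCACHE.Write with
; VMWRITE before we enter the guest; the matching VMCSCACHE.Read entries are
; refilled with VMREAD after the VM-exit (see the cached-read loop below).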

    ; Save the pCtx pointer.
    push xSI

    ; Save LDTR.
    xor eax, eax
    sldt ax
    push xAX

%ifndef VMX_SKIP_TR
    ; The TR limit is reset to 0x67; restore it manually.
    str eax
    push xAX
%endif

%ifndef VMX_SKIP_GDTR
    ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub xSP, xCB * 2
    sgdt [xSP]
%endif
%ifndef VMX_SKIP_IDTR
    sub xSP, xCB * 2
    sidt [xSP]
%endif
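; Stack at this point, top to bottom (with none of the VMX_SKIP_* symbols
; defined and VMX_USE_CACHED_VMCS_ACCESSES enabled): IDTR image, GDTR image,
; saved TR, saved LDTR, pCtx, pCache.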

    ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
    mov xBX, [xSI + CPUMCTX.cr2]
    mov xDX, cr2
    cmp xBX, xDX
    je .skipcr2write32
    mov cr2, xBX

.skipcr2write32:
    mov eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ; Note: assumes success!
    ; Don't mess with ESP anymore!!!

    ; Load Guest's general purpose registers.
    mov eax, [xSI + CPUMCTX.eax]
    mov ebx, [xSI + CPUMCTX.ebx]
    mov ecx, [xSI + CPUMCTX.ecx]
    mov edx, [xSI + CPUMCTX.edx]
    mov ebp, [xSI + CPUMCTX.ebp]

    ; Resume or start?
    cmp xDI, 0 ; fResume
    je .vmlaunch_launch

    ; Restore edi & esi.
    mov edi, [xSI + CPUMCTX.edi]
    mov esi, [xSI + CPUMCTX.esi]

    vmresume
    jmp .vmlaunch_done ; Here if vmresume detected a failure.

.vmlaunch_launch:
    ; Restore edi & esi.
    mov edi, [xSI + CPUMCTX.edi]
    mov esi, [xSI + CPUMCTX.esi]

    vmlaunch
    jmp .vmlaunch_done ; Here if vmlaunch detected a failure.

ALIGNCODE(16) ;; @todo YASM BUG - this alignment is wrong on darwin, it's 1 byte off.
.vmlaunch_done:
    jc near .vmxstart_invalid_vmcs_ptr
    jz near .vmxstart_start_failed
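    ; VMLAUNCH/VMRESUME report failure through RFLAGS: CF=1 means VMfailInvalid
    ; (no current VMCS), ZF=1 means VMfailValid (consult the VM-instruction
    ; error field of the VMCS); hence the two error paths above.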

    ; Restore base and limit of the IDTR & GDTR.
%ifndef VMX_SKIP_IDTR
    lidt [xSP]
    add xSP, xCB * 2
%endif
%ifndef VMX_SKIP_GDTR
    lgdt [xSP]
    add xSP, xCB * 2
%endif

    push xDI
%ifndef VMX_SKIP_TR
    mov xDI, [xSP + xCB * 3] ; pCtx (*3 to skip the saved xDI, TR, LDTR).
%else
    mov xDI, [xSP + xCB * 2] ; pCtx (*2 to skip the saved xDI, LDTR).
%endif

    mov [ss:xDI + CPUMCTX.eax], eax
    mov [ss:xDI + CPUMCTX.ebx], ebx
    mov [ss:xDI + CPUMCTX.ecx], ecx
    mov [ss:xDI + CPUMCTX.edx], edx
    mov [ss:xDI + CPUMCTX.esi], esi
    mov [ss:xDI + CPUMCTX.ebp], ebp
    mov xAX, cr2
    mov [ss:xDI + CPUMCTX.cr2], xAX

%ifdef RT_ARCH_AMD64
    pop xAX ; The guest edi we pushed above.
    mov dword [ss:xDI + CPUMCTX.edi], eax
%else
    pop dword [ss:xDI + CPUMCTX.edi] ; The guest edi we pushed above.
%endif

%ifndef VMX_SKIP_TR
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX ; Saved TR
    sub xSP, xCB * 2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h ; Mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
    and dword [ss:xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
    ltr bx
    add xSP, xCB * 2
%endif
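; LTR raises #GP if the TSS descriptor it loads is already marked busy, and
; the host descriptor normally is (hence the 'BUSY' assumption above); so the
; busy type bit is cleared in the GDT entry before TR is reloaded.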

    pop xAX ; Saved LDTR
%ifdef RT_ARCH_AMD64
    cmp xAX, 0
    je .skipldtwrite32
%endif
    lldt ax

.skipldtwrite32:
    add xSP, xCB ; pCtx

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop xDX ; Saved pCache

    mov ecx, [ss:xDX + VMCSCACHE.Read.cValidEntries]
    cmp ecx, 0 ; Can't happen
    je .no_cached_reads
    jmp .cached_read

ALIGN(16)
.cached_read:
    dec xCX
    mov eax, [ss:xDX + VMCSCACHE.Read.aField + xCX * 4]
    vmread [ss:xDX + VMCSCACHE.Read.aFieldVal + xCX * 8], xAX
    cmp xCX, 0
    jnz .cached_read
.no_cached_reads:
%endif
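; Mirror of the pre-entry flush above: the fields listed in VMCSCACHE.Read are
; pulled back out of the VMCS with VMREAD after the VM-exit.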

    ; Restore segment registers.
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers.
    MYPOPAD

    mov eax, VINF_SUCCESS

.vmstart_end:
    popf
    pop xBP
    ret


.vmxstart_invalid_vmcs_ptr:
    ; Restore base and limit of the IDTR & GDTR
%ifndef VMX_SKIP_IDTR
    lidt [xSP]
    add xSP, xCB * 2
%endif
%ifndef VMX_SKIP_GDTR
    lgdt [xSP]
    add xSP, xCB * 2
%endif

%ifndef VMX_SKIP_TR
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX ; Saved TR
    sub xSP, xCB * 2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h ; Mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
    and dword [ss:xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
    ltr bx
    add xSP, xCB * 2
%endif

    pop xAX ; Saved LDTR
    lldt ax ; Don't bother with conditional restoration in the error case.

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add xSP, xCB * 2 ; pCtx + pCache
%else
    add xSP, xCB ; pCtx
%endif

    ; Restore segment registers.
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
    jmp .vmstart_end

.vmxstart_start_failed:
    ; Restore base and limit of the IDTR & GDTR.
%ifndef VMX_SKIP_IDTR
    lidt [xSP]
    add xSP, xCB * 2
%endif
%ifndef VMX_SKIP_GDTR
    lgdt [xSP]
    add xSP, xCB * 2
%endif

%ifndef VMX_SKIP_TR
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX ; Saved TR
    sub xSP, xCB * 2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h ; Mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
    and dword [ss:xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
    ltr bx
    add xSP, xCB * 2
%endif

    pop xAX ; Saved LDTR
    lldt ax ; Don't bother with conditional restoration in the error case.

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add xSP, xCB * 2 ; pCtx + pCache
%else
    add xSP, xCB ; pCtx
%endif

    ; Restore segment registers.
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_UNABLE_TO_START_VM
    jmp .vmstart_end

ENDPROC MY_NAME(VMXR0StartVM32)

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
; *
; * @returns VBox status code
; * @param fResume msc:rcx, gcc:rdi vmlaunch/vmresume
; * @param pCtx msc:rdx, gcc:rsi Guest context
; * @param pCache msc:r8, gcc:rdx VMCS cache
; */
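; As with the 32-bit variant, a hypothetical C-side declaration (illustrative
; names only) would be:
;     DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache);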
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM64)
    push xBP
    mov xBP, xSP

    pushf
    cli

    ; Save all general purpose host registers.
    MYPUSHAD

    ; First we have to save some final CPU context registers.
    lea r10, [.vmlaunch64_done wrt rip]
    mov rax, VMX_VMCS_HOST_RIP ; Return address (too difficult to continue after VMLAUNCH?).
    vmwrite rax, r10
    ; Note: assumes success!

    ; Save the Guest CPU context pointer.
%ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
    mov rbx, rdx ; pCache
%else
    mov rdi, rcx ; fResume
    mov rsi, rdx ; pCtx
    mov rbx, r8 ; pCache
%endif

    ; Save segment registers.
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case).
    MYPUSHSEGS xAX, ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
    cmp ecx, 0
    je .no_cached_writes
    mov edx, ecx
    mov ecx, 0
    jmp .cached_write

ALIGN(16)
.cached_write:
    mov eax, [xBX + VMCSCACHE.Write.aField + xCX * 4]
    vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX * 8]
    inc xCX
    cmp xCX, xDX
    jl .cached_write

    mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

    ; Save the pCache pointer.
    push xBX
%endif

%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    ; Save the host MSRs and load the guest MSRs.
    LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
    LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
    LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
%endif
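; When the VMCS auto MSR-load/store areas are not used (i.e.
; VBOX_WITH_AUTO_MSR_LOAD_RESTORE is undefined), the syscall-related MSRs
; (LSTAR, STAR, SFMASK, KERNEL_GS_BASE) are swapped to the guest values here
; and swapped back via LOADHOSTMSREX/LOADHOSTMSR after the VM-exit below.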

    ; Save the pCtx pointer.
    push xSI

    ; Save LDTR.
    xor eax, eax
    sldt ax
    push xAX

%ifndef VMX_SKIP_TR
    ; The TR limit is reset to 0x67; restore it manually.
    str eax
    push xAX
%endif

    ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
%ifndef VMX_SKIP_GDTR
    sub xSP, xCB * 2
    sgdt [xSP]
%endif
%ifndef VMX_SKIP_IDTR
    sub xSP, xCB * 2
    sidt [xSP]
%endif

    ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
    mov rbx, qword [xSI + CPUMCTX.cr2]
    mov rdx, cr2
    cmp rbx, rdx
    je .skipcr2write
    mov cr2, rbx

.skipcr2write:
    mov eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ; Note: assumes success!
    ; Don't mess with ESP anymore!!!

    ; Restore Guest's general purpose registers.
    mov rax, qword [xSI + CPUMCTX.eax]
    mov rbx, qword [xSI + CPUMCTX.ebx]
    mov rcx, qword [xSI + CPUMCTX.ecx]
    mov rdx, qword [xSI + CPUMCTX.edx]
    mov rbp, qword [xSI + CPUMCTX.ebp]
    mov r8, qword [xSI + CPUMCTX.r8]
    mov r9, qword [xSI + CPUMCTX.r9]
    mov r10, qword [xSI + CPUMCTX.r10]
    mov r11, qword [xSI + CPUMCTX.r11]
    mov r12, qword [xSI + CPUMCTX.r12]
    mov r13, qword [xSI + CPUMCTX.r13]
    mov r14, qword [xSI + CPUMCTX.r14]
    mov r15, qword [xSI + CPUMCTX.r15]

    ; Resume or start?
    cmp xDI, 0 ; fResume
    je .vmlaunch64_launch

    ; Restore rdi & rsi.
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rsi, qword [xSI + CPUMCTX.esi]

    vmresume
    jmp .vmlaunch64_done ; Here if vmresume detected a failure.

.vmlaunch64_launch:
    ; Restore rdi & rsi.
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rsi, qword [xSI + CPUMCTX.esi]

    vmlaunch
    jmp .vmlaunch64_done ; Here if vmlaunch detected a failure.

ALIGNCODE(16)
.vmlaunch64_done:
    jc near .vmxstart64_invalid_vmcs_ptr
    jz near .vmxstart64_start_failed
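    ; Same RFLAGS convention as the 32-bit path: CF=1 is VMfailInvalid, ZF=1 is
    ; VMfailValid with the details in the VM-instruction error field.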

    ; Restore base and limit of the IDTR & GDTR
%ifndef VMX_SKIP_IDTR
    lidt [xSP]
    add xSP, xCB * 2
%endif
%ifndef VMX_SKIP_GDTR
    lgdt [xSP]
    add xSP, xCB * 2
%endif

    push xDI
%ifndef VMX_SKIP_TR
    mov xDI, [xSP + xCB * 3] ; pCtx (*3 to skip the saved xDI, TR, LDTR)
%else
    mov xDI, [xSP + xCB * 2] ; pCtx (*2 to skip the saved xDI, LDTR)
%endif

    mov qword [xDI + CPUMCTX.eax], rax
    mov qword [xDI + CPUMCTX.ebx], rbx
    mov qword [xDI + CPUMCTX.ecx], rcx
    mov qword [xDI + CPUMCTX.edx], rdx
    mov qword [xDI + CPUMCTX.esi], rsi
    mov qword [xDI + CPUMCTX.ebp], rbp
    mov qword [xDI + CPUMCTX.r8], r8
    mov qword [xDI + CPUMCTX.r9], r9
    mov qword [xDI + CPUMCTX.r10], r10
    mov qword [xDI + CPUMCTX.r11], r11
    mov qword [xDI + CPUMCTX.r12], r12
    mov qword [xDI + CPUMCTX.r13], r13
    mov qword [xDI + CPUMCTX.r14], r14
    mov qword [xDI + CPUMCTX.r15], r15
    mov rax, cr2
    mov qword [xDI + CPUMCTX.cr2], rax

    pop xAX ; The guest edi we pushed above
    mov qword [xDI + CPUMCTX.edi], rax

%ifndef VMX_SKIP_TR
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
    ; @todo get rid of sgdt
    pop xBX ; Saved TR
    sub xSP, xCB * 2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h ; Mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
    and dword [xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
    ltr bx
    add xSP, xCB * 2
%endif

    pop xAX ; Saved LDTR
    cmp xAX, 0
    je .skipldtwrite64
    lldt ax

.skipldtwrite64:
    pop xSI ; pCtx (needed in rsi by the macros below)

%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    ; Save the guest MSRs and load the host MSRs.
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    LOADHOSTMSREX MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    LOADHOSTMSREX MSR_K6_STAR, CPUMCTX.msrSTAR
    LOADHOSTMSREX MSR_K8_LSTAR, CPUMCTX.msrLSTAR
%endif

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop xDX ; Saved pCache

    mov ecx, [xDX + VMCSCACHE.Read.cValidEntries]
    cmp ecx, 0 ; Can't happen
    je .no_cached_reads
    jmp .cached_read

ALIGN(16)
.cached_read:
    dec xCX
    mov eax, [xDX + VMCSCACHE.Read.aField + xCX * 4]
    vmread [xDX + VMCSCACHE.Read.aFieldVal + xCX * 8], xAX
    cmp xCX, 0
    jnz .cached_read
.no_cached_reads:
%endif

    ; Restore segment registers.
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers.
    MYPOPAD

    mov eax, VINF_SUCCESS

.vmstart64_end:
    popf
    pop xBP
    ret


.vmxstart64_invalid_vmcs_ptr:
    ; Restore base and limit of the IDTR & GDTR.
%ifndef VMX_SKIP_IDTR
    lidt [xSP]
    add xSP, xCB * 2
%endif
%ifndef VMX_SKIP_GDTR
    lgdt [xSP]
    add xSP, xCB * 2
%endif

%ifndef VMX_SKIP_TR
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
    ; @todo get rid of sgdt
    pop xBX ; Saved TR
    sub xSP, xCB * 2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h ; Mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
    and dword [xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
    ltr bx
    add xSP, xCB * 2
%endif

    pop xAX ; Saved LDTR
    lldt ax ; Don't bother with conditional restoration in the error case.

    pop xSI ; pCtx (needed in rsi by the macros below)

%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    ; Load the host MSRs. Don't bother saving the guest MSRs as vmlaunch/vmresume failed.
    LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR
    LOADHOSTMSR MSR_K8_LSTAR
%endif

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add xSP, xCB ; pCache
%endif

    ; Restore segment registers.
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
    jmp .vmstart64_end

.vmxstart64_start_failed:
    ; Restore base and limit of the IDTR & GDTR.
%ifndef VMX_SKIP_IDTR
    lidt [xSP]
    add xSP, xCB * 2
%endif
%ifndef VMX_SKIP_GDTR
    lgdt [xSP]
    add xSP, xCB * 2
%endif

%ifndef VMX_SKIP_TR
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
    ; @todo get rid of sgdt
    pop xBX ; Saved TR
    sub xSP, xCB * 2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h ; Mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
    and dword [xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
    ltr bx
    add xSP, xCB * 2
%endif

    pop xAX ; Saved LDTR
    lldt ax ; Don't bother with conditional restoration in the error case.

    pop xSI ; pCtx (needed in rsi by the macros below).

%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    ; Load the host MSRs. Don't bother saving the guest MSRs as vmlaunch/vmresume failed.
    LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR
    LOADHOSTMSR MSR_K8_LSTAR
%endif

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add xSP, xCB ; pCache
%endif

    ; Restore segment registers.
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_UNABLE_TO_START_VM
    jmp .vmstart64_end
ENDPROC MY_NAME(VMXR0StartVM64)
%endif ; RT_ARCH_AMD64


;/**
; * Prepares for and executes VMRUN (32-bit guests)
; *
; * @returns VBox status code
; * @param pVMCBHostPhys Physical address of host VMCB
; * @param pVMCBPhys Physical address of guest VMCB
; * @param pCtx Guest context
; */
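; A hypothetical C-side declaration matching the three parameters documented
; above (illustrative only; RTHCPHYS is the host-physical-address type whose
; size RTHCPHYS_CB below refers to):
;     DECLASM(int) SVMR0VMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx);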
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun)
%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push rdx
    push rsi
    push rdi
 %else
    push r8
    push rdx
    push rcx
 %endif
    push 0
%endif
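; The pushes above spill the register arguments (and a dummy qword in place of
; the cdecl return-address slot) so that the body below can address the
; parameters as [xBP + xCB * 2 + ...] in both the 32-bit and 64-bit builds.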
    push xBP
    mov xBP, xSP
    pushf

    ; Save all general purpose host registers.
    MYPUSHAD

    ; Save the Guest CPU context pointer.
    mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
    push xSI ; push for saving the state at the end

    ; Save host fs, gs, sysenter msr etc.
    mov xAX, [xBP + xCB * 2] ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
    push xAX ; save for the vmload after vmrun
    vmsave

    ; Set up xAX for VMLOAD.
    mov xAX, [xBP + xCB * 2 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address; take low dword only)

    ; Restore Guest's general purpose registers.
    ; eax is loaded from the VMCB by VMRUN.
    mov ebx, [xSI + CPUMCTX.ebx]
    mov ecx, [xSI + CPUMCTX.ecx]
    mov edx, [xSI + CPUMCTX.edx]
    mov edi, [xSI + CPUMCTX.edi]
    mov ebp, [xSI + CPUMCTX.ebp]
    mov esi, [xSI + CPUMCTX.esi]
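    ; Note: esi is loaded last because xSI still holds the guest-context
    ; pointer used by the loads above.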

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
    clgi
    sti

    ; Load guest fs, gs, sysenter msr etc.
    vmload
    ; Run the VM.
    vmrun

    ; eax is in the VMCB already; we can use it here.

    ; Save guest fs, gs, sysenter msr etc.
    vmsave

    ; Load host fs, gs, sysenter msr etc.
    pop xAX ; Pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop xAX ; pCtx

    mov [ss:xAX + CPUMCTX.ebx], ebx
    mov [ss:xAX + CPUMCTX.ecx], ecx
    mov [ss:xAX + CPUMCTX.edx], edx
    mov [ss:xAX + CPUMCTX.esi], esi
    mov [ss:xAX + CPUMCTX.edi], edi
    mov [ss:xAX + CPUMCTX.ebp], ebp

    ; Restore general purpose registers.
    MYPOPAD

    mov eax, VINF_SUCCESS

    popf
    pop xBP
%ifdef RT_ARCH_AMD64
    add xSP, 4*xCB
%endif
    ret
ENDPROC MY_NAME(SVMR0VMRun)

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMRUN (64-bit guests)
; *
; * @returns VBox status code
; * @param pVMCBHostPhys Physical address of host VMCB
; * @param pVMCBPhys Physical address of guest VMCB
; * @param pCtx Guest context
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun64)
    ; Fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push rdx
    push rsi
    push rdi
 %else
    push r8
    push rdx
    push rcx
 %endif
    push 0
    push rbp
    mov rbp, rsp
    pushf

    ; Manual save and restore:
    ; - General purpose registers except RIP, RSP, RAX
    ;
    ; Trashed:
    ; - CR2 (we don't care)
    ; - LDTR (reset to 0)
    ; - DRx (presumably not changed at all)
    ; - DR7 (reset to 0x400)
    ;

    ; Save all general purpose host registers.
    MYPUSHAD

    ; Save the Guest CPU context pointer.
    mov rsi, [rbp + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
    push rsi ; push for saving the state at the end

    ; Save host fs, gs, sysenter msr etc.
    mov rax, [rbp + xCB * 2] ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
    push rax ; Save for the vmload after vmrun
    vmsave

    ; Set up rax for VMLOAD.
    mov rax, [rbp + xCB * 2 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address; take low dword only)

    ; Restore Guest's general purpose registers.
    ; rax is loaded from the VMCB by VMRUN.
    mov rbx, qword [xSI + CPUMCTX.ebx]
    mov rcx, qword [xSI + CPUMCTX.ecx]
    mov rdx, qword [xSI + CPUMCTX.edx]
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rbp, qword [xSI + CPUMCTX.ebp]
    mov r8, qword [xSI + CPUMCTX.r8]
    mov r9, qword [xSI + CPUMCTX.r9]
    mov r10, qword [xSI + CPUMCTX.r10]
    mov r11, qword [xSI + CPUMCTX.r11]
    mov r12, qword [xSI + CPUMCTX.r12]
    mov r13, qword [xSI + CPUMCTX.r13]
    mov r14, qword [xSI + CPUMCTX.r14]
    mov r15, qword [xSI + CPUMCTX.r15]
    mov rsi, qword [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
    clgi
    sti

    ; Load guest fs, gs, sysenter msr etc.
    vmload
    ; Run the VM.
    vmrun

    ; rax is in the VMCB already; we can use it here.

    ; Save guest fs, gs, sysenter msr etc.
    vmsave

    ; Load host fs, gs, sysenter msr etc.
    pop rax ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop rax ; pCtx

    mov qword [rax + CPUMCTX.ebx], rbx
    mov qword [rax + CPUMCTX.ecx], rcx
    mov qword [rax + CPUMCTX.edx], rdx
    mov qword [rax + CPUMCTX.esi], rsi
    mov qword [rax + CPUMCTX.edi], rdi
    mov qword [rax + CPUMCTX.ebp], rbp
    mov qword [rax + CPUMCTX.r8], r8
    mov qword [rax + CPUMCTX.r9], r9
    mov qword [rax + CPUMCTX.r10], r10
    mov qword [rax + CPUMCTX.r11], r11
    mov qword [rax + CPUMCTX.r12], r12
    mov qword [rax + CPUMCTX.r13], r13
    mov qword [rax + CPUMCTX.r14], r14
    mov qword [rax + CPUMCTX.r15], r15

    ; Restore general purpose registers.
    MYPOPAD

    mov eax, VINF_SUCCESS

    popf
    pop rbp
    add rsp, 4 * xCB
    ret
ENDPROC MY_NAME(SVMR0VMRun64)
%endif ; RT_ARCH_AMD64
