VirtualBox
source: vbox/trunk/src/VBox/VMM/VMMR0/HMR0Mixed.mac @ 47454

Last change on this file since 47454 was 47033, checked in by vboxsync, 12 years ago:

    VMM/VMMR0: HMR0 fix regression caused by r86900.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id

File size: 26.2 KB
; $Id: HMR0Mixed.mac 47033 2013-07-08 11:29:23Z vboxsync $
;; @file
; HMR0Mixed.mac - Stuff that darwin needs to build two versions of.
;
; Included by HMR0A.asm with RT_ARCH_AMD64 defined or undefined.
;

;
; Copyright (C) 2006-2013 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

%ifndef VBOX_WITH_OLD_VTX_CODE
 %ifdef RT_ARCH_AMD64
  %define VMX_SKIP_GDTR_IDTR
  %define VMX_SKIP_TR
 %endif
%endif
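; When VMX_SKIP_GDTR_IDTR and VMX_SKIP_TR are defined (64-bit builds of the
; new VT-x code), the %ifndef-guarded sequences below that manually save and
; restore the GDTR/IDTR limits and the TR selector are assembled out.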

;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (32-bit guest mode)
; *
; * @returns VBox status code
; * @param   fResume    x86:[ebp+8],  msc:rcx, gcc:rdi    vmlaunch/vmresume
; * @param   pCtx       x86:[ebp+c],  msc:rdx, gcc:rsi    Guest context
; * @param   pCache     x86:[esp+10], msc:r8,  gcc:rdx    VMCS cache
; */
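; A matching C-side declaration would look roughly like the sketch below; the
; authoritative prototype and the PCPUMCTX/PVMCSCACHE typedefs live in the VMM
; headers, not in this file:
;   DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache);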
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM32)
    push xBP
    mov xBP, xSP

    pushf
    cli

    ; Save all general purpose host registers.
    MYPUSHAD

    ; First we have to save some final CPU context registers.
    mov eax, VMX_VMCS_HOST_RIP
%ifdef RT_ARCH_AMD64
    lea r10, [.vmlaunch_done wrt rip]
    vmwrite rax, r10
%else
    mov ecx, .vmlaunch_done
    vmwrite eax, ecx
%endif
    ; Note: assumes success!
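    ; VMX_VMCS_HOST_RIP is the host-state RIP field of the current VMCS, so
    ; every VM-exit resumes host execution at the .vmlaunch_done label below.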

    ; Save the Guest CPU context pointer.
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
    mov rbx, rdx                ; pCache
 %else
    mov rdi, rcx                ; fResume
    mov rsi, rdx                ; pCtx
    mov rbx, r8                 ; pCache
 %endif
%else
    mov edi, [ebp + 8]          ; fResume
    mov esi, [ebp + 12]         ; pCtx
    mov ebx, [ebp + 16]         ; pCache
%endif

    ; Save segment registers.
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case).
    MYPUSHSEGS xAX, ax

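; Flush any vmwrites queued in the VMCS write cache: each entry pairs a VMCS
; field encoding (Write.aField) with its value (Write.aFieldVal); after the
; loop the entry count is reset to zero.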
%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
    cmp ecx, 0
    je .no_cached_writes
    mov edx, ecx
    mov ecx, 0
    jmp .cached_write

ALIGN(16)
.cached_write:
    mov eax, [xBX + VMCSCACHE.Write.aField + xCX * 4]
    vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX * 8]
    inc xCX
    cmp xCX, xDX
    jl .cached_write

    mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

    ; Save the pCache pointer.
    push xBX
%endif

    ; Save the pCtx pointer.
    push xSI

    ; Save LDTR.
    xor eax, eax
    sldt ax
    push xAX

%ifndef VMX_SKIP_TR
    ; The TR limit is reset to 0x67; restore it manually.
    str eax
    push xAX
%endif

%ifndef VMX_SKIP_GDTR_IDTR
    ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub xSP, xCB * 2
    sgdt [xSP]

    sub xSP, xCB * 2
    sidt [xSP]
%endif

    ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
    mov xBX, [xSI + CPUMCTX.cr2]
    mov xDX, cr2
    cmp xBX, xDX
    je .skipcr2write32
    mov cr2, xBX

.skipcr2write32:
    mov eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ; Note: assumes success!
    ; Don't mess with ESP anymore!!!
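    ; VMX_VMCS_HOST_RSP is the host-state RSP field: on VM-exit the CPU reloads
    ; the stack pointer from it, and the code at .vmlaunch_done expects the
    ; saved IDTR/GDTR/TR/LDTR/pCtx values to sit exactly there. Hence the stack
    ; must not change between this vmwrite and the VMLAUNCH/VMRESUME below.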

    ; Load Guest's general purpose registers.
    mov eax, [xSI + CPUMCTX.eax]
    mov ebx, [xSI + CPUMCTX.ebx]
    mov ecx, [xSI + CPUMCTX.ecx]
    mov edx, [xSI + CPUMCTX.edx]
    mov ebp, [xSI + CPUMCTX.ebp]

    ; Resume or start?
    cmp xDI, 0                  ; fResume
    je .vmlaunch_launch

    ; Restore edi & esi.
    mov edi, [xSI + CPUMCTX.edi]
    mov esi, [xSI + CPUMCTX.esi]

    vmresume
    jmp .vmlaunch_done          ; Here if vmresume detected a failure.

.vmlaunch_launch:
    ; Restore edi & esi.
    mov edi, [xSI + CPUMCTX.edi]
    mov esi, [xSI + CPUMCTX.esi]

    vmlaunch
    jmp .vmlaunch_done          ; Here if vmlaunch detected a failure.

ALIGNCODE(16) ;; @todo YASM BUG - this alignment is wrong on darwin, it's 1 byte off.
.vmlaunch_done:
    jc near .vmxstart_invalid_vmcs_ptr
    jz near .vmxstart_start_failed
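    ; VMX failure convention: CF=1 (VMfailInvalid) means there was no valid
    ; current VMCS pointer, ZF=1 (VMfailValid) means the instruction failed
    ; with an error code in the VM-instruction error field. A successful
    ; VM-exit arrives here with RFLAGS cleared (except bit 1), so both jumps
    ; fall through.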

    ; Restore base and limit of the IDTR & GDTR.
%ifndef VMX_SKIP_GDTR_IDTR
    lidt [xSP]
    add xSP, xCB * 2
    lgdt [xSP]
    add xSP, xCB * 2
%endif

    push xDI
%ifndef VMX_SKIP_TR
    mov xDI, [xSP + xCB * 3]    ; pCtx (*3 to skip the saved xDI, TR, LDTR).
%else
    mov xDI, [xSP + xCB * 2]    ; pCtx (*2 to skip the saved xDI, LDTR).
%endif

    mov [ss:xDI + CPUMCTX.eax], eax
    mov [ss:xDI + CPUMCTX.ebx], ebx
    mov [ss:xDI + CPUMCTX.ecx], ecx
    mov [ss:xDI + CPUMCTX.edx], edx
    mov [ss:xDI + CPUMCTX.esi], esi
    mov [ss:xDI + CPUMCTX.ebp], ebp
%ifndef VBOX_WITH_OLD_VTX_CODE
    mov xAX, cr2
    mov [ss:xDI + CPUMCTX.cr2], xAX
%endif

%ifdef RT_ARCH_AMD64
    pop xAX                     ; The guest edi we pushed above.
    mov dword [ss:xDI + CPUMCTX.edi], eax
%else
    pop dword [ss:xDI + CPUMCTX.edi] ; The guest edi we pushed above.
%endif

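; ltr would raise #GP if the TSS descriptor were still marked busy, so the busy
; bit (bit 9 of the descriptor's second dword, i.e. type bit 1) is cleared in
; the GDT entry before TR is reloaded from the selector saved on entry.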
%ifndef VMX_SKIP_TR
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX                     ; Saved TR
    sub xSP, xCB * 2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h                ; Mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2]          ; eax <- GDTR.address + descriptor offset.
    and dword [ss:xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
    ltr bx
    add xSP, xCB * 2
%endif

    pop xAX                     ; Saved LDTR
%ifdef RT_ARCH_AMD64
    cmp xAX, 0
    je .skipldtwrite32
%endif
    lldt ax

.skipldtwrite32:
    add xSP, xCB                ; pCtx

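; Read back the VM-exit state listed in the VMCS read cache: each field
; encoding in Read.aField is vmread into the corresponding Read.aFieldVal
; slot, so the rest of the VMM can consume the exit information without
; issuing further vmreads.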
%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop xDX                     ; Saved pCache

    mov ecx, [ss:xDX + VMCSCACHE.Read.cValidEntries]
    cmp ecx, 0                  ; Can't happen
    je .no_cached_reads
    jmp .cached_read

ALIGN(16)
.cached_read:
    dec xCX
    mov eax, [ss:xDX + VMCSCACHE.Read.aField + xCX * 4]
    vmread [ss:xDX + VMCSCACHE.Read.aFieldVal + xCX * 8], xAX
    cmp xCX, 0
    jnz .cached_read
.no_cached_reads:

 %ifdef VBOX_WITH_OLD_VTX_CODE
    ; Restore CR2 into VMCS-cache field (for EPT).
    mov xAX, cr2
    mov [ss:xDX + VMCSCACHE.cr2], xAX
 %endif
%endif

    ; Restore segment registers.
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers.
    MYPOPAD

    mov eax, VINF_SUCCESS

.vmstart_end:
    popf
    pop xBP
    ret


.vmxstart_invalid_vmcs_ptr:
    ; Restore base and limit of the IDTR & GDTR.
%ifndef VMX_SKIP_GDTR_IDTR
    lidt [xSP]
    add xSP, xCB * 2
    lgdt [xSP]
    add xSP, xCB * 2
%endif

%ifndef VMX_SKIP_TR
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX                     ; Saved TR
    sub xSP, xCB * 2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h                ; Mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2]          ; eax <- GDTR.address + descriptor offset.
    and dword [ss:xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
    ltr bx
    add xSP, xCB * 2
%endif

    pop xAX                     ; Saved LDTR
    lldt ax                     ; Don't bother with conditional restoration in the error case.

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add xSP, xCB * 2            ; pCtx + pCache
%else
    add xSP, xCB                ; pCtx
%endif

    ; Restore segment registers.
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
    jmp .vmstart_end

.vmxstart_start_failed:
    ; Restore base and limit of the IDTR & GDTR.
%ifndef VMX_SKIP_GDTR_IDTR
    lidt [xSP]
    add xSP, xCB * 2
    lgdt [xSP]
    add xSP, xCB * 2
%endif

%ifndef VMX_SKIP_TR
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX                     ; Saved TR
    sub xSP, xCB * 2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h                ; Mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2]          ; eax <- GDTR.address + descriptor offset.
    and dword [ss:xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
    ltr bx
    add xSP, xCB * 2
%endif

    pop xAX                     ; Saved LDTR
    lldt ax                     ; Don't bother with conditional restoration in the error case.

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add xSP, xCB * 2            ; pCtx + pCache
%else
    add xSP, xCB                ; pCtx
%endif

    ; Restore segment registers.
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_UNABLE_TO_START_VM
    jmp .vmstart_end

ENDPROC MY_NAME(VMXR0StartVM32)

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
; *
; * @returns VBox status code
; * @param   fResume    msc:rcx, gcc:rdi    vmlaunch/vmresume
; * @param   pCtx       msc:rdx, gcc:rsi    Guest context
; * @param   pCache     msc:r8,  gcc:rdx    VMCS cache
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM64)
    push xBP
    mov xBP, xSP

    pushf
    cli

    ; Save all general purpose host registers.
    MYPUSHAD

    ; First we have to save some final CPU context registers.
    lea r10, [.vmlaunch64_done wrt rip]
    mov rax, VMX_VMCS_HOST_RIP  ; Return address (too difficult to continue after VMLAUNCH?).
    vmwrite rax, r10
    ; Note: assumes success!

    ; Save the Guest CPU context pointer.
%ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
    mov rbx, rdx                ; pCache
%else
    mov rdi, rcx                ; fResume
    mov rsi, rdx                ; pCtx
    mov rbx, r8                 ; pCache
%endif

    ; Save segment registers.
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case).
    MYPUSHSEGS xAX, ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
    cmp ecx, 0
    je .no_cached_writes
    mov edx, ecx
    mov ecx, 0
    jmp .cached_write

ALIGN(16)
.cached_write:
    mov eax, [xBX + VMCSCACHE.Write.aField + xCX * 4]
    vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX * 8]
    inc xCX
    cmp xCX, xDX
    jl .cached_write

    mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

    ; Save the pCache pointer.
    push xBX
%endif

%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    ; Save the host MSRs and load the guest MSRs.
    LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
    LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
    LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
%else
 %ifdef VBOX_WITH_OLD_VTX_CODE
    ; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
 %endif
%endif
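; Without the VMCS auto load/store feature the syscall-related MSRs are swapped
; by hand: LOADGUESTMSR (provided by the including HMR0A.asm) saves the host
; value of the given MSR and loads the guest value from the named CPUMCTX
; field; LOADHOSTMSREX/LOADHOSTMSR undo this on the way back to the host.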

    ; Save the pCtx pointer.
    push xSI

    ; Save LDTR.
    xor eax, eax
    sldt ax
    push xAX

%ifndef VMX_SKIP_TR
    ; The TR limit is reset to 0x67; restore it manually.
    str eax
    push xAX
%endif

    ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
%ifndef VMX_SKIP_GDTR_IDTR
    sub xSP, xCB * 2
    sgdt [xSP]

    sub xSP, xCB * 2
    sidt [xSP]
%endif

    ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
    mov rbx, qword [xSI + CPUMCTX.cr2]
    mov rdx, cr2
    cmp rbx, rdx
    je .skipcr2write
    mov cr2, rbx

.skipcr2write:
    mov eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ; Note: assumes success!
    ; Don't mess with ESP anymore!!!

    ; Restore Guest's general purpose registers.
    mov rax, qword [xSI + CPUMCTX.eax]
    mov rbx, qword [xSI + CPUMCTX.ebx]
    mov rcx, qword [xSI + CPUMCTX.ecx]
    mov rdx, qword [xSI + CPUMCTX.edx]
    mov rbp, qword [xSI + CPUMCTX.ebp]
    mov r8, qword [xSI + CPUMCTX.r8]
    mov r9, qword [xSI + CPUMCTX.r9]
    mov r10, qword [xSI + CPUMCTX.r10]
    mov r11, qword [xSI + CPUMCTX.r11]
    mov r12, qword [xSI + CPUMCTX.r12]
    mov r13, qword [xSI + CPUMCTX.r13]
    mov r14, qword [xSI + CPUMCTX.r14]
    mov r15, qword [xSI + CPUMCTX.r15]

    ; Resume or start?
    cmp xDI, 0                  ; fResume
    je .vmlaunch64_launch

    ; Restore rdi & rsi.
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rsi, qword [xSI + CPUMCTX.esi]

    vmresume
    jmp .vmlaunch64_done        ; Here if vmresume detected a failure.

.vmlaunch64_launch:
    ; Restore rdi & rsi.
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rsi, qword [xSI + CPUMCTX.esi]

    vmlaunch
    jmp .vmlaunch64_done        ; Here if vmlaunch detected a failure.

ALIGNCODE(16)
.vmlaunch64_done:
    jc near .vmxstart64_invalid_vmcs_ptr
    jz near .vmxstart64_start_failed

    ; Restore base and limit of the IDTR & GDTR.
%ifndef VMX_SKIP_GDTR_IDTR
    lidt [xSP]
    add xSP, xCB * 2
    lgdt [xSP]
    add xSP, xCB * 2
%endif

    push xDI
%ifndef VMX_SKIP_TR
    mov xDI, [xSP + xCB * 3]    ; pCtx (*3 to skip the saved xDI, TR, LDTR).
%else
    mov xDI, [xSP + xCB * 2]    ; pCtx (*2 to skip the saved xDI, LDTR).
%endif

    mov qword [xDI + CPUMCTX.eax], rax
    mov qword [xDI + CPUMCTX.ebx], rbx
    mov qword [xDI + CPUMCTX.ecx], rcx
    mov qword [xDI + CPUMCTX.edx], rdx
    mov qword [xDI + CPUMCTX.esi], rsi
    mov qword [xDI + CPUMCTX.ebp], rbp
    mov qword [xDI + CPUMCTX.r8], r8
    mov qword [xDI + CPUMCTX.r9], r9
    mov qword [xDI + CPUMCTX.r10], r10
    mov qword [xDI + CPUMCTX.r11], r11
    mov qword [xDI + CPUMCTX.r12], r12
    mov qword [xDI + CPUMCTX.r13], r13
    mov qword [xDI + CPUMCTX.r14], r14
    mov qword [xDI + CPUMCTX.r15], r15
%ifndef VBOX_WITH_OLD_VTX_CODE
    mov rax, cr2
    mov qword [xDI + CPUMCTX.cr2], rax
%endif

    pop xAX                     ; The guest edi we pushed above.
    mov qword [xDI + CPUMCTX.edi], rax

%ifndef VMX_SKIP_TR
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
    ; @todo get rid of sgdt
    pop xBX                     ; Saved TR
    sub xSP, xCB * 2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h                ; Mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2]          ; eax <- GDTR.address + descriptor offset.
    and dword [xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
    ltr bx
    add xSP, xCB * 2
%endif

    pop xAX                     ; Saved LDTR
    cmp xAX, 0
    je .skipldtwrite64
    lldt ax

.skipldtwrite64:
    pop xSI                     ; pCtx (needed in rsi by the macros below)

%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    ; Save the guest MSRs and load the host MSRs.
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    LOADHOSTMSREX MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    LOADHOSTMSREX MSR_K6_STAR, CPUMCTX.msrSTAR
    LOADHOSTMSREX MSR_K8_LSTAR, CPUMCTX.msrLSTAR
%else
 %ifdef VBOX_WITH_OLD_VTX_CODE
    ; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
 %endif
%endif

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop xDX                     ; Saved pCache

    mov ecx, [xDX + VMCSCACHE.Read.cValidEntries]
    cmp ecx, 0                  ; Can't happen
    je .no_cached_reads
    jmp .cached_read

ALIGN(16)
.cached_read:
    dec xCX
    mov eax, [xDX + VMCSCACHE.Read.aField + xCX * 4]
    vmread [xDX + VMCSCACHE.Read.aFieldVal + xCX * 8], xAX
    cmp xCX, 0
    jnz .cached_read
.no_cached_reads:

 %ifdef VBOX_WITH_OLD_VTX_CODE
    ; Restore CR2 into VMCS-cache field (for EPT).
    mov xAX, cr2
    mov [xDX + VMCSCACHE.cr2], xAX
 %endif
%endif

    ; Restore segment registers.
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers.
    MYPOPAD

    mov eax, VINF_SUCCESS

.vmstart64_end:
    popf
    pop xBP
    ret


.vmxstart64_invalid_vmcs_ptr:
    ; Restore base and limit of the IDTR & GDTR.
%ifndef VMX_SKIP_GDTR_IDTR
    lidt [xSP]
    add xSP, xCB * 2
    lgdt [xSP]
    add xSP, xCB * 2
%endif

%ifndef VMX_SKIP_TR
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
    ; @todo get rid of sgdt
    pop xBX                     ; Saved TR
    sub xSP, xCB * 2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h                ; Mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2]          ; eax <- GDTR.address + descriptor offset.
    and dword [xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
    ltr bx
    add xSP, xCB * 2
%endif

    pop xAX                     ; Saved LDTR
    lldt ax                     ; Don't bother with conditional restoration in the error case.

    pop xSI                     ; pCtx (needed in rsi by the macros below)

%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    ; Load the host MSRs. Don't bother saving the guest MSRs as vmlaunch/vmresume failed.
    LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR
    LOADHOSTMSR MSR_K8_LSTAR
%else
 %ifdef VBOX_WITH_OLD_VTX_CODE
    ; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
    LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
 %endif
%endif

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add xSP, xCB                ; pCache
%endif

    ; Restore segment registers.
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
    jmp .vmstart64_end

.vmxstart64_start_failed:
    ; Restore base and limit of the IDTR & GDTR.
%ifndef VMX_SKIP_GDTR_IDTR
    lidt [xSP]
    add xSP, xCB * 2
    lgdt [xSP]
    add xSP, xCB * 2
%endif

%ifndef VMX_SKIP_TR
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
    ; @todo get rid of sgdt
    pop xBX                     ; Saved TR
    sub xSP, xCB * 2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h                ; Mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2]          ; eax <- GDTR.address + descriptor offset.
    and dword [xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
    ltr bx
    add xSP, xCB * 2
%endif

    pop xAX                     ; Saved LDTR
    lldt ax                     ; Don't bother with conditional restoration in the error case.

    pop xSI                     ; pCtx (needed in rsi by the macros below).

%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    ; Load the host MSRs. Don't bother saving the guest MSRs as vmlaunch/vmresume failed.
    LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR
    LOADHOSTMSR MSR_K8_LSTAR
%else
 %ifdef VBOX_WITH_OLD_VTX_CODE
    ; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
    LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
 %endif
%endif

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add xSP, xCB                ; pCache
%endif

    ; Restore segment registers.
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_UNABLE_TO_START_VM
    jmp .vmstart64_end
ENDPROC MY_NAME(VMXR0StartVM64)
%endif ; RT_ARCH_AMD64


;/**
; * Prepares for and executes VMRUN (32-bit guests)
; *
; * @returns VBox status code
; * @param   pVMCBHostPhys   Physical address of host VMCB
; * @param   pVMCBPhys       Physical address of guest VMCB
; * @param   pCtx            Guest context
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun)
%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push rdx
    push rsi
    push rdi
 %else
    push r8
    push rdx
    push rcx
 %endif
    push 0
%endif
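; The pushes above fake a cdecl frame on 64-bit hosts: the three register
; arguments are spilled to the stack together with a zero standing in for the
; return-address slot, so the [xBP + xCB * 2 + ...] parameter offsets used
; below match the 32-bit calling convention; the add xSP, 4*xCB before the ret
; drops these four slots again.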
    push xBP
    mov xBP, xSP
    pushf

    ; Save all general purpose host registers.
    MYPUSHAD

    ; Save the Guest CPU context pointer.
    mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
    push xSI                    ; Push for saving the state at the end.

    ; Save host fs, gs, sysenter msr etc.
    mov xAX, [xBP + xCB * 2]    ; pVMCBHostPhys (64-bit physical address; x86: take low dword only)
    push xAX                    ; Save for the vmload after vmrun.
    vmsave
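    ; VMSAVE stores the host's FS/GS/TR/LDTR state plus the syscall/sysenter
    ; MSRs (STAR family, SYSENTER, KernelGSBase) into the VMCB addressed by
    ; rAX (eAX on 32-bit hosts), here the host VMCB; the matching VMLOAD after
    ; vmrun restores them.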

    ; Set up eax for VMLOAD.
    mov xAX, [xBP + xCB * 2 + RTHCPHYS_CB] ; pVMCBPhys (64-bit physical address; take low dword only)

    ; Restore Guest's general purpose registers.
    ; eax is loaded from the VMCB by VMRUN.
    mov ebx, [xSI + CPUMCTX.ebx]
    mov ecx, [xSI + CPUMCTX.ecx]
    mov edx, [xSI + CPUMCTX.edx]
    mov edi, [xSI + CPUMCTX.edi]
    mov ebp, [xSI + CPUMCTX.ebp]
    mov esi, [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
    clgi
    sti
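    ; CLGI clears the global interrupt flag, holding interrupts off while the
    ; guest state is loaded; VMRUN sets GIF for the guest and #VMEXIT clears it
    ; again, so the cli + stgi after vmrun is what re-enables interrupts for
    ; the host.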

    ; Load guest fs, gs, sysenter msr etc.
    vmload
    ; Run the VM.
    vmrun

    ; eax is in the VMCB already; we can use it here.

    ; Save guest fs, gs, sysenter msr etc.
    vmsave

    ; Load host fs, gs, sysenter msr etc.
    pop xAX                     ; Pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop xAX                     ; pCtx

    mov [ss:xAX + CPUMCTX.ebx], ebx
    mov [ss:xAX + CPUMCTX.ecx], ecx
    mov [ss:xAX + CPUMCTX.edx], edx
    mov [ss:xAX + CPUMCTX.esi], esi
    mov [ss:xAX + CPUMCTX.edi], edi
    mov [ss:xAX + CPUMCTX.ebp], ebp

    ; Restore general purpose registers.
    MYPOPAD

    mov eax, VINF_SUCCESS

    popf
    pop xBP
%ifdef RT_ARCH_AMD64
    add xSP, 4 * xCB
%endif
    ret
ENDPROC MY_NAME(SVMR0VMRun)

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMRUN (64-bit guests)
; *
; * @returns VBox status code
; * @param   pVMCBHostPhys   Physical address of host VMCB
; * @param   pVMCBPhys       Physical address of guest VMCB
; * @param   pCtx            Guest context
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun64)
    ; Fake a cdecl stack frame.
 %ifdef ASM_CALL64_GCC
    push rdx
    push rsi
    push rdi
 %else
    push r8
    push rdx
    push rcx
 %endif
    push 0
    push rbp
    mov rbp, rsp
    pushf

    ; Manual save and restore:
    ; - General purpose registers except RIP, RSP, RAX
    ;
    ; Trashed:
    ; - CR2 (we don't care)
    ; - LDTR (reset to 0)
    ; - DRx (presumably not changed at all)
    ; - DR7 (reset to 0x400)
    ;

    ; Save all general purpose host registers.
    MYPUSHAD

    ; Save the Guest CPU context pointer.
    mov rsi, [rbp + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
    push rsi                    ; Push for saving the state at the end.

    ; Save host fs, gs, sysenter msr etc.
    mov rax, [rbp + xCB * 2]    ; pVMCBHostPhys (64-bit physical address; x86: take low dword only)
    push rax                    ; Save for the vmload after vmrun.
    vmsave

    ; Set up rax for VMLOAD.
    mov rax, [rbp + xCB * 2 + RTHCPHYS_CB] ; pVMCBPhys (64-bit physical address; take low dword only)

    ; Restore Guest's general purpose registers.
    ; rax is loaded from the VMCB by VMRUN.
    mov rbx, qword [xSI + CPUMCTX.ebx]
    mov rcx, qword [xSI + CPUMCTX.ecx]
    mov rdx, qword [xSI + CPUMCTX.edx]
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rbp, qword [xSI + CPUMCTX.ebp]
    mov r8, qword [xSI + CPUMCTX.r8]
    mov r9, qword [xSI + CPUMCTX.r9]
    mov r10, qword [xSI + CPUMCTX.r10]
    mov r11, qword [xSI + CPUMCTX.r11]
    mov r12, qword [xSI + CPUMCTX.r12]
    mov r13, qword [xSI + CPUMCTX.r13]
    mov r14, qword [xSI + CPUMCTX.r14]
    mov r15, qword [xSI + CPUMCTX.r15]
    mov rsi, qword [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
    clgi
    sti

    ; Load guest fs, gs, sysenter msr etc.
    vmload
    ; Run the VM.
    vmrun

    ; rax is in the VMCB already; we can use it here.

    ; Save guest fs, gs, sysenter msr etc.
    vmsave

    ; Load host fs, gs, sysenter msr etc.
    pop rax                     ; Pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop rax                     ; pCtx

    mov qword [rax + CPUMCTX.ebx], rbx
    mov qword [rax + CPUMCTX.ecx], rcx
    mov qword [rax + CPUMCTX.edx], rdx
    mov qword [rax + CPUMCTX.esi], rsi
    mov qword [rax + CPUMCTX.edi], rdi
    mov qword [rax + CPUMCTX.ebp], rbp
    mov qword [rax + CPUMCTX.r8], r8
    mov qword [rax + CPUMCTX.r9], r9
    mov qword [rax + CPUMCTX.r10], r10
    mov qword [rax + CPUMCTX.r11], r11
    mov qword [rax + CPUMCTX.r12], r12
    mov qword [rax + CPUMCTX.r13], r13
    mov qword [rax + CPUMCTX.r14], r14
    mov qword [rax + CPUMCTX.r15], r15

    ; Restore general purpose registers.
    MYPOPAD

    mov eax, VINF_SUCCESS

    popf
    pop rbp
    add rsp, 4 * xCB
    ret
ENDPROC MY_NAME(SVMR0VMRun64)
%endif ; RT_ARCH_AMD64
