VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMR0Mixed.mac @ 46191

Last change on this file since 46191 was 46099, checked in by vboxsync, 12 years ago

VMM/VMMR0: Tidying of the assembly code.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 26.0 KB
; $Id: HMR0Mixed.mac 46099 2013-05-15 14:23:49Z vboxsync $
;; @file
; HMR0Mixed.mac - Stuff that darwin needs to build two versions of.
;
; Included by HMR0A.asm with RT_ARCH_AMD64 defined or undefined.
;

;
; Copyright (C) 2006-2012 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;


;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (32-bit guest mode)
; *
; * @returns VBox status code
; * @param fResume    x86:[ebp+8],  msc:rcx, gcc:rdi     vmlaunch/vmresume
; * @param pCtx       x86:[ebp+c],  msc:rdx, gcc:rsi     Guest context
; * @param pCache     x86:[ebp+10], msc:r8,  gcc:rdx     VMCS cache
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM32)
    push xBP
    mov xBP, xSP

    pushf
    cli

    ; Save all general purpose host registers.
    MYPUSHAD

    ; First we have to save some final CPU context registers.
    mov eax, VMX_VMCS_HOST_RIP
%ifdef RT_ARCH_AMD64
    lea r10, [.vmlaunch_done wrt rip]
    vmwrite rax, r10
%else
    mov ecx, .vmlaunch_done
    vmwrite eax, ecx
%endif
    ; Note: assumes success!
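    ; (VMWRITE reports failure through CF/ZF rather than faulting, hence the
    ; assumption above.  VMX_VMCS_HOST_RIP is the address the CPU loads into
    ; RIP on every VM-exit, so after a successful VMLAUNCH/VMRESUME execution
    ; continues at .vmlaunch_done below.)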

    ; Manual save and restore:
    ; - General purpose registers except RIP, RSP.
    ;
    ; Trashed:
    ; - CR2 (we don't care).
    ; - LDTR (reset to 0).
    ; - DRx (presumably not changed at all).
    ; - DR7 (reset to 0x400).
    ; - EFLAGS (reset to RT_BIT(1); not relevant).

    ; Save the Guest CPU context pointer.
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
    mov rbx, rdx            ; pCache
 %else
    mov rdi, rcx            ; fResume
    mov rsi, rdx            ; pCtx
    mov rbx, r8             ; pCache
 %endif
%else
    mov edi, [ebp + 8]      ; fResume
    mov esi, [ebp + 12]     ; pCtx
    mov ebx, [ebp + 16]     ; pCache
%endif

    ; Save segment registers.
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case).
    MYPUSHSEGS xAX, ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
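    ; Flush any VMCS writes queued in the VMCSCACHE structure: each entry
    ; pairs a VMCS field index (aField) with a 64-bit value (aFieldVal),
    ; and the loop below vmwrites them one by one before entering the guest.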
    mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
    cmp ecx, 0
    je .no_cached_writes
    mov edx, ecx
    mov ecx, 0
    jmp .cached_write

ALIGN(16)
.cached_write:
    mov eax, [xBX + VMCSCACHE.Write.aField + xCX*4]
    vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX*8]
    inc xCX
    cmp xCX, xDX
    jl .cached_write

    mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

    ; Save the pCache pointer.
    push xBX
%endif

    ; Save the pCtx pointer.
    push xSI

    ; Save LDTR.
    xor eax, eax
    sldt ax
    push xAX

    ; The TR limit is reset to 0x67; restore it manually.
    str eax
    push xAX

    ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
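    ; (SGDT/SIDT store a 2-byte limit followed by the base - 6 bytes on x86,
    ; 10 bytes on AMD64 - so two native-sized stack slots (xS*2) are reserved
    ; for each image and released again after the LIDT/LGDT on the way out.)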
    sub xSP, xS*2
    sgdt [xSP]

    sub xSP, xS*2
    sidt [xSP]

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Load DR6 - experiment, not safe!
    mov xBX, [xSI + CPUMCTX.dr6]
    mov dr6, xBX
%endif

    ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
    mov xBX, [xSI + CPUMCTX.cr2]
    mov xDX, cr2
    cmp xBX, xDX
    je .skipcr2write32
    mov cr2, xBX

.skipcr2write32:
    mov eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ; Note: assumes success!
    ; Don't mess with ESP anymore!!!

    ; Load Guest's general purpose registers.
    mov eax, [xSI + CPUMCTX.eax]
    mov ebx, [xSI + CPUMCTX.ebx]
    mov ecx, [xSI + CPUMCTX.ecx]
    mov edx, [xSI + CPUMCTX.edx]
    mov ebp, [xSI + CPUMCTX.ebp]

    ; Resume or start?
    cmp xDI, 0              ; fResume
    je .vmlaunch_launch

    ; Restore edi & esi.
    mov edi, [xSI + CPUMCTX.edi]
    mov esi, [xSI + CPUMCTX.esi]

    vmresume
    jmp .vmlaunch_done      ; Here if vmresume detected a failure.

.vmlaunch_launch:
    ; Restore edi & esi.
    mov edi, [xSI + CPUMCTX.edi]
    mov esi, [xSI + CPUMCTX.esi]

    vmlaunch
    jmp .vmlaunch_done      ; Here if vmlaunch detected a failure.

ALIGNCODE(16) ;; @todo YASM BUG - this alignment is wrong on darwin, it's 1 byte off.
.vmlaunch_done:
    jc near .vmxstart_invalid_vmcs_ptr
    jz near .vmxstart_start_failed
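    ; (A VM-exit clears RFLAGS apart from bit 1, so CF and ZF are both 0 when
    ; we arrive here from the guest.  If VMLAUNCH/VMRESUME failed without
    ; entering the guest, CF=1 means the current VMCS pointer is invalid and
    ; ZF=1 means the VM-instruction error field of the VMCS holds the reason.)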

    ; Restore base and limit of the IDTR & GDTR.
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    push xDI
    mov xDI, [xSP + xS * 3] ; pCtx (*3 to skip the saved LDTR + TR).

    mov [ss:xDI + CPUMCTX.eax], eax
    mov [ss:xDI + CPUMCTX.ebx], ebx
    mov [ss:xDI + CPUMCTX.ecx], ecx
    mov [ss:xDI + CPUMCTX.edx], edx
    mov [ss:xDI + CPUMCTX.esi], esi
    mov [ss:xDI + CPUMCTX.ebp], ebp
%ifndef VBOX_WITH_OLD_VTX_CODE
    mov xAX, cr2
    mov [ss:xDI + CPUMCTX.cr2], xAX
%endif

%ifdef RT_ARCH_AMD64
    pop xAX                 ; The guest edi we pushed above.
    mov dword [ss:xDI + CPUMCTX.edi], eax
%else
    pop dword [ss:xDI + CPUMCTX.edi] ; The guest edi we pushed above.
%endif

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Restore DR6 - experiment, not safe!
    mov xAX, dr6
    mov [ss:xDI + CPUMCTX.dr6], xAX
%endif

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
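    ; (LTR only accepts an available TSS descriptor and raises #GP on a busy
    ; one.  The busy bit is bit 1 of the descriptor type, i.e. bit 9 of the
    ; descriptor's second dword, hence the ~0200h mask applied below before
    ; reloading TR.)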
    pop xBX                 ; Saved TR
    sub xSP, xS*2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h            ; Mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2]      ; eax <- GDTR.address + descriptor offset.
    and dword [ss:xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
    ltr bx
    add xSP, xS*2

    pop xAX                 ; Saved LDTR
    lldt ax

    add xSP, xS             ; pCtx

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop xDX                 ; Saved pCache

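    ; Read the VMCS fields listed in the read cache back into
    ; VMCSCACHE.Read.aFieldVal so the caller can consume them without
    ; issuing further VMREADs.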
    mov ecx, [ss:xDX + VMCSCACHE.Read.cValidEntries]
    cmp ecx, 0              ; Can't happen
    je .no_cached_reads
    jmp .cached_read

ALIGN(16)
.cached_read:
    dec xCX
    mov eax, [ss:xDX + VMCSCACHE.Read.aField + xCX*4]
    vmread [ss:xDX + VMCSCACHE.Read.aFieldVal + xCX*8], xAX
    cmp xCX, 0
    jnz .cached_read
.no_cached_reads:

%ifdef VBOX_WITH_OLD_VTX_CODE
    ; Restore CR2 into VMCS-cache field (for EPT).
    mov xAX, cr2
    mov [ss:xDX + VMCSCACHE.cr2], xAX
%endif
%endif

    ; Restore segment registers.
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers.
    MYPOPAD

    mov eax, VINF_SUCCESS

.vmstart_end:
    popf
    pop xBP
    ret


.vmxstart_invalid_vmcs_ptr:
    ; Restore base and limit of the IDTR & GDTR
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX                 ; Saved TR
    sub xSP, xS*2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h            ; Mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2]      ; eax <- GDTR.address + descriptor offset.
    and dword [ss:xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
    ltr bx
    add xSP, xS*2

    pop xAX                 ; Saved LDTR
    lldt ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add xSP, xS*2           ; pCtx + pCache
%else
    add xSP, xS             ; pCtx
%endif

    ; Restore segment registers.
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
    jmp .vmstart_end

.vmxstart_start_failed:
    ; Restore base and limit of the IDTR & GDTR.
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX                 ; Saved TR
    sub xSP, xS*2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h            ; Mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2]      ; eax <- GDTR.address + descriptor offset.
    and dword [ss:xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
    ltr bx
    add xSP, xS*2

    pop xAX                 ; Saved LDTR
    lldt ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add xSP, xS*2           ; pCtx + pCache
%else
    add xSP, xS             ; pCtx
%endif

    ; Restore segment registers.
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_UNABLE_TO_START_VM
    jmp .vmstart_end

ENDPROC MY_NAME(VMXR0StartVM32)

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
; *
; * @returns VBox status code
; * @param fResume    msc:rcx, gcc:rdi     vmlaunch/vmresume
; * @param pCtx       msc:rdx, gcc:rsi     Guest context
; * @param pCache     msc:r8,  gcc:rdx     VMCS cache
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM64)
    push xBP
    mov xBP, xSP

    pushf
    cli

    ; Save all general purpose host registers.
    MYPUSHAD

    ; First we have to save some final CPU context registers.
    lea r10, [.vmlaunch64_done wrt rip]
    mov rax, VMX_VMCS_HOST_RIP ; Return address (too difficult to continue after VMLAUNCH?).
    vmwrite rax, r10
    ; Note: assumes success!

    ; Manual save and restore:
    ; - General purpose registers except RIP, RSP.
    ;
    ; Trashed:
    ; - CR2 (we don't care).
    ; - LDTR (reset to 0).
    ; - DRx (presumably not changed at all).
    ; - DR7 (reset to 0x400).
    ; - EFLAGS (reset to RT_BIT(1); not relevant).

    ; Save the Guest CPU context pointer.
%ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
    mov rbx, rdx            ; pCache
%else
    mov rdi, rcx            ; fResume
    mov rsi, rdx            ; pCtx
    mov rbx, r8             ; pCache
%endif

    ; Save segment registers.
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case).
    MYPUSHSEGS xAX, ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
    cmp ecx, 0
    je .no_cached_writes
    mov edx, ecx
    mov ecx, 0
    jmp .cached_write

ALIGN(16)
.cached_write:
    mov eax, [xBX + VMCSCACHE.Write.aField + xCX*4]
    vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX*8]
    inc xCX
    cmp xCX, xDX
    jl .cached_write

    mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

    ; Save the pCache pointer.
    push xBX
%endif

%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
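    ; Without the VMCS auto MSR-load/store area the syscall-related MSRs are
    ; swapped by hand: LOADGUESTMSR (presumably defined by the including
    ; HMR0A.asm) saves the host value and loads the guest value from the given
    ; CPUMCTX field; LOADHOSTMSREX reverses this after the VM-exit.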
    ; Save the host MSRs and load the guest MSRs.
    LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
    LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
    LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
%else
%ifdef VBOX_WITH_OLD_VTX_CODE
    ; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
%endif
%endif

    ; Save the pCtx pointer.
    push xSI

    ; Save LDTR.
    xor eax, eax
    sldt ax
    push xAX

    ; The TR limit is reset to 0x67; restore it manually.
    str eax
    push xAX

    ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub xSP, xS*2
    sgdt [xSP]

    sub xSP, xS*2
    sidt [xSP]

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Load DR6 - experiment, not safe!
    mov xBX, [xSI + CPUMCTX.dr6]
    mov dr6, xBX
%endif

    ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
    mov rbx, qword [xSI + CPUMCTX.cr2]
    mov rdx, cr2
    cmp rbx, rdx
    je .skipcr2write
    mov cr2, rbx

.skipcr2write:
    mov eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ; Note: assumes success!
    ; Don't mess with ESP anymore!!!
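    ; (VMX_VMCS_HOST_RSP is the stack pointer the CPU loads on VM-exit, so the
    ; stack layout at this point - IDTR image, GDTR image, TR, LDTR, pCtx,
    ; pCache - must stay exactly as it is until the pops after .vmlaunch64_done.)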

    ; Restore Guest's general purpose registers.
    mov rax, qword [xSI + CPUMCTX.eax]
    mov rbx, qword [xSI + CPUMCTX.ebx]
    mov rcx, qword [xSI + CPUMCTX.ecx]
    mov rdx, qword [xSI + CPUMCTX.edx]
    mov rbp, qword [xSI + CPUMCTX.ebp]
    mov r8,  qword [xSI + CPUMCTX.r8]
    mov r9,  qword [xSI + CPUMCTX.r9]
    mov r10, qword [xSI + CPUMCTX.r10]
    mov r11, qword [xSI + CPUMCTX.r11]
    mov r12, qword [xSI + CPUMCTX.r12]
    mov r13, qword [xSI + CPUMCTX.r13]
    mov r14, qword [xSI + CPUMCTX.r14]
    mov r15, qword [xSI + CPUMCTX.r15]

    ; Resume or start?
    cmp xDI, 0              ; fResume
    je .vmlaunch64_launch

    ; Restore rdi & rsi.
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rsi, qword [xSI + CPUMCTX.esi]

    vmresume
    jmp .vmlaunch64_done    ; Here if vmresume detected a failure.

.vmlaunch64_launch:
    ; Restore rdi & rsi.
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rsi, qword [xSI + CPUMCTX.esi]

    vmlaunch
    jmp .vmlaunch64_done    ; Here if vmlaunch detected a failure.

ALIGNCODE(16)
.vmlaunch64_done:
    jc near .vmxstart64_invalid_vmcs_ptr
    jz near .vmxstart64_start_failed

    ; Restore base and limit of the IDTR & GDTR
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    push xDI
    mov xDI, [xSP + xS * 3] ; pCtx (*3 to skip the saved LDTR + TR)

    mov qword [xDI + CPUMCTX.eax], rax
    mov qword [xDI + CPUMCTX.ebx], rbx
    mov qword [xDI + CPUMCTX.ecx], rcx
    mov qword [xDI + CPUMCTX.edx], rdx
    mov qword [xDI + CPUMCTX.esi], rsi
    mov qword [xDI + CPUMCTX.ebp], rbp
    mov qword [xDI + CPUMCTX.r8], r8
    mov qword [xDI + CPUMCTX.r9], r9
    mov qword [xDI + CPUMCTX.r10], r10
    mov qword [xDI + CPUMCTX.r11], r11
    mov qword [xDI + CPUMCTX.r12], r12
    mov qword [xDI + CPUMCTX.r13], r13
    mov qword [xDI + CPUMCTX.r14], r14
    mov qword [xDI + CPUMCTX.r15], r15
%ifndef VBOX_WITH_OLD_VTX_CODE
    mov rax, cr2
    mov qword [xDI + CPUMCTX.cr2], rax
%endif

    pop xAX                 ; The guest edi we pushed above
    mov qword [xDI + CPUMCTX.edi], rax

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Restore DR6 - experiment, not safe!
    mov xAX, dr6
    mov [xDI + CPUMCTX.dr6], xAX
%endif

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
    ; @todo get rid of sgdt
    pop xBX                 ; Saved TR
    sub xSP, xS*2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h            ; Mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2]      ; eax <- GDTR.address + descriptor offset.
    and dword [xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
    ltr bx
    add xSP, xS*2

    pop xAX                 ; Saved LDTR
    lldt ax

    pop xSI                 ; pCtx (needed in rsi by the macros below)

%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    ; Save the guest MSRs and load the host MSRs.
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    LOADHOSTMSREX MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    LOADHOSTMSREX MSR_K6_STAR, CPUMCTX.msrSTAR
    LOADHOSTMSREX MSR_K8_LSTAR, CPUMCTX.msrLSTAR
%else
%ifdef VBOX_WITH_OLD_VTX_CODE
    ; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
%endif
%endif

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop xDX                 ; Saved pCache

    mov ecx, [xDX + VMCSCACHE.Read.cValidEntries]
    cmp ecx, 0              ; Can't happen
    je .no_cached_reads
    jmp .cached_read

ALIGN(16)
.cached_read:
    dec xCX
    mov eax, [xDX + VMCSCACHE.Read.aField + xCX*4]
    vmread [xDX + VMCSCACHE.Read.aFieldVal + xCX*8], xAX
    cmp xCX, 0
    jnz .cached_read
.no_cached_reads:

%ifdef VBOX_WITH_OLD_VTX_CODE
    ; Restore CR2 into VMCS-cache field (for EPT).
    mov xAX, cr2
    mov [xDX + VMCSCACHE.cr2], xAX
%endif
%endif

    ; Restore segment registers.
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers.
    MYPOPAD

    mov eax, VINF_SUCCESS

.vmstart64_end:
    popf
    pop xBP
    ret


.vmxstart64_invalid_vmcs_ptr:
    ; Restore base and limit of the IDTR & GDTR.
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
    ; @todo get rid of sgdt
    pop xBX                 ; Saved TR
    sub xSP, xS*2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h            ; Mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2]      ; eax <- GDTR.address + descriptor offset.
    and dword [xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
    ltr bx
    add xSP, xS*2

    pop xAX                 ; Saved LDTR
    lldt ax

    pop xSI                 ; pCtx (needed in rsi by the macros below)

%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    ; Load the host MSRs. Don't bother saving the guest MSRs as vmlaunch/vmresume failed.
    LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR
    LOADHOSTMSR MSR_K8_LSTAR
%else
%ifdef VBOX_WITH_OLD_VTX_CODE
    ; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
    LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
%endif
%endif

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add xSP, xS             ; pCache
%endif

    ; Restore segment registers.
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
    jmp .vmstart64_end

.vmxstart64_start_failed:
    ; Restore base and limit of the IDTR & GDTR.
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
    ; @todo get rid of sgdt
    pop xBX                 ; Saved TR
    sub xSP, xS*2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h            ; Mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2]      ; eax <- GDTR.address + descriptor offset.
    and dword [xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
    ltr bx
    add xSP, xS*2

    pop xAX                 ; Saved LDTR
    lldt ax

    pop xSI                 ; pCtx (needed in rsi by the macros below).

%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    ; Load the host MSRs. Don't bother saving the guest MSRs as vmlaunch/vmresume failed.
    LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR
    LOADHOSTMSR MSR_K8_LSTAR
%else
%ifdef VBOX_WITH_OLD_VTX_CODE
    ; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
    LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
%endif
%endif

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add xSP, xS             ; pCache
%endif

    ; Restore segment registers.
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_UNABLE_TO_START_VM
    jmp .vmstart64_end
ENDPROC MY_NAME(VMXR0StartVM64)
%endif ; RT_ARCH_AMD64


;/**
; * Prepares for and executes VMRUN (32-bit guests)
; *
; * @returns VBox status code
; * @param HCPhysVMCB     Physical address of host VMCB
; * @param HCPhysVMCB     Physical address of guest VMCB
; * @param pCtx           Guest context
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun)
%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push rdx
    push rsi
    push rdi
 %else
    push r8
    push rdx
    push rcx
 %endif
    push 0
%endif
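    ; (The pushes above mimic the 32-bit cdecl layout: the register arguments
    ; in reverse order plus a dummy 0 for the return-address slot, so the
    ; parameter offsets [xBP + xS*2 + ...] used below are identical in both
    ; the x86 and AMD64 builds.)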
    push xBP
    mov xBP, xSP
    pushf

    ; Manual save and restore:
    ; - General purpose registers except RIP, RSP, RAX
    ;
    ; Trashed:
    ; - CR2 (we don't care)
    ; - LDTR (reset to 0)
    ; - DRx (presumably not changed at all)
    ; - DR7 (reset to 0x400)

    ; Save all general purpose host registers.
    MYPUSHAD

    ; Save the Guest CPU context pointer.
    mov xSI, [xBP + xS*2 + RTHCPHYS_CB*2] ; pCtx
    push xSI                ; push for saving the state at the end

    ; Save host fs, gs, sysenter msr etc.
    mov xAX, [xBP + xS*2]   ; pVMCBHostPhys (64-bit physical address; x86: take low dword only)
    push xAX                ; save for the vmload after vmrun
    vmsave
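    ; (VMSAVE stores the hidden state of FS, GS, TR and LDTR together with
    ; KERNEL_GS_BASE, the STAR family and the SYSENTER MSRs into the VMCB
    ; addressed by rAX; VMLOAD below restores the same set from the guest VMCB.)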

    ; Setup eax for VMLOAD.
    mov xAX, [xBP + xS*2 + RTHCPHYS_CB] ; pVMCBPhys (64-bit physical address; take low dword only)

    ; Restore Guest's general purpose registers.
    ; eax is loaded from the VMCB by VMRUN.
    mov ebx, [xSI + CPUMCTX.ebx]
    mov ecx, [xSI + CPUMCTX.ecx]
    mov edx, [xSI + CPUMCTX.edx]
    mov edi, [xSI + CPUMCTX.edi]
    mov ebp, [xSI + CPUMCTX.ebp]
    mov esi, [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
    clgi
    sti
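    ; (While GIF is clear, physical interrupts are held pending instead of
    ; being delivered to the host.  Setting EFLAGS.IF here keeps them unmasked
    ; while the guest runs, so an arriving interrupt forces a #VMEXIT,
    ; assuming the VMCB intercepts physical interrupts as the caller sets up.)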

    ; Load guest fs, gs, sysenter msr etc.
    vmload
    ; Run the VM.
    vmrun

    ; eax is in the VMCB already; we can use it here.

    ; Save guest fs, gs, sysenter msr etc.
    vmsave

    ; Load host fs, gs, sysenter msr etc.
    pop xAX                 ; Pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi
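    ; (IF is cleared first so that interrupts which became pending while GIF
    ; was 0 are not taken the moment stgi sets GIF again; they get delivered
    ; once popf restores the caller's flags, if IF was set there.)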

    pop xAX                 ; pCtx

    mov [ss:xAX + CPUMCTX.ebx], ebx
    mov [ss:xAX + CPUMCTX.ecx], ecx
    mov [ss:xAX + CPUMCTX.edx], edx
    mov [ss:xAX + CPUMCTX.esi], esi
    mov [ss:xAX + CPUMCTX.edi], edi
    mov [ss:xAX + CPUMCTX.ebp], ebp

    ; Restore general purpose registers.
    MYPOPAD

    mov eax, VINF_SUCCESS

    popf
    pop xBP
%ifdef RT_ARCH_AMD64
    add xSP, 4*xS
%endif
    ret
ENDPROC MY_NAME(SVMR0VMRun)

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMRUN (64-bit guests)
; *
; * @returns VBox status code
; * @param HCPhysVMCB     Physical address of host VMCB
; * @param HCPhysVMCB     Physical address of guest VMCB
; * @param pCtx           Guest context
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun64)
    ; Fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push rdx
    push rsi
    push rdi
 %else
    push r8
    push rdx
    push rcx
 %endif
    push 0
    push rbp
    mov rbp, rsp
    pushf

    ; Manual save and restore:
    ; - General purpose registers except RIP, RSP, RAX
    ;
    ; Trashed:
    ; - CR2 (we don't care)
    ; - LDTR (reset to 0)
    ; - DRx (presumably not changed at all)
    ; - DR7 (reset to 0x400)
    ;

    ; Save all general purpose host registers.
    MYPUSHAD

    ; Save the Guest CPU context pointer.
    mov rsi, [rbp + xS*2 + RTHCPHYS_CB*2] ; pCtx
    push rsi                ; push for saving the state at the end

    ; Save host fs, gs, sysenter msr etc.
    mov rax, [rbp + xS*2]   ; pVMCBHostPhys (64-bit physical address)
    push rax                ; Save for the vmload after vmrun
    vmsave

    ; Set up rax for VMLOAD.
    mov rax, [rbp + xS*2 + RTHCPHYS_CB] ; pVMCBPhys (64-bit physical address)

    ; Restore Guest's general purpose registers.
    ; rax is loaded from the VMCB by VMRUN.
    mov rbx, qword [xSI + CPUMCTX.ebx]
    mov rcx, qword [xSI + CPUMCTX.ecx]
    mov rdx, qword [xSI + CPUMCTX.edx]
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rbp, qword [xSI + CPUMCTX.ebp]
    mov r8,  qword [xSI + CPUMCTX.r8]
    mov r9,  qword [xSI + CPUMCTX.r9]
    mov r10, qword [xSI + CPUMCTX.r10]
    mov r11, qword [xSI + CPUMCTX.r11]
    mov r12, qword [xSI + CPUMCTX.r12]
    mov r13, qword [xSI + CPUMCTX.r13]
    mov r14, qword [xSI + CPUMCTX.r14]
    mov r15, qword [xSI + CPUMCTX.r15]
    mov rsi, qword [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
    clgi
    sti

    ; Load guest fs, gs, sysenter msr etc.
    vmload
    ; Run the VM.
    vmrun

    ; rax is in the VMCB already; we can use it here.

    ; Save guest fs, gs, sysenter msr etc.
    vmsave

    ; Load host fs, gs, sysenter msr etc.
    pop rax                 ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop rax                 ; pCtx

    mov qword [rax + CPUMCTX.ebx], rbx
    mov qword [rax + CPUMCTX.ecx], rcx
    mov qword [rax + CPUMCTX.edx], rdx
    mov qword [rax + CPUMCTX.esi], rsi
    mov qword [rax + CPUMCTX.edi], rdi
    mov qword [rax + CPUMCTX.ebp], rbp
    mov qword [rax + CPUMCTX.r8], r8
    mov qword [rax + CPUMCTX.r9], r9
    mov qword [rax + CPUMCTX.r10], r10
    mov qword [rax + CPUMCTX.r11], r11
    mov qword [rax + CPUMCTX.r12], r12
    mov qword [rax + CPUMCTX.r13], r13
    mov qword [rax + CPUMCTX.r14], r14
    mov qword [rax + CPUMCTX.r15], r15

    ; Restore general purpose registers.
    MYPOPAD

    mov eax, VINF_SUCCESS

    popf
    pop rbp
    add rsp, 4*xS
    ret
ENDPROC MY_NAME(SVMR0VMRun64)
%endif ; RT_ARCH_AMD64
