source: vbox/trunk/src/VBox/VMM/VMMR0/HMR0Mixed.mac@45852

Last change on this file since 45852 was 45845, checked in by vboxsync, 12 years ago:

VMM/VMMR0: Load kernel-gs base MSR as part of the auto-load/store MSR feature.
VMM/HMVMXR0: Fixed the code to also deal with the case when auto-load/store is not defined.
VMM/VMMR0: Fixed LegacyandAmd64.mac to restore the MSRs from the stack in the right order; not sure how it could have worked previously.
; $Id: HMR0Mixed.mac 45845 2013-04-30 16:38:34Z vboxsync $
;; @file
; HMR0Mixed.mac - Stuff that darwin needs to build two versions of.
;
; Included by HMR0A.asm with RT_ARCH_AMD64 defined or undefined.
;

;
; Copyright (C) 2006-2012 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;


;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (32-bit guest mode)
; *
; * @returns VBox status code
; * @param fResume x86:[ebp+8], msc:rcx,gcc:rdi vmlaunch/vmresume
; * @param pCtx x86:[ebp+c], msc:rdx,gcc:rsi Guest context
; * @param pCache x86:[esp+10],msc:r8, gcc:rdx VMCS cache
; */
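;
; The three ABIs above pass the same three arguments differently: 32-bit hosts
; on the caller's stack (picked up via EBP below), 64-bit MSC in rcx/rdx/r8 and
; 64-bit GCC in rdi/rsi/rdx. The prologue normalizes them into xDI (fResume),
; xSI (pCtx) and xBX (pCache) before any of those registers is clobbered.
;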
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM32)
    push xBP
    mov xBP, xSP

    pushf
    cli

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* First we have to save some final CPU context registers. */
    mov eax, VMX_VMCS_HOST_RIP
%ifdef RT_ARCH_AMD64
    lea r10, [.vmlaunch_done wrt rip]
    vmwrite rax, r10
%else
    mov ecx, .vmlaunch_done
    vmwrite eax, ecx
%endif
    ;/* Note: assumes success... */

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */

    ;/* Save the Guest CPU context pointer. */
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
    mov rbx, rdx                        ; pCache
 %else
    mov rdi, rcx                        ; fResume
    mov rsi, rdx                        ; pCtx
    mov rbx, r8                         ; pCache
 %endif
%else
    mov edi, [ebp + 8]                  ; fResume
    mov esi, [ebp + 12]                 ; pCtx
    mov ebx, [ebp + 16]                 ; pCache
%endif

    ;/* Save segment registers */
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
    MYPUSHSEGS xAX, ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
    cmp ecx, 0
    je .no_cached_writes
    mov edx, ecx
    mov ecx, 0
    jmp .cached_write

ALIGN(16)
.cached_write:
    mov eax, [xBX + VMCSCACHE.Write.aField + xCX*4]
    vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX*8]
    inc xCX
    cmp xCX, xDX
    jl .cached_write

    mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

    ; Save the pCache pointer
    push xBX
%endif
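;
; The cached-write flush above is, roughly, the following C (the VMCSCACHE
; field layout is inferred from the offsets used here, not from the header):
;
;   for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
;       vmwrite(pCache->Write.aField[i],      /* 32-bit VMCS field encoding */
;               pCache->Write.aFieldVal[i]);  /* 64-bit value */
;   pCache->Write.cValidEntries = 0;
;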

    ; Save the pCtx pointer
    push xSI

    ; Save LDTR
    xor eax, eax
    sldt ax
    push xAX

    ; The TR limit is reset to 0x67; restore it manually
    str eax
    push xAX

    ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub xSP, xS*2
    sgdt [xSP]

    sub xSP, xS*2
    sidt [xSP]
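;
; SGDT/SIDT store a 2-byte limit followed by the linear base, so xS*2 bytes
; are reserved for each pseudo-descriptor image; the same images are reloaded
; with LIDT/LGDT after the VM exit. The limit-first layout is also why the
; TR-restore code below reads the GDT base from [xSP + 2] after its own sgdt.
;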

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Restore DR6 - experiment, not safe!
    mov xBX, [xSI + CPUMCTX.dr6]
    mov dr6, xBX
%endif

    ; Restore CR2
    mov xBX, [xSI + CPUMCTX.cr2]
    mov xDX, cr2
    cmp xBX, xDX
    je .skipcr2write32
    mov cr2, xBX

.skipcr2write32:
    mov eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* Note: assumes success... */
    ;/* Don't mess with ESP anymore!! */
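;
; The RSP value just written is what the CPU restores on VM exit, so the stack
; layout seen at .vmlaunch_done must match the stack exactly as it is here;
; pushing or popping anything after this point would make the recorded host
; RSP stale.
;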

    ;/* Restore Guest's general purpose registers. */
    mov eax, [xSI + CPUMCTX.eax]
    mov ebx, [xSI + CPUMCTX.ebx]
    mov ecx, [xSI + CPUMCTX.ecx]
    mov edx, [xSI + CPUMCTX.edx]
    mov ebp, [xSI + CPUMCTX.ebp]

    ; resume or start?
    cmp xDI, 0                          ; fResume
    je .vmlauch_lauch

    ;/* Restore edi & esi. */
    mov edi, [xSI + CPUMCTX.edi]
    mov esi, [xSI + CPUMCTX.esi]

    vmresume
    jmp .vmlaunch_done;                 ;/* here if vmresume detected a failure. */

.vmlauch_lauch:
    ;/* Restore edi & esi. */
    mov edi, [xSI + CPUMCTX.edi]
    mov esi, [xSI + CPUMCTX.esi]

    vmlaunch
    jmp .vmlaunch_done;                 ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16) ;; @todo YASM BUG - this alignment is wrong on darwin, it's 1 byte off.
.vmlaunch_done:
    jc near .vmxstart_invalid_vmxon_ptr
    jz near .vmxstart_start_failed

    ; Restore base and limit of the IDTR & GDTR
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    push xDI
    mov xDI, [xSP + xS * 3]             ; pCtx (*3 to skip the saved LDTR + TR)

    mov [ss:xDI + CPUMCTX.eax], eax
    mov [ss:xDI + CPUMCTX.ebx], ebx
    mov [ss:xDI + CPUMCTX.ecx], ecx
    mov [ss:xDI + CPUMCTX.edx], edx
    mov [ss:xDI + CPUMCTX.esi], esi
    mov [ss:xDI + CPUMCTX.ebp], ebp
%ifndef VBOX_WITH_OLD_VTX_CODE
    mov xAX, cr2
    mov [ss:xDI + CPUMCTX.cr2], xAX
%endif

%ifdef RT_ARCH_AMD64
    pop xAX                             ; the guest edi we pushed above
    mov dword [ss:xDI + CPUMCTX.edi], eax
%else
    pop dword [ss:xDI + CPUMCTX.edi]    ; the guest edi we pushed above
%endif

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Save DR6 - experiment, not safe!
    mov xAX, dr6
    mov [ss:xDI + CPUMCTX.dr6], xAX
%endif

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX                             ; saved TR
    sub xSP, xS*2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h                        ; mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2]                  ; eax <- GDTR.address + descriptor offset.
    and dword [ss:xAX + 4], ~0200h      ; clear busy flag (2nd type2 bit)
    ltr bx
    add xSP, xS*2
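;
; The busy-bit clear above touches the second dword of the TSS descriptor:
; bit 9 of that dword is type bit 1, which distinguishes a busy TSS (type
; 1011b) from an available one (1001b). LTR faults on a busy descriptor,
; hence the AND with ~0200h before reloading TR.
;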

    pop xAX                             ; saved LDTR
    lldt ax

    add xSP, xS                         ; pCtx

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop xDX                             ; saved pCache

    mov ecx, [ss:xDX + VMCSCACHE.Read.cValidEntries]
    cmp ecx, 0                          ; can't happen
    je .no_cached_reads
    jmp .cached_read

ALIGN(16)
.cached_read:
    dec xCX
    mov eax, [ss:xDX + VMCSCACHE.Read.aField + xCX*4]
    vmread [ss:xDX + VMCSCACHE.Read.aFieldVal + xCX*8], xAX
    cmp xCX, 0
    jnz .cached_read
.no_cached_reads:

%ifdef VBOX_WITH_OLD_VTX_CODE
    ; Save CR2 for EPT
    mov xAX, cr2
    mov [ss:xDX + VMCSCACHE.cr2], xAX
%endif
%endif
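;
; The cached-read loop above walks the read list backwards; roughly, in C
; (again with the VMCSCACHE layout inferred from the offsets used here):
;
;   for (uint32_t i = pCache->Read.cValidEntries; i-- > 0; )
;       pCache->Read.aFieldVal[i] = vmread(pCache->Read.aField[i]);
;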

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov eax, VINF_SUCCESS

.vmstart_end:
    popf
    pop xBP
    ret


.vmxstart_invalid_vmxon_ptr:
    ; Restore base and limit of the IDTR & GDTR
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX                             ; saved TR
    sub xSP, xS*2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h                        ; mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2]                  ; eax <- GDTR.address + descriptor offset.
    and dword [ss:xAX + 4], ~0200h      ; clear busy flag (2nd type2 bit)
    ltr bx
    add xSP, xS*2

    pop xAX                             ; saved LDTR
    lldt ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add xSP, xS*2                       ; pCtx + pCache
%else
    add xSP, xS                         ; pCtx
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_INVALID_VMXON_PTR
    jmp .vmstart_end

.vmxstart_start_failed:
    ; Restore base and limit of the IDTR & GDTR
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX                             ; saved TR
    sub xSP, xS*2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h                        ; mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2]                  ; eax <- GDTR.address + descriptor offset.
    and dword [ss:xAX + 4], ~0200h      ; clear busy flag (2nd type2 bit)
    ltr bx
    add xSP, xS*2

    pop xAX                             ; saved LDTR
    lldt ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add xSP, xS*2                       ; pCtx + pCache
%else
    add xSP, xS                         ; pCtx
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_UNABLE_TO_START_VM
    jmp .vmstart_end

ENDPROC MY_NAME(VMXR0StartVM32)

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
; *
; * @returns VBox status code
; * @param fResume msc:rcx, gcc:rdi vmlaunch/vmresume
; * @param pCtx msc:rdx, gcc:rsi Guest context
; * @param pCache msc:r8, gcc:rdx VMCS cache
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM64)
    push xBP
    mov xBP, xSP

    pushf
    cli

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* First we have to save some final CPU context registers. */
    lea r10, [.vmlaunch64_done wrt rip]
    mov rax, VMX_VMCS_HOST_RIP          ;/* return address (too difficult to continue after VMLAUNCH?) */
    vmwrite rax, r10
    ;/* Note: assumes success... */

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */

    ;/* Save the Guest CPU context pointer. */
%ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
    mov rbx, rdx                        ; pCache
%else
    mov rdi, rcx                        ; fResume
    mov rsi, rdx                        ; pCtx
    mov rbx, r8                         ; pCache
%endif

    ;/* Save segment registers */
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
    MYPUSHSEGS xAX, ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
    cmp ecx, 0
    je .no_cached_writes
    mov edx, ecx
    mov ecx, 0
    jmp .cached_write

ALIGN(16)
.cached_write:
    mov eax, [xBX + VMCSCACHE.Write.aField + xCX*4]
    vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX*8]
    inc xCX
    cmp xCX, xDX
    jl .cached_write

    mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

    ; Save the pCache pointer
    push xBX
%endif

%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    ; Save the host MSRs and load the guest MSRs
    LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
    LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
    LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
%endif
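;
; LOADGUESTMSR / LOADHOSTMSREX are defined in HMR0A.asm, not in this file;
; presumably they read and stash the current host MSR value and wrmsr the
; guest value from CPUMCTX (and the reverse on the way out). They are only
; used when the VT-x auto-load/store MSR area is not in use, i.e. when
; VBOX_WITH_AUTO_MSR_LOAD_RESTORE is undefined.
;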

    ; Save the pCtx pointer
    push xSI

    ; Save LDTR
    xor eax, eax
    sldt ax
    push xAX

    ; The TR limit is reset to 0x67; restore it manually
    str eax
    push xAX

    ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub xSP, xS*2
    sgdt [xSP]

    sub xSP, xS*2
    sidt [xSP]

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Restore DR6 - experiment, not safe!
    mov xBX, [xSI + CPUMCTX.dr6]
    mov dr6, xBX
%endif

    ; Restore CR2
    mov rbx, qword [xSI + CPUMCTX.cr2]
    mov rdx, cr2
    cmp rbx, rdx
    je .skipcr2write
    mov cr2, rbx

.skipcr2write:
    mov eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* Note: assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov rax, qword [xSI + CPUMCTX.eax]
    mov rbx, qword [xSI + CPUMCTX.ebx]
    mov rcx, qword [xSI + CPUMCTX.ecx]
    mov rdx, qword [xSI + CPUMCTX.edx]
    mov rbp, qword [xSI + CPUMCTX.ebp]
    mov r8, qword [xSI + CPUMCTX.r8]
    mov r9, qword [xSI + CPUMCTX.r9]
    mov r10, qword [xSI + CPUMCTX.r10]
    mov r11, qword [xSI + CPUMCTX.r11]
    mov r12, qword [xSI + CPUMCTX.r12]
    mov r13, qword [xSI + CPUMCTX.r13]
    mov r14, qword [xSI + CPUMCTX.r14]
    mov r15, qword [xSI + CPUMCTX.r15]
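;
; Note: the CPUMCTX field names used here are the legacy 32-bit ones (eax,
; ebx, ...), but in this 64-bit path they are read and written as full 64-bit
; quantities, as the qword operand size above and in the exit path shows.
;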

    ; resume or start?
    cmp xDI, 0                          ; fResume
    je .vmlauch64_lauch

    ;/* Restore rdi & rsi. */
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rsi, qword [xSI + CPUMCTX.esi]

    vmresume
    jmp .vmlaunch64_done;               ;/* here if vmresume detected a failure. */

.vmlauch64_lauch:
    ;/* Restore rdi & rsi. */
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rsi, qword [xSI + CPUMCTX.esi]

    vmlaunch
    jmp .vmlaunch64_done;               ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16)
.vmlaunch64_done:
    jc near .vmxstart64_invalid_vmxon_ptr
    jz near .vmxstart64_start_failed

    ; Restore base and limit of the IDTR & GDTR
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    push xDI
    mov xDI, [xSP + xS * 3]             ; pCtx (*3 to skip the saved LDTR + TR)

    mov qword [xDI + CPUMCTX.eax], rax
    mov qword [xDI + CPUMCTX.ebx], rbx
    mov qword [xDI + CPUMCTX.ecx], rcx
    mov qword [xDI + CPUMCTX.edx], rdx
    mov qword [xDI + CPUMCTX.esi], rsi
    mov qword [xDI + CPUMCTX.ebp], rbp
    mov qword [xDI + CPUMCTX.r8], r8
    mov qword [xDI + CPUMCTX.r9], r9
    mov qword [xDI + CPUMCTX.r10], r10
    mov qword [xDI + CPUMCTX.r11], r11
    mov qword [xDI + CPUMCTX.r12], r12
    mov qword [xDI + CPUMCTX.r13], r13
    mov qword [xDI + CPUMCTX.r14], r14
    mov qword [xDI + CPUMCTX.r15], r15
%ifndef VBOX_WITH_OLD_VTX_CODE
    mov rax, cr2
    mov qword [xDI + CPUMCTX.cr2], rax
%endif

    pop xAX                             ; the guest edi we pushed above
    mov qword [xDI + CPUMCTX.edi], rax

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Save DR6 - experiment, not safe!
    mov xAX, dr6
    mov [xDI + CPUMCTX.dr6], xAX
%endif

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX                             ; saved TR
    sub xSP, xS*2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h                        ; mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2]                  ; eax <- GDTR.address + descriptor offset.
    and dword [xAX + 4], ~0200h         ; clear busy flag (2nd type2 bit)
    ltr bx
    add xSP, xS*2

    pop xAX                             ; saved LDTR
    lldt ax

    pop xSI                             ; pCtx (needed in rsi by the macros below)

%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    ; Save the guest MSRs and load the host MSRs
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    LOADHOSTMSREX MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    LOADHOSTMSREX MSR_K6_STAR, CPUMCTX.msrSTAR
    LOADHOSTMSREX MSR_K8_LSTAR, CPUMCTX.msrLSTAR
%endif

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop xDX                             ; saved pCache

    mov ecx, [xDX + VMCSCACHE.Read.cValidEntries]
    cmp ecx, 0                          ; can't happen
    je .no_cached_reads
    jmp .cached_read

ALIGN(16)
.cached_read:
    dec xCX
    mov eax, [xDX + VMCSCACHE.Read.aField + xCX*4]
    vmread [xDX + VMCSCACHE.Read.aFieldVal + xCX*8], xAX
    cmp xCX, 0
    jnz .cached_read
.no_cached_reads:

%ifdef VBOX_WITH_OLD_VTX_CODE
    ; Save CR2 for EPT
    mov xAX, cr2
    mov [xDX + VMCSCACHE.cr2], xAX
%endif
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov eax, VINF_SUCCESS

.vmstart64_end:
    popf
    pop xBP
    ret


.vmxstart64_invalid_vmxon_ptr:
    ; Restore base and limit of the IDTR & GDTR
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX                             ; saved TR
    sub xSP, xS*2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h                        ; mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2]                  ; eax <- GDTR.address + descriptor offset.
    and dword [xAX + 4], ~0200h         ; clear busy flag (2nd type2 bit)
    ltr bx
    add xSP, xS*2

    pop xAX                             ; saved LDTR
    lldt ax

    pop xSI                             ; pCtx (needed in rsi by the macros below)

%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    ; Load the host MSRs
    LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR
    LOADHOSTMSR MSR_K8_LSTAR
%endif

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add xSP, xS                         ; pCache
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_INVALID_VMXON_PTR
    jmp .vmstart64_end

.vmxstart64_start_failed:
    ; Restore base and limit of the IDTR & GDTR
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX                             ; saved TR
    sub xSP, xS*2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h                        ; mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2]                  ; eax <- GDTR.address + descriptor offset.
    and dword [xAX + 4], ~0200h         ; clear busy flag (2nd type2 bit)
    ltr bx
    add xSP, xS*2

    pop xAX                             ; saved LDTR
    lldt ax

    pop xSI                             ; pCtx (needed in rsi by the macros below)

%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    ; Load the host MSRs
    LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR
    LOADHOSTMSR MSR_K8_LSTAR
%endif

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add xSP, xS                         ; pCache
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_UNABLE_TO_START_VM
    jmp .vmstart64_end
ENDPROC MY_NAME(VMXR0StartVM64)
%endif ; RT_ARCH_AMD64


;/**
; * Prepares for and executes VMRUN (32-bit guests)
; *
; * @returns VBox status code
; * @param HCPhysVMCB Physical address of host VMCB
; * @param HCPhysVMCB Physical address of guest VMCB
; * @param pCtx Guest context
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun)
%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push rdx
    push rsi
    push rdi
 %else
    push r8
    push rdx
    push rcx
 %endif
    push 0
%endif
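;
; On 64-bit hosts the three register arguments are pushed in reverse order,
; followed by a dummy return-address slot (push 0), so that the body below
; can address them exactly like the 32-bit cdecl case via [xBP + xS*2 ...];
; the pushes are undone with 'add xSP, 4*xS' just before the ret.
;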
    push xBP
    mov xBP, xSP
    pushf

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
    mov xSI, [xBP + xS*2 + RTHCPHYS_CB*2] ; pCtx
    push xSI                            ; push for saving the state at the end

    ; save host fs, gs, sysenter msr etc
    mov xAX, [xBP + xS*2]               ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
    push xAX                            ; save for the vmload after vmrun
    vmsave

    ; setup eax for VMLOAD
    mov xAX, [xBP + xS*2 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address; take low dword only)

    ;/* Restore Guest's general purpose registers. */
    ;/* EAX is loaded from the VMCB by VMRUN */
    mov ebx, [xSI + CPUMCTX.ebx]
    mov ecx, [xSI + CPUMCTX.ecx]
    mov edx, [xSI + CPUMCTX.edx]
    mov edi, [xSI + CPUMCTX.edi]
    mov ebp, [xSI + CPUMCTX.ebp]
    mov esi, [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    clgi
    sti
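;
; CLGI clears AMD-V's global interrupt flag, so the STI above cannot let an
; interrupt in before the STGI below; the vmload/vmrun/vmsave sequence runs
; without the host being interrupted, while leaving IF=1 ensures a pending
; external interrupt forces a world switch out of the guest rather than being
; held pending (see the comment above).
;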

    ; load guest fs, gs, sysenter msr etc
    vmload
    ; run the VM
    vmrun

    ;/* EAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    vmsave

    ; load host fs, gs, sysenter msr etc
    pop xAX                             ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop xAX                             ; pCtx

    mov [ss:xAX + CPUMCTX.ebx], ebx
    mov [ss:xAX + CPUMCTX.ecx], ecx
    mov [ss:xAX + CPUMCTX.edx], edx
    mov [ss:xAX + CPUMCTX.esi], esi
    mov [ss:xAX + CPUMCTX.edi], edi
    mov [ss:xAX + CPUMCTX.ebp], ebp

    ; Restore general purpose registers
    MYPOPAD

    mov eax, VINF_SUCCESS

    popf
    pop xBP
%ifdef RT_ARCH_AMD64
    add xSP, 4*xS
%endif
    ret
ENDPROC MY_NAME(SVMR0VMRun)

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMRUN (64-bit guests)
; *
; * @returns VBox status code
; * @param HCPhysVMCB Physical address of host VMCB
; * @param HCPhysVMCB Physical address of guest VMCB
; * @param pCtx Guest context
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun64)
    ; fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push rdx
    push rsi
    push rdi
 %else
    push r8
    push rdx
    push rcx
 %endif
    push 0
    push rbp
    mov rbp, rsp
    pushf

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
    mov rsi, [rbp + xS*2 + RTHCPHYS_CB*2] ; pCtx
    push rsi                            ; push for saving the state at the end

    ; save host fs, gs, sysenter msr etc
    mov rax, [rbp + xS*2]               ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
    push rax                            ; save for the vmload after vmrun
    vmsave

    ; setup eax for VMLOAD
    mov rax, [rbp + xS*2 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address; take low dword only)

    ;/* Restore Guest's general purpose registers. */
    ;/* RAX is loaded from the VMCB by VMRUN */
    mov rbx, qword [xSI + CPUMCTX.ebx]
    mov rcx, qword [xSI + CPUMCTX.ecx]
    mov rdx, qword [xSI + CPUMCTX.edx]
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rbp, qword [xSI + CPUMCTX.ebp]
    mov r8, qword [xSI + CPUMCTX.r8]
    mov r9, qword [xSI + CPUMCTX.r9]
    mov r10, qword [xSI + CPUMCTX.r10]
    mov r11, qword [xSI + CPUMCTX.r11]
    mov r12, qword [xSI + CPUMCTX.r12]
    mov r13, qword [xSI + CPUMCTX.r13]
    mov r14, qword [xSI + CPUMCTX.r14]
    mov r15, qword [xSI + CPUMCTX.r15]
    mov rsi, qword [xSI + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    clgi
    sti

    ; load guest fs, gs, sysenter msr etc
    vmload
    ; run the VM
    vmrun

    ;/* RAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    vmsave

    ; load host fs, gs, sysenter msr etc
    pop rax                             ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop rax                             ; pCtx

    mov qword [rax + CPUMCTX.ebx], rbx
    mov qword [rax + CPUMCTX.ecx], rcx
    mov qword [rax + CPUMCTX.edx], rdx
    mov qword [rax + CPUMCTX.esi], rsi
    mov qword [rax + CPUMCTX.edi], rdi
    mov qword [rax + CPUMCTX.ebp], rbp
    mov qword [rax + CPUMCTX.r8], r8
    mov qword [rax + CPUMCTX.r9], r9
    mov qword [rax + CPUMCTX.r10], r10
    mov qword [rax + CPUMCTX.r11], r11
    mov qword [rax + CPUMCTX.r12], r12
    mov qword [rax + CPUMCTX.r13], r13
    mov qword [rax + CPUMCTX.r14], r14
    mov qword [rax + CPUMCTX.r15], r15

    ; Restore general purpose registers
    MYPOPAD

    mov eax, VINF_SUCCESS

    popf
    pop rbp
    add rsp, 4*xS
    ret
ENDPROC MY_NAME(SVMR0VMRun64)
%endif ; RT_ARCH_AMD64
