; $Id: HWACCMR0Mixed.mac 22079 2009-08-07 16:26:25Z vboxsync $
;; @file
; HWACCMR0Mixed.mac - Stuff that darwin needs to build two versions of.
;
; Included by HWACCMR0A.asm with RT_ARCH_AMD64 defined or undefined.
;

;
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;


;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (32-bit guest mode)
; *
; * @returns VBox status code
; * @param fResume    x86:[ebp+8],  msc:rcx, gcc:rdi     vmlaunch/vmresume
; * @param pCtx       x86:[ebp+c],  msc:rdx, gcc:rsi     Guest context
; * @param pCache     x86:[esp+10], msc:r8,  gcc:rdx     VMCS cache
; */
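;
; A C-level view of this entry point, as suggested by the parameter list above
; (a sketch only; the authoritative prototype lives in the VMM headers):
;     DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache);
;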
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM32)
    push xBP
    mov xBP, xSP

    pushf
    cli

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* First we have to save some final CPU context registers. */
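    ; The VMCS host-state RIP field is what the CPU loads into RIP on the next
    ; VM exit, so pointing it at .vmlaunch_done sends every exit to the common
    ; exit path below.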
    mov eax, VMX_VMCS_HOST_RIP
%ifdef RT_ARCH_AMD64
    lea r10, [.vmlaunch_done wrt rip]
    vmwrite rax, r10
%else
    mov ecx, .vmlaunch_done
    vmwrite eax, ecx
%endif
    ;/* Note: assumes success... */

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */

    ;/* Save the Guest CPU context pointer. */
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
    mov rbx, rdx ; pCache
 %else
    mov rdi, rcx ; fResume
    mov rsi, rdx ; pCtx
    mov rbx, r8  ; pCache
 %endif
%else
    mov edi, [ebp + 8]  ; fResume
    mov esi, [ebp + 12] ; pCtx
    mov ebx, [ebp + 16] ; pCache
%endif

    ;/* Save segment registers */
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
    MYPUSHSEGS xAX, ax

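    ; When cached VMCS accesses are enabled, VMCS writes queued in VMCSCACHE.Write
    ; since the last world switch are flushed into the current VMCS with VMWRITE
    ; here, after which the entry count is reset.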
%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
    cmp ecx, 0
    je .no_cached_writes
    mov edx, ecx
    mov ecx, 0
    jmp .cached_write

ALIGN(16)
.cached_write:
    mov eax, [xBX + VMCSCACHE.Write.aField + xCX*4]
    vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX*8]
    inc xCX
    cmp xCX, xDX
    jl .cached_write

    mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

    ; Save the pCache pointer
    push xBX
%endif

    ; Save the pCtx pointer
    push xSI

    ; Save LDTR
    xor eax, eax
    sldt ax
    push xAX

    ; The TR limit is reset to 0x67; restore it manually
    str eax
    push xAX

    ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub xSP, xS*2
    sgdt [xSP]

    sub xSP, xS*2
    sidt [xSP]

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Restore DR6 - experiment, not safe!
    mov xBX, [xSI + CPUMCTX.dr6]
    mov dr6, xBX
%endif

    ; Restore CR2
    mov ebx, [xSI + CPUMCTX.cr2]
    mov cr2, xBX

    mov eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* Note: assumes success... */
    ;/* Don't mess with ESP anymore!! */
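    ; The VMCS host-state RSP field is loaded into RSP on the next VM exit, so the
    ; stack exactly as laid out here (IDTR, GDTR, TR, LDTR, pCtx and, if enabled,
    ; pCache) is what the exit path below unwinds - hence the warning above.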

    ;/* Restore Guest's general purpose registers. */
    mov eax, [xSI + CPUMCTX.eax]
    mov ebx, [xSI + CPUMCTX.ebx]
    mov ecx, [xSI + CPUMCTX.ecx]
    mov edx, [xSI + CPUMCTX.edx]
    mov ebp, [xSI + CPUMCTX.ebp]

    ; resume or start?
    cmp xDI, 0 ; fResume
    je .vmlaunch_launch

    ;/* Restore edi & esi. */
    mov edi, [xSI + CPUMCTX.edi]
    mov esi, [xSI + CPUMCTX.esi]

    vmresume
    jmp .vmlaunch_done ;/* here if vmresume detected a failure. */

.vmlaunch_launch:
    ;/* Restore edi & esi. */
    mov edi, [xSI + CPUMCTX.edi]
    mov esi, [xSI + CPUMCTX.esi]

    vmlaunch
    jmp .vmlaunch_done ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16) ;; @todo YASM BUG - this alignment is wrong on darwin, it's 1 byte off.
.vmlaunch_done:
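    ; VMX failure convention: if VMLAUNCH/VMRESUME itself fails, execution falls
    ; through to the jump above and arrives here with CF=1 (VMfailInvalid: no valid
    ; current VMCS) or ZF=1 (VMfailValid: error number in the VM-instruction error
    ; field); a normal VM exit lands here with both flags clear.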
    jc near .vmxstart_invalid_vmxon_ptr
    jz near .vmxstart_start_failed

    ; Restore base and limit of the IDTR & GDTR
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    push xDI
    mov xDI, [xSP + xS * 3] ; pCtx (*3 to skip the saved LDTR + TR)

    mov [ss:xDI + CPUMCTX.eax], eax
    mov [ss:xDI + CPUMCTX.ebx], ebx
    mov [ss:xDI + CPUMCTX.ecx], ecx
    mov [ss:xDI + CPUMCTX.edx], edx
    mov [ss:xDI + CPUMCTX.esi], esi
    mov [ss:xDI + CPUMCTX.ebp], ebp
%ifdef RT_ARCH_AMD64
    pop xAX ; the guest edi we pushed above
    mov dword [ss:xDI + CPUMCTX.edi], eax
%else
    pop dword [ss:xDI + CPUMCTX.edi] ; the guest edi we pushed above
%endif

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Save DR6 - experiment, not safe!
    mov xAX, dr6
    mov [ss:xDI + CPUMCTX.dr6], xAX
%endif

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
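    ; (LTR requires an available, i.e. non-busy, TSS descriptor and raises #GP
    ;  otherwise; the descriptor in the GDT is still marked busy from before the
    ;  VM entry, so the busy bit is cleared by hand before TR is reloaded.)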
    pop xBX ; saved TR
%ifndef RT_ARCH_AMD64
    sub xSP, xS*2
    sgdt [xSP]
    mov eax, ebx
    and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
    and dword [ss:xAX + 4], ~0200h ; clear busy flag (2nd type2 bit)
    ltr bx
    add xSP, xS*2
%endif

    pop xAX ; saved LDTR
    lldt ax

    add xSP, xS ; pCtx

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop xDX ; saved pCache

    mov ecx, [ss:xDX + VMCSCACHE.Read.cValidEntries]
    cmp ecx, 0 ; can't happen
    je .no_cached_reads
    jmp .cached_read

ALIGN(16)
.cached_read:
    dec xCX
    mov eax, [ss:xDX + VMCSCACHE.Read.aField + xCX*4]
    vmread [ss:xDX + VMCSCACHE.Read.aFieldVal + xCX*8], xAX
    cmp xCX, 0
    jnz .cached_read
.no_cached_reads:

    ; Save CR2 for EPT
    mov xAX, cr2
    mov [ss:xDX + VMCSCACHE.cr2], xAX
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov eax, VINF_SUCCESS

.vmstart_end:
    popf
    pop xBP
    ret


.vmxstart_invalid_vmxon_ptr:
    ; Restore base and limit of the IDTR & GDTR
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX ; saved TR
%ifndef RT_ARCH_AMD64
    sub xSP, xS*2
    sgdt [xSP]
    mov eax, ebx
    and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
    add eax, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
    and dword [ss:eax + 4], ~0200h ; clear busy flag (2nd type2 bit)
    ltr bx
    add xSP, xS*2
%endif

    pop xAX ; saved LDTR
    lldt ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add xSP, xS*2 ; pCtx + pCache
%else
    add xSP, xS ; pCtx
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_INVALID_VMXON_PTR
    jmp .vmstart_end

.vmxstart_start_failed:
    ; Restore base and limit of the IDTR & GDTR
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX ; saved TR
%ifndef RT_ARCH_AMD64
    sub xSP, xS*2
    sgdt [xSP]
    mov eax, ebx
    and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
    add eax, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
    and dword [ss:eax + 4], ~0200h ; clear busy flag (2nd type2 bit)
    ltr bx
    add xSP, xS*2
%endif

    pop xAX ; saved LDTR
    lldt ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add xSP, xS*2 ; pCtx + pCache
%else
    add xSP, xS ; pCtx
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_UNABLE_TO_START_VM
    jmp .vmstart_end

ENDPROC MY_NAME(VMXR0StartVM32)

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
; *
; * @returns VBox status code
; * @param fResume    msc:rcx, gcc:rdi     vmlaunch/vmresume
; * @param pCtx       msc:rdx, gcc:rsi     Guest context
; * @param pCache     msc:r8,  gcc:rdx     VMCS cache
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM64)
    push xBP
    mov xBP, xSP

    pushf
    cli

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* First we have to save some final CPU context registers. */
    lea r10, [.vmlaunch64_done wrt rip]
    mov rax, VMX_VMCS_HOST_RIP ;/* return address (too difficult to continue after VMLAUNCH?) */
    vmwrite rax, r10
    ;/* Note: assumes success... */

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */

    ;/* Save the Guest CPU context pointer. */
%ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
    mov rbx, rdx ; pCache
%else
    mov rdi, rcx ; fResume
    mov rsi, rdx ; pCtx
    mov rbx, r8  ; pCache
%endif

    ;/* Save segment registers */
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
    MYPUSHSEGS xAX, ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
    cmp ecx, 0
    je .no_cached_writes
    mov edx, ecx
    mov ecx, 0
    jmp .cached_write

ALIGN(16)
.cached_write:
    mov eax, [xBX + VMCSCACHE.Write.aField + xCX*4]
    vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX*8]
    inc xCX
    cmp xCX, xDX
    jl .cached_write

    mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

    ; Save the pCache pointer
    push xBX
%endif

    ; Save the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs and restore the guest MSRs
    ;; @todo use the automatic load feature for MSRs
    LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
%if 0 ; not supported on Intel CPUs
    LOADGUESTMSR MSR_K8_CSTAR, CPUMCTX.msrCSTAR
%endif
    LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
    LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
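    ; These syscall-related MSRs are not part of the guest/host state that VMX
    ; switches automatically, so they are swapped by hand here and swapped back on
    ; every exit path below (the @todo above refers to the VM-entry/exit MSR-load
    ; areas, which could do this in hardware).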

    ; Save the pCtx pointer
    push xSI

    ; Save LDTR
    xor eax, eax
    sldt ax
    push xAX

    ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub xSP, xS*2
    sgdt [xSP]

    sub xSP, xS*2
    sidt [xSP]

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Restore DR6 - experiment, not safe!
    mov xBX, [xSI + CPUMCTX.dr6]
    mov dr6, xBX
%endif

    ; Restore CR2
    mov rbx, qword [xSI + CPUMCTX.cr2]
    mov cr2, rbx

    mov eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ;/* Note: assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov rax, qword [xSI + CPUMCTX.eax]
    mov rbx, qword [xSI + CPUMCTX.ebx]
    mov rcx, qword [xSI + CPUMCTX.ecx]
    mov rdx, qword [xSI + CPUMCTX.edx]
    mov rbp, qword [xSI + CPUMCTX.ebp]
    mov r8, qword [xSI + CPUMCTX.r8]
    mov r9, qword [xSI + CPUMCTX.r9]
    mov r10, qword [xSI + CPUMCTX.r10]
    mov r11, qword [xSI + CPUMCTX.r11]
    mov r12, qword [xSI + CPUMCTX.r12]
    mov r13, qword [xSI + CPUMCTX.r13]
    mov r14, qword [xSI + CPUMCTX.r14]
    mov r15, qword [xSI + CPUMCTX.r15]

    ; resume or start?
    cmp xDI, 0 ; fResume
    je .vmlaunch64_launch

    ;/* Restore rdi & rsi. */
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rsi, qword [xSI + CPUMCTX.esi]

    vmresume
    jmp .vmlaunch64_done ;/* here if vmresume detected a failure. */

.vmlaunch64_launch:
    ;/* Restore rdi & rsi. */
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rsi, qword [xSI + CPUMCTX.esi]

    vmlaunch
    jmp .vmlaunch64_done ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16)
.vmlaunch64_done:
    jc near .vmxstart64_invalid_vmxon_ptr
    jz near .vmxstart64_start_failed

    ; Restore base and limit of the IDTR & GDTR
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    push xDI
    mov xDI, [xSP + xS * 2] ; pCtx (*2 to skip the saved LDTR)

    mov qword [xDI + CPUMCTX.eax], rax
    mov qword [xDI + CPUMCTX.ebx], rbx
    mov qword [xDI + CPUMCTX.ecx], rcx
    mov qword [xDI + CPUMCTX.edx], rdx
    mov qword [xDI + CPUMCTX.esi], rsi
    mov qword [xDI + CPUMCTX.ebp], rbp
    mov qword [xDI + CPUMCTX.r8], r8
    mov qword [xDI + CPUMCTX.r9], r9
    mov qword [xDI + CPUMCTX.r10], r10
    mov qword [xDI + CPUMCTX.r11], r11
    mov qword [xDI + CPUMCTX.r12], r12
    mov qword [xDI + CPUMCTX.r13], r13
    mov qword [xDI + CPUMCTX.r14], r14
    mov qword [xDI + CPUMCTX.r15], r15

    pop xAX ; the guest edi we pushed above
    mov qword [xDI + CPUMCTX.edi], rax

%ifdef VBOX_WITH_DR6_EXPERIMENT
    ; Save DR6 - experiment, not safe!
    mov xAX, dr6
    mov [xDI + CPUMCTX.dr6], xAX
%endif

    pop xAX ; saved LDTR
    lldt ax

    pop xSI ; pCtx (needed in rsi by the macros below)

    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    ;; @todo use the automatic load feature for MSRs
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    LOADHOSTMSREX MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    LOADHOSTMSREX MSR_K6_STAR, CPUMCTX.msrSTAR
%if 0 ; not supported on Intel CPUs
    LOADHOSTMSREX MSR_K8_CSTAR, CPUMCTX.msrCSTAR
%endif
    LOADHOSTMSREX MSR_K8_LSTAR, CPUMCTX.msrLSTAR
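    ; (LOADHOSTMSREX presumably also stores the current guest value of each MSR back
    ;  into CPUMCTX before restoring the host value - the macros are defined by the
    ;  including HWACCMR0A.asm - which would explain why the failure paths below use
    ;  the plain LOADHOSTMSR variant for everything except KERNEL_GS_BASE.)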

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop xDX ; saved pCache

    mov ecx, [xDX + VMCSCACHE.Read.cValidEntries]
    cmp ecx, 0 ; can't happen
    je .no_cached_reads
    jmp .cached_read

ALIGN(16)
.cached_read:
    dec xCX
    mov eax, [xDX + VMCSCACHE.Read.aField + xCX*4]
    vmread [xDX + VMCSCACHE.Read.aFieldVal + xCX*8], xAX
    cmp xCX, 0
    jnz .cached_read
.no_cached_reads:

    ; Save CR2 for EPT
    mov xAX, cr2
    mov [xDX + VMCSCACHE.cr2], xAX
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore general purpose registers
    MYPOPAD

    mov eax, VINF_SUCCESS

.vmstart64_end:
    popf
    pop xBP
    ret


.vmxstart64_invalid_vmxon_ptr:
    ; Restore base and limit of the IDTR & GDTR
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    pop xAX ; saved LDTR
    lldt ax

    pop xSI ; pCtx (needed in rsi by the macros below)

    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    ;; @todo use the automatic load feature for MSRs
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR
%if 0 ; not supported on Intel CPUs
    LOADHOSTMSR MSR_K8_CSTAR
%endif
    LOADHOSTMSR MSR_K8_LSTAR

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add xSP, xS ; pCache
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_INVALID_VMXON_PTR
    jmp .vmstart64_end

.vmxstart64_start_failed:
    ; Restore base and limit of the IDTR & GDTR
    lidt [xSP]
    add xSP, xS*2
    lgdt [xSP]
    add xSP, xS*2

    pop xAX ; saved LDTR
    lldt ax

    pop xSI ; pCtx (needed in rsi by the macros below)

    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    ;; @todo use the automatic load feature for MSRs
    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    LOADHOSTMSR MSR_K8_SF_MASK
    LOADHOSTMSR MSR_K6_STAR
%if 0 ; not supported on Intel CPUs
    LOADHOSTMSR MSR_K8_CSTAR
%endif
    LOADHOSTMSR MSR_K8_LSTAR

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add xSP, xS ; pCache
%endif

    ; Restore segment registers
    MYPOPSEGS xAX, ax

    ; Restore all general purpose host registers.
    MYPOPAD
    mov eax, VERR_VMX_UNABLE_TO_START_VM
    jmp .vmstart64_end
ENDPROC MY_NAME(VMXR0StartVM64)
%endif ; RT_ARCH_AMD64


;/**
; * Prepares for and executes VMRUN (32-bit guests)
; *
; * @returns VBox status code
; * @param HCPhysVMCBHost  Physical address of host VMCB
; * @param HCPhysVMCB      Physical address of guest VMCB
; * @param pCtx            Guest context
; */
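;
; A C-level view suggested by the parameters above (a sketch only; the authoritative
; prototype lives in the VMM headers):
;     DECLASM(int) SVMR0VMRun(RTHCPHYS HCPhysVMCBHost, RTHCPHYS HCPhysVMCB, PCPUMCTX pCtx);
;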
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun)
%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push rdx
    push rsi
    push rdi
 %else
    push r8
    push rdx
    push rcx
 %endif
    push 0
%endif
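    ; The register arguments have just been pushed in reverse order, plus a dummy 0
    ; where the return address sits in a 32-bit cdecl frame, so the x86 and AMD64
    ; builds can both fetch the parameters below with the same [xBP + xS*2 + ...]
    ; offsets.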
    push xBP
    mov xBP, xSP
    pushf

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
    mov xSI, [xBP + xS*2 + RTHCPHYS_CB*2] ; pCtx
    push xSI ; push for saving the state at the end

    ; save host fs, gs, sysenter msr etc
    mov xAX, [xBP + xS*2] ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
    push xAX ; save for the vmload after vmrun
    vmsave

    ; setup eax for VMLOAD
    mov xAX, [xBP + xS*2 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address; take low dword only)

    ;/* Restore Guest's general purpose registers. */
    ;/* EAX is loaded from the VMCB by VMRUN */
    mov ebx, [xSI + CPUMCTX.ebx]
    mov ecx, [xSI + CPUMCTX.ecx]
    mov edx, [xSI + CPUMCTX.edx]
    mov edi, [xSI + CPUMCTX.edi]
    mov ebp, [xSI + CPUMCTX.ebp]
    mov esi, [xSI + CPUMCTX.esi]
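    ; (esi is loaded last because xSI still holds the pCtx pointer for the loads above)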

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    clgi
    sti

    ; load guest fs, gs, sysenter msr etc
    vmload
    ; run the VM
    vmrun

    ;/* EAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    vmsave

    ; load host fs, gs, sysenter msr etc
    pop xAX ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop xAX ; pCtx

    mov [ss:xAX + CPUMCTX.ebx], ebx
    mov [ss:xAX + CPUMCTX.ecx], ecx
    mov [ss:xAX + CPUMCTX.edx], edx
    mov [ss:xAX + CPUMCTX.esi], esi
    mov [ss:xAX + CPUMCTX.edi], edi
    mov [ss:xAX + CPUMCTX.ebp], ebp

    ; Restore general purpose registers
    MYPOPAD

    mov eax, VINF_SUCCESS

    popf
    pop xBP
%ifdef RT_ARCH_AMD64
    add xSP, 4*xS
%endif
    ret
ENDPROC MY_NAME(SVMR0VMRun)

%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMRUN (64-bit guests)
; *
; * @returns VBox status code
; * @param HCPhysVMCBHost  Physical address of host VMCB
; * @param HCPhysVMCB      Physical address of guest VMCB
; * @param pCtx            Guest context
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun64)
    ; fake a cdecl stack frame
 %ifdef ASM_CALL64_GCC
    push rdx
    push rsi
    push rdi
 %else
    push r8
    push rdx
    push rcx
 %endif
    push 0
    push rbp
    mov rbp, rsp
    pushf

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; */

    ;/* Save all general purpose host registers. */
    MYPUSHAD

    ;/* Save the Guest CPU context pointer. */
    mov rsi, [rbp + xS*2 + RTHCPHYS_CB*2] ; pCtx
    push rsi ; push for saving the state at the end

    ; save host fs, gs, sysenter msr etc
    mov rax, [rbp + xS*2] ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
    push rax ; save for the vmload after vmrun
    vmsave

    ; setup eax for VMLOAD
    mov rax, [rbp + xS*2 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address; take low dword only)

    ;/* Restore Guest's general purpose registers. */
    ;/* RAX is loaded from the VMCB by VMRUN */
    mov rbx, qword [xSI + CPUMCTX.ebx]
    mov rcx, qword [xSI + CPUMCTX.ecx]
    mov rdx, qword [xSI + CPUMCTX.edx]
    mov rdi, qword [xSI + CPUMCTX.edi]
    mov rbp, qword [xSI + CPUMCTX.ebp]
    mov r8, qword [xSI + CPUMCTX.r8]
    mov r9, qword [xSI + CPUMCTX.r9]
    mov r10, qword [xSI + CPUMCTX.r10]
    mov r11, qword [xSI + CPUMCTX.r11]
    mov r12, qword [xSI + CPUMCTX.r12]
    mov r13, qword [xSI + CPUMCTX.r13]
    mov r14, qword [xSI + CPUMCTX.r14]
    mov r15, qword [xSI + CPUMCTX.r15]
    mov rsi, qword [xSI + CPUMCTX.esi]
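    ; (rsi is loaded last because xSI still held the pCtx pointer for the loads above;
    ;  likewise rbp was only overwritten once all [rbp + ...] argument reads were done)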

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    clgi
    sti

    ; load guest fs, gs, sysenter msr etc
    vmload
    ; run the VM
    vmrun

    ;/* RAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    vmsave

    ; load host fs, gs, sysenter msr etc
    pop rax ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop rax ; pCtx

    mov qword [rax + CPUMCTX.ebx], rbx
    mov qword [rax + CPUMCTX.ecx], rcx
    mov qword [rax + CPUMCTX.edx], rdx
    mov qword [rax + CPUMCTX.esi], rsi
    mov qword [rax + CPUMCTX.edi], rdi
    mov qword [rax + CPUMCTX.ebp], rbp
    mov qword [rax + CPUMCTX.r8], r8
    mov qword [rax + CPUMCTX.r9], r9
    mov qword [rax + CPUMCTX.r10], r10
    mov qword [rax + CPUMCTX.r11], r11
    mov qword [rax + CPUMCTX.r12], r12
    mov qword [rax + CPUMCTX.r13], r13
    mov qword [rax + CPUMCTX.r14], r14
    mov qword [rax + CPUMCTX.r15], r15

    ; Restore general purpose registers
    MYPOPAD

    mov eax, VINF_SUCCESS

    popf
    pop rbp
    add rsp, 4*xS
    ret
ENDPROC MY_NAME(SVMR0VMRun64)
%endif ; RT_ARCH_AMD64
