source: vbox/trunk/src/VBox/VMM/VMMSwitcher/PAEand32Bit.mac@51230

Last change on this file since 51230 was 47844, checked in by vboxsync, 11 years ago:

    VMM: X2APIC + NMI. Only tested on AMD64.

; $Id: PAEand32Bit.mac 47844 2013-08-19 14:03:17Z vboxsync $
;; @file
; VMM - World Switchers, template for PAE and 32-Bit.
;

;
; Copyright (C) 2006-2013 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

;%define DEBUG_STUFF 1

;*******************************************************************************
;*   Header Files                                                              *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/apic.mac"
%include "iprt/x86.mac"
%include "VBox/vmm/cpum.mac"
%include "VBox/vmm/stam.mac"
%include "VBox/vmm/vm.mac"
%include "VBox/err.mac"
%include "CPUMInternal.mac"
%include "VMMSwitcher.mac"

%undef NEED_ID
%ifdef NEED_PAE_ON_32BIT_HOST
%define NEED_ID
%endif
%ifdef NEED_32BIT_ON_PAE_HOST
%define NEED_ID
%endif



;
; Start the fixup records
; We collect the fixups in the .data section as we go along
; It is therefore VITAL that no-one is using the .data section
; for anything else between 'Start' and 'End'.
;
BEGINDATA
GLOBALNAME Fixups
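
; Note! Each FIXUP below appends a record here (the fixup type, the patch
;       location and, where relevant, a target offset relative to
;       NAME(Start)); the exact record layout comes from the FIXUP macro
;       in VMMSwitcher.mac.  At relocation time the Relocate callback
;       (declared extern near the end of this file) walks these records
;       and overwrites the obvious placeholder operands in the code -
;       0ffffffffh, 0deadfaceh, etc. - with addresses that are valid for
;       the current context.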



BEGINCODE
GLOBALNAME Start

;;
; The C interface.
;
BEGINPROC vmmR0ToRawMode

%ifdef DEBUG_STUFF
    COM_S_NEWLINE
    COM_S_CHAR '^'
%endif

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
    mov     edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    ;
    ; Call worker.
    ;
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh
    push    cs                          ; allow for far return and restore cs correctly.
    call    NAME(vmmR0ToRawModeAsm)
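
    ; Note! The 0ffffffffh operand above is such a placeholder: the FIXUP
    ;       record just before it tells Relocate to patch in the ring-0
    ;       address of the CPUM structure.  The 'push cs' pairs with the
    ;       'retf' at the end of vmmRCToHostAsm, so the far return brings
    ;       us back here with the original code segment re-established.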

%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    ; Restore blocked Local APIC NMI vectors
    ; Do this here to ensure the host CS is already restored
    mov     ecx, [edx + CPUMCPU.fApicDisVectors]
    test    ecx, ecx
    jz      gth_apic_done
    cmp     byte [edx + CPUMCPU.fX2Apic], 1
    je      gth_x2apic

    mov     edx, [edx + CPUMCPU.pvApicBase]
    shr     ecx, 1
    jnc     gth_nolint0
    and     dword [edx + APIC_REG_LVT_LINT0], ~APIC_REG_LVT_MASKED
gth_nolint0:
    shr     ecx, 1
    jnc     gth_nolint1
    and     dword [edx + APIC_REG_LVT_LINT1], ~APIC_REG_LVT_MASKED
gth_nolint1:
    shr     ecx, 1
    jnc     gth_nopc
    and     dword [edx + APIC_REG_LVT_PC], ~APIC_REG_LVT_MASKED
gth_nopc:
    shr     ecx, 1
    jnc     gth_notherm
    and     dword [edx + APIC_REG_LVT_THMR], ~APIC_REG_LVT_MASKED
gth_notherm:
    jmp     gth_apic_done

gth_x2apic:
    push    eax                         ; save eax
    push    ebx                         ; save it for fApicDisVectors
    push    edx                         ; save edx just in case.
    mov     ebx, ecx                    ; ebx = fApicDisVectors, ecx free for MSR use
    shr     ebx, 1
    jnc     gth_x2_nolint0
    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
    rdmsr
    and     eax, ~APIC_REG_LVT_MASKED
    wrmsr
gth_x2_nolint0:
    shr     ebx, 1
    jnc     gth_x2_nolint1
    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
    rdmsr
    and     eax, ~APIC_REG_LVT_MASKED
    wrmsr
gth_x2_nolint1:
    shr     ebx, 1
    jnc     gth_x2_nopc
    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
    rdmsr
    and     eax, ~APIC_REG_LVT_MASKED
    wrmsr
gth_x2_nopc:
    shr     ebx, 1
    jnc     gth_x2_notherm
    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
    rdmsr
    and     eax, ~APIC_REG_LVT_MASKED
    wrmsr
gth_x2_notherm:
    pop     edx
    pop     ebx
    pop     eax

gth_apic_done:
%endif ; VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
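
; Note! In x2APIC mode the LVT registers are accessed through MSR space
;       instead of the MMIO page; an xAPIC register at MMIO offset X maps
;       to MSR (MSR_IA32_X2APIC_START + (X >> 4)).  Worked example with
;       the architectural values (MSR_IA32_X2APIC_START = 800h,
;       APIC_REG_LVT_LINT0 = 350h):  800h + (350h >> 4) = 835h.
;       The 'shr, 1' / 'jnc' chains above peel fApicDisVectors apart one
;       bit at a time: each shift drops the next vector's flag into CF,
;       and jnc skips the unmasking for vectors we never masked.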

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
    mov     edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx
%endif

    ret
ENDPROC vmmR0ToRawMode



; *****************************************************************************
; vmmR0ToRawModeAsm
;
; Phase one of the switch from host to guest context (host MMU context)
;
; INPUT:
;       - edx       virtual address of CPUM structure (valid in host context)
;
; USES/DESTROYS:
;       - eax, ecx, edx
;
; ASSUMPTION:
;       - current CS and DS selectors are wide open
;
; *****************************************************************************
ALIGNCODE(16)
BEGINPROC vmmR0ToRawModeAsm
    ;;
    ;; Save CPU host context
    ;;      Skip eax, edx and ecx as these are not preserved over calls.
    ;;
    CPUMCPU_FROM_CPUM(edx)
    ; general registers.
    mov     [edx + CPUMCPU.Host.ebx], ebx
    mov     [edx + CPUMCPU.Host.edi], edi
    mov     [edx + CPUMCPU.Host.esi], esi
    mov     [edx + CPUMCPU.Host.esp], esp
    mov     [edx + CPUMCPU.Host.ebp], ebp
    ; selectors.
    mov     [edx + CPUMCPU.Host.ds], ds
    mov     [edx + CPUMCPU.Host.es], es
    mov     [edx + CPUMCPU.Host.fs], fs
    mov     [edx + CPUMCPU.Host.gs], gs
    mov     [edx + CPUMCPU.Host.ss], ss
    ; special registers.
    sldt    [edx + CPUMCPU.Host.ldtr]
    sidt    [edx + CPUMCPU.Host.idtr]
    sgdt    [edx + CPUMCPU.Host.gdtr]
    str     [edx + CPUMCPU.Host.tr]
    ; flags
    pushfd
    pop     dword [edx + CPUMCPU.Host.eflags]

    ; Block Local APIC NMI vectors
%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    cmp     byte [edx + CPUMCPU.fX2Apic], 1
    je      htg_x2apic

    mov     ebx, [edx + CPUMCPU.pvApicBase]
    or      ebx, ebx
    jz      htg_apic_done
    xor     edi, edi                    ; fApicDisVectors

    mov     eax, [ebx + APIC_REG_LVT_LINT0]
    mov     ecx, eax
    and     ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp     ecx, APIC_REG_LVT_MODE_NMI
    jne     htg_nolint0
    or      edi, 0x01
    or      eax, APIC_REG_LVT_MASKED
    mov     [ebx + APIC_REG_LVT_LINT0], eax
    mov     eax, [ebx + APIC_REG_LVT_LINT0] ; write completion
htg_nolint0:
    mov     eax, [ebx + APIC_REG_LVT_LINT1]
    mov     ecx, eax
    and     ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp     ecx, APIC_REG_LVT_MODE_NMI
    jne     htg_nolint1
    or      edi, 0x02
    or      eax, APIC_REG_LVT_MASKED
    mov     [ebx + APIC_REG_LVT_LINT1], eax
    mov     eax, [ebx + APIC_REG_LVT_LINT1] ; write completion
htg_nolint1:
    mov     eax, [ebx + APIC_REG_LVT_PC]
    mov     ecx, eax
    and     ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp     ecx, APIC_REG_LVT_MODE_NMI
    jne     htg_nopc
    or      edi, 0x04
    or      eax, APIC_REG_LVT_MASKED
    mov     [ebx + APIC_REG_LVT_PC], eax
    mov     eax, [ebx + APIC_REG_LVT_PC] ; write completion
htg_nopc:
    mov     eax, [ebx + APIC_REG_VERSION]
    shr     eax, 16
    cmp     al, 5
    jb      htg_notherm
    mov     eax, [ebx + APIC_REG_LVT_THMR]
    mov     ecx, eax
    and     ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp     ecx, APIC_REG_LVT_MODE_NMI
    jne     htg_notherm
    or      edi, 0x08
    or      eax, APIC_REG_LVT_MASKED
    mov     [ebx + APIC_REG_LVT_THMR], eax
    mov     eax, [ebx + APIC_REG_LVT_THMR] ; write completion
htg_notherm:
    mov     [edx + CPUMCPU.fApicDisVectors], edi
    jmp     htg_apic_done

htg_x2apic:
    mov     esi, edx                    ; Save edx.
    xor     edi, edi                    ; fApicDisVectors

    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
    rdmsr
    mov     ebx, eax
    and     ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp     ebx, APIC_REG_LVT_MODE_NMI
    jne     htg_x2_nolint0
    or      edi, 0x01
    or      eax, APIC_REG_LVT_MASKED
    wrmsr
htg_x2_nolint0:
    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
    rdmsr
    mov     ebx, eax
    and     ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp     ebx, APIC_REG_LVT_MODE_NMI
    jne     htg_x2_nolint1
    or      edi, 0x02
    or      eax, APIC_REG_LVT_MASKED
    wrmsr
htg_x2_nolint1:
    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
    rdmsr
    mov     ebx, eax
    and     ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp     ebx, APIC_REG_LVT_MODE_NMI
    jne     htg_x2_nopc
    or      edi, 0x04
    or      eax, APIC_REG_LVT_MASKED
    wrmsr
htg_x2_nopc:
    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_VERSION >> 4)
    rdmsr
    shr     eax, 16
    cmp     al, 5
    jb      htg_x2_notherm
    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
    rdmsr
    mov     ebx, eax
    and     ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp     ebx, APIC_REG_LVT_MODE_NMI
    jne     htg_x2_notherm
    or      edi, 0x08
    or      eax, APIC_REG_LVT_MASKED
    wrmsr
htg_x2_notherm:
    mov     edx, esi                    ; Restore edx.
    mov     [edx + CPUMCPU.fApicDisVectors], edi

htg_apic_done:
%endif ; VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
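
; Note! The block above masks every local APIC LVT entry programmed for
;       NMI delivery - LINT0, LINT1, the performance counter LVT and,
;       when the version register reports a max LVT entry of 5 or more,
;       the thermal LVT - so that a host NMI watchdog cannot fire while
;       the world switch is in progress.  fApicDisVectors records what
;       was masked (01h=LINT0, 02h=LINT1, 04h=PC, 08h=thermal) so the
;       epilogue of vmmR0ToRawMode unmasks exactly those on the way back.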

    FIXUP FIX_NO_SYSENTER_JMP, 0, htg_no_sysenter - NAME(Start) ; this will insert a jmp htg_no_sysenter if host doesn't use sysenter.
    ; save MSR_IA32_SYSENTER_CS register.
    mov     ecx, MSR_IA32_SYSENTER_CS
    mov     ebx, edx                    ; save edx
    rdmsr                               ; edx:eax <- MSR[ecx]
    mov     [ebx + CPUMCPU.Host.SysEnter.cs], eax
    mov     [ebx + CPUMCPU.Host.SysEnter.cs + 4], edx
    xor     eax, eax                    ; load 0:0 to cause #GP upon sysenter
    xor     edx, edx
    wrmsr
    xchg    ebx, edx                    ; restore edx
    jmp short htg_no_sysenter

ALIGNCODE(16)
htg_no_sysenter:

    FIXUP FIX_NO_SYSCALL_JMP, 0, htg_no_syscall - NAME(Start) ; this will insert a jmp htg_no_syscall if host doesn't use syscall.
    ; clear MSR_K6_EFER_SCE.
    mov     ebx, edx                    ; save edx
    mov     ecx, MSR_K6_EFER
    rdmsr                               ; edx:eax <- MSR[ecx]
    and     eax, ~MSR_K6_EFER_SCE
    wrmsr
    mov     edx, ebx                    ; restore edx
    jmp short htg_no_syscall

ALIGNCODE(16)
htg_no_syscall:
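
; Note! FIX_NO_SYSENTER_JMP and FIX_NO_SYSCALL_JMP are conditional fixups:
;       when the host doesn't use the respective fast system call
;       mechanism, Relocate replaces the start of the sequence with a
;       jump straight to the label following it, skipping the MSR
;       accesses entirely.  Loading SYSENTER_CS with 0 makes any stray
;       sysenter fault with #GP rather than land in host ring-0 code.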

    ;; handle use flags.
    mov     esi, [edx + CPUMCPU.fUseFlags] ; esi == use flags.
    and     esi, ~CPUM_USED_FPU         ; Clear CPUM_USED_* flags. ;;@todo FPU check can be optimized to use cr0 flags!
    mov     [edx + CPUMCPU.fUseFlags], esi

    ; debug registers.
    test    esi, CPUM_USE_DEBUG_REGS_HYPER | CPUM_USE_DEBUG_REGS_HOST
    jnz     htg_debug_regs_save_dr7and6
htg_debug_regs_no:

    ; control registers.
    mov     eax, cr0
    mov     [edx + CPUMCPU.Host.cr0], eax
    ;mov    eax, cr2                    ; assume the host OS doesn't stuff things in cr2. (safe)
    ;mov    [edx + CPUMCPU.Host.cr2], eax
    mov     eax, cr3
    mov     [edx + CPUMCPU.Host.cr3], eax
    mov     eax, cr4
    mov     [edx + CPUMCPU.Host.cr4], eax

    ;;
    ;; Start switching to VMM context.
    ;;

    ;
    ; Change CR0 and CR4 so we can correctly emulate FPU/MMX/SSE[23] exceptions
    ; Also disable WP. (eax==cr4 now)
    ; Note! X86_CR4_PSE and X86_CR4_PAE are important if the host thinks so :-)
    ; Note! X86_CR4_VMXE must not be touched in case the CPU is in vmx root mode
    ;
    and     eax, X86_CR4_MCE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_VMXE
    mov     ecx, [edx + CPUMCPU.Guest.cr4]
    ;; @todo Switcher cleanup: Determine base CR4 during CPUMR0Init / VMMR3SelectSwitcher putting it
    ;       in CPUMCPU.Hyper.cr4 (which isn't currently being used). That should
    ;       simplify this operation a bit (and improve locality of the data).

    ;
    ; CR4.AndMask and CR4.OrMask are set in CPUMR3Init based on the presence of
    ; FXSAVE support on the host CPU
    ;
    CPUM_FROM_CPUMCPU(edx)
    and     ecx, [edx + CPUM.CR4.AndMask]
    or      eax, ecx
    or      eax, [edx + CPUM.CR4.OrMask]
    mov     cr4, eax

    CPUMCPU_FROM_CPUM(edx)
    mov     eax, [edx + CPUMCPU.Guest.cr0]
    and     eax, X86_CR0_EM
    or      eax, X86_CR0_PE | X86_CR0_PG | X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP
    mov     cr0, eax
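
    ; Note! The CR0 value built above keeps only the guest's EM bit and
    ;       forces TS and MP set, so the first FPU/MMX/SSE instruction
    ;       executed in the new context traps with #NM; that is what lets
    ;       the FPU state be switched lazily (see the CPUM_USED_FPU
    ;       handling on the way back) instead of on every world switch.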

    ; Load new gdt so we can do far jump to guest code after cr3 reload.
    lgdt    [edx + CPUMCPU.Hyper.gdtr]
    DEBUG_CHAR('1')                     ; trashes esi

    ; Store the hypervisor cr3 for later loading
    mov     ebp, [edx + CPUMCPU.Hyper.cr3]

    ;;
    ;; Load Intermediate memory context.
    ;;
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov     eax, 0ffffffffh
    mov     cr3, eax
    DEBUG_CHAR('2')                     ; trashes esi

%ifdef NEED_ID
    ;;
    ;; Jump to identity mapped location
    ;;
    FIXUP FIX_HC_2_ID_NEAR_REL, 1, NAME(IDEnterTarget) - NAME(Start)
    jmp near NAME(IDEnterTarget)

    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDEnterTarget
    DEBUG_CHAR('3')
    mov     edx, cr4
%ifdef NEED_PAE_ON_32BIT_HOST
    or      edx, X86_CR4_PAE
%else
    and     edx, ~X86_CR4_PAE
%endif
    mov     eax, cr0
    and     eax, (~X86_CR0_PG) & 0xffffffff ; prevent yasm warning
    mov     cr0, eax
    DEBUG_CHAR('4')
    mov     cr4, edx
    FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
    mov     edx, 0ffffffffh
    mov     cr3, edx
    or      eax, X86_CR0_PG
    DEBUG_CHAR('5')
    mov     cr0, eax
    DEBUG_CHAR('6')
%endif
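
; Note! CR4.PAE cannot be changed while CR0.PG is set, which is why the
;       NEED_ID variants detour through these identity mapped pages
;       (virtual == physical): paging is switched off, PAE is toggled,
;       the intermediate CR3 for the new mode is loaded, and paging is
;       switched back on - all without the instruction pointer becoming
;       invalid in the middle of the sequence.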

    ;;
    ;; Jump to guest code mapping of the code and load the Hypervisor CS.
    ;;
    FIXUP FIX_GC_FAR32, 1, NAME(FarJmpGCTarget) - NAME(Start)
    jmp     0fff8h:0deadfaceh
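
    ; Note! 0deadfaceh is another placeholder: the FIX_GC_FAR32 record
    ;       has Relocate patch the far jump so it targets FarJmpGCTarget
    ;       at its guest-context address; the jump also loads the
    ;       hypervisor CS, completing the code segment switch.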


    ;;
    ;; When we arrive at this label we're at the
    ;; guest code mapping of the switching code.
    ;;
ALIGNCODE(16)
GLOBALNAME FarJmpGCTarget
    DEBUG_CHAR('-')
    ; load final cr3 and do far jump to load cs.
    mov     cr3, ebp                    ; ebp set above
    DEBUG_CHAR('0')

    ;;
    ;; We're in VMM MMU context and VMM CS is loaded.
    ;; Setup the rest of the VMM state.
    ;;
    FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
    mov     edx, 0ffffffffh
    ; Activate guest IDT
    DEBUG_CHAR('1')
    lidt    [edx + CPUMCPU.Hyper.idtr]
    ; Load selectors
    DEBUG_CHAR('2')
    FIXUP FIX_HYPER_DS, 1
    mov     eax, 0ffffh
    mov     ds, eax
    mov     es, eax
    xor     eax, eax
    mov     gs, eax
    mov     fs, eax

    ; Setup stack.
    DEBUG_CHAR('3')
    mov     eax, [edx + CPUMCPU.Hyper.ss.Sel]
    mov     ss, ax
    mov     esp, [edx + CPUMCPU.Hyper.esp]

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    DEBUG_CHAR('4')
    FIXUP FIX_GC_TSS_GDTE_DW2, 2
    and     dword [0ffffffffh], ~0200h  ; clear busy flag (2nd type2 bit)
    DEBUG_CHAR('5')
    ltr     word [edx + CPUMCPU.Hyper.tr.Sel]
    DEBUG_CHAR('6')

    ; Activate the ldt (now we can safely crash).
    lldt    [edx + CPUMCPU.Hyper.ldtr.Sel]
    DEBUG_CHAR('7')

    ;; use flags.
    mov     esi, [edx + CPUMCPU.fUseFlags]

    ; debug registers
    test    esi, CPUM_USE_DEBUG_REGS_HYPER
    jnz     htg_debug_regs_guest
htg_debug_regs_guest_done:
    DEBUG_CHAR('9')

%ifdef VBOX_WITH_NMI
    ;
    ; Setup K7 NMI.
    ;
    mov     esi, edx
    ; clear all performance counter registers (PERFCTR0..3)
    xor     eax, eax
    xor     edx, edx
    mov     ecx, MSR_K7_PERFCTR0
    wrmsr
    mov     ecx, MSR_K7_PERFCTR1
    wrmsr
    mov     ecx, MSR_K7_PERFCTR2
    wrmsr
    mov     ecx, MSR_K7_PERFCTR3
    wrmsr

    mov     eax, RT_BIT(20) | RT_BIT(17) | RT_BIT(16) | 076h
    mov     ecx, MSR_K7_EVNTSEL0
    wrmsr
    mov     eax, 02329B000h
    mov     edx, 0fffffffeh             ; -1.6GHz * 5
    mov     ecx, MSR_K7_PERFCTR0
    wrmsr

    FIXUP FIX_GC_APIC_BASE_32BIT, 1
    mov     eax, 0f0f0f0f0h
    add     eax, 0340h                  ; APIC_LVTPC
    mov     dword [eax], 0400h          ; APIC_DM_NMI

    xor     edx, edx
    mov     eax, RT_BIT(20) | RT_BIT(17) | RT_BIT(16) | 076h | RT_BIT(22) ;+EN
    mov     ecx, MSR_K7_EVNTSEL0
    wrmsr

    mov     edx, esi
%endif
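
; Note! The K7 watchdog above counts event 076h (CPU clocks not halted)
;       in both user and kernel mode (bits 16 and 17), with the APIC
;       interrupt enable bit (20) set and the enable bit (22) added by
;       the second EVNTSEL0 write.  The counter is preloaded with
;       edx:eax = 0fffffffe2329b000h = -8,000,000,000, so it overflows
;       after roughly five seconds at 1.6GHz - hence the '-1.6GHz * 5'
;       remark - and LVTPC (offset 340h) is set to NMI delivery (0400h)
;       so the overflow arrives as an NMI.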

    ; General registers (sans edx).
    mov     eax, [edx + CPUMCPU.Hyper.eax]
    mov     ebx, [edx + CPUMCPU.Hyper.ebx]
    mov     ecx, [edx + CPUMCPU.Hyper.ecx]
    mov     ebp, [edx + CPUMCPU.Hyper.ebp]
    mov     esi, [edx + CPUMCPU.Hyper.esi]
    mov     edi, [edx + CPUMCPU.Hyper.edi]
    DEBUG_S_CHAR('!')

    ;;
    ;; Return to the VMM code which either called the switcher or
    ;; the code set up to run by HC.
    ;;
    push    dword [edx + CPUMCPU.Hyper.eflags]
    push    cs
    push    dword [edx + CPUMCPU.Hyper.eip]
    mov     edx, [edx + CPUMCPU.Hyper.edx] ; !! edx is no longer pointing to CPUMCPU here !!

%ifdef DEBUG_STUFF
    COM_S_PRINT ';eip='
    push    eax
    mov     eax, [esp + 8]
    COM_S_DWORD_REG eax
    pop     eax
    COM_S_CHAR ';'
%endif
%ifdef VBOX_WITH_STATISTICS
    push    edx
    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToGC
    mov     edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx
    pop     edx
%endif

    iret                                ; Use iret to make debugging and TF/RF work.
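
    ; Note! The three pushes above built exactly the frame iret wants:
    ;       Hyper.eflags, the (already hypervisor) CS and Hyper.eip.  A
    ;       single iret thus loads eflags and jumps into the hypervisor
    ;       code in one go, which keeps TF/RF single-stepping behaving.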

;;
; Detour for saving the host DR7 and DR6.
; esi and edx must be preserved.
htg_debug_regs_save_dr7and6:
DEBUG_S_CHAR('s');
    mov     eax, dr7                    ; not sure, but if I read the docs right this will trap if GD is set. FIXME!!!
    mov     [edx + CPUMCPU.Host.dr7], eax
    xor     eax, eax                    ; clear everything. (bit 12? is read as 1...)
    mov     dr7, eax
    mov     eax, dr6                    ; just in case we save the state register too.
    mov     [edx + CPUMCPU.Host.dr6], eax
    jmp     htg_debug_regs_no

;;
; Detour for saving host DR0-3 and loading hypervisor debug registers.
; esi and edx must be preserved.
htg_debug_regs_guest:
    DEBUG_S_CHAR('D')
    DEBUG_S_CHAR('R')
    DEBUG_S_CHAR('x')
    ; save host DR0-3.
    mov     eax, dr0
    mov     [edx + CPUMCPU.Host.dr0], eax
    mov     ebx, dr1
    mov     [edx + CPUMCPU.Host.dr1], ebx
    mov     ecx, dr2
    mov     [edx + CPUMCPU.Host.dr2], ecx
    mov     eax, dr3
    mov     [edx + CPUMCPU.Host.dr3], eax
    or      dword [edx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HOST

    ; load hyper DR0-7
    mov     ebx, [edx + CPUMCPU.Hyper.dr]
    mov     dr0, ebx
    mov     ecx, [edx + CPUMCPU.Hyper.dr + 8*1]
    mov     dr1, ecx
    mov     eax, [edx + CPUMCPU.Hyper.dr + 8*2]
    mov     dr2, eax
    mov     ebx, [edx + CPUMCPU.Hyper.dr + 8*3]
    mov     dr3, ebx
    mov     ecx, X86_DR6_INIT_VAL
    mov     dr6, ecx
    mov     eax, [edx + CPUMCPU.Hyper.dr + 8*7]
    mov     dr7, eax
    or      dword [edx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
    jmp     htg_debug_regs_guest_done
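
; Note! CPUMCPU.Hyper.dr appears to be an array of 64-bit entries shared
;       with the 64-bit switchers, which is why the loads above walk it
;       with a stride of 8 and only consume the low dword of each entry.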

ENDPROC vmmR0ToRawModeAsm


;;
; Trampoline for doing a call when starting the hypervisor execution.
;
; Push any arguments to the routine.
; Push the argument frame size (cArg * 4).
; Push the call target (_cdecl convention).
; Push the address of this routine.
;
;
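; A sketch of the frame a caller would build (the argument value and the
; target name are illustrative only, not taken from this file):
;
;       push    dword 0                     ; argument for the routine
;       push    dword 1 * 4                 ; cArg * 4
;       push    SomeRCFunction              ; _cdecl call target
;       push    NAME(vmmRCCallTrampoline)   ; consumed by a near return
;
; Execution then starts at the trampoline below, which pops the target
; and the frame size, makes the call, and finally switches back to the
; host context with the routine's eax as the switcher return code.
;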
ALIGNCODE(16)
BEGINPROC vmmRCCallTrampoline
%ifdef DEBUG_STUFF
    COM_S_CHAR 'c'
    COM_S_CHAR 't'
    COM_S_CHAR '!'
%endif

    ; call routine
    pop     eax                         ; call address
    pop     edi                         ; argument count.
%ifdef DEBUG_STUFF
    COM_S_PRINT ';eax='
    COM_S_DWORD_REG eax
    COM_S_CHAR ';'
%endif
    call    eax                         ; do call
    add     esp, edi                    ; cleanup stack

    ; return to the host context.
%ifdef DEBUG_STUFF
    COM_S_CHAR '`'
%endif
.to_host_again:
    call    NAME(vmmRCToHostAsm)
    mov     eax, VERR_VMM_SWITCHER_IPE_1
    jmp     .to_host_again
ENDPROC vmmRCCallTrampoline



;;
; The C interface.
;
ALIGNCODE(16)
BEGINPROC vmmRCToHost
%ifdef DEBUG_STUFF
    push    esi
    COM_NEWLINE
    DEBUG_CHAR('b')
    DEBUG_CHAR('a')
    DEBUG_CHAR('c')
    DEBUG_CHAR('k')
    DEBUG_CHAR('!')
    COM_NEWLINE
    pop     esi
%endif
    mov     eax, [esp + 4]
    jmp     NAME(vmmRCToHostAsm)
ENDPROC vmmRCToHost


;;
; vmmRCToHostAsmNoReturn
;
; This is an entry point used by TRPM when dealing with raw-mode traps,
; i.e. traps in the hypervisor code. This will not return and saves no
; state, because the caller has already saved the state.
;
; @param eax Return code.
;
ALIGNCODE(16)
BEGINPROC vmmRCToHostAsmNoReturn
    DEBUG_S_CHAR('%')

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov     edx, 0ffffffffh
    STAM32_PROFILE_ADV_START edx
%endif

    FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
    mov     edx, 0ffffffffh

    jmp     vmmRCToHostAsm_SaveNoGeneralRegs
ENDPROC vmmRCToHostAsmNoReturn


;;
; vmmRCToHostAsm
;
; This is an entry point used by TRPM to return to host context when an
; interrupt occurred or a guest trap needs handling in host context. It
; is also used by the C interface above.
;
; The hypervisor context is saved, and control returns to the caller in
; host context if it so desires.
;
; @param eax Return code.
; @uses eax, edx, ecx (or it may use them in the future)
;
ALIGNCODE(16)
BEGINPROC vmmRCToHostAsm
    DEBUG_S_CHAR('%')
    push    edx

%ifdef VBOX_WITH_STATISTICS
    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    mov     edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    mov     edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx

    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    mov     edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    ;
    ; Load the CPUMCPU pointer.
    ;
    FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
    mov     edx, 0ffffffffh

    ; Save register context.
    pop     dword [edx + CPUMCPU.Hyper.edx]
    pop     dword [edx + CPUMCPU.Hyper.eip] ; call return from stack
    mov     dword [edx + CPUMCPU.Hyper.esp], esp
    mov     dword [edx + CPUMCPU.Hyper.eax], eax
    mov     dword [edx + CPUMCPU.Hyper.ebx], ebx
    mov     dword [edx + CPUMCPU.Hyper.ecx], ecx
    mov     dword [edx + CPUMCPU.Hyper.esi], esi
    mov     dword [edx + CPUMCPU.Hyper.edi], edi
    mov     dword [edx + CPUMCPU.Hyper.ebp], ebp

    ; special registers which may change.
vmmRCToHostAsm_SaveNoGeneralRegs:
    mov     edi, eax                    ; save return code in EDI (careful with COM_DWORD_REG from here on!)
    ; str [edx + CPUMCPU.Hyper.tr] - double fault only, and it won't be right then either.
    sldt    [edx + CPUMCPU.Hyper.ldtr.Sel]

    ; No need to save CRx here. They are set dynamically according to Guest/Host requirements.
    ; FPU context is saved before restore of host saving (another) branch.

    ; Disable debug registers if active so they cannot trigger while switching.
    test    dword [edx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
    jz      .gth_disabled_dr7
    mov     eax, X86_DR7_INIT_VAL
    mov     dr7, eax
.gth_disabled_dr7:

%ifdef VBOX_WITH_NMI
    ;
    ; Disarm K7 NMI.
    ;
    mov     esi, edx

    xor     edx, edx
    xor     eax, eax
    mov     ecx, MSR_K7_EVNTSEL0
    wrmsr

    mov     edx, esi
%endif


    ;;
    ;; Load Intermediate memory context.
    ;;
    mov     ecx, [edx + CPUMCPU.Host.cr3]
    FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
    mov     eax, 0ffffffffh
    mov     cr3, eax
    DEBUG_CHAR('?')

    ;; We're now in intermediate memory context!
%ifdef NEED_ID
    ;;
    ;; Jump to identity mapped location
    ;;
    FIXUP FIX_GC_2_ID_NEAR_REL, 1, NAME(IDExitTarget) - NAME(Start)
    jmp near NAME(IDExitTarget)

    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDExitTarget
    DEBUG_CHAR('1')
    mov     edx, cr4
%ifdef NEED_PAE_ON_32BIT_HOST
    and     edx, ~X86_CR4_PAE
%else
    or      edx, X86_CR4_PAE
%endif
    mov     eax, cr0
    and     eax, (~X86_CR0_PG) & 0xffffffff ; prevent yasm warning
    mov     cr0, eax
    DEBUG_CHAR('2')
    mov     cr4, edx
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov     edx, 0ffffffffh
    mov     cr3, edx
    or      eax, X86_CR0_PG
    DEBUG_CHAR('3')
    mov     cr0, eax
    DEBUG_CHAR('4')

    ;;
    ;; Jump to HC mapping.
    ;;
    FIXUP FIX_ID_2_HC_NEAR_REL, 1, NAME(HCExitTarget) - NAME(Start)
    jmp near NAME(HCExitTarget)
%else
    ;;
    ;; Jump to HC mapping.
    ;;
    FIXUP FIX_GC_2_HC_NEAR_REL, 1, NAME(HCExitTarget) - NAME(Start)
    jmp near NAME(HCExitTarget)
%endif


    ;
    ; When we arrive here we're at the host context
    ; mapping of the switcher code.
    ;
ALIGNCODE(16)
GLOBALNAME HCExitTarget
    DEBUG_CHAR('9')
    ; load final cr3
    mov     cr3, ecx
    DEBUG_CHAR('@')


    ;;
    ;; Restore Host context.
    ;;
    ; Load CPUM pointer into edx
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh
    CPUMCPU_FROM_CPUM(edx)
    ; activate host gdt and idt
    lgdt    [edx + CPUMCPU.Host.gdtr]
    DEBUG_CHAR('0')
    lidt    [edx + CPUMCPU.Host.idtr]
    DEBUG_CHAR('1')
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
%if 1 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    movzx   eax, word [edx + CPUMCPU.Host.tr] ; eax <- TR
    and     al, 0F8h                    ; mask away TI and RPL bits, get descriptor offset.
    add     eax, [edx + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
    and     dword [eax + 4], ~0200h     ; clear busy flag (2nd type2 bit)
    ltr     word [edx + CPUMCPU.Host.tr]
%else
    movzx   eax, word [edx + CPUMCPU.Host.tr] ; eax <- TR
    and     al, 0F8h                    ; mask away TI and RPL bits, get descriptor offset.
    add     eax, [edx + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
    mov     ecx, [eax + 4]              ; ecx <- 2nd descriptor dword
    mov     ebx, ecx                    ; save original value
    and     ecx, ~0200h                 ; clear busy flag (2nd type2 bit)
    mov     [eax + 4], ecx              ; not using xchg here is paranoia..
    ltr     word [edx + CPUMCPU.Host.tr]
    xchg    [eax + 4], ebx              ; using xchg is paranoia too...
%endif
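
    ; Note! In a TSS descriptor the busy flag is bit 1 of the type field,
    ;       i.e. bit 9 (0200h) of the second descriptor dword at offset
    ;       +4, and ltr faults with #GP if the TSS is marked busy.  The
    ;       flag is therefore cleared in the GDT entry first; ltr sets it
    ;       busy again.  The disabled %else variant restores the original
    ;       dword afterwards instead of assuming it read 'busy'.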
    ; activate ldt
    DEBUG_CHAR('2')
    lldt    [edx + CPUMCPU.Host.ldtr]
    ; Restore segment registers
    mov     eax, [edx + CPUMCPU.Host.ds]
    mov     ds, eax
    mov     eax, [edx + CPUMCPU.Host.es]
    mov     es, eax
    mov     eax, [edx + CPUMCPU.Host.fs]
    mov     fs, eax
    mov     eax, [edx + CPUMCPU.Host.gs]
    mov     gs, eax
    ; restore stack
    lss     esp, [edx + CPUMCPU.Host.esp]


    FIXUP FIX_NO_SYSENTER_JMP, 0, gth_sysenter_no - NAME(Start) ; this will insert a jmp gth_sysenter_no if host doesn't use sysenter.
    ; restore MSR_IA32_SYSENTER_CS register.
    mov     ecx, MSR_IA32_SYSENTER_CS
    mov     eax, [edx + CPUMCPU.Host.SysEnter.cs]
    mov     ebx, [edx + CPUMCPU.Host.SysEnter.cs + 4]
    xchg    edx, ebx                    ; save/load edx
    wrmsr                               ; MSR[ecx] <- edx:eax
    xchg    edx, ebx                    ; restore edx
    jmp short gth_sysenter_no

ALIGNCODE(16)
gth_sysenter_no:

    FIXUP FIX_NO_SYSCALL_JMP, 0, gth_syscall_no - NAME(Start) ; this will insert a jmp gth_syscall_no if host doesn't use syscall.
    ; set MSR_K6_EFER_SCE.
    mov     ebx, edx                    ; save edx
    mov     ecx, MSR_K6_EFER
    rdmsr
    or      eax, MSR_K6_EFER_SCE
    wrmsr
    mov     edx, ebx                    ; restore edx
    jmp short gth_syscall_no

ALIGNCODE(16)
gth_syscall_no:

    ; Restore FPU if guest has used it.
    ; Using fxrstor should ensure that we're not causing unwanted exceptions on the host.
    mov     esi, [edx + CPUMCPU.fUseFlags] ; esi == use flags.
    test    esi, CPUM_USED_FPU
    jz near gth_fpu_no
    mov     ecx, cr0
    and     ecx, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, ecx

    FIXUP FIX_NO_FXSAVE_JMP, 0, gth_no_fxsave - NAME(Start) ; this will insert a jmp gth_no_fxsave if fxsave isn't supported.
    fxsave  [edx + CPUMCPU.Guest.fpu]
    fxrstor [edx + CPUMCPU.Host.fpu]
    jmp near gth_fpu_no

gth_no_fxsave:
    fnsave  [edx + CPUMCPU.Guest.fpu]
    mov     eax, [edx + CPUMCPU.Host.fpu] ; control word
    not     eax                         ; 1 means exception ignored (6 LS bits)
    and     eax, byte 03Fh              ; 6 LS bits only
    test    eax, [edx + CPUMCPU.Host.fpu + 4] ; status word
    jz      gth_no_exceptions_pending

    ; technically incorrect, but we certainly don't want any exceptions now!!
    and     dword [edx + CPUMCPU.Host.fpu + 4], ~03Fh

gth_no_exceptions_pending:
    frstor  [edx + CPUMCPU.Host.fpu]
    jmp short gth_fpu_no
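
; Note! An unmasked exception left pending in a status word restored by
;       frstor would raise #MF at the host's next waiting FPU
;       instruction.  The 'not' / 'and ... 03Fh' / 'test' sequence above
;       therefore compares the six exception status bits against the
;       inverted mask bits of the control word, and anything pending
;       unmasked is simply discarded before the frstor - technically
;       incorrect, as the comment admits, but safe for the switcher.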

ALIGNCODE(16)
gth_fpu_no:

    ; Control registers.
    ; Would've liked to have these higher up in case of crashes, but
    ; the fpu stuff must be done before we restore cr0.
    mov     ecx, [edx + CPUMCPU.Host.cr4]
    mov     cr4, ecx
    mov     ecx, [edx + CPUMCPU.Host.cr0]
    mov     cr0, ecx
    ;mov    ecx, [edx + CPUMCPU.Host.cr2] ; assumed to be a waste of time.
    ;mov    cr2, ecx

    ; restore debug registers (if modified) (esi must still be fUseFlags!)
    ; (must be done after cr4 reload because of the debug extension.)
    test    esi, CPUM_USE_DEBUG_REGS_HYPER | CPUM_USE_DEBUG_REGS_HOST | CPUM_USED_DEBUG_REGS_HOST
    jnz     gth_debug_regs_restore
gth_debug_regs_done:

    ; restore general registers.
    mov     eax, edi                    ; restore return code. eax = return code !!
    mov     edi, [edx + CPUMCPU.Host.edi]
    mov     esi, [edx + CPUMCPU.Host.esi]
    mov     ebx, [edx + CPUMCPU.Host.ebx]
    mov     ebp, [edx + CPUMCPU.Host.ebp]
    push    dword [edx + CPUMCPU.Host.eflags]
    popfd

%ifdef DEBUG_STUFF
;    COM_S_CHAR '4'
%endif
    retf
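
    ; Note! This retf consumes the eip/cs pair that the 'push cs' +
    ;       'call' in vmmR0ToRawMode left on the host stack, so we return
    ;       to the ring-0 caller with its original code segment even
    ;       though the GDT was reloaded along the way.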

;;
; Detour for restoring the host debug registers.
; edx and edi must be preserved.
gth_debug_regs_restore:
    DEBUG_S_CHAR('d')
    mov     eax, dr7                    ; Some DR7 paranoia first...
    mov     ecx, X86_DR7_INIT_VAL
    cmp     eax, ecx
    je      .gth_debug_skip_dr7_disabling
    mov     dr7, ecx
.gth_debug_skip_dr7_disabling:
    test    esi, CPUM_USED_DEBUG_REGS_HOST
    jz      .gth_debug_regs_dr7

    DEBUG_S_CHAR('r')
    mov     eax, [edx + CPUMCPU.Host.dr0]
    mov     dr0, eax
    mov     ebx, [edx + CPUMCPU.Host.dr1]
    mov     dr1, ebx
    mov     ecx, [edx + CPUMCPU.Host.dr2]
    mov     dr2, ecx
    mov     eax, [edx + CPUMCPU.Host.dr3]
    mov     dr3, eax
.gth_debug_regs_dr7:
    mov     ebx, [edx + CPUMCPU.Host.dr6]
    mov     dr6, ebx
    mov     ecx, [edx + CPUMCPU.Host.dr7]
    mov     dr7, ecx

    and     dword [edx + CPUMCPU.fUseFlags], ~(CPUM_USED_DEBUG_REGS_HOST | CPUM_USED_DEBUG_REGS_HYPER)
    jmp     gth_debug_regs_done

ENDPROC vmmRCToHostAsm


GLOBALNAME End
;
; The description string (in the text section).
;
NAME(Description):
    db SWITCHER_DESCRIPTION
    db 0

extern NAME(Relocate)

;
; End the fixup records.
;
BEGINDATA
    db FIX_THE_END                      ; final entry.
GLOBALNAME FixupsEnd

;;
; The switcher definition structure.
ALIGNDATA(16)
GLOBALNAME Def
    istruc VMMSWITCHERDEF
        at VMMSWITCHERDEF.pvCode,                   RTCCPTR_DEF NAME(Start)
        at VMMSWITCHERDEF.pvFixups,                 RTCCPTR_DEF NAME(Fixups)
        at VMMSWITCHERDEF.pszDesc,                  RTCCPTR_DEF NAME(Description)
        at VMMSWITCHERDEF.pfnRelocate,              RTCCPTR_DEF NAME(Relocate)
        at VMMSWITCHERDEF.enmType,                  dd SWITCHER_TYPE
        at VMMSWITCHERDEF.cbCode,                   dd NAME(End) - NAME(Start)
        at VMMSWITCHERDEF.offR0ToRawMode,           dd NAME(vmmR0ToRawMode) - NAME(Start)
        at VMMSWITCHERDEF.offRCToHost,              dd NAME(vmmRCToHost) - NAME(Start)
        at VMMSWITCHERDEF.offRCCallTrampoline,      dd NAME(vmmRCCallTrampoline) - NAME(Start)
        at VMMSWITCHERDEF.offRCToHostAsm,           dd NAME(vmmRCToHostAsm) - NAME(Start)
        at VMMSWITCHERDEF.offRCToHostAsmNoReturn,   dd NAME(vmmRCToHostAsmNoReturn) - NAME(Start)
        ; disasm help
        at VMMSWITCHERDEF.offHCCode0,               dd 0
%ifdef NEED_ID
        at VMMSWITCHERDEF.cbHCCode0,                dd NAME(IDEnterTarget) - NAME(Start)
%else
        at VMMSWITCHERDEF.cbHCCode0,                dd NAME(FarJmpGCTarget) - NAME(Start)
%endif
        at VMMSWITCHERDEF.offHCCode1,               dd NAME(HCExitTarget) - NAME(Start)
        at VMMSWITCHERDEF.cbHCCode1,                dd NAME(End) - NAME(HCExitTarget)
%ifdef NEED_ID
        at VMMSWITCHERDEF.offIDCode0,               dd NAME(IDEnterTarget) - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode0,                dd NAME(FarJmpGCTarget) - NAME(IDEnterTarget)
        at VMMSWITCHERDEF.offIDCode1,               dd NAME(IDExitTarget) - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode1,                dd NAME(HCExitTarget) - NAME(IDExitTarget)
%else
        at VMMSWITCHERDEF.offIDCode0,               dd 0
        at VMMSWITCHERDEF.cbIDCode0,                dd 0
        at VMMSWITCHERDEF.offIDCode1,               dd 0
        at VMMSWITCHERDEF.cbIDCode1,                dd 0
%endif
        at VMMSWITCHERDEF.offGCCode,                dd NAME(FarJmpGCTarget) - NAME(Start)
%ifdef NEED_ID
        at VMMSWITCHERDEF.cbGCCode,                 dd NAME(IDExitTarget) - NAME(FarJmpGCTarget)
%else
        at VMMSWITCHERDEF.cbGCCode,                 dd NAME(HCExitTarget) - NAME(FarJmpGCTarget)
%endif

    iend
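
; Note! VMMR3 presumably consumes this structure to handle the switcher
;       blob: pvCode/cbCode for copying it, pvFixups with pfnRelocate for
;       patching, the off* entry points for invoking it, and the
;       HC/ID/GC code ranges (the 'disasm help' fields) for telling the
;       differently mapped portions apart.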