Changeset 41976 in vbox for trunk/src/VBox
Timestamp:
    Jul 1, 2012, 2:16:40 PM (13 years ago)
Location:
    trunk/src/VBox/VMM
Files:
    16 edited
Legend:
    (no prefix)  Unmodified
    +            Added
    -            Removed
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
r41965 → r41976

 
 
+VMMDECL(void) CPUMSetHyperEDX(PVMCPU pVCpu, uint32_t u32ESP)
+{
+    pVCpu->cpum.s.Hyper.esp = u32ESP;
+}
+
+
 VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
 {
…
 {
     pVCpu->cpum.s.Hyper.eip = u32EIP;
+}
+
+
+/**
+ * Used by VMMR3RawRunGC to reinitialize the general raw-mode context registers,
+ * EFLAGS and EIP prior to resuming guest execution.
+ *
+ * All general register not given as a parameter will be set to 0. The EFLAGS
+ * register will be set to sane values for C/C++ code execution with interrupts
+ * disabled and IOPL 0.
+ *
+ * @param   pVCpu   The current virtual CPU.
+ * @param   u32EIP  The EIP value.
+ * @param   u32ESP  The ESP value.
+ * @param   u32EAX  The EAX value.
+ * @param   u32EDX  The EDX value.
+ */
+VMM_INT_DECL(void) CPUMSetHyperState(PVMCPU pVCpu, uint32_t u32EIP, uint32_t u32ESP, uint32_t u32EAX, uint32_t u32EDX)
+{
+    pVCpu->cpum.s.Hyper.eip = u32EIP;
+    pVCpu->cpum.s.Hyper.esp = u32ESP;
+    pVCpu->cpum.s.Hyper.eax = u32EAX;
+    pVCpu->cpum.s.Hyper.edx = u32EDX;
+    pVCpu->cpum.s.Hyper.ecx = 0;
+    pVCpu->cpum.s.Hyper.ebx = 0;
+    pVCpu->cpum.s.Hyper.ebp = 0;
+    pVCpu->cpum.s.Hyper.esi = 0;
+    pVCpu->cpum.s.Hyper.edi = 0;
+    pVCpu->cpum.s.Hyper.eflags.u = X86_EFL_1;
 }
 
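The CPUMSetHyperState addition above replaces a sequence of per-register setters with one call that also zeroes the remaining general registers and forces a minimal EFLAGS. The stand-alone C sketch below only illustrates that calling pattern; the HYPERCTX struct and the two helper functions are simplified stand-ins, not the real CPUM types or API.

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for the per-VCPU hypervisor register context. */
    typedef struct HYPERCTX
    {
        uint32_t eip, esp, eax, ebx, ecx, edx, ebp, esi, edi, eflags;
    } HYPERCTX;

    /* Old pattern: individual setters, everything else left as it was. */
    static void setHyperEipEsp(HYPERCTX *pCtx, uint32_t uEip, uint32_t uEsp)
    {
        pCtx->eip = uEip;
        pCtx->esp = uEsp;
    }

    /* New pattern (mirroring CPUMSetHyperState): EIP/ESP/EAX/EDX from the caller,
       the other general registers zeroed, EFLAGS reduced to the always-set
       reserved bit, i.e. interrupts disabled and IOPL 0. */
    static void setHyperState(HYPERCTX *pCtx, uint32_t uEip, uint32_t uEsp,
                              uint32_t uEax, uint32_t uEdx)
    {
        pCtx->eip = uEip;
        pCtx->esp = uEsp;
        pCtx->eax = uEax;
        pCtx->edx = uEdx;
        pCtx->ebx = pCtx->ecx = pCtx->ebp = pCtx->esi = pCtx->edi = 0;
        pCtx->eflags = 1u << 1;     /* X86_EFL_1, the reserved bit that is always 1 */
    }

    int main(void)
    {
        HYPERCTX Ctx = { 0 };
        setHyperEipEsp(&Ctx, 0x1000, 0x8000);           /* pre-change style  */
        setHyperState(&Ctx, 0x1000, 0x8000, 0, 0xc0de); /* post-change style */
        printf("eip=%#x esp=%#x eflags=%#x\n", Ctx.eip, Ctx.esp, Ctx.eflags);
        return 0;
    }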
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
r41965 → r41976

     TMNotifyStartOfExecution(pVCpu);
 
-    rc = pVM->vmm.s.pfnHostToGuestR0(pVM);
+    rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
     pVCpu->vmm.s.iLastGZRc = rc;
 
…
         return rc;
 
-    rc = pVM->vmm.s.pfnHostToGuestR0(pVM);
+    rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
 
     /* Re-enable VT-x if previously turned off. */
trunk/src/VBox/VMM/VMMR3/VMM.cpp
r41965 → r41976

 
     /*
-     * Set the EIP and ESP.
-     */
-    CPUMSetHyperEIP(pVCpu, CPUMGetGuestEFlags(pVCpu) & X86_EFL_VM
-                    ? pVM->vmm.s.pfnCPUMRCResumeGuestV86
-                    : pVM->vmm.s.pfnCPUMRCResumeGuest);
-    CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC);
+     * Set the hypervisor to resume executing a CPUM resume function
+     * in CPUMRCA.asm.
+     */
+    CPUMSetHyperState(pVCpu,
+                      CPUMGetGuestEFlags(pVCpu) & X86_EFL_VM
+                      ? pVM->vmm.s.pfnCPUMRCResumeGuestV86
+                      : pVM->vmm.s.pfnCPUMRCResumeGuest, /* eip */
+                      pVCpu->vmm.s.pbEMTStackBottomRC,   /* esp */
+                      0,                                 /* eax */
+                      VM_RC_ADDR(pVM, &pVCpu->cpum)      /* edx */);
 
     /*
…
      * Setup the call frame using the trampoline.
      */
+    CPUMSetHyperState(pVCpu,
+                      pVM->vmm.s.pfnCallTrampolineRC,                                  /* eip */
+                      pVCpu->vmm.s.pbEMTStackBottomRC - cArgs * sizeof(RTGCUINTPTR32), /* esp */
+                      RCPtrEntry,                                                      /* eax */
+                      cArgs                                                            /* edx */
+                      );
+
     memset(pVCpu->vmm.s.pbEMTStackR3, 0xaa, VMM_STACK_SIZE); /* Clear the stack. */
-    CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC - cArgs * sizeof(RTGCUINTPTR32));
     PRTGCUINTPTR32 pFrame = (PRTGCUINTPTR32)(pVCpu->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE) - cArgs;
     int i = cArgs;
…
     CPUMPushHyper(pVCpu, cArgs * sizeof(RTGCUINTPTR32)); /* stack frame size */
     CPUMPushHyper(pVCpu, RCPtrEntry);                    /* what to call */
-    CPUMSetHyperEIP(pVCpu, pVM->vmm.s.pfnCallTrampolineRC);
 
     /*
…
 
     /*
-     * Flush the logs.
+     * Flush the loggers.
      */
 #ifdef LOG_ENABLED
…
 
     /*
-     * Flush the loggers,
+     * Flush the loggers.
      */
 #ifdef LOG_ENABLED
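For orientation, the second CPUMSetHyperState call above, together with the CPUMPushHyper calls that follow, builds the frame that vmmRCCallTrampoline later consumes: the argument words sit at the top of the hypervisor stack, below them the frame size and the address to call, while EAX and EDX carry the entry point and the argument count. The C sketch below reproduces only that layout on an ordinary host buffer; the function name, the made-up addresses and the stand-in stack are illustrative, not VirtualBox's real EMT stack handling.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define STACK_SIZE  0x2000u             /* stand-in for VMM_STACK_SIZE */

    /* Lay out the trampoline frame the way the hunk above does: copy the
     * arguments to the top of the stack, then push the frame size and the
     * address to call so the trampoline can pop them in that order.       */
    static uint32_t buildTrampolineFrame(uint8_t *pbStackR3, uint32_t uStackBottomRC,
                                         uint32_t RCPtrEntry,
                                         const uint32_t *pauArgs, unsigned cArgs)
    {
        uint32_t  uEsp   = uStackBottomRC - cArgs * sizeof(uint32_t);   /* room for the args */
        uint32_t *pFrame = (uint32_t *)(pbStackR3 + STACK_SIZE) - cArgs;

        memset(pbStackR3, 0xaa, STACK_SIZE);                            /* clear the stack   */
        for (unsigned i = 0; i < cArgs; i++)                            /* copy arguments    */
            pFrame[i] = pauArgs[i];

        /* Equivalent of the two CPUMPushHyper calls: frame size, then what to call. */
        *--pFrame = cArgs * sizeof(uint32_t);   uEsp -= sizeof(uint32_t);
        *--pFrame = RCPtrEntry;                 uEsp -= sizeof(uint32_t);

        /* uEsp becomes Hyper.esp; Hyper.eip would point at the trampoline itself. */
        return uEsp;
    }

    int main(void)
    {
        static uint8_t abStack[STACK_SIZE];
        const uint32_t auArgs[] = { 1, 2, 3 };
        uint32_t uEsp = buildTrampolineFrame(abStack, 0x20000u, 0xdeadbeefu, auArgs, 3);
        printf("hypervisor esp after frame setup: %#x\n", uEsp);
        return 0;
    }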
trunk/src/VBox/VMM/VMMR3/VMMSwitcher.cpp
r41965 → r41976

     PVMMSWITCHERDEF pSwitcher = s_apSwitchers[pVM->vmm.s.enmSwitcher];
     RTRCPTR         RCPtr = pVM->vmm.s.pvCoreCodeRC + pVM->vmm.s.aoffSwitchers[pVM->vmm.s.enmSwitcher];
-    pVM->vmm.s.pfnGuestToHostRC         = RCPtr + pSwitcher->offGCGuestToHost;
-    pVM->vmm.s.pfnCallTrampolineRC      = RCPtr + pSwitcher->offGCCallTrampoline;
-    pVM->pfnVMMGCGuestToHostAsm         = RCPtr + pSwitcher->offGCGuestToHostAsm;
+    pVM->vmm.s.pfnRCToHost              = RCPtr + pSwitcher->offRCToHost;
+    pVM->vmm.s.pfnCallTrampolineRC      = RCPtr + pSwitcher->offRCCallTrampoline;
+    pVM->pfnVMMRCToHostAsm              = RCPtr + pSwitcher->offRCToHostAsm;
+    pVM->pfnVMMRCToHostAsmNoReturn      = RCPtr + pSwitcher->offRCToHostAsmNoReturn;
 
 //    AssertFailed();
…
     {
         /* try label it */
-        if (pSwitcher->offR0HostToGuest == offCode)
-            RTLogPrintf(" *R0HostToGuest:\n");
-        if (pSwitcher->offGCGuestToHost == offCode)
-            RTLogPrintf(" *GCGuestToHost:\n");
-        if (pSwitcher->offGCCallTrampoline == offCode)
-            RTLogPrintf(" *GCCallTrampoline:\n");
-        if (pSwitcher->offGCGuestToHostAsm == offCode)
-            RTLogPrintf(" *GCGuestToHostAsm:\n");
+        if (pSwitcher->offR0ToRawMode == offCode)
+            RTLogPrintf(" *R0ToRawMode:\n");
+        if (pSwitcher->offRCToHost == offCode)
+            RTLogPrintf(" *RCToHost:\n");
+        if (pSwitcher->offRCCallTrampoline == offCode)
+            RTLogPrintf(" *RCCallTrampoline:\n");
+        if (pSwitcher->offRCToHostAsm == offCode)
+            RTLogPrintf(" *RCToHostAsm:\n");
+        if (pSwitcher->offRCToHostAsmNoReturn == offCode)
+            RTLogPrintf(" *RCToHostAsmNoReturn:\n");
 
         /* disas */
…
 
         RTR0PTR pbCodeR0 = (RTR0PTR)pVM->vmm.s.pvCoreCodeR0 + pVM->vmm.s.aoffSwitchers[enmSwitcher]; /** @todo fix the pvCoreCodeR0 type */
-        pVM->vmm.s.pfnHostToGuestR0 = pbCodeR0 + pSwitcher->offR0HostToGuest;
+        pVM->vmm.s.pfnR0ToRawMode = pbCodeR0 + pSwitcher->offR0ToRawMode;
 
         RTRCPTR RCPtr = pVM->vmm.s.pvCoreCodeRC + pVM->vmm.s.aoffSwitchers[enmSwitcher];
-        pVM->vmm.s.pfnGuestToHostRC         = RCPtr + pSwitcher->offGCGuestToHost;
-        pVM->vmm.s.pfnCallTrampolineRC      = RCPtr + pSwitcher->offGCCallTrampoline;
-        pVM->pfnVMMGCGuestToHostAsm         = RCPtr + pSwitcher->offGCGuestToHostAsm;
+        pVM->vmm.s.pfnRCToHost              = RCPtr + pSwitcher->offRCToHost;
+        pVM->vmm.s.pfnCallTrampolineRC      = RCPtr + pSwitcher->offRCCallTrampoline;
+        pVM->pfnVMMRCToHostAsm              = RCPtr + pSwitcher->offRCToHostAsm;
+        pVM->pfnVMMRCToHostAsmNoReturn      = RCPtr + pSwitcher->offRCToHostAsmNoReturn;
         return VINF_SUCCESS;
     }
…
     {
         RTR0PTR pbCodeR0 = (RTR0PTR)pVM->vmm.s.pvCoreCodeR0 + pVM->vmm.s.aoffSwitchers[enmSwitcher]; /** @todo fix the pvCoreCodeR0 type */
-        return pbCodeR0 + pSwitcher->offR0HostToGuest;
+        return pbCodeR0 + pSwitcher->offR0ToRawMode;
     }
     return NIL_RTR0PTR;
trunk/src/VBox/VMM/VMMRC/TRPMRCHandlersA.asm
r41943 → r41976

 %endif
     mov     edx, IMP(g_VM)
-    call    [edx + VM.pfnVMMGCGuestToHostAsm]
+    call    [edx + VM.pfnVMMRCToHostAsm]
 
     ; We shouldn't ever return this way. So, raise a special IPE if we do.
…
     mov     eax, VERR_TRPM_IPE_3
     mov     edx, IMP(g_VM)
-    call    [edx + VM.pfnVMMGCGuestToHostAsm]
+    call    [edx + VM.pfnVMMRCToHostAsm]
     jmp     .gc_panic_again
 
…
 ;
 .rc_to_host:
-    mov     ecx, ebx
     mov     edx, IMP(g_VM)
-    call    [edx + VM.pfnVMMGCGuestToHostAsm]
-    jmp     short .rc_continue
+%if 0
+    call    [edx + VM.pfnVMMRCToHostAsm]
+%else
+    call    [edx + VM.pfnVMMRCToHostAsmNoReturn]
+%endif
+    mov     eax, VERR_TRPM_DONT_PANIC
+    jmp     .rc_to_host
 
 ;
…
     mov     edx, IMP(g_VM)
     mov     eax, VERR_TRPM_DONT_PANIC
-    call    [edx + VM.pfnVMMGCGuestToHostAsm]
+%if 0
+    call    [edx + VM.pfnVMMRCToHostAsm]
+%else
+    call    [edx + VM.pfnVMMRCToHostAsmNoReturn]
+%endif
 %ifdef DEBUG_STUFF
     COM_S_PRINT 'bad!!!'
…
     mov     edx, IMP(g_VM)
     mov     eax, VINF_EM_RAW_INTERRUPT
-    call    [edx + VM.pfnVMMGCGuestToHostAsm]
+    call    [edx + VM.pfnVMMRCToHostAsm]
 
 ;
…
     mov     edx, IMP(g_VM)
     mov     eax, VINF_EM_RAW_INTERRUPT_HYPER
-    call    [edx + VM.pfnVMMGCGuestToHostAsm]
+%if 0
+    call    [edx + VM.pfnVMMRCToHostAsm]
+%else
+    call    [edx + VM.pfnVMMRCToHostAsmNoReturn]
+%endif
 %ifdef DEBUG_STUFF_INT
     COM_S_CHAR '!'
…
     mov     edx, IMP(g_VM)
     mov     eax, VERR_TRPM_PANIC
-    call    [edx + VM.pfnVMMGCGuestToHostAsm]
+%if 0
+    call    [edx + VM.pfnVMMRCToHostAsm]
+%else
+    call    [edx + VM.pfnVMMRCToHostAsmNoReturn]
+%endif
     jmp     short df_to_host
 
trunk/src/VBox/VMM/VMMRC/VMMRC.cpp
r41965 → r41976

 VMMRCDECL(void) VMMGCGuestToHost(PVM pVM, int rc)
 {
-    pVM->vmm.s.pfnGuestToHostRC(rc);
+    pVM->vmm.s.pfnRCToHost(rc);
 }
 
…
 DECLASM(void) vmmRCProbeFireHelper(PVM pVM)
 {
-    pVM->vmm.s.pfnGuestToHostRC(VINF_VMM_CALL_TRACER);
+    pVM->vmm.s.pfnRCToHost(VINF_VMM_CALL_TRACER);
 }
 
trunk/src/VBox/VMM/VMMRZ/VMMRZ.cpp
r41803 → r41976

     pVCpu->vmm.s.rcCallRing3 = VERR_VMM_RING3_CALL_NO_RC;
 #ifdef IN_RC
-    pVM->vmm.s.pfnGuestToHostRC(VINF_VMM_CALL_HOST);
+    pVM->vmm.s.pfnRCToHost(VINF_VMM_CALL_HOST);
 #else
     int rc = vmmR0CallRing3LongJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, VINF_VMM_CALL_HOST);
trunk/src/VBox/VMM/VMMSwitcher/AMD64andLegacy.mac
r41933 → r41976

 %include "VBox/vmm/stam.mac"
 %include "VBox/vmm/vm.mac"
+%include "VBox/err.mac"
 %include "CPUMInternal.mac"
 %include "VMMSwitcher.mac"
…
 ; @param    pVM     GCC: rdi  MSC:rcx   The VM handle.
 ;
-BEGINPROC vmmR0HostToGuest
+BEGINPROC vmmR0ToRawMode
 %ifdef DEBUG_STUFF
     COM64_S_NEWLINE
…
     mov     eax, cs
     push    rax
-    call    NAME(vmmR0HostToGuestAsm)
+    call    NAME(vmmR0ToRawModeAsm)
 
 %ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
…
 
     ret
-ENDPROC vmmR0HostToGuest
+ENDPROC vmmR0ToRawMode
 
 
…
 ; The C interface.
 ;
-BEGINPROC vmmR0HostToGuest
+BEGINPROC vmmR0ToRawMode
 %ifdef DEBUG_STUFF
     COM32_S_NEWLINE
…
     push    cs
     push    0
-    FIXUP FIX_HC_32BIT, 1, .vmmR0HostToGuestReturn - NAME(Start)
+    FIXUP FIX_HC_32BIT, 1, .vmmR0ToRawModeReturn - NAME(Start)
     push    0ffffffffh
 
     FIXUP FIX_HC_64BIT_CS, 1
     push    0ffffh
-    FIXUP FIX_HC_32BIT, 1, NAME(vmmR0HostToGuestAsm) - NAME(Start)
+    FIXUP FIX_HC_32BIT, 1, NAME(vmmR0ToRawModeAsm) - NAME(Start)
     push    0ffffffffh
     retf
-.vmmR0HostToGuestReturn:
+.vmmR0ToRawModeReturn:
 
     ;
…
 
     ret
-ENDPROC vmmR0HostToGuest
+ENDPROC vmmR0ToRawMode
 
 BITS 64
…
 
 ; *****************************************************************************
-; vmmR0HostToGuestAsm
+; vmmR0ToRawModeAsm
 ;
 ; Phase one of the switch from host to guest context (host MMU context)
…
 ; *****************************************************************************
 ALIGNCODE(16)
-BEGINPROC vmmR0HostToGuestAsm
+BEGINPROC vmmR0ToRawModeAsm
     ;; Store the offset from CPUM to CPUMCPU in r8
     mov     r8d, [rdx + CPUM.offCPUMCPU0]
…
     mov     [rdx + CPUM.fApicDisVectors], edi
 htg_noapic:
-%endif
+%endif ; VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
 
     FIXUP FIX_NO_SYSENTER_JMP, 0, htg_no_sysenter - NAME(Start) ; this will insert a jmp htg_no_sysenter if host doesn't use sysenter.
…
 
 
-    ; We're now on an identity mapped pages! in 32-bit compatibility mode.
+    ; We're now on identity mapped pages in 32-bit compatibility mode.
 BITS 32
 ALIGNCODE(16)
…
     lidt    [edx + CPUMCPU.Hyper.idtr]
 
-    ; Setup stack.
+    ; Setup the stack.
     DEBUG_CHAR('3')
     mov     ax, [edx + CPUMCPU.Hyper.ss.Sel]
…
 
     ; Restore TSS selector; must mark it as not busy before using ltr (!)
-    DEBUG_CHAR('4')
+    DEBUG_S_CHAR('4')
     FIXUP FIX_GC_TSS_GDTE_DW2, 2
     and     dword [0ffffffffh], ~0200h      ; clear busy flag (2nd type2 bit)
-    DEBUG_CHAR('5')
+    DEBUG_S_CHAR('5')
     ltr     word [edx + CPUMCPU.Hyper.tr.Sel]
-    DEBUG_CHAR('6')
+    DEBUG_S_CHAR('6')
 
     ; Activate the ldt (now we can safely crash).
     lldt    [edx + CPUMCPU.Hyper.ldtr.Sel]
-    DEBUG_CHAR('7')
-
-    ;; use flags.
+    DEBUG_S_CHAR('7')
+
+    ;; Use flags.
     mov     esi, [edx + CPUMCPU.fUseFlags]
 
     ; debug registers
     test    esi, CPUM_USE_DEBUG_REGS
-    jz      htg_debug_regs_guest_no
-    jmp     htg_debug_regs_guest
-htg_debug_regs_guest_no:
-    DEBUG_CHAR('9')
-
-    ; General registers.
+    jnz     htg_debug_regs_guest
+htg_debug_regs_guest_done:
+    DEBUG_S_CHAR('9')
+
+    ; General registers (sans edx).
+    mov     eax, [edx + CPUMCPU.Hyper.eax]
     mov     ebx, [edx + CPUMCPU.Hyper.ebx]
+    mov     ecx, [edx + CPUMCPU.Hyper.ecx]
     mov     ebp, [edx + CPUMCPU.Hyper.ebp]
     mov     esi, [edx + CPUMCPU.Hyper.esi]
     mov     edi, [edx + CPUMCPU.Hyper.edi]
-    push    dword [edx + CPUMCPU.Hyper.eflags]
-    popfd
-    DEBUG_CHAR('!')
+    DEBUG_S_CHAR('!')
 
     ;;
…
     ;; the code set up to run by HC.
     ;;
+    push    dword [edx + CPUMCPU.Hyper.eip]
+    push    dword [edx + CPUMCPU.Hyper.eflags]
+    mov     edx, [edx + CPUMCPU.Hyper.edx]      ; !! edx is no longer pointing to CPUMCPU here !!
+
 %ifdef DEBUG_STUFF
     COM32_S_PRINT ';eip='
-    mov     eax, [edx + CPUMCPU.Hyper.eip]
+    push    eax
+    mov     eax, [esp + 8]
     COM32_S_DWORD_REG eax
+    pop     eax
     COM32_S_CHAR ';'
 %endif
-    mov     eax, [edx + CPUMCPU.Hyper.eip]
-    ; callees expect CPUM ptr
-    CPUM_FROM_CPUMCPU(edx)
-
 %ifdef VBOX_WITH_STATISTICS
+    push    eax
     FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToGC
-    mov     edx, 0ffffffffh
-    STAM32_PROFILE_ADV_STOP edx
-    FIXUP FIX_GC_CPUM_OFF, 1, 0
-    mov     edx, 0ffffffffh
-%endif
-    jmp     eax
+    mov     eax, 0ffffffffh
+    STAM32_PROFILE_ADV_STOP eax
+    pop     eax
+%endif
+
+    popfd
+    ret
 
 ;;
…
     mov     eax, [edx + CPUMCPU.Hyper.dr + 8*7]
     mov     dr7, eax
-    jmp     htg_debug_regs_guest_no
-
-ENDPROC vmmR0HostToGuestAsm
+    jmp     htg_debug_regs_guest_done
+
+ENDPROC vmmR0ToRawModeAsm
 
 
…
 ;
 ALIGNCODE(16)
-BEGINPROC vmmGCCallTrampoline
+BEGINPROC vmmRCCallTrampoline
 %ifdef DEBUG_STUFF
     COM32_S_CHAR 'c'
…
     ; call routine
     pop     eax                         ; call address
-    mov     esi, edx                    ; save edx
     pop     edi                         ; argument count.
 %ifdef DEBUG_STUFF
…
     add     esp, edi                    ; cleanup stack
 
-    ; return to the host context.
-    push    byte 0                      ; eip
-    mov     edx, esi                    ; CPUM pointer
-
+    ; return to the host context (eax = C returncode).
 %ifdef DEBUG_STUFF
     COM32_S_CHAR '`'
 %endif
-    jmp     NAME(VMMGCGuestToHostAsm)   ; eax = returncode.
-ENDPROC vmmGCCallTrampoline
+.to_host_again:
+    call    NAME(vmmRCToHostAsm)
+    mov     eax, VERR_VMM_SWITCHER_IPE_1
+    jmp     .to_host_again
+ENDPROC vmmRCCallTrampoline
 
 
…
 ;
 ALIGNCODE(16)
-BEGINPROC vmmGCGuestToHost
+BEGINPROC vmmRCToHost
 %ifdef DEBUG_STUFF
     push    esi
…
 %endif
     mov     eax, [esp + 4]
-    jmp     NAME(VMMGCGuestToHostAsm)
-ENDPROC vmmGCGuestToHost
+    jmp     NAME(vmmRCToHostAsm)
+ENDPROC vmmRCToHost
 
 
 ;;
-; VMMGCGuestToHostAsm
-;
-; This is an alternative entry point which we'll be using
-; when the we have saved the guest state already or we haven't
-; been messing with the guest at all.
+; vmmRCToHostAsmNoReturn
+;
+; This is an entry point used by TRPM when dealing with raw-mode traps,
+; i.e. traps in the hypervisor code.  This will not return and saves no
+; state, because the caller has already saved the state.
+;
+; @param    eax     Return code.
+;
+ALIGNCODE(16)
+BEGINPROC vmmRCToHostAsmNoReturn
+    DEBUG_S_CHAR('%')
+
+%ifdef VBOX_WITH_STATISTICS
+    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
+    mov     edx, 0ffffffffh
+    STAM32_PROFILE_ADV_STOP edx
+
+    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
+    mov     edx, 0ffffffffh
+    STAM32_PROFILE_ADV_START edx
+
+    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
+    mov     edx, 0ffffffffh
+    STAM32_PROFILE_ADV_START edx
+%endif
+
+    FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
+    mov     edx, 0ffffffffh
+
+    jmp     vmmRCToHostAsm_SaveNoGeneralRegs
+ENDPROC vmmRCToHostAsmNoReturn
+
+
+;;
+; vmmRCToHostAsm
+;
+; This is an entry point used by TRPM to return to host context when an
+; interrupt occured or an guest trap needs handling in host context.  It
+; is also used by the C interface above.
+;
+; The hypervisor context is saved and it will return to the caller if
+; host context so desires.
 ;
 ; @param    eax     Return code.
…
 ;
 ALIGNCODE(16)
-BEGINPROC VMMGCGuestToHostAsm
-    DEBUG_CHAR('%')
+BEGINPROC vmmRCToHostAsm
+    DEBUG_S_CHAR('%')
+    push    edx
 
 %ifdef VBOX_WITH_STATISTICS
…
     mov     edx, 0ffffffffh
 
+    ; Save register context.
+    pop     dword [edx + CPUMCPU.Hyper.edx]
     pop     dword [edx + CPUMCPU.Hyper.eip] ; call return from stack
-
-    ; general registers which we care about.
+    mov     dword [edx + CPUMCPU.Hyper.esp], esp
+    mov     dword [edx + CPUMCPU.Hyper.eax], eax
     mov     dword [edx + CPUMCPU.Hyper.ebx], ebx
+    mov     dword [edx + CPUMCPU.Hyper.ecx], ecx
     mov     dword [edx + CPUMCPU.Hyper.esi], esi
     mov     dword [edx + CPUMCPU.Hyper.edi], edi
     mov     dword [edx + CPUMCPU.Hyper.ebp], ebp
-    mov     dword [edx + CPUMCPU.Hyper.esp], esp
 
     ; special registers which may change.
+vmmRCToHostAsm_SaveNoGeneralRegs:
 %ifdef STRICT_IF
     pushf
…
     jmp     gth_debug_regs_no
 
-ENDPROC VMMGCGuestToHostAsm
+ENDPROC vmmRCToHostAsm
 
 
…
     at VMMSWITCHERDEF.enmType,                      dd SWITCHER_TYPE
     at VMMSWITCHERDEF.cbCode,                       dd NAME(End) - NAME(Start)
-    at VMMSWITCHERDEF.offR0HostToGuest,             dd NAME(vmmR0HostToGuest) - NAME(Start)
-    at VMMSWITCHERDEF.offGCGuestToHost,             dd NAME(vmmGCGuestToHost) - NAME(Start)
-    at VMMSWITCHERDEF.offGCCallTrampoline,          dd NAME(vmmGCCallTrampoline) - NAME(Start)
-    at VMMSWITCHERDEF.offGCGuestToHostAsm,          dd NAME(VMMGCGuestToHostAsm) - NAME(Start)
+    at VMMSWITCHERDEF.offR0ToRawMode,               dd NAME(vmmR0ToRawMode) - NAME(Start)
+    at VMMSWITCHERDEF.offRCToHost,                  dd NAME(vmmRCToHost) - NAME(Start)
+    at VMMSWITCHERDEF.offRCCallTrampoline,          dd NAME(vmmRCCallTrampoline) - NAME(Start)
+    at VMMSWITCHERDEF.offRCToHostAsm,               dd NAME(vmmRCToHostAsm) - NAME(Start)
+    at VMMSWITCHERDEF.offRCToHostAsmNoReturn,       dd NAME(vmmRCToHostAsmNoReturn) - NAME(Start)
     ; disasm help
     at VMMSWITCHERDEF.offHCCode0,                   dd 0
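The hunks above split the old single VMMGCGuestToHostAsm entry into vmmRCToHostAsm, which saves the raw-mode general-register context before leaving, and vmmRCToHostAsmNoReturn, which skips the save and jumps straight to the shared vmmRCToHostAsm_SaveNoGeneralRegs tail. The C sketch below only mirrors that control-flow shape; the types, names and printout are invented stand-ins, not the switcher's actual register and segment handling.

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for the saved hypervisor register block. */
    typedef struct HYPERREGS { uint32_t eip, esp, eax, ebx, ecx, edx, esi, edi, ebp; } HYPERREGS;

    static HYPERREGS g_Hyper;                 /* stands in for CPUMCPU.Hyper */

    /* Shared tail: what both entry points do after the general registers
       (special registers and the actual switch in the real code).         */
    static void saveNoGeneralRegsAndLeave(int rcRet)
    {
        printf("switching to host, rc=%d\n", rcRet);
    }

    /* vmmRCToHostAsm analogue: capture the general-register context first. */
    static void rcToHostAsm(int rcRet, const HYPERREGS *pCurrent)
    {
        g_Hyper = *pCurrent;                  /* save the caller's registers */
        saveNoGeneralRegsAndLeave(rcRet);
    }

    /* vmmRCToHostAsmNoReturn analogue: the caller (TRPM) has already saved
       the state, so go straight to the shared tail.                        */
    static void rcToHostAsmNoReturn(int rcRet)
    {
        saveNoGeneralRegsAndLeave(rcRet);
    }

    int main(void)
    {
        HYPERREGS Regs = { 0x1000, 0x8000, 0, 1, 2, 3, 4, 5, 6 };
        rcToHostAsm(0, &Regs);
        rcToHostAsmNoReturn(-1);
        return 0;
    }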
trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac
r41936 → r41976

 ; @param    [esp + 08h]    Param 2 - VMCPU offset
 ;
-BEGINPROC vmmR0HostToGuest
+BEGINPROC vmmR0ToRawMode
 %ifdef DEBUG_STUFF
     COM32_S_NEWLINE
…
     mov     edx, 0ffffffffh
     push    cs                          ; allow for far return and restore cs correctly.
-    call    NAME(vmmR0HostToGuestAsm)
+    call    NAME(vmmR0ToRawModeAsm)
 
 %ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
…
     ret
 
-ENDPROC vmmR0HostToGuest
+ENDPROC vmmR0ToRawMode
 
 ; *****************************************************************************
-; vmmR0HostToGuestAsm
+; vmmR0ToRawModeAsm
 ;
 ; Phase one of the switch from host to guest context (host MMU context)
…
 ; *****************************************************************************
 ALIGNCODE(16)
-BEGINPROC vmmR0HostToGuestAsm
+BEGINPROC vmmR0ToRawModeAsm
     ;;
     ;; Save CPU host context
…
 
     ; now let's switch back
-    jmp     NAME(VMMGCGuestToHostAsm)   ; rax = returncode.
-
-ENDPROC vmmR0HostToGuestAsm
+    jmp     NAME(vmmRCToHostAsm)        ; rax = returncode.
+
+ENDPROC vmmR0ToRawModeAsm
 
 
…
 BITS 64
 ALIGNCODE(16)
-BEGINPROC vmmGCCallTrampoline
+BEGINPROC vmmRCCallTrampoline
 %ifdef DEBUG_STUFF
     COM64_S_CHAR 'c'
…
 %endif
     int3
-ENDPROC vmmGCCallTrampoline
+ENDPROC vmmRCCallTrampoline
 
 
…
 BITS 64
 ALIGNCODE(16)
-BEGINPROC vmmGCGuestToHost
+BEGINPROC vmmRCToHost
 %ifdef DEBUG_STUFF
     push    rsi
…
 %endif
     int3
-ENDPROC vmmGCGuestToHost
+ENDPROC vmmRCToHost
 
 ;;
-; VMMGCGuestToHostAsm
+; vmmRCToHostAsm
 ;
 ; This is an alternative entry point which we'll be using
…
 BITS 64
 ALIGNCODE(16)
-BEGINPROC VMMGCGuestToHostAsm
+BEGINPROC vmmRCToHostAsm
+NAME(vmmRCToHostAsmNoReturn):
     ;; We're still in the intermediate memory context!
…
     mov     eax, [edx + CPUMCPU.u32RetCode]
     retf
-ENDPROC VMMGCGuestToHostAsm
+ENDPROC vmmRCToHostAsm
 
 
…
     at VMMSWITCHERDEF.enmType,                      dd SWITCHER_TYPE
     at VMMSWITCHERDEF.cbCode,                       dd NAME(End) - NAME(Start)
-    at VMMSWITCHERDEF.offR0HostToGuest,             dd NAME(vmmR0HostToGuest) - NAME(Start)
-    at VMMSWITCHERDEF.offGCGuestToHost,             dd NAME(vmmGCGuestToHost) - NAME(Start)
-    at VMMSWITCHERDEF.offGCCallTrampoline,          dd NAME(vmmGCCallTrampoline) - NAME(Start)
-    at VMMSWITCHERDEF.offGCGuestToHostAsm,          dd NAME(VMMGCGuestToHostAsm) - NAME(Start)
+    at VMMSWITCHERDEF.offR0ToRawMode,               dd NAME(vmmR0ToRawMode) - NAME(Start)
+    at VMMSWITCHERDEF.offRCToHost,                  dd NAME(vmmRCToHost) - NAME(Start)
+    at VMMSWITCHERDEF.offRCCallTrampoline,          dd NAME(vmmRCCallTrampoline) - NAME(Start)
+    at VMMSWITCHERDEF.offRCToHostAsm,               dd NAME(vmmRCToHostAsm) - NAME(Start)
+    at VMMSWITCHERDEF.offRCToHostAsmNoReturn,       dd NAME(vmmRCToHostAsmNoReturn) - NAME(Start)
     ; disasm help
     at VMMSWITCHERDEF.offHCCode0,                   dd 0
trunk/src/VBox/VMM/VMMSwitcher/PAEand32Bit.mac
r41933 → r41976

 %include "VBox/vmm/stam.mac"
 %include "VBox/vmm/vm.mac"
+%include "VBox/err.mac"
 %include "CPUMInternal.mac"
 %include "VMMSwitcher.mac"
…
 ; The C interface.
 ;
-BEGINPROC vmmR0HostToGuest
+BEGINPROC vmmR0ToRawMode
 
 %ifdef DEBUG_STUFF
…
     mov     edx, 0ffffffffh
     push    cs                          ; allow for far return and restore cs correctly.
-    call    NAME(vmmR0HostToGuestAsm)
+    call    NAME(vmmR0ToRawModeAsm)
 
 %ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
…
 
     ret
-ENDPROC vmmR0HostToGuest
+ENDPROC vmmR0ToRawMode
 
 
 
 ; *****************************************************************************
-; vmmR0HostToGuestAsm
+; vmmR0ToRawModeAsm
 ;
 ; Phase one of the switch from host to guest context (host MMU context)
…
 ; *****************************************************************************
 ALIGNCODE(16)
-BEGINPROC vmmR0HostToGuestAsm
+BEGINPROC vmmR0ToRawModeAsm
     ;;
     ;; Save CPU host context
…
     ; debug registers.
     test    esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
-    jz      htg_debug_regs_no
-    jmp     htg_debug_regs_save_dr7and6
+    jnz     htg_debug_regs_save_dr7and6
 htg_debug_regs_no:
 
…
     ; debug registers
     test    esi, CPUM_USE_DEBUG_REGS
-    jz      htg_debug_regs_guest_no
-    jmp     htg_debug_regs_guest
-htg_debug_regs_guest_no:
+    jnz     htg_debug_regs_guest
+htg_debug_regs_guest_done:
     DEBUG_CHAR('9')
 
…
 %endif
 
-    ; General registers.
+    ; General registers (sans edx).
+    mov     eax, [edx + CPUMCPU.Hyper.eax]
     mov     ebx, [edx + CPUMCPU.Hyper.ebx]
+    mov     ecx, [edx + CPUMCPU.Hyper.ecx]
     mov     ebp, [edx + CPUMCPU.Hyper.ebp]
     mov     esi, [edx + CPUMCPU.Hyper.esi]
     mov     edi, [edx + CPUMCPU.Hyper.edi]
-    push    dword [edx + CPUMCPU.Hyper.eflags]
-    popfd
-    DEBUG_CHAR('!')
+    DEBUG_S_CHAR('!')
 
     ;;
…
     ;; the code set up to run by HC.
     ;;
+    push    dword [edx + CPUMCPU.Hyper.eip]
+    push    dword [edx + CPUMCPU.Hyper.eflags]
+    mov     edx, [edx + CPUMCPU.Hyper.edx]      ; !! edx is no longer pointing to CPUMCPU here !!
+
 %ifdef DEBUG_STUFF
     COM_S_PRINT ';eip='
-    mov     eax, [edx + CPUMCPU.Hyper.eip]
+    push    eax
+    mov     eax, [esp + 8]
     COM_S_DWORD_REG eax
+    pop     eax
     COM_S_CHAR ';'
 %endif
-    mov     eax, [edx + CPUMCPU.Hyper.eip]
-    ; callees expect CPUM ptr
-    CPUM_FROM_CPUMCPU(edx)
-
 %ifdef VBOX_WITH_STATISTICS
+    push    edx
     FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToGC
     mov     edx, 0ffffffffh
     STAM_PROFILE_ADV_STOP edx
-    FIXUP FIX_GC_CPUM_OFF, 1, 0
-    mov     edx, 0ffffffffh
-%endif
-    jmp     eax
+    pop     edx
+%endif
+
+    popfd
+    ret
 
 ;;
…
     mov     eax, [edx + CPUMCPU.Hyper.dr + 8*7]
     mov     dr7, eax
-    jmp     htg_debug_regs_guest_no
-
-ENDPROC vmmR0HostToGuestAsm
+    jmp     htg_debug_regs_guest_done
+
+ENDPROC vmmR0ToRawModeAsm
 
 
…
 ;
 ALIGNCODE(16)
-BEGINPROC vmmGCCallTrampoline
+BEGINPROC vmmRCCallTrampoline
 %ifdef DEBUG_STUFF
     COM_S_CHAR 'c'
…
     ; call routine
     pop     eax                         ; call address
-    mov     esi, edx                    ; save edx
     pop     edi                         ; argument count.
 %ifdef DEBUG_STUFF
…
 
     ; return to the host context.
-    push    byte 0                      ; eip
-    mov     edx, esi                    ; CPUM pointer
-
 %ifdef DEBUG_STUFF
     COM_S_CHAR '`'
 %endif
-    jmp     NAME(VMMGCGuestToHostAsm)   ; eax = returncode.
-ENDPROC vmmGCCallTrampoline
+.to_host_again:
+    call    NAME(vmmRCToHostAsm)
+    mov     eax, VERR_VMM_SWITCHER_IPE_1
+    jmp     .to_host_again
+ENDPROC vmmRCCallTrampoline
 
 
…
 ;
 ALIGNCODE(16)
-BEGINPROC vmmGCGuestToHost
+BEGINPROC vmmRCToHost
 %ifdef DEBUG_STUFF
     push    esi
…
 %endif
     mov     eax, [esp + 4]
-    jmp     NAME(VMMGCGuestToHostAsm)
-ENDPROC vmmGCGuestToHost
+    jmp     NAME(vmmRCToHostAsm)
+ENDPROC vmmRCToHost
 
 
 ;;
-; VMMGCGuestToHostAsm
-;
-; This is an alternative entry point which we'll be using
-; when the we have saved the guest state already or we haven't
-; been messing with the guest at all.
+; vmmRCToHostAsmNoReturn
+;
+; This is an entry point used by TRPM when dealing with raw-mode traps,
+; i.e. traps in the hypervisor code.  This will not return and saves no
+; state, because the caller has already saved the state.
+;
+; @param    eax     Return code.
+;
+ALIGNCODE(16)
+BEGINPROC vmmRCToHostAsmNoReturn
+    DEBUG_S_CHAR('%')
+
+%ifdef VBOX_WITH_STATISTICS
+    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
+    mov     edx, 0ffffffffh
+    STAM32_PROFILE_ADV_STOP edx
+
+    FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
+    mov     edx, 0ffffffffh
+    STAM32_PROFILE_ADV_START edx
+
+    FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
+    mov     edx, 0ffffffffh
+    STAM32_PROFILE_ADV_START edx
+%endif
+
+    FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
+    mov     edx, 0ffffffffh
+
+    jmp     vmmRCToHostAsm_SaveNoGeneralRegs
+ENDPROC vmmRCToHostAsmNoReturn
+
+
+;;
+; vmmRCToHostAsm
+;
+; This is an entry point used by TRPM to return to host context when an
+; interrupt occured or an guest trap needs handling in host context.  It
+; is also used by the C interface above.
+;
+; The hypervisor context is saved and it will return to the caller if
+; host context so desires.
 ;
 ; @param    eax     Return code.
…
 ;
 ALIGNCODE(16)
-BEGINPROC VMMGCGuestToHostAsm
-    DEBUG_CHAR('%')
+BEGINPROC vmmRCToHostAsm
+    DEBUG_S_CHAR('%')
+    push    edx
 
 %ifdef VBOX_WITH_STATISTICS
…
     mov     edx, 0ffffffffh
 
+    ; Save register context.
+    pop     dword [edx + CPUMCPU.Hyper.edx]
     pop     dword [edx + CPUMCPU.Hyper.eip] ; call return from stack
-
-    ; general registers which we care about.
+    mov     dword [edx + CPUMCPU.Hyper.esp], esp
+    mov     dword [edx + CPUMCPU.Hyper.eax], eax
     mov     dword [edx + CPUMCPU.Hyper.ebx], ebx
+    mov     dword [edx + CPUMCPU.Hyper.ecx], ecx
     mov     dword [edx + CPUMCPU.Hyper.esi], esi
     mov     dword [edx + CPUMCPU.Hyper.edi], edi
     mov     dword [edx + CPUMCPU.Hyper.ebp], ebp
-    mov     dword [edx + CPUMCPU.Hyper.esp], esp
 
     ; special registers which may change.
+vmmRCToHostAsm_SaveNoGeneralRegs:
     ; str [edx + CPUMCPU.Hyper.tr] - double fault only, and it won't be right then either.
     sldt    [edx + CPUMCPU.Hyper.ldtr.Sel]
…
     jmp     gth_debug_regs_no
 
-ENDPROC VMMGCGuestToHostAsm
+ENDPROC vmmRCToHostAsm
 
 
…
     at VMMSWITCHERDEF.enmType,                      dd SWITCHER_TYPE
     at VMMSWITCHERDEF.cbCode,                       dd NAME(End) - NAME(Start)
-    at VMMSWITCHERDEF.offR0HostToGuest,             dd NAME(vmmR0HostToGuest) - NAME(Start)
-    at VMMSWITCHERDEF.offGCGuestToHost,             dd NAME(vmmGCGuestToHost) - NAME(Start)
-    at VMMSWITCHERDEF.offGCCallTrampoline,          dd NAME(vmmGCCallTrampoline) - NAME(Start)
-    at VMMSWITCHERDEF.offGCGuestToHostAsm,          dd NAME(VMMGCGuestToHostAsm) - NAME(Start)
+    at VMMSWITCHERDEF.offR0ToRawMode,               dd NAME(vmmR0ToRawMode) - NAME(Start)
+    at VMMSWITCHERDEF.offRCToHost,                  dd NAME(vmmRCToHost) - NAME(Start)
+    at VMMSWITCHERDEF.offRCCallTrampoline,          dd NAME(vmmRCCallTrampoline) - NAME(Start)
+    at VMMSWITCHERDEF.offRCToHostAsm,               dd NAME(vmmRCToHostAsm) - NAME(Start)
+    at VMMSWITCHERDEF.offRCToHostAsmNoReturn,       dd NAME(vmmRCToHostAsmNoReturn) - NAME(Start)
     ; disasm help
     at VMMSWITCHERDEF.offHCCode0,                   dd 0
trunk/src/VBox/VMM/include/VMMInternal.h
r41836 → r41976

     RTRCPTR                     pfnCallTrampolineRC;
     /** Guest to host switcher entry point. */
-    RCPTRTYPE(PFNVMMSWITCHERRC) pfnGuestToHostRC;
+    RCPTRTYPE(PFNVMMSWITCHERRC) pfnRCToHost;
     /** Host to guest switcher entry point. */
-    R0PTRTYPE(PFNVMMSWITCHERHC) pfnHostToGuestR0;
+    R0PTRTYPE(PFNVMMSWITCHERHC) pfnR0ToRawMode;
     /** @} */
 
trunk/src/VBox/VMM/include/VMMSwitcher.h
r41933 → r41976

     /** Size of the entire code chunk. */
     uint32_t    cbCode;
-    /** vmmR0HostToGuest C entrypoint. */
-    uint32_t    offR0HostToGuest;
-    /** vmmGCGuestToHost C entrypoint. */
-    uint32_t    offGCGuestToHost;
-    /** vmmGCCallTrampoline address. */
-    uint32_t    offGCCallTrampoline;
-    /** vmmGCGuestToHostAsm assembly entrypoint. */
-    uint32_t    offGCGuestToHostAsm;
+    /** vmmR0ToRawMode C entrypoint. */
+    uint32_t    offR0ToRawMode;
+    /** vmmRCToHost C entrypoint. */
+    uint32_t    offRCToHost;
+    /** vmmRCCallTrampoline address. */
+    uint32_t    offRCCallTrampoline;
+    /** vmmRCToHostAsm - Assembly language entry point for switching from raw-mode
+     * context to host-context.  This saves the RC register context. */
+    uint32_t    offRCToHostAsm;
+    /** vmmRCToHostNoReturn - Assembly language entry point for switching from
+     * raw-mode context to host-context.  This does not save any RC register
+     * context and expects the caller to have done that already. */
+    uint32_t    offRCToHostAsmNoReturn;
     /** @name Disassembly Regions.
      * @{ */
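As the comments above indicate, each off* field is an offset from the switcher image's Start label; at load time VMMSwitcher.cpp (earlier in this changeset) adds it to the address the code chunk was mapped at to obtain a callable pointer. The compilable sketch below shows just that resolution step; the trimmed-down structs, field set and example numbers are illustrative only, not the full VMMSWITCHERDEF.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t RTRCPTR;   /* raw-mode context address, 32-bit */

    /* Trimmed-down stand-in for VMMSWITCHERDEF: just the entry-point offsets. */
    typedef struct SWITCHERDEF
    {
        uint32_t offR0ToRawMode;
        uint32_t offRCToHost;
        uint32_t offRCCallTrampoline;
        uint32_t offRCToHostAsm;
        uint32_t offRCToHostAsmNoReturn;
    } SWITCHERDEF;

    /* Resolved raw-mode entry points, as the VM structure stores them. */
    typedef struct SWITCHERENTRYPOINTS
    {
        RTRCPTR pfnRCToHost;
        RTRCPTR pfnCallTrampolineRC;
        RTRCPTR pfnVMMRCToHostAsm;
        RTRCPTR pfnVMMRCToHostAsmNoReturn;
    } SWITCHERENTRYPOINTS;

    /* Offset-from-Start plus mapping address gives a callable RC address. */
    static void resolveSwitcher(SWITCHERENTRYPOINTS *pDst, const SWITCHERDEF *pDef, RTRCPTR RCPtrCode)
    {
        pDst->pfnRCToHost               = RCPtrCode + pDef->offRCToHost;
        pDst->pfnCallTrampolineRC       = RCPtrCode + pDef->offRCCallTrampoline;
        pDst->pfnVMMRCToHostAsm         = RCPtrCode + pDef->offRCToHostAsm;
        pDst->pfnVMMRCToHostAsmNoReturn = RCPtrCode + pDef->offRCToHostAsmNoReturn;
    }

    int main(void)
    {
        SWITCHERDEF Def = { 0x000, 0x480, 0x4c0, 0x500, 0x560 };   /* made-up offsets */
        SWITCHERENTRYPOINTS Ep;
        resolveSwitcher(&Ep, &Def, 0xa0001000u);
        printf("RCToHostAsmNoReturn resolves to %#x\n", (unsigned)Ep.pfnVMMRCToHostAsmNoReturn);
        return 0;
    }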
trunk/src/VBox/VMM/include/VMMSwitcher.mac
r41933 → r41976

     .enmType:                   resd 1
     .cbCode:                    resd 1
-    .offR0HostToGuest:          resd 1
-    .offGCGuestToHost:          resd 1
-    .offGCCallTrampoline:       resd 1
-    .offGCGuestToHostAsm:       resd 1
+    .offR0ToRawMode:            resd 1
+    .offRCToHost:               resd 1
+    .offRCCallTrampoline:       resd 1
+    .offRCToHostAsm:            resd 1
+    .offRCToHostAsmNoReturn:    resd 1
     ; disasm help
     .offHCCode0:                resd 1
…
 %ifdef DEBUG_STUFF
  %define DEBUG_CHAR(ch)       COM_CHAR ch
- %define DEBUG_S_CHAR(ch)     COM_CHAR ch
+ %define DEBUG_S_CHAR(ch)     COM_S_CHAR ch
 %else
  %define DEBUG_CHAR(ch)
trunk/src/VBox/VMM/testcase/Makefile.kmk
r41677 → r41976

 		$(VBOX_PATH_VMM_SRC)/testcase/tstAsmStructsAsm.asm \
 		$(VBOX_VMM_TESTCASE_OUT_DIR)/tstAsmStructsAsm.mac \
+		$(DEPTH)/include/iprt/asmdefs.mac \
+		$(DEPTH)/include/VBox/vmm/cpum.mac \
+		$(DEPTH)/include/VBox/vmm/vm.mac \
+		$(DEPTH)/include/VBox/sup.mac \
+		$(DEPTH)/include/iprt/x86.mac \
+		$(VBOX_PATH_VMM_SRC)/include/CPUMInternal.mac \
+		$(VBOX_PATH_VMM_SRC)/include/HWACCMInternal.mac \
+		$(VBOX_PATH_VMM_SRC)/include/VMMInternal.mac \
+		$(VBOX_PATH_VMM_SRC)/include/VMMSwitcher.mac \
 		$(VBOX_PATH_VMM_SRC)/testcase/Makefile.kmk \
 		| $$(dir $$@)
trunk/src/VBox/VMM/testcase/tstMicroRCA.asm
r41944 → r41976

     mov     edx, IMP(g_VM)
     mov     eax, VERR_TRPM_DONT_PANIC
-    call    [edx + VM.pfnVMMGCGuestToHostAsm]
+    call    [edx + VM.pfnVMMRCToHostAsm]
     jmp     short tstTrapHandler_Fault_Guest
 
…
     mov     edx, IMP(g_VM)
     mov     eax, VERR_TRPM_DONT_PANIC
-    call    [edx + VM.pfnVMMGCGuestToHostAsm]
+    call    [edx + VM.pfnVMMRCToHostAsm]
     jmp     short tstTrapHandler_Fault_Hyper
 
trunk/src/VBox/VMM/testcase/tstVMStruct.h
r41933 → r41976

     GEN_CHECK_OFF(VMM, aoffSwitchers);
     GEN_CHECK_OFF_DOT(VMM, aoffSwitchers[1]);
-    GEN_CHECK_OFF(VMM, pfnHostToGuestR0);
-    GEN_CHECK_OFF(VMM, pfnGuestToHostRC);
+    GEN_CHECK_OFF(VMM, pfnR0ToRawMode);
+    GEN_CHECK_OFF(VMM, pfnRCToHost);
     GEN_CHECK_OFF(VMM, pfnCallTrampolineRC);
     GEN_CHECK_OFF(VMM, pfnCPUMRCResumeGuest);
…
     GEN_CHECK_OFF(VM, cbSelf);
     GEN_CHECK_OFF(VM, offVMCPU);
-    GEN_CHECK_OFF(VM, pfnVMMGCGuestToHostAsm);
+    GEN_CHECK_OFF(VM, pfnVMMRCToHostAsm);
+    GEN_CHECK_OFF(VM, pfnVMMRCToHostAsmNoReturn);
     GEN_CHECK_OFF(VM, fRecompileUser);
     GEN_CHECK_OFF(VM, fRecompileSupervisor);