Timestamp: Jan 28, 2007 2:34:06 AM
svn:sync-xref-src-repo-rev: 17968
Location: trunk
Files: 13 edited
trunk/include/VBox/trpm.h
(r346 → r397)

 /**
+ * Dispatches an interrupt that arrived while we were in the guest context.
+ *
+ * It's assumes we're invoked with interrupts disabled.
+ * When this function returns, interrupts will be enabled.
+ *
+ * @param   pVM     The VM handle.
+ */
+TRPMR0DECL(void) TRPMR0DispatchHostInterrupt(PVM pVM);
+
+# ifndef VBOX_WITHOUT_IDT_PATCHING
+
+/**
  * Changes the VMMR0Entry() call frame and stack used by the IDT patch code
  * so that we'll dispatch an interrupt rather than returning directly to Ring-3
…
 TRPMR0DECL(void) TRPMR0SetupInterruptDispatcherFrame(PVM pVM, void *pvRet);

+# endif /* !VBOX_WITHOUT_IDT_PATCHING */
+
 /** @} */
 #endif
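The new TRPMR0DispatchHostInterrupt() API declared above is consumed later in this changeset by VMMR0.cpp. A minimal sketch of the intended call pattern in a Ring-0 run loop is shown here; the RunRawModeGuest() name and the surrounding fragment are illustrative placeholders, not part of the commit.

    /* Hypothetical Ring-0 fragment; interrupts are already disabled here. */
    int rc = RunRawModeGuest(pVM);            /* placeholder for the actual world switch */
    if (    rc == VINF_EM_RAW_INTERRUPT
        ||  rc == VINF_EM_RAW_INTERRUPT_HYPER)
        TRPMR0DispatchHostInterrupt(pVM);     /* forward the pending host interrupt;
                                                 returns with interrupts enabled        */
    return rc;                                /* hand the status code back to Ring-3    */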
trunk/include/VBox/vmm.h
(r321 → r397)

 {
     /** Run guest context. */
-    VMMR0_DO_RUN_GC = 0,
+    VMMR0_DO_RAW_RUN = 0,
+    VMMR0_DO_RUN_GC = VMMR0_DO_RAW_RUN,
     /** Run guest code using the available hardware acceleration technology. */
-    VMMR0_HWACC_RUN_GUEST,
+    VMMR0_DO_HWACC_RUN,
     /** Call VMMR0 Per VM Init. */
     VMMR0_DO_VMMR0_INIT,
…
     VMMR0_DO_VMMR0_TERM,
     /** Setup the hardware accelerated raw-mode session. */
-    VMMR0_HWACC_SETUP_VM,
+    VMMR0_DO_HWACC_SETUP_VM,
     /** Calls function in the hypervisor.
      * The caller must setup the hypervisor context so the call will be performed.
…
     VMMR0_DO_SRV_END,

+    /** Official NOP that we use for profiling. */
+    VMMR0_DO_NOP,
+    /** Official call we use for testing Ring-0 APIs. */
+    VMMR0_DO_TESTS,
+
     /** The usual 32-bit type blow up. */
     VMMR0_DO_32BIT_HACK = 0x7fffffff
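Note that VMMR0_DO_RUN_GC is kept as an alias with the same value as VMMR0_DO_RAW_RUN, so existing call sites keep compiling and keep selecting the raw-mode fast path. A small sketch of what that means for callers; the enum type name VMMR0OPERATION is not visible in this hunk and is assumed here, and CallRing0() is a hypothetical helper.

    /* Hypothetical helper showing that the old and new names are interchangeable. */
    static int CallRing0(PVM pVM, VMMR0OPERATION enmOp)
    {
        if (enmOp == VMMR0_DO_RUN_GC)   /* identical value to VMMR0_DO_RAW_RUN */
            return SUPCallVMMR0(pVM, VMMR0_DO_RAW_RUN, NULL);
        return SUPCallVMMR0(pVM, enmOp, NULL);
    }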
trunk/src/VBox/HostDrivers/Support/SUPDRVIOC.h
(r42 → r397)

 #define SUP_IOCTL_SET_VM_FOR_FAST   SUP_CTL_CODE(20)

-/** Fast path IOCtl: VMMR0_DO_RUN_GC */
+/** Fast path IOCtl: VMMR0_DO_RAW_RUN */
 #define SUP_IOCTL_FAST_DO_RAW_RUN   SUP_CTL_CODE_FAST(64)
-/** Fast path IOCtl: VMMR0_HWACC_RUN_GUEST */
+/** Fast path IOCtl: VMMR0_DO_HWACC_RUN */
 #define SUP_IOCTL_FAST_DO_HWACC_RUN SUP_CTL_CODE_FAST(65)
+/** Just a NOP call for profiling the latency of a fast ioctl call to VMMR0. */
+#define SUP_IOCTL_FAST_DO_NOP       SUP_CTL_CODE_FAST(66)
trunk/src/VBox/HostDrivers/Support/SUPDRVShared.c
(r392 → r397)

 int VBOXCALL supdrvIOCtlFast(unsigned uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
 {
-    if (    !pSession->pVM
-        ||  pDevExt->pfnVMMR0Entry)
-        return VERR_INTERNAL_ERROR;
-    switch (uIOCtl)
-    {
-        case SUP_IOCTL_FAST_DO_RAW_RUN:
-            return pDevExt->pfnVMMR0Entry(pSession->pVM, VMMR0_DO_RUN_GC, NULL);
-        case SUP_IOCTL_FAST_DO_HWACC_RUN:
-            return pDevExt->pfnVMMR0Entry(pSession->pVM, VMMR0_HWACC_RUN_GUEST, NULL);
-        default:
-            return VERR_INTERNAL_ERROR;
-    }
+    /*
+     * Disable interrupts before invoking VMMR0Entry() because it ASSUMES
+     * that interrupts are disabled. (We check the two prereqs after doing
+     * this only to allow the compiler to optimize things better.)
+     */
+    RTCCUINTREG uFlags = ASMGetFlags();
+    ASMIntDisable();
+
+    int rc;
+    if (RT_LIKELY(pSession->pVM && pDevExt->pfnVMMR0Entry))
+    {
+        switch (uIOCtl)
+        {
+            case SUP_IOCTL_FAST_DO_RAW_RUN:
+                rc = pDevExt->pfnVMMR0Entry(pSession->pVM, VMMR0_DO_RAW_RUN, NULL);
+                break;
+            case SUP_IOCTL_FAST_DO_HWACC_RUN:
+                rc = pDevExt->pfnVMMR0Entry(pSession->pVM, VMMR0_DO_HWACC_RUN, NULL);
+                break;
+            case SUP_IOCTL_FAST_DO_NOP:
+                rc = pDevExt->pfnVMMR0Entry(pSession->pVM, VMMR0_DO_NOP, NULL);
+                break;
+            default:
+                rc = VERR_INTERNAL_ERROR;
+                break;
+        }
+    }
+    else
+        rc = VERR_INTERNAL_ERROR;
+
+    ASMSetFlags(uFlags);
+    return rc;
 }
 #endif /* VBOX_WITHOUT_IDT_PATCHING */
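The interesting part of this rewrite is the interrupt-flag bracket around the dispatch. A generic sketch of that bracket, using the same assembly helpers the hunk itself uses (ASMGetFlags, ASMIntDisable, ASMSetFlags); DoWorkWithIrqsDisabled() is a placeholder for whatever must run with interrupts off.

    /* Save the caller's IF state, run with interrupts disabled, then restore it. */
    RTCCUINTREG uFlags = ASMGetFlags();   /* remember the current EFLAGS/RFLAGS        */
    ASMIntDisable();                      /* VMMR0Entry() assumes interrupts are off   */
    int rc = DoWorkWithIrqsDisabled();    /* placeholder for the fast-path dispatch    */
    ASMSetFlags(uFlags);                  /* restore IF exactly as the caller had it   */
    return rc;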
trunk/src/VBox/HostDrivers/Support/SUPLib.cpp
(r339 → r397)

 #ifndef VBOX_WITHOUT_IDT_PATCHING
     return g_pfnCallVMMR0(pVM, uOperation, pvArg);
+
 #else
-    if (uOperation == VMMR0_DO_RUN_GC)
+    if (uOperation == VMMR0_DO_RAW_RUN)
     {
         Assert(!pvArg);
         return suplibOSIOCtlFast(SUP_IOCTL_FAST_DO_RAW_RUN);
     }
-    if (uOperation == VMMR0_HWACC_RUN_GUEST)
+    if (uOperation == VMMR0_DO_HWACC_RUN)
     {
         Assert(!pvArg);
         return suplibOSIOCtlFast(SUP_IOCTL_FAST_DO_HWACC_RUN);
+    }
+    if (uOperation == VMMR0_DO_NOP)
+    {
+        Assert(!pvArg);
+        return suplibOSIOCtlFast(SUP_IOCTL_FAST_DO_NOP);
     }
     return SUPCallVMMR0Ex(pVM, uOperation, pvArg, pvArg ? sizeof(pvArg) : 0);
trunk/src/VBox/HostDrivers/Support/darwin/SUPDrv-darwin.cpp
(r387 → r397)

      * the session and iCmd, and only returns a VBox status code.
      */
-    if (    iCmd == 1
-        ||  iCmd == 1)
+    if (    iCmd == SUP_IOCTL_FAST_DO_RAW_RUN
+        ||  iCmd == SUP_IOCTL_FAST_DO_HWACC_RUN
+        ||  iCmd == SUP_IOCTL_FAST_DO_NOP)
         return supdrvIOCtlFast(iCmd, &g_DevExt, pSession);
     return VBoxSupDrvIOCtlSlow(pSession, iCmd, pData, pProcess);
trunk/src/VBox/HostDrivers/Support/testcase/tstInt.cpp
(r1 → r397)

     for (int i = cIterations; i > 0; i--)
     {
-        rc = SUPCallVMMR0(&vm, 0xdeadbeef, NULL);
-        //RTPrintf("tstInt: SUPCallVMMR0 -> rc=%Vrc i=%d\n", rc, i);
+        rc = SUPCallVMMR0(&vm, VMMR0_DO_NOP, NULL);
+        if (rc != VINF_SUCCESS)
+        {
+            RTPrintf("tstInt: SUPCallVMMR0 -> rc=%Vrc i=%d Expected VINF_SUCCESS!\n", rc, i);
+            break;
+        }
     }
     RTPrintf("tstInt: Performed SUPCallVMMR0 %d times (rc=%Vrc)\n", cIterations, rc);
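Since VMMR0_DO_NOP exists precisely to measure the cost of a fast ioctl round trip, the loop above can also be timed. A sketch, assuming ASMReadTSC() (already used by VMM.cpp elsewhere in this changeset) is available to the testcase; the variable names reuse those of tstInt.

    /* Rough average cost of one VMMR0_DO_NOP round trip, in TSC ticks. */
    uint64_t TickStart = ASMReadTSC();
    for (int i = cIterations; i > 0; i--)
        if (SUPCallVMMR0(&vm, VMMR0_DO_NOP, NULL) != VINF_SUCCESS)
            break;
    uint64_t TickElapsed = ASMReadTSC() - TickStart;
    RTPrintf("tstInt: ~%d ticks per VMMR0_DO_NOP call\n", (int)(TickElapsed / cIterations));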
trunk/src/VBox/VMM/HWACCM.cpp
(r23 → r397)

     pVM->hwaccm.s.fInitialized = true;

-    int rc = SUPCallVMMR0(pVM, VMMR0_HWACC_SETUP_VM, NULL);
+    int rc = SUPCallVMMR0(pVM, VMMR0_DO_HWACC_SETUP_VM, NULL);
     AssertRC(rc);
     if (rc == VINF_SUCCESS)
…
     pVM->hwaccm.s.fInitialized = true;

-    int rc = SUPCallVMMR0(pVM, VMMR0_HWACC_SETUP_VM, NULL);
+    int rc = SUPCallVMMR0(pVM, VMMR0_DO_HWACC_SETUP_VM, NULL);
     AssertRC(rc);
     if (rc == VINF_SUCCESS)
trunk/src/VBox/VMM/TRPMInternal.h
(r347 → r397)

 #ifdef IN_RING0
+
+/**
+ * Calls the interrupt gate as if we received an interrupt while in Ring-0.
+ *
+ * Returns with interrupts enabled.
+ *
+ * @param   uIP     The interrupt gate IP.
+ * @param   SelCS   The interrupt gate CS.
+ * @param   RSP     The interrupt gate RSP. ~0 if no stack switch should take place. (only AMD64)
+ */
+DECLASM(void) trpmR0DispatchHostInterrupt(RTR0UINTPTR uIP, RTSEL SelCS, RTR0UINTPTR RSP);
+
+# ifndef VBOX_WITHOUT_IDT_PATCHING
 /**
  * Code used for the dispatching of interrupts in HC.
…
  */
 DECLASM(int) trpmR0InterruptDispatcher(void);
+# endif /* !VBOX_WITHOUT_IDT_PATCHING */
+
 #endif
trunk/src/VBox/VMM/VMM.cpp
(r271 → r397)

     STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchPF, STAMTYPE_COUNTER, "/VMM/GCRet/PatchPF", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_PF returns.");
     STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchGP, STAMTYPE_COUNTER, "/VMM/GCRet/PatchGP", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_GP returns.");
-    STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchIretIRQ, STAMTYPE_COUNTER, "/VMM/GCRet/PatchIret", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PENDING_IRQ_AFTER_IRET returns.");
+    STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchIretIRQ, STAMTYPE_COUNTER, "/VMM/GCRet/PatchIret", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PENDING_IRQ_AFTER_IRET returns.");
     STAM_REG(pVM, &pVM->vmm.s.StatGCRetPageOverflow, STAMTYPE_COUNTER, "/VMM/GCRet/InvlpgOverflow", STAMUNIT_OCCURENCES, "Number of VERR_REM_FLUSHED_PAGES_OVERFLOW returns.");
     STAM_REG(pVM, &pVM->vmm.s.StatGCRetRescheduleREM, STAMTYPE_COUNTER, "/VMM/GCRet/ScheduleREM", STAMUNIT_OCCURENCES, "Number of VINF_EM_RESCHEDULE_REM returns.");
…
         rc = VERR_GENERAL_FAILURE;
 #else
-        rc = SUPCallVMMR0(pVM, VMMR0_DO_RUN_GC, NULL);
+        rc = SUPCallVMMR0(pVM, VMMR0_DO_RAW_RUN, NULL);
 #endif
     } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
…
         rc = VERR_GENERAL_FAILURE;
 #else
-        rc = SUPCallVMMR0(pVM, VMMR0_HWACC_RUN_GUEST, NULL);
+        rc = SUPCallVMMR0(pVM, VMMR0_DO_HWACC_RUN, NULL);
 #endif
     } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
…
         rc = VERR_GENERAL_FAILURE;
 #else
-        rc = SUPCallVMMR0(pVM, VMMR0_DO_RUN_GC, NULL);
+        rc = SUPCallVMMR0(pVM, VMMR0_DO_RAW_RUN, NULL);
 #endif
     } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
…
         rc = VERR_GENERAL_FAILURE;
 #else
-        rc = SUPCallVMMR0(pVM, VMMR0_DO_RUN_GC, NULL);
+        rc = SUPCallVMMR0(pVM, VMMR0_DO_RAW_RUN, NULL);
 #endif
     } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
…
     CPUMPushHyper(pVM, GCPtrEP); /* what to call */
     CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline);
-    return SUPCallVMMR0(pVM, VMMR0_DO_RUN_GC, NULL);
+    return SUPCallVMMR0(pVM, VMMR0_DO_RAW_RUN, NULL);
…
     CPUMPushHyper(pVM, GCPtrEP); /* what to call */
     CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline);
-    rc = SUPCallVMMR0(pVM, VMMR0_DO_RUN_GC, NULL);
+    rc = SUPCallVMMR0(pVM, VMMR0_DO_RAW_RUN, NULL);
     bool fDump = false;
     if (rc != rcExpect)
…
     do
     {
-        rc = SUPCallVMMR0(pVM, VMMR0_DO_RUN_GC, NULL);
+        rc = SUPCallVMMR0(pVM, VMMR0_DO_RAW_RUN, NULL);
         if (VBOX_FAILURE(rc))
         {
…
         uint64_t TickThisStart = ASMReadTSC();
-        rc = SUPCallVMMR0(pVM, VMMR0_DO_RUN_GC, NULL);
+        rc = SUPCallVMMR0(pVM, VMMR0_DO_RAW_RUN, NULL);
         uint64_t TickThisElapsed = ASMReadTSC() - TickThisStart;
         if (VBOX_FAILURE(rc))
trunk/src/VBox/VMM/VMMR0/TRPMR0.cpp
(r23 → r397)

+
+/**
+ * Dispatches an interrupt that arrived while we were in the guest context.
+ *
+ * It's assumes we're invoked with interrupts disabled.
+ * When this function returns, interrupts will be enabled.
+ *
+ * @param   pVM     The VM handle.
+ */
+TRPMR0DECL(void) TRPMR0DispatchHostInterrupt(PVM pVM)
+{
+    RTUINT uActiveVector = pVM->trpm.s.uActiveVector;
+    pVM->trpm.s.uActiveVector = ~0;
+    AssertMsgReturnVoid(uActiveVector < 256, ("uActiveVector=%#x is invalid! (More assertions to come, please enjoy!)\n", uActiveVector));
+
+    /*
+     * Get the handler pointer (16:32 ptr) / (16:48 ptr).
+     */
+    RTIDTR Idtr;
+    ASMGetIDTR(&Idtr);
+#if HC_ARCH_BITS == 32
+    PVBOXIDTE pIdte = &((PVBOXIDTE)Idtr.pIdt)[uActiveVector];
+#else
+    PVBOXIDTE pIdte = &((PVBOXIDTE)Idtr.pIdt)[uActiveVector * 2];
+#endif
+    AssertMsgReturnVoid(pIdte->Gen.u1Present, ("The IDT entry (%d) is not present!\n", uActiveVector));
+    AssertMsgReturnVoid(    pIdte->Gen.u3Type1 == VBOX_IDTE_TYPE1
+                        ||  pIdte->Gen.u5Type2 == VBOX_IDTE_TYPE2_INT_32,
+                        ("The IDT entry (%d) is not 32-bit int gate! type1=%#x type2=%#x\n",
+                         uActiveVector, pIdte->Gen.u3Type1, pIdte->Gen.u5Type2));
+#if HC_ARCH_BITS == 32
+    RTFAR32 pfnHandler;
+    pfnHandler.off = (pIdte->Gen.u16OffsetHigh << 16) | pIdte->Gen.u16OffsetLow;
+    pfnHandler.sel = pIdte->Gen.u16SegSel;
+
+    const RTR0UINTREG uRSP = ~(RTR0UINTREG)0;
+
+#else /* 64-bit: */
+    RTFAR64 pfnHandler;
+    pfnHandler.off = (pIdte->Gen.u16OffsetHigh << 16) | pIdte->Gen.u16OffsetLow;
+    pfnHandler.off |= (uint64_t)(*(uint32_t *)(pIdte + 1)) << 32; //cleanup!
+    pfnHandler.sel = pIdte->Gen.u16SegSel;
+
+    RTR0UINTREG uRSP = ~(RTR0UINTREG)0;
+    if (pIdte->au32[1] & 0x7 /*IST*/)
+    {
+        /** @todo implement IST */
+    }
+
+#endif
+
+    /*
+     * Dispatch it.
+     */
+    trpmR0DispatchHostInterrupt(pfnHandler.off, pfnHandler.sel, uRSP);
+}
+
+#ifndef VBOX_WITHOUT_IDT_PATCHING
+
 /**
  * Changes the VMMR0Entry() call frame and stack used by the IDT patch code
…
     RTUINT uActiveVector = pVM->trpm.s.uActiveVector;
     pVM->trpm.s.uActiveVector = ~0;
-    if (uActiveVector >= 256)
-    {
-        AssertMsgFailed(("uActiveVector=%#x is invalid! (More assertions to come, please enjoy!)\n",
-                         uActiveVector));
-        return;
-    }
+    AssertMsgReturnVoid(uActiveVector < 256, ("uActiveVector=%#x is invalid! (More assertions to come, please enjoy!)\n", uActiveVector));

 #if HC_ARCH_BITS == 32
…
     ASMGetIDTR(&Idtr);
     PVBOXIDTE pIdte = &((PVBOXIDTE)Idtr.pIdt)[uActiveVector];
-    if (!pIdte->Gen.u1Present)
-    {
-        AssertMsgFailed(("The IDT entry (%d) is not present!\n", uActiveVector));
-        return;
-    }
-    if (    pIdte->Gen.u3Type1 != VBOX_IDTE_TYPE1
-        &&  pIdte->Gen.u5Type2 != VBOX_IDTE_TYPE2_INT_32)
-    {
-        AssertMsgFailed(("The IDT entry (%d) is not 32-bit int gate! type1=%#x type2=%#x\n",
-                         uActiveVector, pIdte->Gen.u3Type1, pIdte->Gen.u5Type2));
-        return;
-    }
+    AssertMsgReturnVoid(pIdte->Gen.u1Present, ("The IDT entry (%d) is not present!\n", uActiveVector));
+    AssertMsgReturnVoid(    pIdte->Gen.u3Type1 == VBOX_IDTE_TYPE1
+                        &&  pIdte->Gen.u5Type2 == VBOX_IDTE_TYPE2_INT_32,
+                        ("The IDT entry (%d) is not 32-bit int gate! type1=%#x type2=%#x\n",
+                         uActiveVector, pIdte->Gen.u3Type1, pIdte->Gen.u5Type2));

     RTFAR32 pfnHandler;
…
     ASMGetIDTR(&Idtr);
     PVBOXIDTE pIdte = &((PVBOXIDTE)Idtr.pIdt)[uActiveVector * 2];
-    if (!pIdte->Gen.u1Present)
-    {
-        AssertMsgFailed(("The IDT entry (%d) is not present!\n", uActiveVector));
-        return;
-    }
-    if (    pIdte->Gen.u3Type1 != VBOX_IDTE_TYPE1
-        &&  pIdte->Gen.u5Type2 != VBOX_IDTE_TYPE2_INT_32)
-    {
-        AssertMsgFailed(("The IDT entry (%d) is not 32-bit int gate! type1=%#x type2=%#x\n",
-                         uActiveVector, pIdte->Gen.u3Type1, pIdte->Gen.u5Type2));
-        return;
-    }
+
+    AssertMsgReturnVoid(pIdte->Gen.u1Present, ("The IDT entry (%d) is not present!\n", uActiveVector));
+    AssertMsgReturnVoid(    pIdte->Gen.u3Type1 == VBOX_IDTE_TYPE1
+                        &&  pIdte->Gen.u5Type2 == VBOX_IDTE_TYPE2_INT_32, /* == 64 */
+                        ("The IDT entry (%d) is not 64-bit int gate! type1=%#x type2=%#x\n",
+                         uActiveVector, pIdte->Gen.u3Type1, pIdte->Gen.u5Type2));

     RTFAR64 pfnHandler;
     pfnHandler.off = (pIdte->Gen.u16OffsetHigh << 16) | pIdte->Gen.u16OffsetLow;
     pfnHandler.off |= (uint64_t)(*(uint32_t *)(pIdte + 1)) << 32; //cleanup!
     pfnHandler.sel = pIdte->Gen.u16SegSel;
+
+    if (pIdte->au32[1] & 0x7 /*IST*/)
+    {
+        /** @todo implement IST */
+    }

     /*
…
 //    dprintf(("Interrupt: %04x:%08x vector %d\n", pfnHandler.sel, pfnHandler.off, uActiveVector));
 }
+
+#endif /* !VBOX_WITHOUT_IDT_PATCHING */
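The part of the new code that is easiest to misread is how the handler address is reassembled from the gate descriptor. The commented fragment below restates that arithmetic for the 64-bit case; the field names are the ones used in the hunk, and the fragment is illustrative rather than additional functionality.

    /* 64-bit (16-byte) interrupt gate: the handler offset is split across three fields. */
    uint64_t off  = pIdte->Gen.u16OffsetLow;                      /* bits  0..15              */
    off          |= (uint64_t)pIdte->Gen.u16OffsetHigh << 16;     /* bits 16..31              */
    off          |= (uint64_t)(*(uint32_t *)(pIdte + 1)) << 32;   /* bits 32..63, 2nd 8 bytes */
    uint16_t sel  = pIdte->Gen.u16SegSel;                         /* handler code selector    */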
trunk/src/VBox/VMM/VMMR0/TRPMR0A.asm
(r19 → r397)

 ;*******************************************************************************
 %include "VBox/asmdefs.mac"
+%include "VBox/x86.mac"


 BEGINCODE
+align 16
+
+;;
+; Calls the interrupt gate as if we received an interrupt while in Ring-0.
+;
+; Returns with interrupts enabled.
+;
+; @param   uIP     x86:[ebp+8]   msc:rcx  gcc:rdi   The interrupt gate IP.
+; @param   SelCS   x86:[ebp+12]  msc:dx   gcc:si    The interrupt gate CS.
+; @param   RSP                   msc:r8   gcc:rdx   The interrupt gate RSP. ~0 if no stack switch should take place. (only AMD64)
+;DECLASM(void) trpmR0DispatchHostInterrupt(RTR0UINTPTR uIP, RTSEL SelCS, RTR0UINTPTR RSP);
+BEGINPROC trpmR0DispatchHostInterrupt
+    push    xBP
+    mov     xBP, xSP
+
+%ifdef __AMD64__
+    mov     rax, rsp
+    and     rsp, 15h                    ; align the stack. (do it unconditionally saves some jump mess)
+
+    ; switch stack?
+ %ifdef ASM_CALL64_MSC
+    cmp     r8, 0ffffffffffffffffh
+    je      .no_stack_switch
+    mov     rsp, r8
+ %else
+    cmp     rdx, 0ffffffffffffffffh
+    je      .no_stack_switch
+    mov     rsp, rdx
+ %endif
+.no_stack_switch:
+
+    ; create the iret frame
+    push    0                           ; SS
+    push    rax                         ; RSP
+    pushfd                              ; RFLAGS
+    and     dword [rsp], ~X86_EFL_IF
+    push    cs                          ; CS
+    mov     rax, .return                ; RIP
+    push    rax
+
+    ; create the retf frame
+ %ifdef ASM_CALL64_MSC
+    movzx   rdx, dx
+    push    rdx
+    push    rcx
+ %else
+    movzx   rdi, di
+    push    rdi
+    push    rsi
+ %endif
+
+    ; dispatch it!
+    db 048h
+    retf
+
+%else ; 32-bit:
+    mov     ecx, [ebp + 8]              ; uIP
+    movzx   edx, word [ebp + 12]        ; SelCS
+
+    ; create the iret frame
+    pushfd                              ; EFLAGS
+    and     dword [esp], ~X86_EFL_IF
+    push    cs                          ; CS
+    push    .return                     ; EIP
+
+    ; create the retf frame
+    push    edx
+    push    ecx
+
+    ; dispatch it!
+    retf
+%endif
+.return:
+
+    leave
+    ret
+ENDPROC trpmR0DispatchHostInterrupt
+
+
+%ifndef VBOX_WITHOUT_IDT_PATCHING

 align 16
…
 ENDPROC trpmR0InterruptDispatcher

+%endif ; !VBOX_WITHOUT_IDT_PATCHING
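For readers who do not want to decode the NASM, the stack image the new routine builds just before its far return can be pictured as two frames. The structs below are purely illustrative (they do not exist in the sources) and show the AMD64 layout from the lowest stack address upwards.

    /* Consumed by the 'retf': transfers control to the interrupt gate's CS:RIP. */
    struct RETFFRAME
    {
        uint64_t uGateRIP;      /* gate offset, pushed last                */
        uint64_t uGateCS;       /* gate code selector (zero extended)      */
    };
    /* Consumed by the handler's iret(q): resumes execution at the .return label. */
    struct IRETFRAME
    {
        uint64_t uRIP;          /* .return                                 */
        uint64_t uCS;           /* current cs                              */
        uint64_t uRFlags;       /* copy of RFLAGS with IF cleared          */
        uint64_t uRSP;          /* the original stack pointer saved in rax */
        uint64_t uSS;           /* 0                                       */
    };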
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
(r272 → r397)

          * These calls return whatever the GC returns.
          */
-        case VMMR0_DO_RUN_GC:
+        case VMMR0_DO_RAW_RUN:
         {
             /* Safety precaution as VMX disables the switcher. */
…
                 case VINF_EM_RAW_INTERRUPT:
                 case VINF_EM_RAW_INTERRUPT_HYPER:
+#ifdef VBOX_WITHOUT_IDT_PATCHING
+                    TRPMR0DispatchHostInterrupt(pVM);
+#else
                     TRPMR0SetupInterruptDispatcherFrame(pVM, (char*)&pVM - sizeof(pVM));
+#endif
                     return rc;
             }
…
          * Run guest code using the available hardware acceleration technology.
          */
-        case VMMR0_HWACC_RUN_GUEST:
+        case VMMR0_DO_HWACC_RUN:
         {
             int rc;
…
          * Setup the hardware accelerated raw-mode session.
          */
-        case VMMR0_HWACC_SETUP_VM:
+        case VMMR0_DO_HWACC_SETUP_VM:
             return HWACCMR0SetupVMX(pVM);
…
 #endif /* !__L4__ */

-#ifdef DEBUG
-        /*
-         * For testing purposes only.
-         */
-        case 0xdeadbeef:
-        {
-            LogCom(("VMMR0Entry: !debug testing! 0xdeadbeef!\n"));
-#if 0
-            void *pv;
-            void *pvPhys;
-
-            /* alloc cont memory */
-            int rc = SUPR0ContAlloc(pVM->pSession, 0x1fff, &pv, &pvPhys);
-            LogCom(("VMMR0Entry: ContAlloc: rc=%d pv=%p pvPhys=%p\n", rc, pv, pvPhys));
-            if (!VBOX_SUCCESS(rc))
-                return rc;
-            /* touch */
-            ((char*)pv)[0x1000] = ((char*)pv)[0] = 'f';
-            /* free */
-            rc = SUPR0ContFree(pVM->pSession, pv);
-            LogCom(("VMMR0Entry: ContFree: rc=%d\n", rc));
-            if (!VBOX_SUCCESS(rc))
-                return rc;
-#endif
-            /* successful return - consistent with release builds. */
-            return VERR_NOT_SUPPORTED;
-        }
-#endif
+        /*
+         * For profiling.
+         */
+        case VMMR0_DO_NOP:
+            return VINF_SUCCESS;
+
+        /*
+         * For testing Ring-0 APIs invoked in this environment.
+         */
+        case VMMR0_DO_TESTS:
+            /** @todo make new test */
+            return VINF_SUCCESS;
+

         default:
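Putting the pieces of this changeset together, the new NOP operation forms a complete Ring-3 to Ring-0 round trip that does nothing but return VINF_SUCCESS, which is exactly what tstInt now exercises and what the extra fast ioctl was added for. The commented trace below only names functions that appear in the hunks above, and it assumes the build where SUPLib takes the fast ioctl branch (VBOX_WITHOUT_IDT_PATCHING defined).

    /* End-to-end path of the profiling call, as wired up by this changeset:
     *
     *   Ring-3 client (e.g. tstInt)
     *     -> SUPCallVMMR0(pVM, VMMR0_DO_NOP, NULL)            SUPLib.cpp
     *       -> suplibOSIOCtlFast(SUP_IOCTL_FAST_DO_NOP)       SUPLib.cpp
     *         -> supdrvIOCtlFast(...)                         SUPDRVShared.c (IRQs off)
     *           -> pfnVMMR0Entry(pVM, VMMR0_DO_NOP, NULL)     VMMR0.cpp
     *             -> return VINF_SUCCESS
     */
    int rc = SUPCallVMMR0(pVM, VMMR0_DO_NOP, NULL);
    Assert(rc == VINF_SUCCESS);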