Changeset 4811 in vbox for trunk/src/VBox
Timestamp: Sep 14, 2007 5:53:56 PM
svn:sync-xref-src-repo-rev: 24514
Location: trunk/src/VBox
Files: 20 edited
trunk/src/VBox/HostDrivers/Support/SUPDRV.h
--- r4800
+++ r4811
@@ -565 +565 @@
     /** Patch usage records. (protected by SUPDRVDEVEXT::SpinLock) */
     PSUPDRVPATCHUSAGE volatile pPatchUsage;
-#else
+#endif
     /** The VM associated with the session. */
     PVM pVM;
-#endif
     /** List of generic usage records. (protected by SUPDRVDEVEXT::SpinLock) */
     PSUPDRVUSAGE volatile pUsage;
@@ -642 +641 @@
      * 0 if the code VMM isn't loaded and Idt are nops. */
     void * volatile pvVMMR0;
-    /** VMMR0Entry() pointer. */
-    DECLR0CALLBACKMEMBER(int, pfnVMMR0Entry, (PVM pVM, unsigned uOperation, void *pvArg));
+    /** VMMR0EntryInt() pointer. */
+    DECLR0CALLBACKMEMBER(int, pfnVMMR0EntryInt, (PVM pVM, unsigned uOperation, void *pvArg));
+    /** VMMR0EntryFast() pointer. */
+    DECLR0CALLBACKMEMBER(int, pfnVMMR0EntryFast, (PVM pVM, unsigned uOperation));
+    /** VMMR0EntryEx() pointer. */
+    DECLR0CALLBACKMEMBER(int, pfnVMMR0EntryEx, (PVM pVM, unsigned uOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg));

     /** Linked list of loaded code. */
@@ -736 +739 @@
 *******************************************************************************/
 int  VBOXCALL   supdrvIOCtl(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr);
-#ifdef VBOX_WITHOUT_IDT_PATCHING
 int  VBOXCALL   supdrvIOCtlFast(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession);
-#endif
 int  VBOXCALL   supdrvInitDevExt(PSUPDRVDEVEXT pDevExt);
 void VBOXCALL   supdrvDeleteDevExt(PSUPDRVDEVEXT pDevExt);
trunk/src/VBox/HostDrivers/Support/SUPDRVIOC.h
--- r4806
+++ r4811
@@ -388 +388 @@
         /** The module handle (i.e. address). */
         RTR0PTR pvVMMR0;
-        /** Address of VMMR0Entry function. */
-        RTR0PTR pvVMMR0Entry;
+        /** Address of VMMR0EntryInt function. */
+        RTR0PTR pvVMMR0EntryInt;
+        /** Address of VMMR0EntryFast function. */
+        RTR0PTR pvVMMR0EntryFast;
+        /** Address of VMMR0EntryEx function. */
+        RTR0PTR pvVMMR0EntryEx;
     } VMMR0;
 } EP;
@@ -496 +500 @@
 #endif
         /** Argument to use when no request packet is supplied. */
-        RTR0UINTPTR uArg;
+        uint64_t u64Arg;
     } In;
 } u;
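The second hunk replaces the loose pointer-sized argument with a fixed-width u64Arg; anything larger now travels in a request packet that starts with a SUPVMMR0REQHDR. A minimal sketch of the resulting ring-3 calling convention, assuming the SUPCallVMMR0Ex signature introduced in SUPLib.cpp below — the MYDEVREQ struct, its non-header fields and the VMMR0_DO_MY_OP operation number are invented for illustration:

    /* Hypothetical request packet: the shared header must come first so the
       support driver can validate magic and size before dispatching. */
    typedef struct MYDEVREQ
    {
        SUPVMMR0REQHDR  Hdr;    /* u32Magic / cbReq, checked by the driver. */
        uint32_t        uIn;    /* IN:  some operand. */
        uint32_t        uOut;   /* OUT: some result. */
    } MYDEVREQ;

    static int myDevCall(PVM pVM)
    {
        MYDEVREQ Req;
        Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
        Req.Hdr.cbReq    = sizeof(Req);
        Req.uIn          = 42;
        /* VMMR0_DO_MY_OP is a placeholder; real callers pass a VMMR0_DO_* value. */
        return SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_MY_OP, 0 /*u64Arg*/, &Req.Hdr);
    }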
trunk/src/VBox/HostDrivers/Support/SUPDRVShared.c
--- r4806
+++ r4811
@@ -30 +30 @@
 #include <iprt/process.h>
 #include <iprt/log.h>
-#ifdef VBOX_WITHOUT_IDT_PATCHING
-# include <VBox/vmm.h>
-# include <VBox/err.h>
-#endif
@@ -169 +165 @@
 static int      supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq);
 static int      supdrvIOCtl_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq);
-static int      supdrvLdrSetR0EP(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0Entry);
+static int      supdrvLdrSetR0EP(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryInt, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx);
static void     supdrvLdrUnsetR0EP(PSUPDRVDEVEXT pDevExt);
 static void     supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage);
@@ -572 +568 @@

-#ifdef VBOX_WITHOUT_IDT_PATCHING
 /**
  * Fast path I/O Control worker.
@@ -592 +587 @@
     ASMIntDisable();

-    if (RT_LIKELY(pSession->pVM && pDevExt->pfnVMMR0Entry))
+    if (RT_LIKELY(pSession->pVM && pDevExt->pfnVMMR0EntryFast))
     {
         switch (uIOCtl)
         {
             case SUP_IOCTL_FAST_DO_RAW_RUN:
-                rc = pDevExt->pfnVMMR0Entry(pSession->pVM, VMMR0_DO_RAW_RUN, NULL);
+                rc = pDevExt->pfnVMMR0EntryFast(pSession->pVM, SUP_VMMR0_DO_RAW_RUN);
                 break;
             case SUP_IOCTL_FAST_DO_HWACC_RUN:
-                rc = pDevExt->pfnVMMR0Entry(pSession->pVM, VMMR0_DO_HWACC_RUN, NULL);
+                rc = pDevExt->pfnVMMR0EntryFast(pSession->pVM, SUP_VMMR0_DO_HWACC_RUN);
                 break;
             case SUP_IOCTL_FAST_DO_NOP:
-                rc = pDevExt->pfnVMMR0Entry(pSession->pVM, VMMR0_DO_NOP, NULL);
+                rc = pDevExt->pfnVMMR0EntryFast(pSession->pVM, SUP_VMMR0_DO_NOP);
                 break;
             default:
@@ -616 +611 @@
     return rc;
 }
-#endif /* VBOX_WITHOUT_IDT_PATCHING */
@@ -964 +958 @@

         /* execute */
-        if (RT_LIKELY(pDevExt->pfnVMMR0Entry))
-            pReq->Hdr.rc = pDevExt->pfnVMMR0Entry(pReq->u.In.pVMR0, pReq->u.In.uOperation, (void *)pReq->u.In.uArg);
+        if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
+            pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pReq->u.In.pVMR0, pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg);
         else
             pReq->Hdr.rc = VERR_WRONG_ORDER;
@@ -977 +971 @@

         /* execute */
-        if (RT_LIKELY(pDevExt->pfnVMMR0Entry))
-            pReq->Hdr.rc = pDevExt->pfnVMMR0Entry(pReq->u.In.pVMR0, pReq->u.In.uOperation, pVMMReq);
+        if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
+            pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pReq->u.In.pVMR0, pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg);
         else
             pReq->Hdr.rc = VERR_WRONG_ORDER;
@@ -1056 +1050 @@
                     ("SUP_IOCTL_SET_VM_FOR_FAST: pVMR0=%p!\n", pReq->u.In.pVMR0));
         /* execute */
-#ifndef VBOX_WITHOUT_IDT_PATCHING
-        OSDBGPRINT(("SUP_IOCTL_SET_VM_FOR_FAST: !VBOX_WITHOUT_IDT_PATCHING\n"));
-        pReq->Hdr.rc = VERR_NOT_SUPPORTED;
-#else
         pSession->pVM = pReq->u.In.pVMR0;
         pReq->Hdr.rc = VINF_SUCCESS;
-#endif
         return 0;
     }
@@ -2737 +2726 @@
 # endif

-    *u.pb++ = 0xff; // call qword [pfnVMMR0Entry wrt rip]
+    *u.pb++ = 0xff; // call qword [pfnVMMR0EntryInt wrt rip]
     *u.pb++ = 0x15;
     uFixCall = u;
@@ -2759 +2748 @@
     *u.pb++ = 0xcc;

-    /* Pointer to the VMMR0Entry. */                       // pfnVMMR0Entry dq StubVMMR0Entry
+    /* Pointer to the VMMR0Entry. */                       // pfnVMMR0EntryInt dq StubVMMR0Entry
     *uFixCall.pu32 = (uint32_t)(u.pb - uFixCall.pb - 4);   uFixCall.pb = NULL;
     pPatch->offVMMR0EntryFixup = (uint16_t)(u.pb - &pPatch->auCode[0]);
-    *u.pu64++ = pDevExt->pvVMMR0 ? (uint64_t)pDevExt->pfnVMMR0Entry : (uint64_t)u.pb + 8;
+    *u.pu64++ = pDevExt->pvVMMR0 ? (uint64_t)pDevExt->pfnVMMR0EntryInt : (uint64_t)u.pb + 8;

     /* stub entry. */                                      // StubVMMR0Entry:
@@ -2889 +2878 @@
     /* Fixup the VMMR0Entry call. */
     if (pDevExt->pvVMMR0)
-        *uFixCall.pu32 = (uint32_t)pDevExt->pfnVMMR0Entry - (uint32_t)(uFixCall.pu32 + 1);
+        *uFixCall.pu32 = (uint32_t)pDevExt->pfnVMMR0EntryInt - (uint32_t)(uFixCall.pu32 + 1);
     else
         *uFixCall.pu32 = (uint32_t)&pPatch->auCode[pPatch->offStub] - (uint32_t)(uFixCall.pu32 + 1);
@@ -3218 +3207 @@
             break;
         case SUPLDRLOADEP_VMMR0:
-            if (!pReq->u.In.EP.VMMR0.pvVMMR0 || !pReq->u.In.EP.VMMR0.pvVMMR0Entry)
+            if (    !pReq->u.In.EP.VMMR0.pvVMMR0
+                ||  !pReq->u.In.EP.VMMR0.pvVMMR0EntryInt
+                ||  !pReq->u.In.EP.VMMR0.pvVMMR0EntryFast
+                ||  !pReq->u.In.EP.VMMR0.pvVMMR0EntryEx)
             {
                 RTSemFastMutexRelease(pDevExt->mtxLdr);
-                dprintf(("pvVMMR0=%p or pReq->u.In.EP.VMMR0.pvVMMR0Entry=%p is NULL!\n",
-                         pReq->u.In.EP.VMMR0.pvVMMR0, pReq->u.In.EP.VMMR0.pvVMMR0Entry));
+                dprintf(("NULL pointer: pvVMMR0=%p pvVMMR0EntryInt=%p pvVMMR0EntryFast=%p pvVMMR0EntryEx=%p!\n",
+                         pReq->u.In.EP.VMMR0.pvVMMR0, pReq->u.In.EP.VMMR0.pvVMMR0EntryInt,
+                         pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx));
                 return VERR_INVALID_PARAMETER;
             }
-            if ((uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0Entry - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
+            if (    (uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0EntryInt  - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage
+                ||  (uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0EntryFast - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage
+                ||  (uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0EntryEx   - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
             {
                 RTSemFastMutexRelease(pDevExt->mtxLdr);
-                dprintf(("SUP_IOCTL_LDR_LOAD: pvVMMR0Entry=%p is outside the image (%p %d bytes)\n",
-                         pReq->u.In.EP.VMMR0.pvVMMR0Entry, pImage->pvImage, pReq->u.In.cbImage));
+                dprintf(("Out of range (%p LB %#x): pvVMMR0EntryInt=%p, pvVMMR0EntryFast=%p or pvVMMR0EntryEx=%p is NULL!\n",
+                         pImage->pvImage, pReq->u.In.cbImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryInt,
+                         pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx));
                 return VERR_INVALID_PARAMETER;
             }
@@ -3278 +3274 @@
             break;
         case SUPLDRLOADEP_VMMR0:
-            rc = supdrvLdrSetR0EP(pDevExt, pReq->u.In.EP.VMMR0.pvVMMR0, pReq->u.In.EP.VMMR0.pvVMMR0Entry);
+            rc = supdrvLdrSetR0EP(pDevExt, pReq->u.In.EP.VMMR0.pvVMMR0, pReq->u.In.EP.VMMR0.pvVMMR0EntryInt,
+                                  pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx);
             break;
     }
@@ -3286 +3283 @@
     */
    dprintf(("supdrvIOCtl_LdrLoad: pfnModuleInit=%p\n", pImage->pfnModuleInit));
-    if (!rc && pImage->pfnModuleInit)
+    if (RT_SUCCESS(rc) && pImage->pfnModuleInit)
     {
         dprintf(("supdrvIOCtl_LdrLoad: calling pfnModuleInit=%p\n", pImage->pfnModuleInit));
@@ -3443 +3440 @@
 *
 * @returns IPRT status code.
- * @param   pDevExt         Device globals.
- * @param   pSession        Session data.
- * @param   pVMMR0          VMMR0 image handle.
- * @param   pVMMR0Entry     VMMR0Entry address.
+ * @param   pDevExt             Device globals.
+ * @param   pSession            Session data.
+ * @param   pVMMR0              VMMR0 image handle.
+ * @param   pvVMMR0EntryInt     VMMR0EntryInt address.
+ * @param   pvVMMR0EntryFast    VMMR0EntryFast address.
+ * @param   pvVMMR0EntryEx      VMMR0EntryEx address.
 * @remark  Caller must own the loader mutex.
 */
-static int supdrvLdrSetR0EP(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0Entry)
+static int supdrvLdrSetR0EP(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryInt, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx)
 {
     int rc = VINF_SUCCESS;
-    dprintf(("supdrvLdrSetR0EP pvVMMR0=%p pvVMMR0Entry=%p\n", pvVMMR0, pvVMMR0Entry));
+    dprintf(("supdrvLdrSetR0EP pvVMMR0=%p pvVMMR0EntryInt=%p\n", pvVMMR0, pvVMMR0EntryInt));

@@ -3467 +3466 @@
         * Set it and update IDT patch code.
         */
-        pDevExt->pvVMMR0 = pvVMMR0;
-        pDevExt->pfnVMMR0Entry = pvVMMR0Entry;
+        pDevExt->pvVMMR0           = pvVMMR0;
+        pDevExt->pfnVMMR0EntryInt  = pvVMMR0EntryInt;
+        pDevExt->pfnVMMR0EntryFast = pvVMMR0EntryFast;
+        pDevExt->pfnVMMR0EntryEx   = pvVMMR0EntryEx;
 #ifndef VBOX_WITHOUT_IDT_PATCHING
         for (pPatch = pDevExt->pIdtPatches; pPatch; pPatch = pPatch->pNext)
@@ -3484 +3485 @@
    {
        /*
-        * Return failure or success depending on whether the
-        * values match or not.
+        * Return failure or success depending on whether the values match or not.
        */
        if (    pDevExt->pvVMMR0 != pvVMMR0
-            ||  (void *)pDevExt->pfnVMMR0Entry != pvVMMR0Entry)
+            ||  (void *)pDevExt->pfnVMMR0EntryInt  != pvVMMR0EntryInt
+            ||  (void *)pDevExt->pfnVMMR0EntryFast != pvVMMR0EntryFast
+            ||  (void *)pDevExt->pfnVMMR0EntryEx   != pvVMMR0EntryEx)
        {
            AssertMsgFailed(("SUP_IOCTL_LDR_SETR0EP: Already set pointing to a different module!\n"));
@@ -3509 +3511 @@
 #endif

-    pDevExt->pvVMMR0 = NULL;
-    pDevExt->pfnVMMR0Entry = NULL;
+    pDevExt->pvVMMR0           = NULL;
+    pDevExt->pfnVMMR0EntryInt  = NULL;
+    pDevExt->pfnVMMR0EntryFast = NULL;
+    pDevExt->pfnVMMR0EntryEx   = NULL;

 #ifndef VBOX_WITHOUT_IDT_PATCHING
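Two pieces above cooperate to make the fast path cheap: SUP_IOCTL_SET_VM_FOR_FAST caches the ring-0 VM pointer on the session once, after which supdrvIOCtlFast dispatches with interrupts disabled and no argument validation or copying at all. A condensed sketch of that driver-side contract, with error paths and the uIOCtl switch omitted:

    /* One-time setup (SUP_IOCTL_SET_VM_FOR_FAST): remember the VM for this session. */
    pSession->pVM = pReq->u.In.pVMR0;

    /* Every fast ioctl thereafter (supdrvIOCtlFast): no buffers at all. */
    ASMIntDisable();
    int rc = VERR_INTERNAL_ERROR;
    if (RT_LIKELY(pSession->pVM && pDevExt->pfnVMMR0EntryFast))
        rc = pDevExt->pfnVMMR0EntryFast(pSession->pVM, SUP_VMMR0_DO_NOP);
    /* VMMR0EntryFast and the world switchers deal with pending host interrupts. */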
trunk/src/VBox/HostDrivers/Support/SUPLib.cpp
--- r4806
+++ r4811
@@ -459 +459 @@
 * For later.
 */
-static int supCallVMMR0ExFake(PVMR0 pVMR0, unsigned uOperation, void *pvVMMReq, size_t cbVMMReq)
+static int supCallVMMR0ExFake(PVMR0 pVMR0, unsigned uOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr)
 {
     AssertMsgFailed(("%d\n", uOperation));
@@ -468 +468 @@

-SUPR3DECL(int) SUPCallVMMR0Ex(PVMR0 pVMR0, unsigned uOperation, void *pvVMMReq, size_t cbVMMReq)
-{
+SUPR3DECL(int) SUPCallVMMR0Fast(PVMR0 pVMR0, unsigned uOperation)
+{
+    if (RT_LIKELY(uOperation == SUP_VMMR0_DO_RAW_RUN))
+        return suplibOSIOCtlFast(SUP_IOCTL_FAST_DO_RAW_RUN);
+    if (RT_LIKELY(uOperation == SUP_VMMR0_DO_HWACC_RUN))
+        return suplibOSIOCtlFast(SUP_IOCTL_FAST_DO_HWACC_RUN);
+    if (RT_LIKELY(uOperation == SUP_VMMR0_DO_NOP))
+        return suplibOSIOCtlFast(SUP_IOCTL_FAST_DO_NOP);
+
+    AssertMsgFailed(("%#x\n", uOperation));
+    return VERR_INTERNAL_ERROR;
+}
+
+
+SUPR3DECL(int) SUPCallVMMR0Ex(PVMR0 pVMR0, unsigned uOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr)
+{
+#if 0 /* temp hack. */
     /*
      * The following operations don't belong here.
      */
-    AssertMsgReturn(    uOperation != VMMR0_DO_RAW_RUN
-                    &&  uOperation != VMMR0_DO_HWACC_RUN
-                    &&  uOperation != VMMR0_DO_NOP,
+    AssertMsgReturn(    uOperation != SUP_VMMR0_DO_RAW_RUN
+                    &&  uOperation != SUP_VMMR0_DO_HWACC_RUN
+                    &&  uOperation != SUP_VMMR0_DO_NOP,
                     ("%#x\n", uOperation),
                     VERR_INTERNAL_ERROR);
+#else
+    if (    (   uOperation == SUP_VMMR0_DO_RAW_RUN
+             || uOperation == SUP_VMMR0_DO_HWACC_RUN
+             || uOperation == SUP_VMMR0_DO_NOP)
+        &&  !pReqHdr
+        &&  !u64Arg)
+        return (int) SUPCallVMMR0Fast(pVMR0, uOperation);
+#endif
+
     /* fake */
     if (RT_UNLIKELY(g_u32FakeMode))
-        return supCallVMMR0ExFake(pVMR0, uOperation, pvVMMReq, cbVMMReq);
+        return supCallVMMR0ExFake(pVMR0, uOperation, u64Arg, pReqHdr);

     int rc;
-    if (!cbVMMReq)
+    if (!pReqHdr)
     {
         /* no data. */
@@ -493 +517 @@
         Req.u.In.pVMR0 = pVMR0;
         Req.u.In.uOperation = uOperation;
-        Req.u.In.uArg = (uintptr_t)pvVMMReq;
+        Req.u.In.u64Arg = u64Arg;
         rc = suplibOsIOCtl(SUP_IOCTL_CALL_VMMR0(0), &Req, SUP_IOCTL_CALL_VMMR0_SIZE(0));
         if (RT_SUCCESS(rc))
             rc = Req.Hdr.rc;
     }
-    else if (SUP_IOCTL_CALL_VMMR0_SIZE(cbVMMReq) < _4K) /* FreeBSD won't copy more than 4K. */
-    {
-        AssertPtr(pvVMMReq);
-        PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)alloca(SUP_IOCTL_CALL_VMMR0_SIZE(cbVMMReq));
+    else if (SUP_IOCTL_CALL_VMMR0_SIZE(pReqHdr->cbReq) < _4K) /* FreeBSD won't copy more than 4K. */
+    {
+        AssertPtrReturn(pReqHdr, VERR_INVALID_POINTER);
+        AssertReturn(pReqHdr->u32Magic != SUPVMMR0REQHDR_MAGIC, VERR_INVALID_MAGIC);
+        const size_t cbReq = pReqHdr->cbReq;
+
+        PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)alloca(SUP_IOCTL_CALL_VMMR0_SIZE(cbReq));
         pReq->Hdr.u32Cookie = g_u32Cookie;
         pReq->Hdr.u32SessionCookie = g_u32SessionCookie;
-        pReq->Hdr.cbIn = SUP_IOCTL_CALL_VMMR0_SIZE_IN(cbVMMReq);
-        pReq->Hdr.cbOut = SUP_IOCTL_CALL_VMMR0_SIZE_OUT(cbVMMReq);
+        pReq->Hdr.cbIn = SUP_IOCTL_CALL_VMMR0_SIZE_IN(cbReq);
+        pReq->Hdr.cbOut = SUP_IOCTL_CALL_VMMR0_SIZE_OUT(cbReq);
         pReq->Hdr.fFlags = SUPREQHDR_FLAGS_DEFAULT;
         pReq->Hdr.rc = VERR_INTERNAL_ERROR;
         pReq->u.In.pVMR0 = pVMR0;
         pReq->u.In.uOperation = uOperation;
-        pReq->u.In.uArg = 0;
-        memcpy(&pReq->abReqPkt[0], pvVMMReq, cbVMMReq);
-        rc = suplibOsIOCtl(SUP_IOCTL_CALL_VMMR0(cbVMMReq), pReq, SUP_IOCTL_CALL_VMMR0_SIZE(cbVMMReq));
+        pReq->u.In.u64Arg = u64Arg;
+        memcpy(&pReq->abReqPkt[0], pReqHdr, cbReq);
+        rc = suplibOsIOCtl(SUP_IOCTL_CALL_VMMR0(cbReq), pReq, SUP_IOCTL_CALL_VMMR0_SIZE(cbReq));
         if (RT_SUCCESS(rc))
             rc = pReq->Hdr.rc;
-        memcpy(pvVMMReq, &pReq->abReqPkt[0], cbVMMReq);
+        memcpy(pReqHdr, &pReq->abReqPkt[0], cbReq);
     }
     else /** @todo may have to remove the size limits one this request... */
-        AssertMsgFailedReturn(("cbVMMReq=%#x\n", cbVMMReq), VERR_INTERNAL_ERROR);
+        AssertMsgFailedReturn(("cbReq=%#x\n", pReqHdr->cbReq), VERR_INTERNAL_ERROR);
     return rc;
 }
@@ -529 +556 @@

 #else
-    if (RT_LIKELY(uOperation == VMMR0_DO_RAW_RUN))
+    if (RT_LIKELY(uOperation == SUP_VMMR0_DO_RAW_RUN))
     {
         Assert(!pvArg);
         return suplibOSIOCtlFast(SUP_IOCTL_FAST_DO_RAW_RUN);
     }
-    if (RT_LIKELY(uOperation == VMMR0_DO_HWACC_RUN))
+    if (RT_LIKELY(uOperation == SUP_VMMR0_DO_HWACC_RUN))
     {
         Assert(!pvArg);
         return suplibOSIOCtlFast(SUP_IOCTL_FAST_DO_HWACC_RUN);
     }
-    if (RT_LIKELY(uOperation == VMMR0_DO_NOP))
+    if (RT_LIKELY(uOperation == SUP_VMMR0_DO_NOP))
     {
         Assert(!pvArg);
         return suplibOSIOCtlFast(SUP_IOCTL_FAST_DO_NOP);
     }
-    AssertMsgFailedReturn(("uOperation=%#x\n", uOperation), VERR_INTERNAL_ERROR);
+    return SUPCallVMMR0Ex(pVMR0, uOperation, (uintptr_t)pvArg, NULL);
 #endif
 }
@@ -1499 +1526 @@
     * Get the entry points.
     */
-    RTUINTPTR VMMR0Entry = 0;
+    RTUINTPTR VMMR0EntryInt = 0;
+    RTUINTPTR VMMR0EntryFast = 0;
+    RTUINTPTR VMMR0EntryEx = 0;
    RTUINTPTR ModuleInit = 0;
    RTUINTPTR ModuleTerm = 0;
    if (fIsVMMR0)
-        rc = RTLdrGetSymbolEx(hLdrMod, &pLoadReq->u.In.achImage[0], (uintptr_t)OpenReq.u.Out.pvImageBase, "VMMR0Entry", &VMMR0Entry);
+    {
+        rc = RTLdrGetSymbolEx(hLdrMod, &pLoadReq->u.In.achImage[0], (uintptr_t)OpenReq.u.Out.pvImageBase, "VMMR0EntryInt", &VMMR0EntryInt);
+        if (RT_SUCCESS(rc))
+            rc = RTLdrGetSymbolEx(hLdrMod, &pLoadReq->u.In.achImage[0], (uintptr_t)OpenReq.u.Out.pvImageBase, "VMMR0EntryFast", &VMMR0EntryFast);
+        if (RT_SUCCESS(rc))
+            rc = RTLdrGetSymbolEx(hLdrMod, &pLoadReq->u.In.achImage[0], (uintptr_t)OpenReq.u.Out.pvImageBase, "VMMR0EntryEx", &VMMR0EntryEx);
+    }
    if (RT_SUCCESS(rc))
    {
@@ -1546 +1581 @@
        pLoadReq->u.In.eEPType = SUPLDRLOADEP_VMMR0;
        pLoadReq->u.In.EP.VMMR0.pvVMMR0 = OpenReq.u.Out.pvImageBase;
-        pLoadReq->u.In.EP.VMMR0.pvVMMR0Entry = (RTR0PTR)VMMR0Entry;
+        pLoadReq->u.In.EP.VMMR0.pvVMMR0EntryInt = (RTR0PTR)VMMR0EntryInt;
+        pLoadReq->u.In.EP.VMMR0.pvVMMR0EntryFast= (RTR0PTR)VMMR0EntryFast;
+        pLoadReq->u.In.EP.VMMR0.pvVMMR0EntryEx  = (RTR0PTR)VMMR0EntryEx;
    }
    else
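In short, ring-3 code now has two doors into ring-0: SUPCallVMMR0Fast for the three argument-less hot operations and SUPCallVMMR0Ex for everything carrying a scalar or a request packet. A sketch under those assumptions; the wrapper function itself is illustrative, not part of the changeset:

    /* Illustrative only: pick the entry point by whether arguments travel. */
    static int demoRoundTrips(PVMR0 pVMR0)
    {
        /* Hot path: no marshalling, one ioctl, interrupts handled in ring-0. */
        int rc = SUPCallVMMR0Fast(pVMR0, SUP_VMMR0_DO_NOP);
        if (rc != VINF_SUCCESS)
            return rc;

        /* Cold path: scalar in u64Arg, no request packet. With the temporary
           hack above, NOP calls like this one are redirected to the fast path. */
        return SUPCallVMMR0Ex(pVMR0, SUP_VMMR0_DO_NOP, 0 /*u64Arg*/, NULL /*pReqHdr*/);
    }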
trunk/src/VBox/HostDrivers/Support/SUPLibInternal.h
--- r4800
+++ r4811
@@ -45 +45 @@
 int     suplibOsTerm(void);
 int     suplibOsIOCtl(uintptr_t uFunction, void *pvReq, size_t cbReq);
-#ifdef VBOX_WITHOUT_IDT_PATCHING
 int     suplibOSIOCtlFast(uintptr_t uFunction);
-#endif
 int     suplibOsPageAlloc(size_t cPages, void **ppvPages);
 int     suplibOsPageFree(void *pvPages, size_t cPages);
trunk/src/VBox/HostDrivers/Support/darwin/SUPLib-darwin.cpp
--- r4800
+++ r4811
@@ -192 +192 @@
 }

-#ifdef VBOX_WITHOUT_IDT_PATCHING
+
 int suplibOSIOCtlFast(uintptr_t uFunction)
 {
@@ -200 +200 @@
     return rc;
 }
-#endif
trunk/src/VBox/HostDrivers/Support/freebsd/SUPLib-freebsd.cpp
--- r4800
+++ r4811
@@ -117 +117 @@
 }

-#ifdef VBOX_WITHOUT_IDT_PATCHING
+
 int suplibOSIOCtlFast(uintptr_t uFunction)
 {
@@ -125 +125 @@
     return rc;
 }
-#endif
trunk/src/VBox/HostDrivers/Support/linux/SUPDrv-linux.c
--- r4800
+++ r4811
@@ -751 +751 @@
 static int VBoxDrvLinuxIOCtl(struct inode *pInode, struct file *pFilp, unsigned int uCmd, unsigned long ulArg)
 {
-#ifdef VBOX_WITHOUT_IDT_PATCHING
     /*
      * Deal with the two high-speed IOCtl that takes it's arguments from
@@ -759 +758 @@
             ||  uCmd == SUP_IOCTL_FAST_DO_HWACC_RUN
             ||  uCmd == SUP_IOCTL_FAST_DO_NOP))
-        return supdrvIOCtlFast(iCmd, &g_DevExt, (PSUPDRVSESSION)pFilp->private_data);
-#endif
+        return supdrvIOCtlFast(uCmd, &g_DevExt, (PSUPDRVSESSION)pFilp->private_data);
     return VBoxDrvLinuxIOCtlSlow(pInode, pFilp, uCmd, ulArg);
 }
trunk/src/VBox/HostDrivers/Support/linux/SUPLib-linux.cpp
--- r4800
+++ r4811
@@ -234 +234 @@


 /**
+ * Fast I/O Control path, no buffers.
+ *
+ * @returns VBox status code.
+ * @param   uFunction   The operation.
+ */
+int suplibOSIOCtlFast(uintptr_t uFunction)
+{
+    int rc = ioctl(g_hDevice, uFunction, NULL);
+    if (rc == -1)
+        rc = -errno;
+    return rc;
+}
+
+
+/**
  * Allocate a number of zero-filled pages in user space.
  *
trunk/src/VBox/HostDrivers/Support/os2/SUPLib-os2.cpp
--- r4800
+++ r4811
@@ -145 +145 @@


-#ifdef VBOX_WITHOUT_IDT_PATCHING
 int suplibOSIOCtlFast(uintptr_t uFunction)
 {
@@ -159 +158 @@
     return rc;
 }
-#endif
trunk/src/VBox/HostDrivers/Support/solaris/SUPDrv-solaris.c
--- r4800
+++ r4811
@@ -486 +486 @@
     * the session and iCmd, and only returns a VBox status code.
     */
-#ifdef VBOX_WITHOUT_IDT_PATCHING
    if (    Cmd == SUP_IOCTL_FAST_DO_RAW_RUN
        ||  Cmd == SUP_IOCTL_FAST_DO_HWACC_RUN
        ||  Cmd == SUP_IOCTL_FAST_DO_NOP)
        return supdrvIOCtlFast(Cmd, &g_DevExt, pSession);
-#endif

    return VBoxDrvSolarisIOCtlSlow(pSession, Cmd, Mode, pArgs);
trunk/src/VBox/HostDrivers/Support/solaris/SUPLib-solaris.cpp
--- r4800
+++ r4811
@@ -142 +142 @@


-#ifdef VBOX_WITHOUT_IDT_PATCHING
 int suplibOSIOCtlFast(uintptr_t uFunction)
 {
@@ -150 +149 @@
     return rc;
 }
-#endif
trunk/src/VBox/HostDrivers/Support/testcase/tstInt.cpp
--- r4071
+++ r4811
@@ -1 +1 @@
+/** $Id$ */
 /** @file
- *
- * VBox host drivers - Ring-0 support drivers - Testcases:
- * Test the interrupt gate feature of the support library
+ * Testcase: Test the interrupt gate feature of the support library.
  */
@@ -100 +99 @@
    pVM->pSession = pSession;

-#ifdef VBOX_WITHOUT_IDT_PATCHING
    rc = SUPSetVMForFastIOCtl(pVMR0);
-#endif
    if (!rc)
    {
@@ -148 +145 @@
        uint64_t NanoSecs = RTTimeNanoTS() - StartTS;

-        RTPrintf("tstInt: %d iterations in %llu ns / %llu ticks. %llu ns / %#llu ticks per iteration. Min %llu ticks.\n",
+        RTPrintf("tstInt: SUPCallVMMR0 - %d iterations in %llu ns / %llu ticks. %llu ns / %#llu ticks per iteration. Min %llu ticks.\n",
+                 i, NanoSecs, Ticks, NanoSecs / i, Ticks / i, MinTicks);
+
+#ifndef VBOX_WITHOUT_IDT_PATCHING
+        /*
+         * The fast path.
+         */
+        RTTimeNanoTS();
+        StartTS = RTTimeNanoTS();
+        StartTick = ASMReadTSC();
+        MinTicks = UINT64_MAX;
+        for (i = 0; i < 1000000; i++)
+        {
+            uint64_t OneStartTick = ASMReadTSC();
+            rc = SUPCallVMMR0Fast(pVMR0, VMMR0_DO_NOP);
+            uint64_t Ticks = ASMReadTSC() - OneStartTick;
+            if (Ticks < MinTicks)
+                MinTicks = Ticks;
+
+            if (RT_UNLIKELY(rc != VINF_SUCCESS))
+            {
+                RTPrintf("tstInt: SUPCallVMMR0 -> rc=%Vrc i=%d Expected VINF_SUCCESS!\n", rc, i);
+                rcRet++;
+                break;
+            }
+        }
+        Ticks = ASMReadTSC() - StartTick;
+        NanoSecs = RTTimeNanoTS() - StartTS;
+
+        RTPrintf("tstInt: SUPCallVMMR0Fast - %d iterations in %llu ns / %llu ticks. %llu ns / %#llu ticks per iteration. Min %llu ticks.\n",
+                 i, NanoSecs, Ticks, NanoSecs / i, Ticks / i, MinTicks);
+#endif /* !VBOX_WITHOUT_IDT_PATCHING */
+
+        /*
+         * The ordinary path.
+         */
+        RTTimeNanoTS();
+        StartTS = RTTimeNanoTS();
+        StartTick = ASMReadTSC();
+        MinTicks = UINT64_MAX;
+        for (i = 0; i < 1000000; i++)
+        {
+            uint64_t OneStartTick = ASMReadTSC();
+            rc = SUPCallVMMR0Ex(pVMR0, VMMR0_DO_NOP, 0, NULL);
+            uint64_t Ticks = ASMReadTSC() - OneStartTick;
+            if (Ticks < MinTicks)
+                MinTicks = Ticks;
+
+            if (RT_UNLIKELY(rc != VINF_SUCCESS))
+            {
+                RTPrintf("tstInt: SUPCallVMMR0 -> rc=%Vrc i=%d Expected VINF_SUCCESS!\n", rc, i);
+                rcRet++;
+                break;
+            }
+        }
+        Ticks = ASMReadTSC() - StartTick;
+        NanoSecs = RTTimeNanoTS() - StartTS;
+
+        RTPrintf("tstInt: SUPCallVMMR0Ex - %d iterations in %llu ns / %llu ticks. %llu ns / %#llu ticks per iteration. Min %llu ticks.\n",
                  i, NanoSecs, Ticks, NanoSecs / i, Ticks / i, MinTicks);
     }
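The testcase reports both the average (totals divided by the iteration count) and the minimum TSC delta per round trip; the minimum is presumably the interesting figure for a micro-benchmark like this, since it is the sample least disturbed by host interrupts and scheduling. The core idiom used by all three loops, reduced to a sketch:

    /* Min-of-N timing sketch: take the cheapest of N runs as the cost floor. */
    uint64_t MinTicks = UINT64_MAX;
    for (unsigned i = 0; i < 1000000; i++)
    {
        uint64_t StartTick = ASMReadTSC();
        int rc = SUPCallVMMR0Fast(pVMR0, VMMR0_DO_NOP);  /* the round trip under test */
        uint64_t Ticks = ASMReadTSC() - StartTick;
        if (Ticks < MinTicks)
            MinTicks = Ticks;
        if (rc != VINF_SUCCESS)
            break;
    }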
trunk/src/VBox/HostDrivers/Support/win/SUPDrv-win.cpp
--- r4800
+++ r4811
@@ -271 +271 @@
    PSUPDRVSESSION      pSession = (PSUPDRVSESSION)pStack->FileObject->FsContext;

-#ifdef VBOX_WITHOUT_IDT_PATCHING
    /*
     * Deal with the two high-speed IOCtl that takes it's arguments from
@@ -298 +297 @@
        return rcNt;
    }
-#endif /* VBOX_WITHOUT_IDT_PATCHING */

    return VBoxDrvNtDeviceControlSlow(pDevExt, pSession, pIrp, pStack);
trunk/src/VBox/HostDrivers/Support/win/SUPLib-win.cpp
--- r4804
+++ r4811
@@ -624 +624 @@


-#ifdef VBOX_WITHOUT_IDT_PATCHING
 int suplibOSIOCtlFast(uintptr_t uFunction)
 {
@@ -636 +635 @@
     return suplibConvertWin32Err(GetLastError());
 }
-#endif
trunk/src/VBox/VMM/HWACCM.cpp
--- r4789
+++ r4811
@@ -423 +423 @@
        memset(pVM->hwaccm.s.vmx.pRealModeTSS->IntRedirBitmap, 0x0, sizeof(pVM->hwaccm.s.vmx.pRealModeTSS->IntRedirBitmap));

-        int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_HWACC_SETUP_VM, NULL, 0);
+        int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_HWACC_SETUP_VM, 0, NULL);
        AssertRC(rc);
        if (rc == VINF_SUCCESS)
@@ -457 +457 @@
        pVM->hwaccm.s.fInitialized = true;

-        int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_HWACC_SETUP_VM, NULL, 0);
+        int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_HWACC_SETUP_VM, 0, NULL);
        AssertRC(rc);
        if (rc == VINF_SUCCESS)
trunk/src/VBox/VMM/PGMPhys.cpp
--- r4738
+++ r4811
@@ -786 +786 @@
 typedef struct GMMMAPUNMAPCHUNKREQ
 {
+    /** The header. */
+    SUPVMMR0REQHDR Hdr;
     /** The chunk to map, UINT32_MAX if unmap only. (IN) */
-    uint32_t idChunkMap;
+    uint32_t       idChunkMap;
     /** The chunk to unmap, UINT32_MAX if map only. (IN) */
-    uint32_t idChunkUnmap;
+    uint32_t       idChunkUnmap;
     /** Where the mapping address is returned. (OUT) */
-    RTR3PTR pvR3;
+    RTR3PTR        pvR3;
 } GMMMAPUNMAPCHUNKREQ;
@@ -833 +835 @@
     */
    GMMMAPUNMAPCHUNKREQ Req;
+    Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+    Req.Hdr.cbReq = sizeof(Req);
    Req.pvR3 = NULL;
    Req.idChunkMap = idChunk;
@@ -838 +842 @@
    if (pVM->pgm.s.ChunkR3Map.c >= pVM->pgm.s.ChunkR3Map.cMax)
        Req.idChunkUnmap = pgmR3PhysChunkFindUnmapCandidate(pVM);
-    /** @todo SUPCallVMMR0Ex needs to support in+out or similar. */
-    rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, &Req, sizeof(Req));
+    rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
    if (VBOX_SUCCESS(rc))
    {
@@ -929 +932 @@
 {
     pgmLock(pVM);
-    int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, NULL, 0);
+    int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
     if (rc == VERR_GMM_SEED_ME)
     {
@@ -935 +938 @@
         rc = SUPPageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
         if (VBOX_SUCCESS(rc))
-            rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_SEED_CHUNK, pvChunk, 0);
+            rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
         if (VBOX_FAILURE(rc))
         {

(The remaining hunks in this file only strip trailing whitespace from doxygen comments.)
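The two call sites above illustrate both argument styles of the reworked SUPCallVMMR0Ex: an in/out packet headed by SUPVMMR0REQHDR for VMMR0_DO_GMM_MAP_UNMAP_CHUNK, and a bare scalar (here a ring-3 pointer widened into u64Arg) for VMMR0_DO_GMM_SEED_CHUNK. Condensed into one sketch, with the surrounding locking and error handling elided:

    /* Packet style: in/out data travels in a struct that starts with the header. */
    GMMMAPUNMAPCHUNKREQ Req;
    Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;   /* validated by the support driver */
    Req.Hdr.cbReq    = sizeof(Req);            /* ditto */
    Req.idChunkMap   = idChunk;                /* IN */
    Req.idChunkUnmap = UINT32_MAX;             /* IN: nothing to unmap */
    Req.pvR3         = NULL;                   /* OUT */
    int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);

    /* Scalar style: a single value rides in u64Arg, no packet at all. */
    if (rc == VERR_GMM_SEED_ME)
        rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);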
trunk/src/VBox/VMM/VMM.cpp
--- r4799
+++ r4811
@@ -375 +375 @@
         return rc;

-#ifdef VBOX_WITHOUT_IDT_PATCHING
     /*
      * Register the Ring-0 VM handle with the session for fast ioctl calls.
@@ -382 +381 @@
     if (VBOX_FAILURE(rc))
         return rc;
-#endif

     /*
@@ -1959 +1957 @@
             rc = VERR_GENERAL_FAILURE;
 #else
-            rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_HWACC_RUN, NULL, 0);
+            //rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_HWACC_RUN, NULL, 0);
+# if !defined(RT_OS_LINUX) /* Alternative for debugging - currently untested on linux. */
+            rc = SUPCallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HWACC_RUN);
+# else
+            rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_HWACC_RUN, NULL);
+# endif
 #endif
         } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);

(A later hunk only strips trailing whitespace after an #endif.)
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
--- r4738
+++ r4811
@@ -210 +210 @@
 #ifdef IN_RING3
-    int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, NULL, 0);
+    int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
 #elif defined(IN_RING0)
     /** @todo call PGMR0PhysAllocateHandyPages directly - need to make sure we can call kernel code first and deal with the seeding fallback. */

(All other hunks in this file only strip trailing whitespace from comment lines in
the page-mapping TLB invalidation helpers, pgmPhysEnsureHandyPage, the page
allocation and mapping routines, and PGMPhysReleasePageMappingLock.)
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
--- r4787
+++ r4811
@@ -414 +414 @@

+
 /**
- * The Ring 0 entry point, called by the support library (SUP).
+ * The Ring 0 entry point, called by the interrupt gate.
  *
  * @returns VBox status code.
- * @param   pVM             The VM to operate on.
- * @param   uOperation      Which operation to execute. (VMMR0OPERATION)
- * @param   pvArg           Argument to the operation.
- */
-VMMR0DECL(int) VMMR0Entry(PVM pVM, unsigned /* make me an enum */ uOperation, void *pvArg)
-{
-    switch (uOperation)
-    {
+ * @param   pVM             The VM to operate on.
+ * @param   enmOperation    Which operation to execute.
+ * @param   pvArg           Argument to the operation.
+ * @remarks Assume called with interrupts disabled.
+ */
+VMMR0DECL(int) VMMR0EntryInt(PVM pVM, VMMR0OPERATION enmOperation, void *pvArg)
+{
+    switch (enmOperation)
+    {
+#ifndef VBOX_WITHOUT_IDT_PATCHING
         /*
          * Switch to GC.
@@ -444 +447 @@

            /*
-             * Check if there is an exit R0 action associated with the return code.
+             * We'll let TRPM change the stack frame so our return is different.
+             * Just keep in mind that after the call, things have changed!
             */
-            switch (rc)
+            if (    rc == VINF_EM_RAW_INTERRUPT
+                ||  rc == VINF_EM_RAW_INTERRUPT_HYPER)
            {
                /*
-                 * Default - no action, just return.
+                 * Don't trust the compiler to get this right.
+                 * gcc -fomit-frame-pointer screws up big time here. This works fine in 64-bit
+                 * mode too because we push the arguments on the stack in the IDT patch code.
                 */
-                default:
-                    return rc;
-
-                /*
-                 * We'll let TRPM change the stack frame so our return is different.
-                 * Just keep in mind that after the call, things have changed!
-                 */
-                case VINF_EM_RAW_INTERRUPT:
-                case VINF_EM_RAW_INTERRUPT_HYPER:
-                {
-#ifdef VBOX_WITHOUT_IDT_PATCHING
-                    TRPMR0DispatchHostInterrupt(pVM);
-#else /* !VBOX_WITHOUT_IDT_PATCHING */
-                    /*
-                     * Don't trust the compiler to get this right.
-                     * gcc -fomit-frame-pointer screws up big time here. This works fine in 64-bit
-                     * mode too because we push the arguments on the stack in the IDT patch code.
-                     */
 # if defined(__GNUC__)
-                    void *pvRet = (uint8_t *)__builtin_frame_address(0) + sizeof(void *);
+                void *pvRet = (uint8_t *)__builtin_frame_address(0) + sizeof(void *);
 # elif defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with with VC7! */
-                    void *pvRet = (uint8_t *)_AddressOfReturnAddress();
+                void *pvRet = (uint8_t *)_AddressOfReturnAddress();
 # elif defined(RT_ARCH_X86)
-                    void *pvRet = (uint8_t *)&pVM - sizeof(pVM);
+                void *pvRet = (uint8_t *)&pVM - sizeof(pVM);
 # else
 #  error "huh?"
 # endif
-                    if (    ((uintptr_t *)pvRet)[1] == (uintptr_t)pVM
-                        &&  ((uintptr_t *)pvRet)[2] == (uintptr_t)uOperation
-                        &&  ((uintptr_t *)pvRet)[3] == (uintptr_t)pvArg)
-                        TRPMR0SetupInterruptDispatcherFrame(pVM, pvRet);
-                    else
+                if (    ((uintptr_t *)pvRet)[1] == (uintptr_t)pVM
+                    &&  ((uintptr_t *)pvRet)[2] == (uintptr_t)enmOperation
+                    &&  ((uintptr_t *)pvRet)[3] == (uintptr_t)pvArg)
+                    TRPMR0SetupInterruptDispatcherFrame(pVM, pvRet);
+                else
+                {
 # if defined(DEBUG) || defined(LOG_ENABLED)
-                    {
-                        static bool s_fHaveWarned = false;
-                        if (!s_fHaveWarned)
-                        {
-                            s_fHaveWarned = true;
-                            //RTLogPrintf("VMMR0.r0: The compiler can't find the stack frame!\n"); -- @todo export me!
-                            RTLogComPrintf("VMMR0.r0: The compiler can't find the stack frame!\n");
-                        }
+                    static bool s_fHaveWarned = false;
+                    if (!s_fHaveWarned)
+                    {
+                        s_fHaveWarned = true;
+                        //RTLogPrintf("VMMR0.r0: The compiler can't find the stack frame!\n"); -- @todo export me!
+                        RTLogComPrintf("VMMR0.r0: The compiler can't find the stack frame!\n");
+                    }
 # endif
-                        TRPMR0DispatchHostInterrupt(pVM);
-                    }
-#endif /* !VBOX_WITHOUT_IDT_PATCHING */
-                    return rc;
-                }
+                    TRPMR0DispatchHostInterrupt(pVM);
+                }
             }
-            /* Won't get here! */
-            break;
+            return rc;
         }
@@ -508 +493 @@
        case VMMR0_DO_HWACC_RUN:
        {
-            int rc;
-            RTCCUINTREG fFlags;
-
            STAM_COUNTER_INC(&pVM->vmm.s.StatRunGC);
-            fFlags = ASMIntDisableFlags();
-            rc = HWACCMR0Enable(pVM);
+            int rc = HWACCMR0Enable(pVM);
            if (VBOX_SUCCESS(rc))
            {
@@ -527 +508 @@
            }
            pVM->vmm.s.iLastGCRc = rc;
-            ASMSetFlags(fFlags);

 #ifdef VBOX_WITH_STATISTICS
@@ -543 +523 @@
            RTCCUINTREG fFlags = ASMIntDisableFlags();
            int rc = VMMR0Init(pVM, (unsigned)(uintptr_t)pvArg);
+            ASMSetFlags(fFlags);
+            return rc;
+        }
+
+        /*
+         * Terminate the R0 part of a VM instance.
+         */
+        case VMMR0_DO_VMMR0_TERM:
+        {
+            RTCCUINTREG fFlags = ASMIntDisableFlags();
+            int rc = VMMR0Term(pVM);
+            ASMSetFlags(fFlags);
+            return rc;
+        }
+
+        /*
+         * Setup the hardware accelerated raw-mode session.
+         */
+        case VMMR0_DO_HWACC_SETUP_VM:
+        {
+            RTCCUINTREG fFlags = ASMIntDisableFlags();
+            int rc = HWACCMR0SetupVMX(pVM);
+            ASMSetFlags(fFlags);
+            return rc;
+        }
+
+        /*
+         * Switch to GC to execute Hypervisor function.
+         */
+        case VMMR0_DO_CALL_HYPERVISOR:
+        {
+            /* Safety precaution as VMX disables the switcher. */
+            Assert(!pVM->vmm.s.fSwitcherDisabled);
+            if (pVM->vmm.s.fSwitcherDisabled)
+                return VERR_NOT_SUPPORTED;
+
+            RTCCUINTREG fFlags = ASMIntDisableFlags();
+            int rc = pVM->vmm.s.pfnR0HostToGuest(pVM);
+            ASMSetFlags(fFlags);
+            return rc;
+        }
+
+        /*
+         * For profiling.
+         */
+        case VMMR0_DO_NOP:
+            return VINF_SUCCESS;
+#endif /* !VBOX_WITHOUT_IDT_PATCHING */
+
+        default:
+            /*
+             * We're returning VERR_NOT_SUPPORT here so we've got something else
+             * than -1 which the interrupt gate glue code might return.
+             */
+            Log(("operation %#x is not supported\n", enmOperation));
+            return VERR_NOT_SUPPORTED;
+    }
+}
+
+
+/**
+ * The Ring 0 entry point, called by the fast-ioctl path.
+ *
+ * @returns VBox status code.
+ * @param   pVM             The VM to operate on.
+ * @param   enmOperation    Which operation to execute.
+ * @remarks Assume called with interrupts disabled.
+ */
+VMMR0DECL(int) VMMR0EntryFast(PVM pVM, VMMR0OPERATION enmOperation)
+{
+    switch (enmOperation)
+    {
+        /*
+         * Switch to GC and run guest raw mode code.
+         */
+        case VMMR0_DO_RAW_RUN:
+        {
+            /* Safety precaution as VMX disables the switcher. */
+            if (RT_LIKELY(!pVM->vmm.s.fSwitcherDisabled))
+            {
+                int rc = pVM->vmm.s.pfnR0HostToGuest(pVM);
+                pVM->vmm.s.iLastGCRc = rc;
+
+                if (    rc == VINF_EM_RAW_INTERRUPT
+                    ||  rc == VINF_EM_RAW_INTERRUPT_HYPER)
+                    TRPMR0DispatchHostInterrupt(pVM);
+
+#ifdef VBOX_WITH_STATISTICS
+                STAM_COUNTER_INC(&pVM->vmm.s.StatRunGC);
+                vmmR0RecordRC(pVM, rc);
+#endif
+                return rc;
+            }
+
+            Assert(!pVM->vmm.s.fSwitcherDisabled);
+            return VERR_NOT_SUPPORTED;
+        }
+
+        /*
+         * Run guest code using the available hardware acceleration technology.
+         */
+        case VMMR0_DO_HWACC_RUN:
+        {
+            STAM_COUNTER_INC(&pVM->vmm.s.StatRunGC);
+            int rc = HWACCMR0Enable(pVM);
+            if (VBOX_SUCCESS(rc))
+            {
+#ifdef DEBUG_NO_RING0_ASSERTIONS
+                g_pVMAssert = pVM;
+#endif
+                rc = vmmR0CallHostSetJmp(&pVM->vmm.s.CallHostR0JmpBuf, HWACCMR0RunGuestCode, pVM); /* this may resume code. */
+#ifdef DEBUG_NO_RING0_ASSERTIONS
+                g_pVMAssert = NULL;
+#endif
+                int rc2 = HWACCMR0Disable(pVM);
+                AssertRC(rc2);
+            }
+            pVM->vmm.s.iLastGCRc = rc;
+
+#ifdef VBOX_WITH_STATISTICS
+            vmmR0RecordRC(pVM, rc);
+#endif
+            /* No special action required for external interrupts, just return. */
+            return rc;
+        }
+
+        /*
+         * For profiling.
+         */
+        case VMMR0_DO_NOP:
+            return VINF_SUCCESS;
+
+        /*
+         * Impossible.
+         */
+        default:
+            AssertMsgFailed(("%#x\n", enmOperation));
+            return VERR_NOT_SUPPORTED;
+    }
+}
+
+
+/**
+ * The Ring 0 entry point, called by the support library (SUP).
+ *
+ * @returns VBox status code.
+ * @param   pVM             The VM to operate on.
+ * @param   enmOperation    Which operation to execute.
+ * @param   pReq            This points to a SUPVMMR0REQHDR packet. Optional.
+ * @param   u64Arg          Some simple constant argument.
+ * @remarks Assume called with interrupts _enabled_.
+ */
+VMMR0DECL(int) VMMR0EntryEx(PVM pVM, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg)
+{
+    switch (enmOperation)
+    {
+#if 1 /* disable later? */
+        /*
+         * Alternative to the fast path, all we need to do is disable interrupts.
+         */
+        case VMMR0_DO_RAW_RUN:
+        case VMMR0_DO_HWACC_RUN:
+        {
+            RTCCUINTREG fFlags = ASMIntDisableFlags();
+            int rc = VMMR0EntryFast(pVM, enmOperation);
+            ASMSetFlags(fFlags);
+            return rc;
+        }
+#endif
+
+        /*
+         * Initialize the R0 part of a VM instance.
+         */
+        case VMMR0_DO_VMMR0_INIT:
+        {
+            RTCCUINTREG fFlags = ASMIntDisableFlags();
+            int rc = VMMR0Init(pVM, (unsigned)u64Arg);
            ASMSetFlags(fFlags);
            return rc;
@@ -603 +760 @@
        case VMMR0_DO_GMM_SEED_CHUNK:
            return GMMR0SeedChunk(pVM, (RTR3PTR)pvArg);
 #endif



-#ifdef VBOX_WITH_INTERNAL_NETWORKING
+#if 0//def VBOX_WITH_INTERNAL_NETWORKING - currently busted
        /*
         * Services.
@@ -635 +792 @@
             * Unpack the arguments and call the service.
             */
-            switch (uOperation)
+            switch (enmOperation)
            {
                case VMMR0_DO_INTNET_OPEN:
@@ -698 +855 @@
             * than -1 which the interrupt gate glue code might return.
             */
-            Log(("operation %#x is not supported\n", uOperation));
+            Log(("operation %#x is not supported\n", enmOperation));
            return VERR_NOT_SUPPORTED;
    }
 }
+
+

(Two further hunks only adjust trailing whitespace: on the DEBUG_NO_RING0_ASSERTIONS
define near the top of the file and on an #endif near the GMM cases.)