Changeset 75585 in vbox
- Timestamp: Nov 19, 2018 6:03:23 PM
- Location: trunk
- Files: 5 edited
trunk/include/VBox/VMMDev.h
--- trunk/include/VBox/VMMDev.h (r75533)
+++ trunk/include/VBox/VMMDev.h (r75585)

@@ -65,4 +65,7 @@
 /** Port for generic request interface (relative offset). */
 #define VMMDEV_PORT_OFF_REQUEST                             0
+/** Port for requests that can be handled w/o going to ring-3 (relative offset).
+ * This works like VMMDevReq_AcknowledgeEvents when read. */
+#define VMMDEV_PORT_OFF_REQUEST_FAST                        8
 

@@ -529,8 +532,12 @@
 AssertCompileSize(VMMDevReqHostVersion, 24+16);
 
-/** @name VMMDevReqHostVersion::features
+/** @name VMMDEV_HVF_XXX - VMMDevReqHostVersion::features
  * @{ */
 /** Physical page lists are supported by HGCM. */
-#define VMMDEV_HVF_HGCM_PHYS_PAGE_LIST      RT_BIT(0)
+#define VMMDEV_HVF_HGCM_PHYS_PAGE_LIST      RT_BIT_32(0)
+/** HGCM supports the embedded buffer parameter type. */
+#define VMMDEV_HVF_HGCM_EMBEDDED_BUFFERS    RT_BIT_32(1)
+/** VMMDev supports fast IRQ acknowledgements. */
+#define VMMDEV_HVF_FAST_IRQ_ACK             RT_BIT_32(31)
 /** @} */
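The new port is meant to be hit with a single 32-bit IN instruction instead of a full VMMDevReq_AcknowledgeEvents request. The following guest-side sketch is not part of the changeset: the uIoPortBase parameter and the ioPortRead32() helper are assumptions (in practice the base address comes from the VMMDev PCI I/O BAR), only the two constants are taken from the header above.

    #include <stdint.h>

    #define VMMDEV_PORT_OFF_REQUEST_FAST    8           /* from VMMDev.h in this changeset */
    #define VMMDEV_HVF_FAST_IRQ_ACK         (1U << 31)  /* host feature bit, ditto (RT_BIT_32(31)) */

    extern uint32_t ioPortRead32(uint16_t uPort);       /* assumed platform port-input primitive */

    /* Returns the pending event mask, or 0 if nothing usable came back and the
     * caller should fall back to a regular VMMDevReq_AcknowledgeEvents request. */
    static uint32_t vmmDevAckIrqFast(uint16_t uIoPortBase, uint32_t fHostFeatures)
    {
        if (!(fHostFeatures & VMMDEV_HVF_FAST_IRQ_ACK))
            return 0;   /* host too old for the fast path */

        uint32_t fEvents = ioPortRead32(uIoPortBase + VMMDEV_PORT_OFF_REQUEST_FAST);
        if (fEvents == UINT32_MAX)
            return 0;   /* device not ready / legacy 1.03 mode */
        return fEvents; /* the read already cleared the events and lowered the IRQ */
    }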
trunk/src/VBox/Devices/VMMDev/VMMDev.cpp
--- trunk/src/VBox/Devices/VMMDev/VMMDev.cpp (r75537)
+++ trunk/src/VBox/Devices/VMMDev/VMMDev.cpp (r75585)

@@ -151,4 +151,5 @@
 
 #ifndef VBOX_DEVICE_STRUCT_TESTCASE
+#ifdef IN_RING3
 
 /* -=-=-=-=- Misc Helpers -=-=-=-=- */

@@ -229,4 +230,7 @@
 }
 
+#endif /* IN_RING3 */
+
+
 /**
  * Sets the IRQ (raise it or lower it) for 1.03 additions.

@@ -241,11 +245,11 @@
     {
         /* Filter unsupported events */
-        uint32_t fEvents = pThis->u32HostEventFlags & pThis->pVMMDevRAMR3->V.V1_03.u32GuestEventMask;
+        uint32_t fEvents = pThis->u32HostEventFlags & pThis->CTX_SUFF(pVMMDevRAM)->V.V1_03.u32GuestEventMask;
 
         Log(("vmmdevSetIRQ: fEvents=%#010x, u32HostEventFlags=%#010x, u32GuestEventMask=%#010x.\n",
-             fEvents, pThis->u32HostEventFlags, pThis->pVMMDevRAMR3->V.V1_03.u32GuestEventMask));
+             fEvents, pThis->u32HostEventFlags, pThis->CTX_SUFF(pVMMDevRAM)->V.V1_03.u32GuestEventMask));
 
         /* Move event flags to VMMDev RAM */
-        pThis->pVMMDevRAMR3->V.V1_03.u32HostEvents = fEvents;
+        pThis->CTX_SUFF(pVMMDevRAM)->V.V1_03.u32HostEvents = fEvents;
 
         uint32_t uIRQLevel = 0;

@@ -260,5 +264,5 @@
         /* Set IRQ level for pin 0 (see NoWait comment in vmmdevMaybeSetIRQ). */
         /** @todo make IRQ pin configurable, at least a symbolic constant */
-        PDMDevHlpPCISetIrqNoWait(pThis->pDevIns, 0, uIRQLevel);
+        PDMDevHlpPCISetIrqNoWait(pThis->CTX_SUFF(pDevIns), 0, uIRQLevel);
         Log(("vmmdevSetIRQ: IRQ set %d\n", uIRQLevel));
     }

@@ -266,4 +270,7 @@
         Log(("vmmdevSetIRQ: IRQ is not generated, guest has not yet reported to us.\n"));
 }
+
+
+#ifdef IN_RING3
 
 /**

@@ -287,4 +294,4 @@
          */
         pThis->pVMMDevRAMR3->V.V1_04.fHaveEvents = true;
-        PDMDevHlpPCISetIrqNoWait(pThis->pDevIns, 0, 1);
+        PDMDevHlpPCISetIrqNoWait(pThis->pDevInsR3, 0, 1);
         Log3(("vmmdevMaybeSetIRQ: IRQ set.\n"));

@@ -356,5 +363,5 @@
      * Only notify the VM when it's running.
      */
-    VMSTATE enmVMState = PDMDevHlpVMState(pThis->pDevIns);
+    VMSTATE enmVMState = PDMDevHlpVMState(pThis->pDevInsR3);
     /** @todo r=bird: Shouldn't there be more states here? Wouldn't we drop
      *        notifications now when we're in the process of suspending or

@@ -443,5 +450,5 @@
 
     /* Clear our IRQ in case it was high for whatever reason. */
-    PDMDevHlpPCISetIrqNoWait(pThis->pDevIns, 0, 0);
+    PDMDevHlpPCISetIrqNoWait(pThis->pDevInsR3, 0, 0);
 
     return VINF_SUCCESS;

@@ -563,5 +570,5 @@
     {
         VMMDevReqNtBugCheck const *pReq = (VMMDevReqNtBugCheck const *)pReqHdr;
-        DBGFR3ReportBugCheck(PDMDevHlpGetVM(pThis->pDevIns), PDMDevHlpGetVMCPU(pThis->pDevIns), DBGFEVENT_BSOD_VMMDEV,
+        DBGFR3ReportBugCheck(PDMDevHlpGetVM(pThis->pDevInsR3), PDMDevHlpGetVMCPU(pThis->pDevInsR3), DBGFEVENT_BSOD_VMMDEV,
                              pReq->uBugCheck, pReq->auParameters[0], pReq->auParameters[1],
                              pReq->auParameters[2], pReq->auParameters[3]);

@@ -570,5 +577,5 @@
     {
         LogRel(("VMMDev: NT BugCheck w/o data.\n"));
-        DBGFR3ReportBugCheck(PDMDevHlpGetVM(pThis->pDevIns), PDMDevHlpGetVMCPU(pThis->pDevIns), DBGFEVENT_BSOD_VMMDEV,
+        DBGFR3ReportBugCheck(PDMDevHlpGetVM(pThis->pDevInsR3), PDMDevHlpGetVMCPU(pThis->pDevInsR3), DBGFEVENT_BSOD_VMMDEV,
                              0, 0, 0, 0, 0);
     }

@@ -724,5 +731,5 @@
 
     /* Clear our IRQ in case it was high for whatever reason. */
-    PDMDevHlpPCISetIrqNoWait(pThis->pDevIns, 0, 0);
+    PDMDevHlpPCISetIrqNoWait(pThis->pDevInsR3, 0, 0);
 
     return VINF_SUCCESS;

@@ -1162,5 +1169,5 @@
 {
     RTTIMESPEC now;
-    pReq->time = RTTimeSpecGetMilli(PDMDevHlpTMUtcNow(pThis->pDevIns, &now));
+    pReq->time = RTTimeSpecGetMilli(PDMDevHlpTMUtcNow(pThis->pDevInsR3, &now));
     return VINF_SUCCESS;
 }

@@ -1181,5 +1188,5 @@
     AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
 
-    return PGMR3MappingsSize(PDMDevHlpGetVM(pThis->pDevIns), &pReq->hypervisorSize);
+    return PGMR3MappingsSize(PDMDevHlpGetVM(pThis->pDevInsR3), &pReq->hypervisorSize);
 }
 

@@ -1198,5 +1205,5 @@
 
     int rc;
-    PVM pVM = PDMDevHlpGetVM(pThis->pDevIns);
+    PVM pVM = PDMDevHlpGetVM(pThis->pDevInsR3);
     if (pReq->hypervisorStart == 0)
         rc = PGMR3MappingsUnfix(pVM);

@@ -1232,5 +1239,5 @@
     AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
 
-    return VMMR3RegisterPatchMemory(PDMDevHlpGetVM(pThis->pDevIns), pReq->pPatchMem, pReq->cbPatchMem);
+    return VMMR3RegisterPatchMemory(PDMDevHlpGetVM(pThis->pDevInsR3), pReq->pPatchMem, pReq->cbPatchMem);
 }
 

@@ -1248,5 +1255,5 @@
     AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
 
-    return VMMR3DeregisterPatchMemory(PDMDevHlpGetVM(pThis->pDevIns), pReq->pPatchMem, pReq->cbPatchMem);
+    return VMMR3DeregisterPatchMemory(PDMDevHlpGetVM(pThis->pDevInsR3), pReq->pPatchMem, pReq->cbPatchMem);
 }
 

@@ -1269,5 +1276,5 @@
     {
         LogRel(("VMMDev: Guest requests the VM to be suspended (paused)\n"));
-        return PDMDevHlpVMSuspend(pThis->pDevIns);
+        return PDMDevHlpVMSuspend(pThis->pDevInsR3);
     }
 

@@ -1275,5 +1282,5 @@
     {
         LogRel(("VMMDev: Guest requests the VM to be turned off\n"));
-        return PDMDevHlpVMPowerOff(pThis->pDevIns);
+        return PDMDevHlpVMPowerOff(pThis->pDevInsR3);
     }
 

@@ -1283,5 +1290,5 @@
     {
         LogRel(("VMMDev: Guest requests the VM to be saved and powered off\n"));
-        return PDMDevHlpVMSuspendSaveAndPowerOff(pThis->pDevIns);
+        return PDMDevHlpVMSuspendSaveAndPowerOff(pThis->pDevInsR3);
     }
     LogRel(("VMMDev: Guest requests the VM to be saved and powered off, declined\n"));

@@ -1663,4 +1670,6 @@
 }
 
+#endif /* IN_RING3 */
+
 
 /**

@@ -1675,7 +1684,11 @@
     VMMDevEvents *pReq = (VMMDevEvents *)pReqHdr;
     AssertMsgReturn(pReq->header.size == sizeof(*pReq), ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
+    STAM_REL_COUNTER_INC(&pThis->StatSlowIrqAck);
 
     if (!VMMDEV_INTERFACE_VERSION_IS_1_03(pThis))
     {
+        /*
+         * Note! This code is duplicated in vmmdevFastRequestIrqAck.
+         */
         if (pThis->fNewGuestFilterMask)
         {

@@ -1687,6 +1700,7 @@
 
         pThis->u32HostEventFlags &= ~pThis->u32GuestFilterMask;
-        pThis->pVMMDevRAMR3->V.V1_04.fHaveEvents = false;
-        PDMDevHlpPCISetIrqNoWait(pThis->pDevIns, 0, 0);
+        pThis->CTX_SUFF(pVMMDevRAM)->V.V1_04.fHaveEvents = false;
+
+        PDMDevHlpPCISetIrqNoWait(pThis->CTX_SUFF(pDevIns), 0, 0);
     }
     else

@@ -1695,4 +1709,6 @@
 }
 
+
+#ifdef IN_RING3
 
 /**

@@ -2048,5 +2064,5 @@
 
     Log(("VMMDevReq_ChangeMemBalloon\n"));
-    int rc = PGMR3PhysChangeMemBalloon(PDMDevHlpGetVM(pThis->pDevIns), !!pReq->fInflate, pReq->cPages, pReq->aPhysPage);
+    int rc = PGMR3PhysChangeMemBalloon(PDMDevHlpGetVM(pThis->pDevInsR3), !!pReq->fInflate, pReq->cPages, pReq->aPhysPage);
     if (pReq->fInflate)
         STAM_REL_U32_INC(&pThis->StatMemBalloonChunks);

@@ -2284,5 +2300,5 @@
     pReq->build    = RTBldCfgVersionBuild();
     pReq->revision = RTBldCfgRevision();
-    pReq->features = VMMDEV_HVF_HGCM_PHYS_PAGE_LIST;
+    pReq->features = VMMDEV_HVF_HGCM_PHYS_PAGE_LIST | VMMDEV_HVF_HGCM_EMBEDDED_BUFFERS | VMMDEV_HVF_FAST_IRQ_ACK;
     return VINF_SUCCESS;
 }

@@ -2406,5 +2422,5 @@
      * Forward the request to the VMM.
      */
-    return PGMR3SharedModuleRegister(PDMDevHlpGetVM(pThis->pDevIns), pReq->enmGuestOS, pReq->szName, pReq->szVersion,
+    return PGMR3SharedModuleRegister(PDMDevHlpGetVM(pThis->pDevInsR3), pReq->enmGuestOS, pReq->szName, pReq->szVersion,
                                      pReq->GCBaseAddr, pReq->cbModule, pReq->cRegions, pReq->aRegions);
 }

@@ -2436,5 +2452,5 @@
      * Forward the request to the VMM.
      */
-    return PGMR3SharedModuleUnregister(PDMDevHlpGetVM(pThis->pDevIns), pReq->szName, pReq->szVersion,
+    return PGMR3SharedModuleUnregister(PDMDevHlpGetVM(pThis->pDevInsR3), pReq->szName, pReq->szVersion,
                                        pReq->GCBaseAddr, pReq->cbModule);
 }

@@ -2452,5 +2468,5 @@
     AssertMsgReturn(pReq->header.size == sizeof(VMMDevSharedModuleCheckRequest),
                     ("%u\n", pReq->header.size), VERR_INVALID_PARAMETER);
-    return PGMR3SharedModuleCheckAll(PDMDevHlpGetVM(pThis->pDevIns));
+    return PGMR3SharedModuleCheckAll(PDMDevHlpGetVM(pThis->pDevInsR3));
 }
 

@@ -2490,5 +2506,5 @@
 
 # ifdef DEBUG
-    return PGMR3SharedModuleGetPageState(PDMDevHlpGetVM(pThis->pDevIns), pReq->GCPtrPage, &pReq->fShared, &pReq->uPageFlags);
+    return PGMR3SharedModuleGetPageState(PDMDevHlpGetVM(pThis->pDevInsR3), pReq->GCPtrPage, &pReq->fShared, &pReq->uPageFlags);
 # else
     RT_NOREF1(pThis);

@@ -2550,5 +2566,5 @@
      * Write the core file.
      */
-    PUVM pUVM = PDMDevHlpGetUVM(pThis->pDevIns);
+    PUVM pUVM = PDMDevHlpGetUVM(pThis->pDevInsR3);
     return DBGFR3CoreWrite(pUVM, szCorePath, true /*fReplaceFile*/);
 }

@@ -2570,5 +2586,5 @@
     {
         int32_t rcReq = VINF_HGCM_ASYNC_EXECUTE;
-        PDMDevHlpPhysWrite(pThis->pDevIns, GCPhysReqHdr + RT_UOFFSETOF(VMMDevRequestHeader, rc), &rcReq, sizeof(rcReq));
+        PDMDevHlpPhysWrite(pThis->pDevInsR3, GCPhysReqHdr + RT_UOFFSETOF(VMMDevRequestHeader, rc), &rcReq, sizeof(rcReq));
     }
 }

@@ -2870,6 +2886,6 @@
 
 /**
- * @callback_method_impl{FNIOMIOPORTOUT, Port I/O Handler for the generic
- *                      request interface.}
+ * @callback_method_impl{FNIOMIOPORTOUT,
+ * Port I/O write handler for the generic request interface.}
  */
 static DECLCALLBACK(int) vmmdevRequestHandler(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT Port, uint32_t u32, unsigned cb)

@@ -2931,5 +2947,5 @@
      * wasting time with the heap.  Larger allocations goes to the heap, though.
      */
-    VMCPUID iCpu = PDMDevHlpGetCurrentCpuId(pThis->pDevIns);
+    VMCPUID iCpu = PDMDevHlpGetCurrentCpuId(pDevIns);
     VMMDevRequestHeader *pRequestHeaderFree = NULL;
     VMMDevRequestHeader *pRequestHeader     = NULL;

@@ -3026,4 +3042,173 @@
 }
 
+#endif /* IN_RING3 */
+
+
+/**
+ * @callback_method_impl{FNIOMIOPORTOUT,
+ * Port I/O write handler for requests that can be handled w/o going to ring-3.}
+ */
+PDMBOTHCBDECL(int) vmmdevFastRequestHandler(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT Port, uint32_t u32, unsigned cb)
+{
+#ifndef IN_RING3
+# if 0 /* This functionality is offered through reading the port (vmmdevFastRequestIrqAck). Leaving it here for later. */
+    PVMMDEV pThis = (VMMDevState *)pvUser;
+    Assert(PDMINS_2_DATA(pDevIns, PVMMDEV) == pThis);
+    RT_NOREF2(Port, cb);
+
+    /*
+     * We only process a limited set of requests here, reflecting the rest down
+     * to ring-3.  So, try read the whole request into a stack buffer and check
+     * if we can handle it.
+     */
+    union
+    {
+        VMMDevRequestHeader Hdr;
+        VMMDevEvents        Ack;
+    } uReq;
+    RT_ZERO(uReq);
+
+    VBOXSTRICTRC rcStrict;
+    if (pThis->fu32AdditionsOk)
+    {
+        /* Read it into memory. */
+        uint32_t cbToRead = sizeof(uReq); /* (Adjust to stay within a page if we support more than ack requests.) */
+        rcStrict = PDMDevHlpPhysRead(pDevIns, u32, &uReq, cbToRead);
+        if (rcStrict == VINF_SUCCESS)
+        {
+            /*
+             * Validate the request and check that we want to handle it here.
+             */
+            if (   uReq.Hdr.size >= sizeof(uReq.Hdr)
+                && uReq.Hdr.version == VMMDEV_REQUEST_HEADER_VERSION
+                && (   uReq.Hdr.requestType == VMMDevReq_AcknowledgeEvents
+                    && uReq.Hdr.size == sizeof(uReq.Ack)
+                    && cbToRead == sizeof(uReq.Ack)
+                    && pThis->CTX_SUFF(pVMMDevRAM) != NULL)
+               )
+            {
+                RT_UNTRUSTED_VALIDATED_FENCE();
+
+                /*
+                 * Try grab the critical section.
+                 */
+                int rc2 = PDMCritSectEnter(&pThis->CritSect, VINF_IOM_R3_IOPORT_WRITE);
+                if (rc2 == VINF_SUCCESS)
+                {
+                    /*
+                     * Handle the request and write back the result to the guest.
+                     */
+                    uReq.Hdr.rc = vmmdevReqHandler_AcknowledgeEvents(pThis, &uReq.Hdr);
+
+                    rcStrict = PDMDevHlpPhysWrite(pDevIns, u32, &uReq, uReq.Hdr.size);
+                    PDMCritSectLeave(&pThis->CritSect);
+                    if (rcStrict == VINF_SUCCESS)
+                    { /* likely */ }
+                    else
+                        Log(("vmmdevFastRequestHandler: PDMDevHlpPhysWrite(%#RX32+rc,4) -> %Rrc (%RTbool)\n",
+                             u32, VBOXSTRICTRC_VAL(rcStrict), PGM_PHYS_RW_IS_SUCCESS(rcStrict) ));
+                }
+                else
+                {
+                    Log(("vmmdevFastRequestHandler: PDMCritSectEnter -> %Rrc\n", rc2));
+                    rcStrict = rc2;
+                }
+            }
+            else
+            {
+                Log(("vmmdevFastRequestHandler: size=%#x version=%#x requestType=%d (pVMMDevRAM=%p) -> R3\n",
+                     uReq.Hdr.size, uReq.Hdr.version, uReq.Hdr.requestType, pThis->CTX_SUFF(pVMMDevRAM) ));
+                rcStrict = VINF_IOM_R3_IOPORT_WRITE;
+            }
+        }
+        else
+            Log(("vmmdevFastRequestHandler: PDMDevHlpPhysRead(%#RX32,%#RX32) -> %Rrc\n", u32, cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
+    }
+    else
+    {
+        Log(("vmmdevFastRequestHandler: additions nok-okay\n"));
+        rcStrict = VINF_IOM_R3_IOPORT_WRITE;
+    }
+
+    return VBOXSTRICTRC_VAL(rcStrict);
+# else
+    RT_NOREF(pDevIns, pvUser, Port, u32, cb);
+    return VINF_IOM_R3_IOPORT_WRITE;
+# endif
+
+#else  /* IN_RING3 */
+    return vmmdevRequestHandler(pDevIns, pvUser, Port, u32, cb);
+#endif /* IN_RING3 */
+}
+
+
+/**
+ * @callback_method_impl{FNIOMIOPORTIN,
+ * Port I/O read handler for IRQ acknowledging and getting pending events (same
+ * as VMMDevReq_AcknowledgeEvents, just faster).}
+ */
+PDMBOTHCBDECL(int) vmmdevFastRequestIrqAck(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT Port, uint32_t *pu32, unsigned cb)
+{
+    PVMMDEV pThis = (VMMDevState *)pvUser;
+    Assert(PDMINS_2_DATA(pDevIns, PVMMDEV) == pThis);
+    RT_NOREF(Port);
+
+    /* Only 32-bit accesses. */
+    ASSERT_GUEST_MSG_RETURN(cb == sizeof(uint32_t), ("cb=%d\n", cb), VERR_IOM_IOPORT_UNUSED);
+
+    /* The VMMDev memory mapping might've failed, go to ring-3 in that case. */
+    VBOXSTRICTRC rcStrict;
+#ifndef IN_RING3
+    if (pThis->CTX_SUFF(pVMMDevRAM) != NULL)
+#endif
+    {
+        /* Enter critical section and check that the additions has been properly
+           initialized and that we're not in legacy v1.3 device mode. */
+        rcStrict = PDMCritSectEnter(&pThis->CritSect, VINF_IOM_R3_IOPORT_READ);
+        if (rcStrict == VINF_SUCCESS)
+        {
+            if (   pThis->fu32AdditionsOk
+                && !VMMDEV_INTERFACE_VERSION_IS_1_03(pThis))
+            {
+                /*
+                 * Do the job.
+                 *
+                 * Note! This code is duplicated in vmmdevReqHandler_AcknowledgeEvents.
+                 */
+                STAM_REL_COUNTER_INC(&pThis->CTX_SUFF_Z(StatFastIrqAck));
+
+                if (pThis->fNewGuestFilterMask)
+                {
+                    pThis->fNewGuestFilterMask = false;
+                    pThis->u32GuestFilterMask  = pThis->u32NewGuestFilterMask;
+                }
+
+                *pu32 = pThis->u32HostEventFlags & pThis->u32GuestFilterMask;
+
+                pThis->u32HostEventFlags &= ~pThis->u32GuestFilterMask;
+                pThis->CTX_SUFF(pVMMDevRAM)->V.V1_04.fHaveEvents = false;
+
+                PDMDevHlpPCISetIrqNoWait(pDevIns, 0, 0);
+            }
+            else
+            {
+                Log(("vmmdevFastRequestIrqAck: fu32AdditionsOk=%d interfaceVersion=%#x\n", pThis->fu32AdditionsOk,
+                     pThis->guestInfo.interfaceVersion));
+                *pu32 = UINT32_MAX;
+            }
+
+            PDMCritSectLeave(&pThis->CritSect);
+        }
+    }
+#ifndef IN_RING3
+    else
+        rcStrict = VINF_IOM_R3_IOPORT_READ;
+#endif
+    return VBOXSTRICTRC_VAL(rcStrict);
+}
+
+
+
+#ifdef IN_RING3
 
 /* -=-=-=-=-=- PCI Device -=-=-=-=-=- */

@@ -3117,5 +3302,22 @@
     int rc = PDMDevHlpIOPortRegister(pDevIns, (RTIOPORT)GCPhysAddress + VMMDEV_PORT_OFF_REQUEST, 1,
                                      pThis, vmmdevRequestHandler, NULL, NULL, NULL, "VMMDev Request Handler");
-    AssertRC(rc);
+    AssertLogRelRCReturn(rc, rc);
+
+    /* The fast one: */
+    rc = PDMDevHlpIOPortRegister(pDevIns, (RTIOPORT)GCPhysAddress + VMMDEV_PORT_OFF_REQUEST_FAST, 1,
+                                 pThis, vmmdevFastRequestHandler, vmmdevFastRequestIrqAck, NULL, NULL, "VMMDev Fast R0/RC Requests");
+    AssertLogRelRCReturn(rc, rc);
+    if (pThis->fRZEnabled)
+    {
+        rc = PDMDevHlpIOPortRegisterR0(pDevIns, (RTIOPORT)GCPhysAddress + VMMDEV_PORT_OFF_REQUEST_FAST, 1,
+                                       PDMINS_2_DATA_R0PTR(pDevIns), "vmmdevFastRequestHandler", "vmmdevFastRequestIrqAck",
+                                       NULL, NULL, "VMMDev Fast R0/RC Requests");
+        AssertLogRelRCReturn(rc, rc);
+        rc = PDMDevHlpIOPortRegisterRC(pDevIns, (RTIOPORT)GCPhysAddress + VMMDEV_PORT_OFF_REQUEST_FAST, 1,
+                                       PDMINS_2_DATA_RCPTR(pDevIns), "vmmdevFastRequestHandler", "vmmdevFastRequestIrqAck",
+                                       NULL, NULL, "VMMDev Fast R0/RC Requests");
+        AssertLogRelRCReturn(rc, rc);
+    }
+
     return rc;
 }

@@ -4099,5 +4301,12 @@
 static DECLCALLBACK(void) vmmdevRelocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
 {
-    NOREF(pDevIns);
-    NOREF(offDelta);
+    if (offDelta)
+    {
+        PVMMDEV pThis = PDMINS_2_DATA(pDevIns, PVMMDEV);
+        LogFlow(("vmmdevRelocate: offDelta=%RGv\n", offDelta));
+
+        if (pThis->pVMMDevRAMRC)
+            pThis->pVMMDevRAMRC += offDelta;
+        pThis->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
+    }
 }

@@ -4164,5 +4373,7 @@
      */
     /* Save PDM device instance data for future reference. */
-    pThis->pDevIns = pDevIns;
+    pThis->pDevInsR3 = pDevIns;
+    pThis->pDevInsR0 = PDMDEVINS_2_R0PTR(pDevIns);
+    pThis->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
 
     /* PCI vendor, just a free bogus value */

@@ -4379,4 +4590,7 @@
     /*
      * Allocate and initialize the MMIO2 memory.
+     *
+     * We map the first page into raw-mode and kernel contexts so we can handle
+     * interrupt acknowledge requests more timely.
      */
     rc = PDMDevHlpMMIO2Register(pDevIns, &pThis->PciDev, 1 /*iRegion*/, VMMDEV_RAM_SIZE, 0 /*fFlags*/,

@@ -4386,5 +4600,22 @@
                                    N_("Failed to allocate %u bytes of memory for the VMM device"), VMMDEV_RAM_SIZE);
     vmmdevInitRam(pThis);
-
+    if (pThis->fRZEnabled)
+    {
+        rc = PDMDevHlpMMIO2MapKernel(pDevIns, &pThis->PciDev, 1 /*iRegion*/, 0 /*off*/, PAGE_SIZE, "VMMDev", &pThis->pVMMDevRAMR0);
+        if (RT_FAILURE(rc))
+            return PDMDevHlpVMSetError(pDevIns, rc, RT_SRC_POS,
+                                       N_("Failed to map first page of the VMMDev ram into kernel space: %Rrc"), rc);
+
+#ifdef VBOX_WITH_RAW_MODE
+        rc = PDMDevHlpMMHyperMapMMIO2(pDevIns, &pThis->PciDev, 1 /*iRegion*/, 0 /*off*/, PAGE_SIZE, "VMMDev", &pThis->pVMMDevRAMRC);
+        if (RT_FAILURE(rc))
+            return PDMDevHlpVMSetError(pDevIns, rc, RT_SRC_POS,
+                                       N_("Failed to map first page of the VMMDev ram into raw-mode context: %Rrc"), rc);
+#endif
+    }
+
+    /*
+     * Allocate and initialize the MMIO2 heap.
+     */
     if (pThis->fHeapEnabled)
     {

@@ -4483,5 +4714,14 @@
      * Statistics.
      */
-    PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMemBalloonChunks, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Memory balloon size", "/Devices/VMMDev/BalloonChunks");
+    PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMemBalloonChunks, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                           "Memory balloon size", "/Devices/VMMDev/BalloonChunks");
+    PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatFastIrqAckR3, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                           "Fast IRQ acknowledgments handled in ring-3.", "/Devices/VMMDev/FastIrqAckR3");
+    PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatFastIrqAckRZ, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                           "Fast IRQ acknowledgments handled in ring-0 or raw-mode.", "/Devices/VMMDev/FastIrqAckRZ");
+    PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatSlowIrqAck, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                           "Slow IRQ acknowledgments (old style).", "/Devices/VMMDev/SlowIrqAck");
+    PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReqBufAllocs, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                           "Times a larger request buffer was required.", "/Devices/VMMDev/LargeReqBufAllocs");
 #ifdef VBOX_WITH_HGCM
     PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatHgcmCmdArrival, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,

@@ -4494,6 +4734,4 @@
                            "Times the allocation cache could not be used.", "/HGCM/LargeCmdAllocs");
 #endif
-    PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReqBufAllocs, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
-                           "Times a larger request buffer was required.", "/HGCM/LargeReqBufAllocs");
 
     /*

@@ -4560,4 +4798,4 @@
     PDM_DEVREG_VERSION
 };
+#endif /* IN_RING3 */
 #endif /* !VBOX_DEVICE_STRUCT_TESTCASE */
-
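Much of the churn above replaces pThis->pDevIns with pThis->pDevInsR3 or pThis->CTX_SUFF(pDevIns), because parts of the file are now also compiled for ring-0 and raw-mode and each context sees the device instance at a different address. The following stand-alone sketch only mirrors the idea behind that pattern; MYDEVSTATE and MY_CTX_SUFF are made-up names, not VirtualBox's actual PDM or CTX_SUFF definitions, and IN_RC/IN_RING0 are assumed to be the per-context build defines.

    #include <stdint.h>

    /* Simplified state with one pointer member per execution context. */
    typedef struct MYDEVSTATE
    {
        uintptr_t pDevInsR3;   /* address valid in ring-3 (host userland) */
        uintptr_t pDevInsR0;   /* address valid in ring-0 (host kernel)   */
        uintptr_t pDevInsRC;   /* address valid in raw-mode context       */
    } MYDEVSTATE;

    /* Token-pasting macro that picks the member for the current compile context. */
    #if defined(IN_RC)
    # define MY_CTX_SUFF(a_Name)   a_Name##RC
    #elif defined(IN_RING0)
    # define MY_CTX_SUFF(a_Name)   a_Name##R0
    #else
    # define MY_CTX_SUFF(a_Name)   a_Name##R3
    #endif

    /* The same source line resolves to the right pointer in R3, R0 and RC builds. */
    static uintptr_t myGetDevIns(MYDEVSTATE const *pThis)
    {
        return pThis->MY_CTX_SUFF(pDevIns);
    }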
trunk/src/VBox/Devices/VMMDev/VMMDevHGCM.cpp
--- trunk/src/VBox/Devices/VMMDev/VMMDevHGCM.cpp (r75555)
+++ trunk/src/VBox/Devices/VMMDev/VMMDevHGCM.cpp (r75585)

@@ -328,5 +328,5 @@
     if (pCmd->pvReqLocked)
     {
-        PDMDevHlpPhysReleasePageMappingLock(pThis->pDevIns, &pCmd->ReqMapLock);
+        PDMDevHlpPhysReleasePageMappingLock(pThis->pDevInsR3, &pCmd->ReqMapLock);
         pCmd->pvReqLocked = NULL;
     }

@@ -673,5 +673,5 @@
             if (pGuestParm->enmType != VMMDevHGCMParmType_Embedded)
             {
-                int rc = vmmdevHGCMGuestBufferRead(pThis->pDevIns, pv, cbData, &pGuestParm->u.ptr);
+                int rc = vmmdevHGCMGuestBufferRead(pThis->pDevInsR3, pv, cbData, &pGuestParm->u.ptr);
                 ASSERT_GUEST_RETURN(RT_SUCCESS(rc), rc);
                 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();

@@ -872,5 +872,5 @@
          */
         RTGCPHYS GCPhys;
-        int rc2 = PDMDevHlpPhysGCPtr2GCPhys(pThis->pDevIns, GCPtr, &GCPhys);
+        int rc2 = PDMDevHlpPhysGCPtr2GCPhys(pThis->pDevInsR3, GCPtr, &GCPhys);
         if (RT_FAILURE(rc2))
             GCPhys = NIL_RTGCPHYS;

@@ -1071,5 +1071,5 @@
             pHeader->result    = VINF_SUCCESS;
             pHeader->fu32Flags |= VBOX_HGCM_REQ_DONE;
-            PDMDevHlpPhysWrite(pThis->pDevIns, GCPhys, pHeader, sizeof(*pHeader));
+            PDMDevHlpPhysWrite(pThis->pDevInsR3, GCPhys, pHeader, sizeof(*pHeader));
         }
         vmmdevHGCMCmdFree(pThis, pCmd);

@@ -1212,5 +1212,5 @@
                     const void *pvSrc = pHostParm->u.pointer.addr;
                     uint32_t    cbSrc = pHostParm->u.pointer.size;
-                    rc = vmmdevHGCMGuestBufferWrite(pThis->pDevIns, pPtr, pvSrc, cbSrc);
+                    rc = vmmdevHGCMGuestBufferWrite(pThis->pDevInsR3, pPtr, pvSrc, cbSrc);
                 }
                 break;

@@ -1228,5 +1228,5 @@
                         memcpy((uint8_t *)pCmd->pvReqLocked + pPtr->offFirstPage, pvSrc, cbSrc);
                     else
-                        rc = PDMDevHlpPhysWrite(pThis->pDevIns, pGuestParm->u.ptr.GCPhysSinglePage, pvSrc, cbSrc);
+                        rc = PDMDevHlpPhysWrite(pThis->pDevInsR3, pGuestParm->u.ptr.GCPhysSinglePage, pvSrc, cbSrc);
                 }
                 break;

@@ -1300,5 +1300,5 @@
      * The request data is not be used for anything but checking the request type.
      */
-    PDMDevHlpPhysRead(pThis->pDevIns, pCmd->GCPhys, pHeader, pCmd->cbRequest);
+    PDMDevHlpPhysRead(pThis->pDevInsR3, pCmd->GCPhys, pHeader, pCmd->cbRequest);
     RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
 

@@ -1360,5 +1360,5 @@
 
         /* First write back the request. */
-        PDMDevHlpPhysWrite(pThis->pDevIns, pCmd->GCPhys, pHeader, pCmd->cbRequest);
+        PDMDevHlpPhysWrite(pThis->pDevInsR3, pCmd->GCPhys, pHeader, pCmd->cbRequest);
 
         /* Mark request as processed. */

@@ -1366,5 +1366,5 @@
 
         /* Second write the flags to mark the request as processed. */
-        PDMDevHlpPhysWrite(pThis->pDevIns, pCmd->GCPhys + RT_UOFFSETOF(VMMDevHGCMRequestHeader, fu32Flags),
+        PDMDevHlpPhysWrite(pThis->pDevInsR3, pCmd->GCPhys + RT_UOFFSETOF(VMMDevHGCMRequestHeader, fu32Flags),
                            &pHeader->fu32Flags, sizeof(pHeader->fu32Flags));
 

@@ -1491,5 +1491,5 @@
      *        efficient...? */
     /* Not safe to execute asynchronously; forward to EMT */
-    int rc = VMR3ReqCallVoidNoWait(PDMDevHlpGetVM(pThis->pDevIns), VMCPUID_ANY,
+    int rc = VMR3ReqCallVoidNoWait(PDMDevHlpGetVM(pThis->pDevInsR3), VMCPUID_ANY,
                                    (PFNRT)hgcmCompletedWorker, 3, pInterface, result, pCmd);
     AssertRC(rc);

@@ -2141,5 +2141,5 @@
         AssertBreakStmt(pReqHdr, vmmdevHGCMCmdFree(pThis, pCmd); rcFunc = VERR_NO_MEMORY);
 
-        PDMDevHlpPhysRead(pThis->pDevIns, pCmd->GCPhys, pReqHdr, pCmd->cbRequest);
+        PDMDevHlpPhysRead(pThis->pDevInsR3, pCmd->GCPhys, pReqHdr, pCmd->cbRequest);
         RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
 

@@ -2228,5 +2228,5 @@
 
             /* Write back only the header. */
-            PDMDevHlpPhysWrite(pThis->pDevIns, pCmd->GCPhys, pReqHdr, sizeof(*pReqHdr));
+            PDMDevHlpPhysWrite(pThis->pDevInsR3, pCmd->GCPhys, pReqHdr, sizeof(*pReqHdr));
 
             VMMDevNotifyGuest(pThis, VMMDEV_EVENT_HGCM);
trunk/src/VBox/Devices/VMMDev/VMMDevState.h
--- trunk/src/VBox/Devices/VMMDev/VMMDevState.h (r75537)
+++ trunk/src/VBox/Devices/VMMDev/VMMDevState.h (r75585)

@@ -135,9 +135,16 @@
     uint32_t fHostCursorRequested;
 
+#if HC_ARCH_BITS == 32
     /** Alignment padding. */
     uint32_t u32Alignment0;
+#endif
 
-    /** Pointer to device instance. */
-    PPDMDEVINSR3 pDevIns;
+    /** Pointer to device instance - RC pointer. */
+    PPDMDEVINSRC pDevInsRC;
+    /** Pointer to device instance - R3 pointer. */
+    PPDMDEVINSR3 pDevInsR3;
+    /** Pointer to device instance - R0 pointer. */
+    PPDMDEVINSR0 pDevInsR0;
+
     /** LUN\#0 + Status: VMMDev port base interface. */
     PDMIBASE IBase;

@@ -163,4 +170,10 @@
     uint32_t u32Alignment2;
 
+    /** Statistics counter for slow IRQ ACK. */
+    STAMCOUNTER StatSlowIrqAck;
+    /** Statistics counter for fast IRQ ACK - R3. */
+    STAMCOUNTER StatFastIrqAckR3;
+    /** Statistics counter for fast IRQ ACK - R0 / RC. */
+    STAMCOUNTER StatFastIrqAckRZ;
     /** IRQ number assigned to the device */
     uint32_t irq;

@@ -179,10 +192,14 @@
 
     /** GC physical address of VMMDev RAM area */
-    RTGCPHYS32 GCPhysVMMDevRAM;
+    RTGCPHYS32                GCPhysVMMDevRAM;
     /** R3 pointer to VMMDev RAM area */
-    R3PTRTYPE(VMMDevMemory *) pVMMDevRAMR3;
-
-    /** R3 pointer to VMMDev Heap RAM area
-     */
+    R3PTRTYPE(VMMDevMemory *) pVMMDevRAMR3;
+    /** R0 pointer to VMMDev RAM area - first page only, could be NULL! */
+    R0PTRTYPE(VMMDevMemory *) pVMMDevRAMR0;
+    /** RC pointer to VMMDev RAM area - first page only, could be NULL! */
+    RCPTRTYPE(VMMDevMemory *) pVMMDevRAMRC;
+    RTGCPTR                   RCPtrAlignment3b;
+
+    /** R3 pointer to VMMDev Heap RAM area. */
     R3PTRTYPE(VMMDevMemory *) pVMMDevHeapR3;
     /** GC physical address of VMMDev Heap RAM area */
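The HC_ARCH_BITS == 32 padding keeps the structure layout identical on 32-bit and 64-bit hosts now that the pointer triplet follows a 32-bit field. A minimal stand-alone illustration of the idea, using plain C11 and a made-up EXAMPLESTATE struct instead of VirtualBox's AssertCompile machinery:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct EXAMPLESTATE
    {
        uint32_t fFlags;                  /* 4 bytes */
    #if UINTPTR_MAX == UINT32_MAX         /* 32-bit host: pad so the 64-bit field below sits at offset 8 */
        uint32_t u32Padding;
    #endif
        uint64_t u64RingZeroPtr;          /* always 64 bits wide, like PPDMDEVINSR0 */
    } EXAMPLESTATE;

    /* Catch layout drift at compile time, much like the AssertCompileSize() checks in the headers. */
    static_assert(offsetof(EXAMPLESTATE, u64RingZeroPtr) == 8, "unexpected structure layout");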
trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp
--- trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp (r73606)
+++ trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp (r75585)

@@ -3877,5 +3877,11 @@
      * Pass the request on to the support library/driver.
      */
-    int rc = SUPR3PageMapKernel(pFirstRegMmio->pvR3, off, cb, 0, pR0Ptr);
+#if defined(RT_OS_WINDOWS) || defined(RT_OS_LINUX) || defined(RT_OS_OS2) /** @todo Fully implement RTR0MemObjMapKernelEx everywhere. */
+    AssertLogRelReturn(off == 0, VERR_NOT_SUPPORTED);
+    AssertLogRelReturn(pFirstRegMmio->fFlags & PGMREGMMIORANGE_F_LAST_CHUNK, VERR_NOT_SUPPORTED);
+    int rc = SUPR3PageMapKernel(pFirstRegMmio->pvR3, 0 /*off*/, pFirstRegMmio->RamRange.cb, 0 /*fFlags*/, pR0Ptr);
+#else
+    int rc = SUPR3PageMapKernel(pFirstRegMmio->pvR3, off, cb, 0 /*fFlags*/, pR0Ptr);
+#endif
 
     return rc;