Timestamp: Jun 16, 2023 6:54:50 AM
Location:  trunk
Files:     4 edited
Legend (line markers used in the diffs below):
  ' '  Unmodified
  '+'  Added
  '-'  Removed
  '…'  Unchanged lines omitted
trunk/include/VBox/VMMDev.h
(against r98542)

  */
 
+/** The VMMDev assumed page size (not the real guest page size, which might differ
+ * between guest and host, on ARM for example); 4KiB is the minimum supported by both
+ * ARM and x86 and was the size used back when the device supported x86 only. */
+#define VMMDEV_PAGE_SIZE                        _4K
+/** The VMMDev assumed page shift. */
+#define VMMDEV_PAGE_SHIFT                       12
+/** The VMMDev assumed page offset mask. */
+#define VMMDEV_PAGE_OFFSET_MASK                 0xfff
+
 
 /** Size of VMMDev RAM region accessible by guest.
…
  * For now: 4 megabyte.
  */
-#define VMMDEV_RAM_SIZE                         (4 * 256 * PAGE_SIZE)
+#define VMMDEV_RAM_SIZE                         (4 * 256 * VMMDEV_PAGE_SIZE)
 
 /** Size of VMMDev heap region accessible by guest.
  * (Must be a power of two (pci range).)
  */
-#define VMMDEV_HEAP_SIZE                        (4 * PAGE_SIZE)
+#define VMMDEV_HEAP_SIZE                        (4 * VMMDEV_PAGE_SIZE)
 
 /** Port for generic request interface (relative offset). */
…
 /** Port for requests that can be handled w/o going to ring-3 (relative offset).
  * This works like VMMDevReq_AcknowledgeEvents when read. */
 #define VMMDEV_PORT_OFF_REQUEST_FAST            8
+
+
+/** The MMIO region size if MMIO is used instead of PIO. */
+#define VMMDEV_MMIO_SIZE                        _4K
+/** Register for the generic request interface (relative offset). */
+#define VMMDEV_MMIO_OFF_REQUEST                 0
+/** Register for requests that can be handled w/o going to ring-3 (relative offset).
+ * This works like VMMDevReq_AcknowledgeEvents when read. */
+#define VMMDEV_MMIO_OFF_REQUEST_FAST            8
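The new fixed-size constants pin the device ABI to 4KiB pages regardless of the real guest page size. For illustration only (not part of this changeset; the helper name is hypothetical, the arithmetic mirrors the HGCM code further down):

    /* Sketch: split a guest buffer into VMMDev pages.  Because
     * VMMDEV_PAGE_SIZE is fixed at 4KiB, this stays correct even when an
     * ARM guest runs with 16KiB or 64KiB MMU pages. */
    static uint32_t vmmdevExampleCalcPageCount(RTGCPTR GCPtr, uint32_t cbData)
    {
        /* Byte offset of the data within its first VMMDev page. */
        uint32_t const offFirstPage = (uint32_t)(GCPtr & VMMDEV_PAGE_OFFSET_MASK);
        /* Round the covered byte range up to whole 4KiB pages. */
        return (offFirstPage + cbData + VMMDEV_PAGE_SIZE - 1) >> VMMDEV_PAGE_SHIFT;
    }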
trunk/src/VBox/Devices/VMMDev/VMMDev.cpp
(against r98542)

 {
     /*
-     * Note! This code is duplicated in vmmdevFastRequestIrqAck.
+     * Note! This code is duplicated in vmmdevPioFastRequestIrqAck.
      */
     if (pThis->fNewGuestFilterMaskValid)
…
 /**
- * @callback_method_impl{FNIOMIOPORTNEWOUT,
- *      Port I/O write handler for the generic request interface.}
- */
-static DECLCALLBACK(VBOXSTRICTRC)
-vmmdevRequestHandler(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t u32, unsigned cb)
+ * The request handler shared by the PIO and MMIO access paths.
+ *
+ * @returns Strict VBox status code.
+ * @param   pDevIns         The device instance.
+ * @param   GCPhysReqHdr    Physical address of the request header.
+ */
+static VBOXSTRICTRC vmmdevRequestHandler(PPDMDEVINS pDevIns, RTGCPHYS GCPhysReqHdr)
 {
     uint64_t tsArrival;
     STAM_GET_TS(tsArrival);
-
-    RT_NOREF(offPort, cb, pvUser);
 
     /*
…
     VMMDevRequestHeader requestHeader;
     RT_ZERO(requestHeader);
-    PDMDevHlpPhysRead(pDevIns, (RTGCPHYS)u32, &requestHeader, sizeof(requestHeader));
+    PDMDevHlpPhysRead(pDevIns, GCPhysReqHdr, &requestHeader, sizeof(requestHeader));
 
     /* The structure size must be greater or equal to the header size. */
…
     if (   (   requestHeader.requestType == VMMDevReq_HGCMCall32
             || requestHeader.requestType == VMMDevReq_HGCMCall64)
-        && ((u32 + requestHeader.size) >> X86_PAGE_SHIFT) == (u32 >> X86_PAGE_SHIFT)
-        && RT_SUCCESS(PDMDevHlpPhysGCPhys2CCPtr(pDevIns, u32, 0 /*fFlags*/, &Lock.pvReq, &Lock.Lock)) )
+        && ((GCPhysReqHdr + requestHeader.size) >> VMMDEV_PAGE_SHIFT) == (GCPhysReqHdr >> VMMDEV_PAGE_SHIFT)
+        && RT_SUCCESS(PDMDevHlpPhysGCPhys2CCPtr(pDevIns, GCPhysReqHdr, 0 /*fFlags*/, &Lock.pvReq, &Lock.Lock)) )
     {
         memcpy((uint8_t *)pRequestHeader + sizeof(VMMDevRequestHeader),
…
     else
         PDMDevHlpPhysRead(pDevIns,
-                          (RTGCPHYS)u32 + sizeof(VMMDevRequestHeader),
+                          GCPhysReqHdr + sizeof(VMMDevRequestHeader),
                           (uint8_t *)pRequestHeader + sizeof(VMMDevRequestHeader),
                           cbLeft);
…
     PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, &pThis->CritSect, rcLock);
 
-    rcRet = vmmdevReqDispatcher(pDevIns, pThis, pThisCC, pRequestHeader, u32, tsArrival, &fPostOptimize, &pLock);
+    rcRet = vmmdevReqDispatcher(pDevIns, pThis, pThisCC, pRequestHeader, GCPhysReqHdr, tsArrival, &fPostOptimize, &pLock);
 
     PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
…
             memcpy(pLock->pvReq, pRequestHeader, pRequestHeader->size);
         else
-            PDMDevHlpPhysWrite(pDevIns, u32, pRequestHeader, pRequestHeader->size);
+            PDMDevHlpPhysWrite(pDevIns, GCPhysReqHdr, pRequestHeader, pRequestHeader->size);
     }
…
      * Write the result back to guest memory.
      */
-    PDMDevHlpPhysWrite(pDevIns, u32, &requestHeader, sizeof(requestHeader));
+    PDMDevHlpPhysWrite(pDevIns, GCPhysReqHdr, &requestHeader, sizeof(requestHeader));
 
     return rcRet;
 }
 
+
+/**
+ * @callback_method_impl{FNIOMIOPORTNEWOUT,
+ *      Port I/O write handler for the generic request interface.}
+ */
+static DECLCALLBACK(VBOXSTRICTRC)
+vmmdevPioRequestHandler(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t u32, unsigned cb)
+{
+    RT_NOREF(offPort, cb, pvUser);
+    return vmmdevRequestHandler(pDevIns, u32);
+}
+
 #endif /* IN_RING3 */
+
+
+/**
+ * Common worker for handling the fast interrupt acknowledge path from both
+ * the PIO and MMIO access handlers.
+ *
+ * @returns Strict VBox status code.
+ * @param   pDevIns     The device instance.
+ * @param   pu32        Where to store the host event flags.
+ * @param   rcToR3      The status code to return when locking fails in
+ *                      non ring-3/userspace environments (R0 or RC).
+ */
+static VBOXSTRICTRC vmmdevFastReqIrqAck(PPDMDEVINS pDevIns, uint32_t *pu32, int rcToR3)
+{
+    PVMMDEV   pThis   = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
+    PVMMDEVCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PVMMDEVCC);
+    Assert(PDMDEVINS_2_DATA(pDevIns, PVMMDEV) == pThis);
+
+#ifdef IN_RING3
+    RT_NOREF(rcToR3);
+#endif
+
+    /* The VMMDev memory mapping might've failed, go to ring-3 in that case. */
+    VBOXSTRICTRC rcStrict;
+#ifndef IN_RING3
+    if (pThisCC->CTX_SUFF(pVMMDevRAM) != NULL)
+#endif
+    {
+        /* Enter critical section and check that the additions have been properly
+           initialized and that we're not in legacy v1.3 device mode. */
+        rcStrict = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, rcToR3);
+        if (rcStrict == VINF_SUCCESS)
+        {
+            if (   pThis->fu32AdditionsOk
+                && !VMMDEV_INTERFACE_VERSION_IS_1_03(pThis))
+            {
+                /*
+                 * Do the job.
+                 *
+                 * Note! This code is duplicated in vmmdevReqHandler_AcknowledgeEvents.
+                 */
+                STAM_REL_COUNTER_INC(&pThis->CTX_SUFF_Z(StatFastIrqAck));
+
+                if (pThis->fNewGuestFilterMaskValid)
+                {
+                    pThis->fNewGuestFilterMaskValid = false;
+                    pThis->fGuestFilterMask         = pThis->fNewGuestFilterMask;
+                }
+
+                *pu32 = pThis->fHostEventFlags & pThis->fGuestFilterMask;
+
+                pThis->fHostEventFlags &= ~pThis->fGuestFilterMask;
+                pThisCC->CTX_SUFF(pVMMDevRAM)->V.V1_04.fHaveEvents = false;
+
+                PDMDevHlpPCISetIrqNoWait(pDevIns, 0, 0);
+            }
+            else
+            {
+                Log(("vmmdevFastRequestIrqAck: fu32AdditionsOk=%d interfaceVersion=%#x\n", pThis->fu32AdditionsOk,
+                     pThis->guestInfo.interfaceVersion));
+                *pu32 = UINT32_MAX;
+            }
+
+            PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
+        }
+    }
+#ifndef IN_RING3
+    else
+        rcStrict = rcToR3;
+#endif
+    return rcStrict;
+}
…
  */
 static DECLCALLBACK(VBOXSTRICTRC)
-vmmdevFastRequestHandler(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t u32, unsigned cb)
+vmmdevPioFastRequestHandler(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t u32, unsigned cb)
 {
 #ifndef IN_RING3
-# if 0 /* This functionality is offered through reading the port (vmmdevFastRequestIrqAck). Leaving it here for later. */
+# if 0 /* This functionality is offered through reading the port (vmmdevPioFastRequestIrqAck). Leaving it here for later. */
     PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
     RT_NOREF(pvUser, Port, cb);
…
 #else  /* IN_RING3 */
-    return vmmdevRequestHandler(pDevIns, pvUser, offPort, u32, cb);
+    return vmmdevPioRequestHandler(pDevIns, pvUser, offPort, u32, cb);
 #endif /* IN_RING3 */
 }
…
  */
 static DECLCALLBACK(VBOXSTRICTRC)
-vmmdevFastRequestIrqAck(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t *pu32, unsigned cb)
-{
-    PVMMDEV   pThis   = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
-    PVMMDEVCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PVMMDEVCC);
-    Assert(PDMDEVINS_2_DATA(pDevIns, PVMMDEV) == pThis);
+vmmdevPioFastRequestIrqAck(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t *pu32, unsigned cb)
+{
     RT_NOREF(pvUser, offPort);
 
     /* Only 32-bit accesses. */
     ASSERT_GUEST_MSG_RETURN(cb == sizeof(uint32_t), ("cb=%d\n", cb), VERR_IOM_IOPORT_UNUSED);
 
-    /* The VMMDev memory mapping might've failed, go to ring-3 in that case. */
-    VBOXSTRICTRC rcStrict;
-#ifndef IN_RING3
-    if (pThisCC->CTX_SUFF(pVMMDevRAM) != NULL)
-#endif
-    {
-        /* Enter critical section and check that the additions have been properly
-           initialized and that we're not in legacy v1.3 device mode. */
-        rcStrict = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, VINF_IOM_R3_IOPORT_READ);
-        if (rcStrict == VINF_SUCCESS)
-        {
-            if (   pThis->fu32AdditionsOk
-                && !VMMDEV_INTERFACE_VERSION_IS_1_03(pThis))
-            {
-                /*
-                 * Do the job.
-                 *
-                 * Note! This code is duplicated in vmmdevReqHandler_AcknowledgeEvents.
-                 */
-                STAM_REL_COUNTER_INC(&pThis->CTX_SUFF_Z(StatFastIrqAck));
-
-                if (pThis->fNewGuestFilterMaskValid)
-                {
-                    pThis->fNewGuestFilterMaskValid = false;
-                    pThis->fGuestFilterMask         = pThis->fNewGuestFilterMask;
-                }
-
-                *pu32 = pThis->fHostEventFlags & pThis->fGuestFilterMask;
-
-                pThis->fHostEventFlags &= ~pThis->fGuestFilterMask;
-                pThisCC->CTX_SUFF(pVMMDevRAM)->V.V1_04.fHaveEvents = false;
-
-                PDMDevHlpPCISetIrqNoWait(pDevIns, 0, 0);
-            }
-            else
-            {
-                Log(("vmmdevFastRequestIrqAck: fu32AdditionsOk=%d interfaceVersion=%#x\n", pThis->fu32AdditionsOk,
-                     pThis->guestInfo.interfaceVersion));
-                *pu32 = UINT32_MAX;
-            }
-
-            PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
-        }
-    }
-#ifndef IN_RING3
-    else
-        rcStrict = VINF_IOM_R3_IOPORT_READ;
-#endif
-    return rcStrict;
+    return vmmdevFastReqIrqAck(pDevIns, pu32, VINF_IOM_R3_IOPORT_READ);
+}
+
+
+/**
+ * @callback_method_impl{FNIOMMMIONEWREAD, Read a MMIO register.}
+ */
+static DECLCALLBACK(VBOXSTRICTRC) vmmdevMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
+{
+    const uint32_t offReg = (uint32_t)off;
+    RT_NOREF(pvUser);
+
+    /* Only 32-bit accesses. */
+    ASSERT_GUEST_MSG_RETURN(cb == sizeof(uint32_t), ("cb=%d\n", cb), VINF_IOM_MMIO_UNUSED_FF);
+
+    Log2(("vmmdevMmioRead %RGp (offset %04X) size=%u\n", off, offReg, cb));
+
+    VBOXSTRICTRC rcStrict;
+    switch (offReg)
+    {
+        case VMMDEV_MMIO_OFF_REQUEST_FAST:
+            rcStrict = vmmdevFastReqIrqAck(pDevIns, (uint32_t *)pv, VINF_IOM_R3_MMIO_READ);
+            break;
+        case VMMDEV_MMIO_OFF_REQUEST:
+        default:
+            Log(("VMMDev: Trying to read unimplemented register at offset %04X!\n", offReg));
+            rcStrict = VINF_IOM_MMIO_UNUSED_FF;
+            break;
+    }
+
+    return rcStrict;
+}
+
+
+/**
+ * @callback_method_impl{FNIOMMMIONEWWRITE, Write to a MMIO register.}
+ */
+static DECLCALLBACK(VBOXSTRICTRC) vmmdevMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
+{
+    const uint32_t offReg = (uint32_t)off;
+    RT_NOREF(pvUser);
+
+    /* Only 32-bit and 64-bit accesses. */
+    ASSERT_GUEST_MSG_RETURN(cb == sizeof(uint32_t) || cb == sizeof(uint64_t),
+                            ("cb=%u\n", cb), VINF_IOM_MMIO_UNUSED_FF);
+
+    uint64_t u64Val = 0;
+    if (cb == sizeof(uint64_t))
+        u64Val = *(uint64_t *)pv;
+    else if (cb == sizeof(uint32_t))
+        u64Val = *(uint32_t *)pv;
+
+    Log2(("vmmdevMmioWrite %RGp (offset %04X) %#RX64 size=%u\n", off, offReg, u64Val, cb));
+
+    VBOXSTRICTRC rcStrict;
+    switch (offReg)
+    {
+        case VMMDEV_MMIO_OFF_REQUEST:
+        case VMMDEV_MMIO_OFF_REQUEST_FAST:
+#ifndef IN_RING3
+            rcStrict = VINF_IOM_R3_MMIO_WRITE;
+#else
+            rcStrict = vmmdevRequestHandler(pDevIns, u64Val);
+#endif
+            break;
+        default:
+            /* Ignore writes to unimplemented or read-only registers. */
+            Log(("VMMDev: Trying to write unimplemented or R/O register at offset %04X!\n", offReg));
+            rcStrict = VINF_SUCCESS;
+            break;
+    }
+
+    return rcStrict;
 }
…
     pHlp->pfnSSMPutBool(pSSM, pThis->fKeepCredentials);
     pHlp->pfnSSMPutBool(pSSM, pThis->fHeapEnabled);
+    pHlp->pfnSSMPutBool(pSSM, pThis->fMmioReq);
 
     return VINF_SSM_DONT_CALL_AGAIN;
…
             return pHlp->pfnSSMSetCfgError(pSSM, RT_SRC_POS, N_("Config mismatch - fHeapEnabled: config=%RTbool saved=%RTbool"),
                                            pThis->fHeapEnabled, f);
+
+        f = false;
+        if (uVersion >= VMMDEV_SAVED_STATE_VERSION_MMIO_ACCESS)
+        {
+            rc = pHlp->pfnSSMGetBool(pSSM, &f);
+            AssertRCReturn(rc, rc);
+        }
+        if (pThis->fMmioReq != f)
+            return pHlp->pfnSSMSetCfgError(pSSM, RT_SRC_POS, N_("Config mismatch - fMmioReq: config=%RTbool saved=%RTbool"),
+                                           pThis->fMmioReq, f);
     }
…
                                   "HeartbeatInterval|"
                                   "HeartbeatTimeout|"
+                                  "MmioReq|"
                                   "TestingEnabled|"
                                   "TestingMMIO|"
…
                                 N_("Configuration error: Failed querying \"KeepCredentials\" as a boolean"));
 
-    rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "HeapEnabled", &pThis->fHeapEnabled, true);
+    /* The heap is of no use on non-x86 guest architectures. */
+    static const bool fHeapEnabledDef = PDMDevHlpCpuIsGuestArchX86(pDevIns);
+    rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "HeapEnabled", &pThis->fHeapEnabled, fHeapEnabledDef);
     if (RT_FAILURE(rc))
         return PDMDEV_SET_ERROR(pDevIns, rc,
…
                                    N_("Configuration error: Heartbeat timeout \"HeartbeatTimeout\" value (%'ull ns) is too close to the interval (%'ull ns)"),
                                    pThis->cNsHeartbeatTimeout, pThis->cNsHeartbeatInterval);
+
+    /* On everything other than x86 we have to offer the MMIO interface, because
+       port I/O is either not available or emulated through MMIO anyway. */
+    static const bool fMmioReqEnabledDef = !PDMDevHlpCpuIsGuestArchX86(pDevIns);
+    rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "MmioReq", &pThis->fMmioReq, fMmioReqEnabledDef);
+    if (RT_FAILURE(rc))
+        return PDMDEV_SET_ERROR(pDevIns, rc, N_("Configuration error: Failed querying \"MmioReq\" as a boolean"));
 
 #ifndef VBOX_WITHOUT_TESTING_FEATURES
…
      * so we have to do it via the mapper callback.
      */
-    rc = PDMDevHlpIoPortCreate(pDevIns, 1 /*cPorts*/, pPciDev, RT_MAKE_U32(0, 0), vmmdevRequestHandler, NULL /*pfnIn*/,
+    rc = PDMDevHlpIoPortCreate(pDevIns, 1 /*cPorts*/, pPciDev, RT_MAKE_U32(0, 0), vmmdevPioRequestHandler, NULL /*pfnIn*/,
                                NULL /*pvUser*/, "VMMDev Request Handler", NULL, &pThis->hIoPortReq);
     AssertRCReturn(rc, rc);
 
-    rc = PDMDevHlpIoPortCreate(pDevIns, 1 /*cPorts*/, pPciDev, RT_MAKE_U32(1, 0), vmmdevFastRequestHandler,
-                               vmmdevFastRequestIrqAck, NULL, "VMMDev Fast R0/RC Requests", NULL /*pvUser*/, &pThis->hIoPortFast);
+    rc = PDMDevHlpIoPortCreate(pDevIns, 1 /*cPorts*/, pPciDev, RT_MAKE_U32(1, 0), vmmdevPioFastRequestHandler,
+                               vmmdevPioFastRequestIrqAck, NULL, "VMMDev Fast R0/RC Requests", NULL /*pvUser*/, &pThis->hIoPortFast);
     AssertRCReturn(rc, rc);
…
         rc = PDMDevHlpRegisterVMMDevHeap(pDevIns, NIL_RTGCPHYS, pThisCC->pVMMDevHeapR3, VMMDEV_HEAP_SIZE);
         AssertLogRelRCReturn(rc, rc);
     }
+
+    if (pThis->fMmioReq)
+    {
+        rc = PDMDevHlpPCIIORegionCreateMmio(pDevIns, 3 /*iPciRegion*/, VMMDEV_MMIO_SIZE, PCI_ADDRESS_SPACE_MEM,
+                                            vmmdevMmioWrite, vmmdevMmioRead, NULL,
+                                            IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU,
+                                            "VMMDev MMIO Request Handler", &pThis->hMmioReq);
+        AssertRCReturn(rc, rc);
+    }
…
     /*
      * We map the first page of the VMMDevRAM into raw-mode and kernel contexts so we
-     * can handle interrupt acknowledge requests more timely (vmmdevFastRequestIrqAck).
+     * can handle interrupt acknowledge requests more timely (vmmdevPioFastRequestIrqAck).
      */
-    rc = PDMDevHlpMmio2SetUpContext(pDevIns, pThis->hMmio2VMMDevRAM, 0, GUEST_PAGE_SIZE, (void **)&pThisCC->CTX_SUFF(pVMMDevRAM));
+    rc = PDMDevHlpMmio2SetUpContext(pDevIns, pThis->hMmio2VMMDevRAM, 0, VMMDEV_PAGE_SIZE, (void **)&pThisCC->CTX_SUFF(pVMMDevRAM));
     AssertRCReturn(rc, rc);
 
-    rc = PDMDevHlpIoPortSetUpContext(pDevIns, pThis->hIoPortFast, vmmdevFastRequestHandler, vmmdevFastRequestIrqAck, NULL);
+    rc = PDMDevHlpIoPortSetUpContext(pDevIns, pThis->hIoPortFast, vmmdevPioFastRequestHandler, vmmdevPioFastRequestIrqAck, NULL);
     AssertRCReturn(rc, rc);
+
+    if (pThis->fMmioReq)
+    {
+        rc = PDMDevHlpMmioSetUpContext(pDevIns, pThis->hMmioReq, vmmdevMmioWrite, vmmdevMmioRead, NULL /*pvUser*/);
+        AssertRCReturn(rc, rc);
+    }
 
 # ifndef VBOX_WITHOUT_TESTING_FEATURES
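Taken together, the new MMIO path mirrors the two PIO registers: offset 0 accepts request submissions and offset 8 is the fast interrupt acknowledge. A guest-side sketch of how PCI region #3 would be used (illustrative only; pbMmio is assumed to be the mapped VMMDEV_MMIO_SIZE region, GCPhysReq the guest-physical address of a prepared VMMDevRequestHeader, and the helper names are hypothetical):

    #include <stdint.h>

    /* Submit a request: a single store of the request header's guest-physical
     * address to the request register; vmmdevMmioWrite above forwards it to
     * vmmdevRequestHandler in ring-3, which writes the result back to guest
     * memory before the access completes. */
    static void vbglExamplePostMmioRequest(volatile uint8_t *pbMmio, uint64_t GCPhysReq)
    {
        *(volatile uint64_t *)(pbMmio + VMMDEV_MMIO_OFF_REQUEST) = GCPhysReq;
    }

    /* Acknowledge the IRQ: reading the fast-request register returns the
     * pending host event flags and de-asserts the interrupt, just like
     * VMMDevReq_AcknowledgeEvents (see vmmdevFastReqIrqAck above). */
    static uint32_t vbglExampleAckIrq(volatile uint8_t *pbMmio)
    {
        return *(volatile uint32_t *)(pbMmio + VMMDEV_MMIO_OFF_REQUEST_FAST);
    }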
trunk/src/VBox/Devices/VMMDev/VMMDevHGCM.cpp
(against r98103)

     if (pPtr->cPages == 1)
         return true;
-    RTGCPHYS64 Phys = pPtr->paPages[0] + GUEST_PAGE_SIZE;
+    RTGCPHYS64 Phys = pPtr->paPages[0] + VMMDEV_PAGE_SIZE;
     if (Phys != pPtr->paPages[1])
         return false;
…
     do
     {
-        Phys += GUEST_PAGE_SIZE;
+        Phys += VMMDEV_PAGE_SIZE;
         if (Phys != pPtr->paPages[iPage])
             return false;
…
     for (uint32_t iPage = 0; iPage < pPtr->cPages && cbRemaining > 0; ++iPage)
     {
-        uint32_t cbToRead = GUEST_PAGE_SIZE - offPage;
+        uint32_t cbToRead = VMMDEV_PAGE_SIZE - offPage;
         if (cbToRead > cbRemaining)
             cbToRead = cbRemaining;
…
     for (iPage = 0; iPage < pPtr->cPages && cbRemaining > 0; ++iPage)
     {
-        uint32_t cbToWrite = GUEST_PAGE_SIZE - offPage;
+        uint32_t cbToWrite = VMMDEV_PAGE_SIZE - offPage;
         if (cbToWrite > cbRemaining)
             cbToWrite = cbRemaining;
…
     ASSERT_GUEST_RETURN(cbData <= VMMDEV_MAX_HGCM_DATA_SIZE, VERR_INVALID_PARAMETER);
 
-    const uint32_t offFirstPage = cbData > 0 ? GCPtr & GUEST_PAGE_OFFSET_MASK : 0;
-    const uint32_t cPages       = cbData > 0 ? (offFirstPage + cbData + GUEST_PAGE_SIZE - 1) / GUEST_PAGE_SIZE : 0;
+    const uint32_t offFirstPage = cbData > 0 ? GCPtr & VMMDEV_PAGE_OFFSET_MASK : 0;
+    const uint32_t cPages       = cbData > 0 ? (offFirstPage + cbData + VMMDEV_PAGE_SIZE - 1) / VMMDEV_PAGE_SIZE : 0;
 
     pGuestParm->u.ptr.cbData = cbData;
…
     /* Convert the guest linear pointers of pages to physical addresses. */
-    GCPtr &= ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK;
+    GCPtr &= ~(RTGCPTR)VMMDEV_PAGE_OFFSET_MASK;
     for (uint32_t iPage = 0; iPage < cPages; ++iPage)
     {
…
         pGuestParm->u.ptr.paPages[iPage] = GCPhys;
-        GCPtr += GUEST_PAGE_SIZE;
+        GCPtr += VMMDEV_PAGE_SIZE;
     }
…
                             ("%#x\n", pPageListInfo->flags), VERR_INVALID_FLAGS);
     /* First page offset. */
-    ASSERT_GUEST_MSG_RETURN(pPageListInfo->offFirstPage < GUEST_PAGE_SIZE,
+    ASSERT_GUEST_MSG_RETURN(pPageListInfo->offFirstPage < VMMDEV_PAGE_SIZE,
                             ("%#x\n", pPageListInfo->offFirstPage), VERR_INVALID_PARAMETER);
…
     ASSERT_GUEST_MSG_RETURN(   pPageListInfo->cPages
                             == (pGuestParm->enmType == VMMDevHGCMParmType_ContiguousPageList ? 1
-                                :   RT_ALIGN_32(pPageListInfo->offFirstPage + cbData, GUEST_PAGE_SIZE)
-                                  >> GUEST_PAGE_SHIFT)
+                                :   RT_ALIGN_32(pPageListInfo->offFirstPage + cbData, VMMDEV_PAGE_SIZE)
+                                  >> VMMDEV_PAGE_SHIFT)
                             || pGuestParm->enmType == VMMDevHGCMParmType_PageList,
                             ("offFirstPage=%#x cbData=%#x cPages=%#x enmType=%d\n",
…
     {
         /* Validate page offsets */
-        ASSERT_GUEST_MSG_RETURN(   !(pPageListInfo->aPages[0] & GUEST_PAGE_OFFSET_MASK)
-                                || (pPageListInfo->aPages[0] & GUEST_PAGE_OFFSET_MASK) == pPageListInfo->offFirstPage,
+        ASSERT_GUEST_MSG_RETURN(   !(pPageListInfo->aPages[0] & VMMDEV_PAGE_OFFSET_MASK)
+                                || (pPageListInfo->aPages[0] & VMMDEV_PAGE_OFFSET_MASK) == pPageListInfo->offFirstPage,
                                 ("%#RX64 offFirstPage=%#x\n", pPageListInfo->aPages[0], pPageListInfo->offFirstPage),
                                 VERR_INVALID_POINTER);
         uint32_t const cPages = pPageListInfo->cPages;
         for (uint32_t iPage = 1; iPage < cPages; iPage++)
-            ASSERT_GUEST_MSG_RETURN(!(pPageListInfo->aPages[iPage] & GUEST_PAGE_OFFSET_MASK),
+            ASSERT_GUEST_MSG_RETURN(!(pPageListInfo->aPages[iPage] & VMMDEV_PAGE_OFFSET_MASK),
                                     ("[%#zx]=%#RX64\n", iPage, pPageListInfo->aPages[iPage]), VERR_INVALID_POINTER);
         RT_UNTRUSTED_VALIDATED_FENCE();
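All of these substitutions follow one pattern: HGCM buffer bookkeeping now counts in fixed 4KiB VMMDev pages instead of GUEST_PAGE_SIZE. As a condensed illustration (simplified from the contiguity check in the first hunk, not literal source):

    /* Sketch: a page list is physically contiguous iff every entry follows
     * its predecessor by exactly one 4KiB VMMDev page. */
    static bool vmmdevExampleIsContiguous(RTGCPHYS64 const *paPages, uint32_t cPages)
    {
        for (uint32_t iPage = 1; iPage < cPages; iPage++)
            if (paPages[iPage] != paPages[iPage - 1] + VMMDEV_PAGE_SIZE)
                return false;
        return true; /* zero or one page is trivially contiguous */
    }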
trunk/src/VBox/Devices/VMMDev/VMMDevState.h
(against r98103)

     /** Flag whether CPU hotplug events are monitored */
     bool                        fCpuHotPlugEventsEnabled;
+    /** Flag whether the VMM device is offering the request ports
+     * over MMIO as well (mainly for ARM at the moment). */
+    bool                        fMmioReq;
     /** Alignment padding. */
-    bool                        afPadding8[3];
+    bool                        afPadding8[2];
     /** CPU hotplug event */
     VMMDevCpuEventType          enmCpuHotPlugEvent;
…
     /** Handle for the fast VMM request I/O port (PCI region \#0). */
     IOMIOPORTHANDLE             hIoPortFast;
+    /** Handle for the VMM request MMIO region (PCI region \#3). */
+    IOMMMIOHANDLE               hMmioReq;
     /** Handle for the VMMDev RAM (PCI region \#1). */
     PGMMMIO2HANDLE              hMmio2VMMDevRAM;
…
 /** The saved state version. */
-#define VMMDEV_SAVED_STATE_VERSION                              VMMDEV_SAVED_STATE_VERSION_VMM_MOUSE_EXTENDED_DATA
+#define VMMDEV_SAVED_STATE_VERSION                              VMMDEV_SAVED_STATE_VERSION_MMIO_ACCESS
+/** Added support for optionally using MMIO instead of PIO for passing requests to the host (mainly for ARM). */
+#define VMMDEV_SAVED_STATE_VERSION_MMIO_ACCESS                  20
 /** The saved state version with VMMDev mouse buttons state and wheel movement data. */
 #define VMMDEV_SAVED_STATE_VERSION_VMM_MOUSE_EXTENDED_DATA      19
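The new fMmioReq field follows the usual versioned saved-state pattern: add a version constant, repoint the current-version alias at it, and gate the load on the stream version so that older streams, which lack the field, default it to false. Condensed from the VMMDev.cpp load hunk above (illustrative; the variable name is shortened):

    bool fSaved = false;                         /* default for pre-v20 streams */
    if (uVersion >= VMMDEV_SAVED_STATE_VERSION_MMIO_ACCESS)  /* i.e. >= 20 */
    {
        rc = pHlp->pfnSSMGetBool(pSSM, &fSaved);
        AssertRCReturn(rc, rc);
    }
    if (pThis->fMmioReq != fSaved)               /* config must match the stream */
        return pHlp->pfnSSMSetCfgError(pSSM, RT_SRC_POS,
                                       N_("Config mismatch - fMmioReq: config=%RTbool saved=%RTbool"),
                                       pThis->fMmioReq, fSaved);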