VirtualBox

Changeset 100185 in vbox for trunk


Ignore:
Timestamp:
Jun 16, 2023 6:54:50 AM (18 months ago)
Author:
vboxsync
Message:

Devices/VMMDev: Add an MMIO interface in addition to the existing PIO interface for guest additions running inside an ARM based guest. Also remove the dependency from the architecture page size and introduce a 4KiB VMM page size as ARM has different page sizes (4KiB, 16KiB, 64KiB) and it can differ between host and guest, bugref:10456

Location:
trunk
Files:
4 edited

Legend:

Unmodified
Added
Removed
  • trunk/include/VBox/VMMDev.h

    r98542 r100185  
    5454 */
    5555
     56/** The VMMDev assumed page size (not the real guest page size which might be different
     57 * between guest and host, on ARM for example), 4KiB is the minimum supported by both
     58 * ARM and x86 and was the one used back when the device supported x86 only. */
     59#define VMMDEV_PAGE_SIZE                                    _4K
     60/** The VMMDev assumed page shift. */
     61#define VMMDEV_PAGE_SHIFT                                   12
     62/** The VMMDev assumed page offset mask. */
     63#define VMMDEV_PAGE_OFFSET_MASK                             0xfff
     64
    5665
    5766/** Size of VMMDev RAM region accessible by guest.
     
    5968 * For now: 4 megabyte.
    6069 */
    61 #define VMMDEV_RAM_SIZE                                     (4 * 256 * PAGE_SIZE)
     70#define VMMDEV_RAM_SIZE                                     (4 * 256 * VMMDEV_PAGE_SIZE)
    6271
    6372/** Size of VMMDev heap region accessible by guest.
    6473 *  (Must be a power of two (pci range).)
    6574 */
    66 #define VMMDEV_HEAP_SIZE                                    (4 * PAGE_SIZE)
     75#define VMMDEV_HEAP_SIZE                                    (4 * VMMDEV_PAGE_SIZE)
    6776
    6877/** Port for generic request interface (relative offset). */
     
    7180 * This works like VMMDevReq_AcknowledgeEvents when read.  */
    7281#define VMMDEV_PORT_OFF_REQUEST_FAST                        8
     82
     83
     84/** The MMIO region size if MMIO is used instead of PIO. */
     85#define VMMDEV_MMIO_SIZE                                    _4K
     86/** Port for generic request interface (relative offset). */
     87#define VMMDEV_MMIO_OFF_REQUEST                             0
     88/** Port for requests that can be handled w/o going to ring-3 (relative offset).
     89 * This works like VMMDevReq_AcknowledgeEvents when read.  */
     90#define VMMDEV_MMIO_OFF_REQUEST_FAST                        8
    7391
    7492
  • trunk/src/VBox/Devices/VMMDev/VMMDev.cpp

    r98542 r100185  
    18321832    {
    18331833        /*
    1834          * Note! This code is duplicated in vmmdevFastRequestIrqAck.
     1834         * Note! This code is duplicated in vmmdevPioFastRequestIrqAck.
    18351835         */
    18361836        if (pThis->fNewGuestFilterMaskValid)
     
    30763076
    30773077/**
    3078  * @callback_method_impl{FNIOMIOPORTNEWOUT,
    3079  * Port I/O write handler for the generic request interface.}
    3080  */
    3081 static DECLCALLBACK(VBOXSTRICTRC)
    3082 vmmdevRequestHandler(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t u32, unsigned cb)
     3078 * The request handler shared by the PIO and MMIO access paths.
     3079 *
     3080 * @returns Strict VBox status code.
     3081 * @param   pDevIns         The device instance.
     3082 * @param   GCPhysReqHdr    Physical address of the request header.
     3083 */
     3084static VBOXSTRICTRC vmmdevRequestHandler(PPDMDEVINS pDevIns, RTGCPHYS GCPhysReqHdr)
    30833085{
    30843086    uint64_t tsArrival;
    30853087    STAM_GET_TS(tsArrival);
    3086 
    3087     RT_NOREF(offPort, cb, pvUser);
    30883088
    30893089    /*
     
    30943094    VMMDevRequestHeader requestHeader;
    30953095    RT_ZERO(requestHeader);
    3096     PDMDevHlpPhysRead(pDevIns, (RTGCPHYS)u32, &requestHeader, sizeof(requestHeader));
     3096    PDMDevHlpPhysRead(pDevIns, GCPhysReqHdr, &requestHeader, sizeof(requestHeader));
    30973097
    30983098    /* The structure size must be greater or equal to the header size. */
     
    31713171                    if (   (   requestHeader.requestType == VMMDevReq_HGCMCall32
    31723172                            || requestHeader.requestType == VMMDevReq_HGCMCall64)
    3173                         && ((u32 + requestHeader.size) >> X86_PAGE_SHIFT) == (u32 >> X86_PAGE_SHIFT)
    3174                         && RT_SUCCESS(PDMDevHlpPhysGCPhys2CCPtr(pDevIns, u32, 0 /*fFlags*/, &Lock.pvReq, &Lock.Lock)) )
     3173                        && ((GCPhysReqHdr + requestHeader.size) >> VMMDEV_PAGE_SHIFT) == (GCPhysReqHdr >> VMMDEV_PAGE_SHIFT)
     3174                        && RT_SUCCESS(PDMDevHlpPhysGCPhys2CCPtr(pDevIns, GCPhysReqHdr, 0 /*fFlags*/, &Lock.pvReq, &Lock.Lock)) )
    31753175                    {
    31763176                        memcpy((uint8_t *)pRequestHeader + sizeof(VMMDevRequestHeader),
     
    31803180                    else
    31813181                        PDMDevHlpPhysRead(pDevIns,
    3182                                           (RTGCPHYS)u32             + sizeof(VMMDevRequestHeader),
     3182                                          GCPhysReqHdr              + sizeof(VMMDevRequestHeader),
    31833183                                          (uint8_t *)pRequestHeader + sizeof(VMMDevRequestHeader),
    31843184                                          cbLeft);
     
    31923192                PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, &pThis->CritSect, rcLock);
    31933193
    3194                 rcRet = vmmdevReqDispatcher(pDevIns, pThis, pThisCC, pRequestHeader, u32, tsArrival, &fPostOptimize, &pLock);
     3194                rcRet = vmmdevReqDispatcher(pDevIns, pThis, pThisCC, pRequestHeader, GCPhysReqHdr, tsArrival, &fPostOptimize, &pLock);
    31953195
    31963196                PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
     
    32043204                        memcpy(pLock->pvReq, pRequestHeader, pRequestHeader->size);
    32053205                    else
    3206                         PDMDevHlpPhysWrite(pDevIns, u32, pRequestHeader, pRequestHeader->size);
     3206                        PDMDevHlpPhysWrite(pDevIns, GCPhysReqHdr, pRequestHeader, pRequestHeader->size);
    32073207                }
    32083208
     
    32333233     * Write the result back to guest memory.
    32343234     */
    3235     PDMDevHlpPhysWrite(pDevIns, u32, &requestHeader, sizeof(requestHeader));
     3235    PDMDevHlpPhysWrite(pDevIns, GCPhysReqHdr, &requestHeader, sizeof(requestHeader));
    32363236
    32373237    return rcRet;
    32383238}
    32393239
     3240
     3241/**
     3242 * @callback_method_impl{FNIOMIOPORTNEWOUT,
     3243 * Port I/O write handler for the generic request interface.}
     3244 */
     3245static DECLCALLBACK(VBOXSTRICTRC)
     3246vmmdevPioRequestHandler(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t u32, unsigned cb)
     3247{
     3248    RT_NOREF(offPort, cb, pvUser);
     3249
     3250    return vmmdevRequestHandler(pDevIns, u32);
     3251}
     3252
    32403253#endif /* IN_RING3 */
     3254
     3255
     3256/**
     3257 * Common worker for handling the fast interrupt acknowledge path from both
     3258 * PIO and MMIO access handlers.
     3259 *
     3260 * @returns Strict VBox status code.
     3261 * @param   pDevIns         The device instance.
     3262 * @param   pu32            Where to store the host event flags.
     3263 * @param   rcToR3          The status code to return when locking failed in
     3264 *                          non ring-3/userspace environments (R0 or RC).
     3265 */
     3266static VBOXSTRICTRC vmmdevFastReqIrqAck(PPDMDEVINS pDevIns, uint32_t *pu32, int rcToR3)
     3267{
     3268    PVMMDEV   pThis   = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
     3269    PVMMDEVCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PVMMDEVCC);
     3270    Assert(PDMDEVINS_2_DATA(pDevIns, PVMMDEV) == pThis);
     3271
     3272#ifdef IN_RING3
     3273    RT_NOREF(rcToR3);
     3274#endif
     3275
     3276    /* The VMMDev memory mapping might've failed, go to ring-3 in that case. */
     3277    VBOXSTRICTRC rcStrict;
     3278#ifndef IN_RING3
     3279    if (pThisCC->CTX_SUFF(pVMMDevRAM) != NULL)
     3280#endif
     3281    {
     3282        /* Enter critical section and check that the additions has been properly
     3283           initialized and that we're not in legacy v1.3 device mode. */
     3284        rcStrict = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, rcToR3);
     3285        if (rcStrict == VINF_SUCCESS)
     3286        {
     3287            if (   pThis->fu32AdditionsOk
     3288                && !VMMDEV_INTERFACE_VERSION_IS_1_03(pThis))
     3289            {
     3290                /*
     3291                 * Do the job.
     3292                 *
     3293                 * Note! This code is duplicated in vmmdevReqHandler_AcknowledgeEvents.
     3294                 */
     3295                STAM_REL_COUNTER_INC(&pThis->CTX_SUFF_Z(StatFastIrqAck));
     3296
     3297                if (pThis->fNewGuestFilterMaskValid)
     3298                {
     3299                    pThis->fNewGuestFilterMaskValid = false;
     3300                    pThis->fGuestFilterMask = pThis->fNewGuestFilterMask;
     3301                }
     3302
     3303                *pu32 = pThis->fHostEventFlags & pThis->fGuestFilterMask;
     3304
     3305                pThis->fHostEventFlags &= ~pThis->fGuestFilterMask;
     3306                pThisCC->CTX_SUFF(pVMMDevRAM)->V.V1_04.fHaveEvents = false;
     3307
     3308                PDMDevHlpPCISetIrqNoWait(pDevIns, 0, 0);
     3309            }
     3310            else
     3311            {
     3312                Log(("vmmdevFastRequestIrqAck: fu32AdditionsOk=%d interfaceVersion=%#x\n", pThis->fu32AdditionsOk,
     3313                     pThis->guestInfo.interfaceVersion));
     3314                *pu32 = UINT32_MAX;
     3315            }
     3316
     3317            PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
     3318        }
     3319    }
     3320#ifndef IN_RING3
     3321    else
     3322        rcStrict = rcToR3;
     3323#endif
     3324    return rcStrict;
     3325}
    32413326
    32423327
     
    32463331 */
    32473332static DECLCALLBACK(VBOXSTRICTRC)
    3248 vmmdevFastRequestHandler(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t u32, unsigned cb)
     3333vmmdevPioFastRequestHandler(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t u32, unsigned cb)
    32493334{
    32503335#ifndef IN_RING3
    3251 # if 0 /* This functionality is offered through reading the port (vmmdevFastRequestIrqAck). Leaving it here for later. */
     3336# if 0 /* This functionality is offered through reading the port (vmmdevPioFastRequestIrqAck). Leaving it here for later. */
    32523337    PVMMDEV pThis = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
    32533338    RT_NOREF(pvUser, Port, cb);
     
    33343419
    33353420#else  /* IN_RING3 */
    3336     return vmmdevRequestHandler(pDevIns, pvUser, offPort, u32, cb);
     3421    return vmmdevPioRequestHandler(pDevIns, pvUser, offPort, u32, cb);
    33373422#endif /* IN_RING3 */
    33383423}
     
    33453430 */
    33463431static DECLCALLBACK(VBOXSTRICTRC)
    3347 vmmdevFastRequestIrqAck(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t *pu32, unsigned cb)
    3348 {
    3349     PVMMDEV   pThis   = PDMDEVINS_2_DATA(pDevIns, PVMMDEV);
    3350     PVMMDEVCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PVMMDEVCC);
    3351     Assert(PDMDEVINS_2_DATA(pDevIns, PVMMDEV) == pThis);
     3432vmmdevPioFastRequestIrqAck(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t *pu32, unsigned cb)
     3433{
    33523434    RT_NOREF(pvUser, offPort);
    33533435
     
    33553437    ASSERT_GUEST_MSG_RETURN(cb == sizeof(uint32_t), ("cb=%d\n", cb), VERR_IOM_IOPORT_UNUSED);
    33563438
    3357     /* The VMMDev memory mapping might've failed, go to ring-3 in that case. */
     3439    return vmmdevFastReqIrqAck(pDevIns, pu32, VINF_IOM_R3_IOPORT_READ);
     3440}
     3441
     3442
     3443/**
     3444 * @callback_method_impl{FNIOMMMIONEWREAD, Read a MMIO register.}
     3445 */
     3446static DECLCALLBACK(VBOXSTRICTRC) vmmdevMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
     3447{
     3448    const uint32_t offReg = (uint32_t)off;
     3449    RT_NOREF(pvUser);
     3450
     3451    /* Only 32-bit accesses. */
     3452    ASSERT_GUEST_MSG_RETURN(cb == sizeof(uint32_t), ("cb=%d\n", cb), VINF_IOM_MMIO_UNUSED_FF);
     3453
     3454    Log2(("vmmdevMmioRead %RGp (offset %04X) size=%u\n", off, offReg, cb));
     3455
    33583456    VBOXSTRICTRC rcStrict;
     3457    switch (offReg)
     3458    {
     3459        case VMMDEV_MMIO_OFF_REQUEST_FAST:
     3460            rcStrict = vmmdevFastReqIrqAck(pDevIns, (uint32_t *)pv, VINF_IOM_R3_MMIO_READ);
     3461            break;
     3462        case VMMDEV_MMIO_OFF_REQUEST:
     3463        default:
     3464            Log(("VMMDev: Trying to read unimplemented register at offset %04X!\n", offReg));
     3465            rcStrict = VINF_IOM_MMIO_UNUSED_FF;
     3466            break;
     3467    }
     3468
     3469    return rcStrict;
     3470}
     3471
     3472
     3473/**
     3474 * @callback_method_impl{FNIOMMMIONEWWRITE, Write to a MMIO register.}
     3475 */
     3476static DECLCALLBACK(VBOXSTRICTRC) vmmdevMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
     3477{
     3478    const uint32_t offReg = (uint32_t)off;
     3479    RT_NOREF(pvUser);
     3480
     3481    /* Only 32-bit and 64-bit accesses. */
     3482    ASSERT_GUEST_MSG_RETURN(cb == sizeof(uint32_t) || cb == sizeof(uint64_t),
     3483                            ("cb=%u\n", cb), VINF_IOM_MMIO_UNUSED_FF);
     3484
     3485    uint64_t u64Val;
     3486    if (cb == sizeof(uint64_t))
     3487        u64Val = *(uint64_t *)pv;
     3488    else if (cb == sizeof(uint32_t))
     3489        u64Val = *(uint32_t *)pv;
     3490
     3491    Log2(("vmmdevMmioWrite %RGp (offset %04X) %#RX64 size=%u\n", off, offReg, u64Val, cb));
     3492
     3493    VBOXSTRICTRC rcStrict;
     3494    switch (offReg)
     3495    {
     3496        case VMMDEV_MMIO_OFF_REQUEST:
     3497            rcStrict = vmmdevRequestHandler(pDevIns, u64Val);
     3498            break;
     3499        case VMMDEV_MMIO_OFF_REQUEST_FAST:
    33593500#ifndef IN_RING3
    3360     if (pThisCC->CTX_SUFF(pVMMDevRAM) != NULL)
     3501            rcStrict = VINF_IOM_R3_MMIO_WRITE;
     3502#else
     3503            rcStrict = vmmdevRequestHandler(pDevIns, u64Val);
    33613504#endif
    3362     {
    3363         /* Enter critical section and check that the additions has been properly
    3364            initialized and that we're not in legacy v1.3 device mode. */
    3365         rcStrict = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, VINF_IOM_R3_IOPORT_READ);
    3366         if (rcStrict == VINF_SUCCESS)
    3367         {
    3368             if (   pThis->fu32AdditionsOk
    3369                 && !VMMDEV_INTERFACE_VERSION_IS_1_03(pThis))
    3370             {
    3371                 /*
    3372                  * Do the job.
    3373                  *
    3374                  * Note! This code is duplicated in vmmdevReqHandler_AcknowledgeEvents.
    3375                  */
    3376                 STAM_REL_COUNTER_INC(&pThis->CTX_SUFF_Z(StatFastIrqAck));
    3377 
    3378                 if (pThis->fNewGuestFilterMaskValid)
    3379                 {
    3380                     pThis->fNewGuestFilterMaskValid = false;
    3381                     pThis->fGuestFilterMask = pThis->fNewGuestFilterMask;
    3382                 }
    3383 
    3384                 *pu32 = pThis->fHostEventFlags & pThis->fGuestFilterMask;
    3385 
    3386                 pThis->fHostEventFlags &= ~pThis->fGuestFilterMask;
    3387                 pThisCC->CTX_SUFF(pVMMDevRAM)->V.V1_04.fHaveEvents = false;
    3388 
    3389                 PDMDevHlpPCISetIrqNoWait(pDevIns, 0, 0);
    3390             }
    3391             else
    3392             {
    3393                 Log(("vmmdevFastRequestIrqAck: fu32AdditionsOk=%d interfaceVersion=%#x\n", pThis->fu32AdditionsOk,
    3394                      pThis->guestInfo.interfaceVersion));
    3395                 *pu32 = UINT32_MAX;
    3396             }
    3397 
    3398             PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
    3399         }
    3400     }
    3401 #ifndef IN_RING3
    3402     else
    3403         rcStrict = VINF_IOM_R3_IOPORT_READ;
    3404 #endif
     3505            break;
     3506        default:
     3507            /* Ignore writes to unimplemented or read-only registers. */
     3508            Log(("VMMDev: Trying to write unimplemented or R/O register at offset %04X!\n", offReg));
     3509            rcStrict = VINF_SUCCESS;
     3510            break;
     3511    }
     3512
    34053513    return rcStrict;
    34063514}
     
    40914199    pHlp->pfnSSMPutBool(pSSM, pThis->fKeepCredentials);
    40924200    pHlp->pfnSSMPutBool(pSSM, pThis->fHeapEnabled);
     4201    pHlp->pfnSSMPutBool(pSSM, pThis->fMmioReq);
    40934202
    40944203    return VINF_SSM_DONT_CALL_AGAIN;
     
    41974306            return pHlp->pfnSSMSetCfgError(pSSM, RT_SRC_POS, N_("Config mismatch - fHeapEnabled: config=%RTbool saved=%RTbool"),
    41984307                                           pThis->fHeapEnabled, f);
     4308
     4309        f = false;
     4310        if (uVersion >= VMMDEV_SAVED_STATE_VERSION_MMIO_ACCESS)
     4311        {
     4312            rc = pHlp->pfnSSMGetBool(pSSM, &f);
     4313            AssertRCReturn(rc, rc);
     4314        }
     4315        if (pThis->fMmioReq != f)
     4316            return pHlp->pfnSSMSetCfgError(pSSM, RT_SRC_POS, N_("Config mismatch - fMmioReq: config=%RTbool saved=%RTbool"),
     4317                                           pThis->fMmioReq, f);
    41994318    }
    42004319
     
    47264845                                  "HeartbeatInterval|"
    47274846                                  "HeartbeatTimeout|"
     4847                                  "MmioReq|"
    47284848                                  "TestingEnabled|"
    47294849                                  "TestingMMIO|"
     
    47714891                                N_("Configuration error: Failed querying \"KeepCredentials\" as a boolean"));
    47724892
    4773     rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "HeapEnabled", &pThis->fHeapEnabled, true);
     4893    /* The heap is of no use on non x86 guest architectures. */
     4894    static const bool fHeapEnabledDef = PDMDevHlpCpuIsGuestArchX86(pDevIns);
     4895    rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "HeapEnabled", &pThis->fHeapEnabled, fHeapEnabledDef);
    47744896    if (RT_FAILURE(rc))
    47754897        return PDMDEV_SET_ERROR(pDevIns, rc,
     
    48114933                                   N_("Configuration error: Heartbeat timeout \"HeartbeatTimeout\" value (%'ull ns) is too close to the interval (%'ull ns)"),
    48124934                                   pThis->cNsHeartbeatTimeout, pThis->cNsHeartbeatInterval);
     4935
     4936    /* On everything other than x86 we have to offer the MMIO interface because port I/O is either not available or emulated through MMIO anyway. */
     4937    static const bool fMmioReqEnabledDef = !PDMDevHlpCpuIsGuestArchX86(pDevIns);
     4938    rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "MmioReq", &pThis->fMmioReq, fMmioReqEnabledDef);
     4939    if (RT_FAILURE(rc))
     4940        return PDMDEV_SET_ERROR(pDevIns, rc, N_("Configuration error: Failed querying \"MmioReq\" as a boolean"));
    48134941
    48144942#ifndef VBOX_WITHOUT_TESTING_FEATURES
     
    49385066     * so we have to do it via the mapper callback.
    49395067     */
    4940     rc = PDMDevHlpIoPortCreate(pDevIns, 1 /*cPorts*/, pPciDev, RT_MAKE_U32(0, 0), vmmdevRequestHandler, NULL /*pfnIn*/,
     5068    rc = PDMDevHlpIoPortCreate(pDevIns, 1 /*cPorts*/, pPciDev, RT_MAKE_U32(0, 0), vmmdevPioRequestHandler, NULL /*pfnIn*/,
    49415069                               NULL /*pvUser*/, "VMMDev Request Handler",  NULL, &pThis->hIoPortReq);
    49425070    AssertRCReturn(rc, rc);
    49435071
    4944     rc = PDMDevHlpIoPortCreate(pDevIns, 1 /*cPorts*/, pPciDev, RT_MAKE_U32(1, 0),  vmmdevFastRequestHandler,
    4945                                vmmdevFastRequestIrqAck, NULL, "VMMDev Fast R0/RC Requests", NULL /*pvUser*/, &pThis->hIoPortFast);
     5072    rc = PDMDevHlpIoPortCreate(pDevIns, 1 /*cPorts*/, pPciDev, RT_MAKE_U32(1, 0),  vmmdevPioFastRequestHandler,
     5073                               vmmdevPioFastRequestIrqAck, NULL, "VMMDev Fast R0/RC Requests", NULL /*pvUser*/, &pThis->hIoPortFast);
    49465074    AssertRCReturn(rc, rc);
    49475075
     
    49765104        rc = PDMDevHlpRegisterVMMDevHeap(pDevIns, NIL_RTGCPHYS, pThisCC->pVMMDevHeapR3, VMMDEV_HEAP_SIZE);
    49775105        AssertLogRelRCReturn(rc, rc);
     5106    }
     5107
     5108    if (pThis->fMmioReq)
     5109    {
     5110        rc = PDMDevHlpPCIIORegionCreateMmio(pDevIns, 3 /*iPciRegion*/, VMMDEV_MMIO_SIZE, PCI_ADDRESS_SPACE_MEM,
     5111                                            vmmdevMmioWrite, vmmdevMmioRead, NULL,
     5112                                            IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU,
     5113                                            "VMMDev MMIO Request Handler", &pThis->hMmioReq);
     5114        AssertRCReturn(rc, rc);
    49785115    }
    49795116
     
    51235260    /*
    51245261     * We map the first page of the VMMDevRAM into raw-mode and kernel contexts so we
    5125      * can handle interrupt acknowledge requests more timely (vmmdevFastRequestIrqAck).
    5126      */
    5127     rc = PDMDevHlpMmio2SetUpContext(pDevIns, pThis->hMmio2VMMDevRAM, 0, GUEST_PAGE_SIZE, (void **)&pThisCC->CTX_SUFF(pVMMDevRAM));
     5262     * can handle interrupt acknowledge requests more timely (vmmdevPioFastRequestIrqAck).
     5263     */
     5264    rc = PDMDevHlpMmio2SetUpContext(pDevIns, pThis->hMmio2VMMDevRAM, 0, VMMDEV_PAGE_SIZE, (void **)&pThisCC->CTX_SUFF(pVMMDevRAM));
    51285265    AssertRCReturn(rc, rc);
    51295266
    5130     rc = PDMDevHlpIoPortSetUpContext(pDevIns, pThis->hIoPortFast, vmmdevFastRequestHandler, vmmdevFastRequestIrqAck, NULL);
     5267    rc = PDMDevHlpIoPortSetUpContext(pDevIns, pThis->hIoPortFast, vmmdevPioFastRequestHandler, vmmdevPioFastRequestIrqAck, NULL);
    51315268    AssertRCReturn(rc, rc);
     5269
     5270    if (pThis->fMmioReq)
     5271    {
     5272        rc = PDMDevHlpMmioSetUpContext(pDevIns, pThis->hMmioReq, vmmdevMmioWrite, vmmdevMmioRead, NULL /*pvUser*/);
     5273        AssertRCReturn(rc, rc);
     5274    }
    51325275
    51335276# ifndef VBOX_WITHOUT_TESTING_FEATURES
  • trunk/src/VBox/Devices/VMMDev/VMMDevHGCM.cpp

    r98103 r100185  
    691691    if (pPtr->cPages == 1)
    692692        return true;
    693     RTGCPHYS64 Phys = pPtr->paPages[0] + GUEST_PAGE_SIZE;
     693    RTGCPHYS64 Phys = pPtr->paPages[0] + VMMDEV_PAGE_SIZE;
    694694    if (Phys != pPtr->paPages[1])
    695695        return false;
     
    699699        do
    700700        {
    701             Phys += GUEST_PAGE_SIZE;
     701            Phys += VMMDEV_PAGE_SIZE;
    702702            if (Phys != pPtr->paPages[iPage])
    703703                return false;
     
    734734    for (uint32_t iPage = 0; iPage < pPtr->cPages && cbRemaining > 0; ++iPage)
    735735    {
    736         uint32_t cbToRead = GUEST_PAGE_SIZE - offPage;
     736        uint32_t cbToRead = VMMDEV_PAGE_SIZE - offPage;
    737737        if (cbToRead > cbRemaining)
    738738            cbToRead = cbRemaining;
     
    773773    for (iPage = 0; iPage < pPtr->cPages && cbRemaining > 0; ++iPage)
    774774    {
    775         uint32_t cbToWrite = GUEST_PAGE_SIZE - offPage;
     775        uint32_t cbToWrite = VMMDEV_PAGE_SIZE - offPage;
    776776        if (cbToWrite > cbRemaining)
    777777            cbToWrite = cbRemaining;
     
    10981098                ASSERT_GUEST_RETURN(cbData <= VMMDEV_MAX_HGCM_DATA_SIZE, VERR_INVALID_PARAMETER);
    10991099
    1100                 const uint32_t offFirstPage = cbData > 0 ? GCPtr & GUEST_PAGE_OFFSET_MASK : 0;
    1101                 const uint32_t cPages       = cbData > 0 ? (offFirstPage + cbData + GUEST_PAGE_SIZE - 1) / GUEST_PAGE_SIZE : 0;
     1100                const uint32_t offFirstPage = cbData > 0 ? GCPtr & VMMDEV_PAGE_OFFSET_MASK : 0;
     1101                const uint32_t cPages       = cbData > 0 ? (offFirstPage + cbData + VMMDEV_PAGE_SIZE - 1) / VMMDEV_PAGE_SIZE : 0;
    11021102
    11031103                pGuestParm->u.ptr.cbData        = cbData;
     
    11191119
    11201120                    /* Convert the guest linear pointers of pages to physical addresses. */
    1121                     GCPtr &= ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK;
     1121                    GCPtr &= ~(RTGCPTR)VMMDEV_PAGE_OFFSET_MASK;
    11221122                    for (uint32_t iPage = 0; iPage < cPages; ++iPage)
    11231123                    {
     
    11341134
    11351135                        pGuestParm->u.ptr.paPages[iPage] = GCPhys;
    1136                         GCPtr += GUEST_PAGE_SIZE;
     1136                        GCPtr += VMMDEV_PAGE_SIZE;
    11371137                    }
    11381138                }
     
    11791179                                        ("%#x\n", pPageListInfo->flags), VERR_INVALID_FLAGS);
    11801180                /* First page offset. */
    1181                 ASSERT_GUEST_MSG_RETURN(pPageListInfo->offFirstPage < GUEST_PAGE_SIZE,
     1181                ASSERT_GUEST_MSG_RETURN(pPageListInfo->offFirstPage < VMMDEV_PAGE_SIZE,
    11821182                                        ("%#x\n", pPageListInfo->offFirstPage), VERR_INVALID_PARAMETER);
    11831183
     
    11871187                ASSERT_GUEST_MSG_RETURN(      pPageListInfo->cPages
    11881188                                           == (pGuestParm->enmType == VMMDevHGCMParmType_ContiguousPageList ? 1
    1189                                                :    RT_ALIGN_32(pPageListInfo->offFirstPage + cbData, GUEST_PAGE_SIZE)
    1190                                                  >> GUEST_PAGE_SHIFT)
     1189                                               :    RT_ALIGN_32(pPageListInfo->offFirstPage + cbData, VMMDEV_PAGE_SIZE)
     1190                                                 >> VMMDEV_PAGE_SHIFT)
    11911191                                        || pGuestParm->enmType == VMMDevHGCMParmType_PageList,
    11921192                                        ("offFirstPage=%#x cbData=%#x cPages=%#x enmType=%d\n",
     
    12031203                {
    12041204                    /* Validate page offsets */
    1205                     ASSERT_GUEST_MSG_RETURN(   !(pPageListInfo->aPages[0] & GUEST_PAGE_OFFSET_MASK)
    1206                                             || (pPageListInfo->aPages[0] & GUEST_PAGE_OFFSET_MASK) == pPageListInfo->offFirstPage,
     1205                    ASSERT_GUEST_MSG_RETURN(   !(pPageListInfo->aPages[0] & VMMDEV_PAGE_OFFSET_MASK)
     1206                                            || (pPageListInfo->aPages[0] & VMMDEV_PAGE_OFFSET_MASK) == pPageListInfo->offFirstPage,
    12071207                                            ("%#RX64 offFirstPage=%#x\n", pPageListInfo->aPages[0], pPageListInfo->offFirstPage),
    12081208                                            VERR_INVALID_POINTER);
    12091209                    uint32_t const cPages = pPageListInfo->cPages;
    12101210                    for (uint32_t iPage = 1; iPage < cPages; iPage++)
    1211                         ASSERT_GUEST_MSG_RETURN(!(pPageListInfo->aPages[iPage] & GUEST_PAGE_OFFSET_MASK),
     1211                        ASSERT_GUEST_MSG_RETURN(!(pPageListInfo->aPages[iPage] & VMMDEV_PAGE_OFFSET_MASK),
    12121212                                                ("[%#zx]=%#RX64\n", iPage, pPageListInfo->aPages[iPage]), VERR_INVALID_POINTER);
    12131213                    RT_UNTRUSTED_VALIDATED_FENCE();
  • trunk/src/VBox/Devices/VMMDev/VMMDevState.h

    r98103 r100185  
    268268    /** FLag whether CPU hotplug events are monitored */
    269269    bool                fCpuHotPlugEventsEnabled;
     270    /** Flag whether the VMM device is offering the request ports
     271     * over MMIO as well (mainly for ARM at the moment). */
     272    bool                fMmioReq;
    270273    /** Alignment padding. */
    271     bool                afPadding8[3];
     274    bool                afPadding8[2];
    272275    /** CPU hotplug event */
    273276    VMMDevCpuEventType  enmCpuHotPlugEvent;
     
    406409    /** Handle for the fast VMM request I/O port (PCI region \#0). */
    407410    IOMIOPORTHANDLE     hIoPortFast;
     411    /** Handle for the VMM request MMIO region (PCI region \#3). */
     412    IOMMMIOHANDLE       hMmioReq;
    408413    /** Handle for the VMMDev RAM (PCI region \#1). */
    409414    PGMMMIO2HANDLE      hMmio2VMMDevRAM;
     
    567572
    568573/** The saved state version. */
    569 #define VMMDEV_SAVED_STATE_VERSION                              VMMDEV_SAVED_STATE_VERSION_VMM_MOUSE_EXTENDED_DATA
     574#define VMMDEV_SAVED_STATE_VERSION                              VMMDEV_SAVED_STATE_VERSION_MMIO_ACCESS
     575/** Added support to optionally use MMIO instead of PIO for passing requests to the host (mainly for ARM). */
     576#define VMMDEV_SAVED_STATE_VERSION_MMIO_ACCESS                  20
    570577/** The saved state version with VMMDev mouse buttons state and wheel movement data. */
    571578#define VMMDEV_SAVED_STATE_VERSION_VMM_MOUSE_EXTENDED_DATA      19
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette