VirtualBox

Ignore:
Timestamp:
Dec 15, 2021 3:51:28 PM (3 years ago)
Author:
vboxsync
Message:

Improve transitional behavior, and save/load exec code. Some Rx buffer handling code optimization for speed, and make it easier to understand and maintain. Add missing function comments and improve others. Try to make debug logging even clearer and more succinct. And any other miscellaneous small improvements I could find. See BugRef(8651) Comment #171

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/Devices/VirtIO/VirtioCore.h

    r92091 r92939  
    3636# define VIRTIO_HEX_DUMP(logLevel, pv, cb, base, title) do { } while (0)
    3737#endif
     38
     39/** Marks the start of the virtio saved state (just for sanity). */
     40#define VIRTIO_SAVEDSTATE_MARKER                        UINT64_C(0x1133557799bbddff)
    3841
    3942/** Pointer to the shared VirtIO state. */
     
    5760#define VIRTIO_PAGE_SIZE                 4096                    /**< Page size used by VirtIO specification   */
    5861
    59 
    60 /* Note: The VirtIO specification, particularly rev. 0.95, and clarified in rev 1.0 for transitional devices,
    61          says the page sized used for Queue Size calculations is usually 4096 bytes, but dependent on the
    62          the transport. In an appendix of the 0.95 spec, the 'mmio device', which has not been
    63          implemented by VBox legacy device in VirtualBox, says guest must report the page size. For now
    64          will set page size to a static 4096 based on the original VBox legacy VirtIO implementation which
    65          tied it to PAGE_SIZE which appears to work (or at least good enough for most practical purposes)      */
    66 
    67 
    68 /** The following virtioCoreGCPhysChain*() functions mimic the functionality of the related RT s/g functions,
    69  *  except they work with the data type GCPhys rather than void *
     62/**
     63 * @todo Move the following virtioCoreGCPhysChain*() functions into some VirtualBox source tree
     64 *       common location and out of this code.
     65 *
     66 *       They behave identically to the S/G utilities in the RT library, except they work with that
     67 *       GCPhys data type specifically instead of void *, to avoid potentially disastrous mismatch
     68 *       between sizeof(void *) and sizeof(GCPhys).
     69 *
    7070 */
    7171typedef struct VIRTIOSGSEG                                      /**< An S/G entry                              */
     
    9191
    9292/**
    93  * VirtIO buffers are descriptor chains (scatter-gather vectors). Each buffer is described
    94  * by the index of its head descriptor, which in optionally chains to another descriptor
    95  * and so on.
    96  *
    97  * Each descriptor, [len, GCPhys] pair in the chain represents either an OUT segment (e.g. guest-to-host)
    98  * or an IN segment (host-to-guest). A VIRTQBUF is created and retured from a call to
    99  * virtioCoreR3VirtqAvailBufPeek() or virtioCoreR3VirtqAvailBufGet(). That function consolodates
    100  * the VirtIO descriptor chain into a representation, where pSgPhysSend is a GCPhys s/g buffer containing
    101  * all of the OUT descriptors and pSgPhysReturn is a GCPhys s/g buffer containing all of IN descriptors
    102  * to be filled with data on the host to return to theguest.
     93 * VirtIO buffers are descriptor chains (e.g. scatter-gather vectors). A VirtIO buffer is referred to by the index
     94 * of its head descriptor. Each descriptor optionally chains to another descriptor, and so on.
     95 *
     96 * For any given descriptor, each length and GCPhys pair in the chain represents either an OUT segment (e.g. guest-to-host)
     97 * or an IN segment (host-to-guest).
     98 *
     99 * A VIRTQBUF is created and returned from a call to either virtioCoreR3VirtqAvailBufPeek() or virtioCoreR3VirtqAvailBufGet().
     100 *
     101 * Those functions consolidate the VirtIO descriptor chain into a single representation where:
     102 *
     103 *     pSgPhysSend    GCPhys s/g buffer containing all of the (VirtIO) OUT descriptors
     104 *     pSgPhysReturn  GCPhys s/g buffer containing all of the (VirtIO)  IN descriptors
     105 *
     106 * The OUT descriptors are data sent from guest to host (dev-specific commands and/or data)
     107 * The IN are to be filled with data (converted to physical) on host, to be returned to guest
     108 *
    103109 */
    104110typedef struct VIRTQBUF
     
    166172static const VIRTIO_FEATURES_LIST s_aCoreFeatures[] =
    167173{
     174    { VIRTIO_F_VERSION_1,               "   VERSION_1            Guest driver supports VirtIO specification V1.0+ (e.g. \"modern\")\n" },
     175    { VIRTIO_F_RING_EVENT_IDX,          "   RING_EVENT_IDX       Enables use_event and avail_event fields described in 2.4.7, 2.4.8\n" },
    168176    { VIRTIO_F_RING_INDIRECT_DESC,      "   RING_INDIRECT_DESC   Driver can use descriptors with VIRTQ_DESC_F_INDIRECT flag set\n" },
    169     { VIRTIO_F_RING_EVENT_IDX,          "   RING_EVENT_IDX       Enables use_event and avail_event fields described in 2.4.7, 2.4.8\n" },
    170     { VIRTIO_F_VERSION_1,               "   VERSION              Used to detect legacy drivers.\n" },
    171177};
    172 
    173178
    174179#define VIRTIO_DEV_INDEPENDENT_FEATURES_OFFERED ( 0 )            /**< TBD: Add VIRTIO_F_INDIRECT_DESC           */
     
    202207    kvirtIoVmStateChangedFor32BitHack = 0x7fffffff
    203208} VIRTIOVMSTATECHANGED;
    204 
    205 
    206209
    207210/** @def Virtio Device PCI Capabilities type codes */
     
    305308typedef struct VIRTQUEUE
    306309{
    307     RTGCPHYS                    GCPhysVirtqDesc;                  /**< (MMIO) PhysAdr per-Q desc structs   GUEST */
    308     RTGCPHYS                    GCPhysVirtqAvail;                 /**< (MMIO) PhysAdr per-Q avail structs  GUEST */
    309     RTGCPHYS                    GCPhysVirtqUsed;                  /**< (MMIO) PhysAdr per-Q used structs   GUEST */
    310     uint16_t                    uMsixVector;                      /**< (MMIO) Per-queue vector for MSI-X   GUEST */
    311     uint16_t                    uEnable;                          /**< (MMIO) Per-queue enable             GUEST */
    312     uint16_t                    uNotifyOffset;                    /**< (MMIO) per-Q notify offset          HOST */
    313     uint16_t                    uQueueSize;                       /**< (MMIO) Per-queue size          HOST/GUEST */
     310    RTGCPHYS                    GCPhysVirtqDesc;                  /**< (MMIO) Addr of virtq's desc  ring   GUEST */
     311    RTGCPHYS                    GCPhysVirtqAvail;                 /**< (MMIO) Addr of virtq's avail ring   GUEST */
     312    RTGCPHYS                    GCPhysVirtqUsed;                  /**< (MMIO) Addr of virtq's used  ring   GUEST */
     313    uint16_t                    uMsixVector;                      /**< (MMIO) MSI-X vector                 GUEST */
     314    uint16_t                    uEnable;                          /**< (MMIO) Queue enable flag            GUEST */
     315    uint16_t                    uNotifyOffset;                    /**< (MMIO) Notification offset for queue HOST */
     316    uint16_t                    uQueueSize;                       /**< (MMIO) Size of queue           HOST/GUEST */
    314317    uint16_t                    uAvailIdxShadow;                  /**< Consumer's position in avail ring         */
    315318    uint16_t                    uUsedIdxShadow;                   /**< Consumer's position in used ring          */
     
    317320    char                        szName[32];                       /**< Dev-specific name of queue                */
    318321    bool                        fUsedRingEvent;                   /**< Flags if used idx to notify guest reached */
    319     uint8_t                     padding[3];
     322    bool                        fAttached;                        /**< Flags if dev-specific client attached     */
    320323} VIRTQUEUE, *PVIRTQUEUE;
    321324
     
    331334    uint64_t                    uDeviceFeatures;                  /**< (MMIO) Host features offered         HOST */
    332335    uint64_t                    uDriverFeatures;                  /**< (MMIO) Host features accepted       GUEST */
     336    uint32_t                    fDriverFeaturesWritten;           /**< (MMIO) Host features complete tracking    */
    333337    uint32_t                    uDeviceFeaturesSelect;            /**< (MMIO) hi/lo select uDeviceFeatures GUEST */
    334338    uint32_t                    uDriverFeaturesSelect;            /**< (MMIO) hi/lo select uDriverFeatures GUEST */
     
    343347    uint8_t                     fMsiSupport;                      /**< Flag set if using MSI instead of ISR      */
    344348    uint16_t                    uVirtqSelect;                     /**< (MMIO) queue selector               GUEST */
    345     uint32_t                    fLegacyDriver;                    /**< Set if guest driver < VirtIO 1.0          */
     349    uint32_t                    fLegacyDriver;                    /**< Set if guest drv < VirtIO 1.0 and allowed */
     350    uint32_t                    fOfferLegacy;                     /**< Set at init call from dev-specific code   */
    346351
    347352    /** @name The locations of the capability structures in PCI config space and the BAR.
     
    354359    /** @} */
    355360
    356 
    357 
    358361    IOMMMIOHANDLE               hMmioPciCap;                      /**< MMIO handle of PCI cap. region (\#2)      */
    359362    IOMIOPORTHANDLE             hLegacyIoPorts;                   /**< Handle of legacy I/O port range.          */
    360 
    361363
    362364#ifdef VBOX_WITH_STATISTICS
     
    374376    STAMPROFILEADV              StatWriteRC;                       /** I/O port and MMIO R3 Write profiling      */
    375377#endif
    376 
    377 
    378378    /** @} */
     379
    379380} VIRTIOCORE;
    380381
     
    389390     * @{  */
    390391    /**
    391      * Implementation-specific client callback to report VirtIO version as modern or legacy.
    392      * That's the only meaningful distinction in the VirtIO specification. Beyond that
    393      * versioning is loosely discernable through feature negotiation. There will be two callbacks,
    394      * the first indicates the guest driver is considered legacy VirtIO, as it is critical to
    395      * assume that initially. A 2nd callback will occur during feature negotiation
    396      * which will indicate the guest is modern, if the guest acknowledges VIRTIO_F_VERSION_1,
    397      * feature, or legacy if the feature isn't negotiated. That 2nd callback allows
    398      * the device-specific code to configure its behavior in terms of both guest version and features.
     392     * Implementation-specific client callback to report VirtIO when feature negotiation is
     393     * complete. It should be invoked by the VirtIO core only once.
    399394     *
    400      * @param   pVirtio    Pointer to the shared virtio state.
    401      * @param   fModern    True if guest driver identified itself as modern (e.g. VirtIO 1.0 featured)
     395     * @param   pVirtio           Pointer to the shared virtio state.
     396     * @param   fDriverFeatures   Bitmask of features the guest driver has accepted/declined.
     397     * @param   fLegacy           true if legacy mode offered and until guest driver identifies itself
     398     *                            as modern(e.g. VirtIO 1.0 featured)
    402399     */
    403     DECLCALLBACKMEMBER(void, pfnGuestVersionHandler,(PVIRTIOCORE pVirtio, uint32_t fModern));
     400    DECLCALLBACKMEMBER(void, pfnFeatureNegotiationComplete, (PVIRTIOCORE pVirtio, uint64_t fDriverFeatures, uint32_t fLegacy));
    404401
    405402    /**
     
    436433    DECLCALLBACKMEMBER(int, pfnDevCapWrite,(PPDMDEVINS pDevIns, uint32_t offCap, const void *pvBuf, uint32_t cbWrite));
    437434
    438 
    439435    /**
    440436     * When guest-to-host queue notifications are enabled, the guest driver notifies the host
     
    469465{
    470466    /**
    471      * When guest-to-host queue notifications are enabled, the guest driver notifies the host
    472      * that the avail queue has buffers, and this callback informs the client.
     467     * This callback notifies the device-specific portion of this device implementation (if guest-to-host
     468     * queue notifications are enabled), that the guest driver has notified the host (this device)
     469     * that the VirtIO "avail" ring of a queue has some new s/g buffers added by the guest VirtIO driver.
    473470     *
    474471     * @param   pVirtio    Pointer to the shared virtio state.
     
    488485} VIRTIOCORERC;
    489486
    490 
    491487/** @typedef VIRTIOCORECC
    492488 * The instance data for the current context. */
    493489typedef CTX_SUFF(VIRTIOCORE) VIRTIOCORECC;
    494490
    495 
    496491/** @name API for VirtIO parent device
    497492 * @{ */
     
    502497 * This should be called from PDMDEVREGR3::pfnConstruct.
    503498 *
    504  * @param   pDevIns                 The device instance.
     499 * @param   pDevIns                 Device instance.
    505500 * @param   pVirtio                 Pointer to the shared virtio state.  This
    506501 *                                  must be the first member in the shared
     
    519514int virtioCoreR3Init(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC,
    520515                          PVIRTIOPCIPARAMS pPciParams, const char *pcszInstance,
    521                           uint64_t fDevSpecificFeatures, void *pvDevSpecificCfg, uint16_t cbDevSpecificCfg);
    522 
     516                          uint64_t fDevSpecificFeatures, uint32_t fOfferLegacy, void *pvDevSpecificCfg, uint16_t cbDevSpecificCfg);
    523517/**
    524518 * Initiate orderly reset procedure. This is an exposed API for clients that might need it.
     
    532526 * 'Attaches' host device-specific implementation's queue state to host VirtIO core
    533527 * virtqueue management infrastructure, informing the virtio core of the name of the
    534  * queue associated with the queue number. uVirtqNbr is used as the 'handle' for virt queues
    535  * in this API (and is opaquely the index into the VirtIO core's array of queue state).
    536  *
    537  * Virtqueue numbers are VirtIO specification defined (i.e. they are unique within each
    538  * VirtIO device type).
     528 * queue to associate with the queue number.
     529
     530 * Note: uVirtqNbr (ordinal index) is used as the 'handle' for virtqs in this VirtioCore
     531 * implementation's API (as an opaque selector into the VirtIO core's array of queues' states).
     532 *
     533 * Virtqueue numbers are actually VirtIO-specification defined device-specifically
     534 * (i.e. they are unique within each VirtIO device type), but are in some cases scalable
     535 * so only the pattern of queue numbers is defined by the spec and implementations may contain
     536 * a self-determined plurality of queues.
    539537 *
    540538 * @param   pVirtio     Pointer to the shared virtio state.
     
    547545
    548546/**
    549  * Enables or disables a virtq
     547 * Detaches host device-specific implementation's queue state from the host VirtIO core
     548 * virtqueue management infrastructure, informing the VirtIO core that the queue is
     549 * not utilized by the device-specific code.
    550550 *
    551551 * @param   pVirtio     Pointer to the shared virtio state.
    552552 * @param   uVirtqNbr   Virtq number
    553  * @param   fEnable     Flags whether to enable or disable the virtq
    554  *
    555  */
    556 void  virtioCoreVirtqEnable(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr, bool fEnable);
     553 * @param   pcszName    Name to give queue
     554 *
     555 * @returns VBox status code.
     556 */
     557int  virtioCoreR3VirtqDetach(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr);
     558
     559/**
     560 * Checks to see whether queue is attached to core.
     561 *
     562 * @param   pVirtio     Pointer to the shared virtio state.
     563 * @param   uVirtqNbr   Virtq number
     564 *
     565 * Returns boolean true or false indicating whether dev-specific reflection
     566 * of queue is attached to core.
     567 */
     568bool  virtioCoreR3VirtqIsAttached(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr);
     569
     570/**
     571 * Checks to see whether queue is enabled.
     572 *
     573 * @param   pVirtio     Pointer to the shared virtio state.
     574 * @param   uVirtqNbr   Virtq number
     575 *
     576 * Returns boolean true or false indicating core queue enable state.
     577 * There is no API function to enable the queue, because the actual enabling is handled
     578 * by the guest via MMIO.
     579 *
     580 * NOTE: Guest VirtIO driver's claim over this state is overridden (which violates VirtIO 1.0 spec
     581 * in a carefully controlled manner) in the case where the queue MUST be disabled, due to observed
     582 * control queue corruption (e.g. null GCPhys virtq base addr) while restoring legacy-only device's
     583 * (DevVirtioNet.cpp) as a way to flag that the queue is unusable-as-saved and must be removed.
     584 * That is all handled in the load/save exec logic. Device reset could potentially, depending on
     585 * parameters passed from host VirtIO device to guest VirtIO driver, result in guest re-establishing
     586 * queue, except, in that situation, the queue operational state would be valid.
     587 */
     588bool  virtioCoreR3VirtqIsEnabled(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr);
    557589
    558590/**
    559591 * Enable or disable notification for the specified queue.
    560592 *
    561  * With notification enabled, the guest driver notifies the host device (via MMIO
    562  * to the queue notification offset describe in VirtIO 1.0, 4.1.4.4 "Notification Structure Layout")
    563  * whenever the guest driver adds a new entry to the avail ring of the respective queue.
    564  *
    565  * Note: In the VirtIO world, the device sets flags in the used ring to communicate to the driver how to
    566  * handle notifications for the avail ring and the drivers sets flags in the avail ring to communicate
    567  * to the device how to handle sending interrupts for the used ring.
     593 * When queue notifications are enabled, the guest VirtIO driver notifies host VirtIO device
     594 * (via MMIO, see VirtIO 1.0, 4.1.4.4 "Notification Structure Layout") whenever guest driver adds
     595 * a new s/g buffer to the "avail" ring of the queue.
     596 *
     597 * Note: VirtIO queue layout includes flags the device controls in "used" ring to inform guest
     598 * driver if it should notify host of guest's buffer additions to the "avail" ring, and
     599 * conversely, the guest driver sets flags in the "avail" ring to communicate to host device
     600 * whether or not to interrupt guest when it adds buffers to used ring.
    568601 *
    569602 * @param   pVirtio     Pointer to the shared virtio state.
     
    581614
    582615/**
    583  * Displays the VirtIO spec-related features offered and their accepted/declined status
    584  * by both the VirtIO core and dev-specific device code (which invokes this function).
    585  * The result is a comprehensive list of available features the VirtIO specification
    586  * defines, which ones were actually offered by the device, and which ones were accepted
    587  * by the guest driver, thus providing a legible summary view of the configuration
    588  * the device is operating with.
    589  *
     616 * Displays a well-formatted human-readable translation of otherwise inscrutable bitmasks
     617 * that embody features VirtIO specification definitions, indicating: Totality of features
     618 * that can be implemented by host and guest, which features were offered by the host, and
     619 * which were actually accepted by the guest. It displays it as a summary view of the device's
     620 * finalized operational state (host-guest negotiated architecture) in such a way that shows
     621 * which options are available for implementing or enabling.
     622 *
     623 * The non-device-specific VirtIO features list are managed by core API (e.g. implied).
     624 * Only dev-specific features must be passed as parameter.
     625
    590626 * @param   pVirtio     Pointer to the shared virtio state.
    591627 * @param   pHlp        Pointer to the debug info hlp struct
    592  * @param   s_aDevSpecificFeatures
    593  *                      Features specification lists for device-specific implementation
    594  *                      (i.e: net controller, scsi controller ...)
     628 * @param   s_aDevSpecificFeatures  Dev-specific features (virtio-net, virtio-scsi...)
    595629 * @param   cFeatures   Number of features in aDevSpecificFeatures
    596630 */
     
    599633
    600634/*
    601  * Debuging assist feature displays the state of the VirtIO core code, which includes
     635 * Debug-assist utility function to display state of the VirtIO core code, including
    602636 * an overview of the state of all of the queues.
    603637 *
     
    608642 *
    609643 * This is implemented currently to be invoked by the inheriting device-specific code
    610  * (see DevVirtioNet for an example, which receives the debugvm callback directly).
    611  * DevVirtioNet lists the available sub-options if no arguments are provided. In that
     644 * (see the VirtualBox virtio-net (VirtIO network controller) device implementation
     645 * for an example of code that receives the debugvm callback directly).
     646 *
     647 * DevVirtioNet lists available sub-options if no arguments are provided. In that
    612648 * example this virtq info related function is invoked hierarchically when virtio-net
    613649 * displays its device-specific queue info.
     
    629665
    630666/**
    631  * This function is identical to virtioCoreR3VirtqAvailBufGet(), except it doesn't 'consume'
    632  * the buffer from the avail ring of the virtq. The peek operation becomes identical to a get
    633  * operation if virtioCoreR3VirtqAvailRingNext() is called to consume the buffer from the avail ring,
    634  * at which point virtioCoreR3VirtqUsedBufPut() must be called to complete the roundtrip
    635  * transaction by putting the descriptor on the used ring.
    636  *
     667 * This function is identical to virtioCoreR3VirtqAvailBufGet(), *except* it doesn't consume
     668 * peeked buffer from avail ring of the virtq. The function *becomes* identical to the
     669 * virtioCoreR3VirtqAvailBufGet() only if virtioCoreR3VirtqAvailRingNext() is invoked to
     670 * consume buf from the queue's avail ring, followed by invocation of virtioCoreR3VirtqUsedBufPut(),
     671 * to hand host-processed buffer back to guest, which completes guest-initiated virtq buffer circuit.
    637672 *
    638673 * @param   pDevIns     The device instance.
     
    652687/**
    653688 * This function fetches the next buffer (descriptor chain) from the VirtIO "avail" ring of
    654  * indicated queue, and converts the buf's s/g vectors into OUT (e.g. guest-to-host)
     689 * indicated queue, separating the buf's s/g vectors into OUT (e.g. guest-to-host)
    655690 * components and and IN (host-to-guest) components.
    656691 *
    657  * The caller is responsible for GCPhys to host virtual memory conversions. If the
     692 * Caller is responsible for GCPhys to host virtual memory conversions. If the
    658693 * virtq buffer being peeked at is "consumed", virtioCoreR3VirtqAvailRingNext() must
    659  * be called and in that case virtioCoreR3VirtqUsedBufPut() must be called to
    660  * complete the roundtrip virtq transaction.
     694 * be called, and after that virtioCoreR3VirtqUsedBufPut() must be called to
     695 * complete the buffer transfer cycle with the guest.
    661696 *
    662697 * @param   pDevIns     The device instance.
     
    678713
    679714/**
    680  * Fetches a specific descriptor chain using avail ring of indicated queue and converts the descriptor
    681  * chain into its OUT (to device) and IN (to guest) components.
     715 * Fetches a specific descriptor chain using avail ring of indicated queue and converts the
     716 * descriptor chain into its OUT (to device) and IN (to guest) components.
    682717 *
    683718 * The caller is responsible for GCPhys to host virtual memory conversions and *must*
     
    704739/**
    705740 * Returns data to the guest to complete a transaction initiated by virtioCoreR3VirtqAvailBufGet(),
    706  * or virtioCoreR3VirtqAvailBufPeek()/virtioCoreR3VirtqBufSync() call pairs to complete each
    707  * intervening a roundtrip transaction, ultimately putting each descriptor chain pulled from the
    708  * avail ring of a queue onto the used ring of the queue. wherein I/O transactions are always
    709  * initiated by the guest and completed by the host. In other words, for the host to send any
    710  * data to the guest, the guest must provide buffers, for the host to fill, via the avail ring
    711  * of the virtq.
     741 * (or virtioCoreR3VirtqAvailBufPeek()/virtioCoreR3VirtqBufSync() call pair), to complete each
     742 * buffer transfer transaction (guest-host buffer cycle), ultimately moving each descriptor chain
     743 * from the avail ring of a queue onto the used ring of the queue. Note that VirtIO buffer
     744 * transactions are *always* initiated by the guest and completed by the host. In other words,
     745 * for the host to send any I/O related data to the guest (and in some cases configuration data),
     746 * the guest must provide buffers via the virtq's avail ring, for the host to fill.
    712747 *
    713748 * At some some point virtioCoreR3VirtqUsedRingSync() must be called to return data to the guest,
    714  * completing all pending virtioCoreR3VirtqAvailBufPut() transactions that have accumulated since
    715  * the last call to virtioCoreR3VirtqUsedRingSync()
    716 
    717  * @note This does a write-ahead to the used ring of the guest's queue. The data
    718  *       written won't be seen by the guest until the next call to virtioCoreVirtqUsedRingSync()
    719  *
     749 * completing all pending virtioCoreR3VirtqAvailBufPut() operations that have accumulated since
     750 * the last call to virtioCoreR3VirtqUsedRingSync().
     751
     752 * @note This function effectively performs write-ahead to the used ring of the virtq.
     753 *       Data written won't be seen by the guest until the next call to virtioCoreVirtqUsedRingSync()
    720754 *
    721755 * @param   pDevIns         The device instance (for reading).
     
    729763 *                          buffer originally pulled from the queue.
    730764 *
    731  * @param   fFence          If true, put up copy fence (memory barrier) after
     765 * @param   fFence          If true (default), put up copy-fence (memory barrier) after
    732766 *                          copying to guest phys. mem.
    733767 *
     
    741775 */
    742776int virtioCoreR3VirtqUsedBufPut(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr, PRTSGBUF pSgVirtReturn,
    743                                  PVIRTQBUF pVirtqBuf, bool fFence);
     777                                 PVIRTQBUF pVirtqBuf, bool fFence = true);
     778
     779
     780/**
     781 * Quicker variant of the same-named function (directly above) that it overloads.
     782 * This variant accepts as input a pointer to a buffer and a count, instead of an
     783 * S/G buffer, thus it doesn't have to copy between two S/G buffers and avoids some overhead.
     784 *
     785 * @param   pDevIns         The device instance (for reading).
     786 * @param   pVirtio         Pointer to the shared virtio state.
     787 * @param   uVirtqNbr       Virtq number
     788 * @param   cb              Number of bytes to add to copy to phys. buf.
     789 * @param   pv              Virtual mem buf to copy to phys buf.
     790 * @param   cbEnqueue       How many bytes in packet to enqueue (0 = don't enqueue)
     791 * @param   fFence          If true (default), put up copy-fence (memory barrier) after
     792 *                          copying to guest phys. mem.
     793 *
     794 * @returns VBox status code.
     795 * @retval  VINF_SUCCESS       Success
     796 * @retval  VERR_INVALID_STATE VirtIO not in ready state
     797 * @retval  VERR_NOT_AVAILABLE Virtq is empty
     798 *
     799 * @note    This function will not release any reference to pVirtqBuf.  The
     800 *          caller must take care of that.
     801 */
     802int virtioCoreR3VirtqUsedBufPut(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq, size_t cb, const void *pv,
     803                            PVIRTQBUF pVirtqBuf, uint32_t cbEnqueue, bool fFence = true);
     804
     805
    744806/**
    745807 * Advance index of avail ring to next entry in specified virtq (see virtioCoreR3VirtqAvailBufPeek())
     
    751813
    752814/**
    753  * Checks to see if guest has acknowledged device's VIRTIO_F_VERSION_1 feature.
    754  * If not, it's presumed to be a VirtIO legacy guest driver. Note that legacy drivers
    755  * may start using the device prematurely, as opposed to the rigorously sane protocol
    756  * prescribed by the "modern" VirtIO spec. Early access implies a legacy driver.
    757  * Therefore legacy mode is the assumption until feature negotiation.
     815 * Checks to see if guest has accepted host device's VIRTIO_F_VERSION_1 (i.e. "modern")
     816 * behavioral modeling, indicating guest agreed to comply with the modern VirtIO 1.0+ specification.
     817 * Otherwise unavoidable presumption is that the host device is dealing with legacy VirtIO
     818 * guest driver, thus must be prepared to cope with less mature architecture and behaviors
     819 * from the prototype era of VirtIO (see comments in the PDM-invoked device constructor for more information).
    758820 *
    759821 * @param   pVirtio      Pointer to the virtio state.
     
    761823int virtioCoreIsLegacyMode(PVIRTIOCORE pVirtio);
    762824
     825/**
     826 * This VirtIO transitional device supports "modern" (rev 1.0+) as well as "legacy" (e.g. < 1.0) VirtIO drivers.
     827 * Some legacy guest drivers are known to mishandle PCI bus mastering wherein the PCI flavor of GC phys
     828 * access functions can't be used. The following wrappers select the memory access method based on whether the
     829 * device is operating in legacy mode or not.
     830 */
     831DECLINLINE(int) virtioCoreGCPhysWrite(PVIRTIOCORE pVirtio, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, void *pvBuf, size_t cbWrite)
     832{
     833    int rc;
     834    if (virtioCoreIsLegacyMode(pVirtio))
     835        rc = PDMDevHlpPhysWrite(pDevIns, GCPhys, pvBuf, cbWrite);
     836    else
     837        rc = PDMDevHlpPCIPhysWrite(pDevIns, GCPhys, pvBuf, cbWrite);
     838    return rc;
     839}
     840
     841DECLINLINE(int) virtioCoreGCPhysRead(PVIRTIOCORE pVirtio, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
     842{
     843    int rc;
     844    if (virtioCoreIsLegacyMode(pVirtio))
     845        rc = PDMDevHlpPhysRead(pDevIns, GCPhys, pvBuf, cbRead);
     846    else
     847        rc = PDMDevHlpPCIPhysRead(pDevIns, GCPhys, pvBuf, cbRead);
     848    return rc;
     849}
     850
     851/*
     852 * (See comments for corresponding function in sg.h)
     853 */
    763854DECLINLINE(void) virtioCoreGCPhysChainInit(PVIRTIOSGBUF pGcSgBuf, PVIRTIOSGSEG paSegs, size_t cSegs)
    764855{
     
    782873}
    783874
     875/*
     876 * (See comments for corresponding function in sg.h)
     877 */
    784878DECLINLINE(RTGCPHYS) virtioCoreGCPhysChainGet(PVIRTIOSGBUF pGcSgBuf, size_t *pcbData)
    785879{
     
    826920}
    827921
     922/*
     923 * (See comments for corresponding function in sg.h)
     924 */
    828925DECLINLINE(void) virtioCoreGCPhysChainReset(PVIRTIOSGBUF pGcSgBuf)
    829926{
     
    843940}
    844941
     942/*
     943 * (See comments for corresponding function in sg.h)
     944 */
    845945DECLINLINE(RTGCPHYS) virtioCoreGCPhysChainAdvance(PVIRTIOSGBUF pGcSgBuf, size_t cbAdvance)
    846946{
     
    860960}
    861961
     962/*
     963 * (See comments for corresponding function in sg.h)
     964 */
    862965DECLINLINE(RTGCPHYS) virtioCoreGCPhysChainGetNextSeg(PVIRTIOSGBUF pGcSgBuf, size_t *pcbSeg)
    863966{
     
    871974}
    872975
    873 DECLINLINE(size_t) virtioCoreGCPhysChainCalcBufSize(PVIRTIOSGBUF pGcSgBuf)
     976/**
     977 * Calculate the length of a GCPhys s/g buffer by tallying the size of each segment.
     978 *
     979 * @param   pGcSgBuf        Guest Context (GCPhys) S/G buffer to calculate length of
     980 */
     981DECLINLINE(size_t) virtioCoreGCPhysChainCalcBufSize(PCVIRTIOSGBUF pGcSgBuf)
    874982{
    875983    size_t   cb = 0;
    876984    unsigned i  = pGcSgBuf->cSegs;
    877      while (i-- > 0)
    878          cb += pGcSgBuf->paSegs[i].cbSeg;
    879      return cb;
    880  }
    881 
     985    while (i-- > 0)
     986        cb += pGcSgBuf->paSegs[i].cbSeg;
     987    return cb;
     988}
     989
     990/*
     991 * (See comments for corresponding function in sg.h)
     992 */
     993DECLINLINE(size_t) virtioCoreGCPhysChainCalcLengthLeft(PVIRTIOSGBUF pGcSgBuf)
     994{
     995    size_t   cb = pGcSgBuf->cbSegLeft;
     996    unsigned i  = pGcSgBuf->cSegs;
     997    while (i-- > pGcSgBuf->idxSeg + 1)
     998        cb += pGcSgBuf->paSegs[i].cbSeg;
     999    return cb;
     1000}
    8821001#define VIRTQNAME(a_pVirtio, a_uVirtq) ((a_pVirtio)->aVirtqueues[(a_uVirtq)].szName)
    8831002
    8841003/**
    885  * Add some bytes to a virtq (s/g) buffer, converting them from virtual memory to GCPhys
    886  *
    887  * To be performant it is left to the caller to validate the size of the buffer with regard
    888  * to data being pulled from it to avoid overruns/underruns.
      1004 * Convert and append bytes from a virtual-memory simple buffer to VirtIO guest's
      1005 * physical memory described by a buffer pulled from the avail ring of a virtq.
    8891006 *
    8901007 * @param   pVirtio     Pointer to the shared virtio state.
    891  * @param   pVirtqBuf   output: virtq buffer
     1008 * @param   pVirtqBuf   VirtIO buffer to fill
    8921009 * @param   pv          input: virtual memory buffer to receive bytes
    8931010 * @param   cb          number of bytes to add to the s/g buffer.
     
    8951012DECLINLINE(void) virtioCoreR3VirqBufFill(PVIRTIOCORE pVirtio, PVIRTQBUF pVirtqBuf, void *pv, size_t cb)
    8961013{
    897     uint8_t *pb = (uint8_t *)pv;
    898     size_t cbLim = RT_MIN(pVirtqBuf->cbPhysReturn, cb);
    899     while (cbLim)
     1014    uint8_t *pvBuf = (uint8_t *)pv;
     1015    size_t cbRemain = cb, cbTotal = 0;
     1016    PVIRTIOSGBUF pSgPhysReturn = pVirtqBuf->pSgPhysReturn;
     1017    while (cbRemain)
    9001018    {
    901         size_t cbSeg = cbLim;
    902         RTGCPHYS GCPhys = virtioCoreGCPhysChainGetNextSeg(pVirtqBuf->pSgPhysReturn, &cbSeg);
    903         PDMDevHlpPCIPhysWrite(pVirtio->pDevInsR3, GCPhys, pb, cbSeg);
    904         pb += cbSeg;
    905         cbLim -= cbSeg;
    906         pVirtqBuf->cbPhysSend -= cbSeg;
     1019        uint32_t cbBounded = RT_MIN(pSgPhysReturn->cbSegLeft, cbRemain);
     1020        Assert(cbBounded > 0);
     1021        virtioCoreGCPhysWrite(pVirtio, CTX_SUFF(pVirtio->pDevIns), (RTGCPHYS)pSgPhysReturn->GCPhysCur, pvBuf, cbBounded);
     1022        virtioCoreGCPhysChainAdvance(pSgPhysReturn, cbBounded);
     1023        pvBuf += cbBounded;
     1024        cbRemain -= cbBounded;
     1025        cbTotal += cbBounded;
    9071026    }
    908     LogFunc(("Added %d/%d bytes to %s buffer, head idx: %u (%d bytes remain)\n",
    909              cb - cbLim, cb, VIRTQNAME(pVirtio, pVirtqBuf->uVirtq),
    910              pVirtqBuf->uHeadIdx, pVirtqBuf->cbPhysReturn));
    911 }
    912 
    913 /**
    914  * Extract some bytes out of a virtq (s/g) buffer, converting them from GCPhys to virtual memory
    915  *
    916  * To be performant it is left to the caller to validate the size of the buffer with regard
    917  * to data being pulled from it to avoid overruns/underruns.
     1027    LogFunc(("Appended %d bytes to guest phys buf [head: %u]. %d bytes unused in buf.)\n",
     1028             cbTotal, pVirtqBuf->uHeadIdx, virtioCoreGCPhysChainCalcLengthLeft(pSgPhysReturn)));
     1029}
     1030
     1031/**
      1032 * Extract some bytes from a virtq s/g buffer, converting them from GCPhys space
      1033 * to ordinary virtual memory (i.e. making data directly accessible to host device code)
     1034 *
     1035 * As a performance optimization, it is left to the caller to validate buffer size.
    9181036 *
    9191037 * @param   pVirtio     Pointer to the shared virtio state.
     
    9371055    LogFunc(("Drained %d/%d bytes from %s buffer, head idx: %u (%d bytes left)\n",
    9381056             cb - cbLim, cb, VIRTQNAME(pVirtio, pVirtqBuf->uVirtq),
    939              pVirtqBuf->uHeadIdx, pVirtqBuf->cbPhysSend));
     1057             pVirtqBuf->uHeadIdx, virtioCoreGCPhysChainCalcLengthLeft(pVirtqBuf->pSgPhysReturn)));
    9401058}
    9411059
     
    10161134 * VirtIO implementation to identify this device's operational configuration after features
    10171135 * have been negotiated with guest VirtIO driver. Feature negotiation entails host indicating
    1018  * to guest which features it supports, then guest accepting among those offered which features
      1136 * to guest which features it supports, then guest accepting, from among those offered, which features
    10191137 * it will enable. That becomes the agreement between the host and guest. The bitmask containing
    10201138 * virtio core features plus device-specific features is provided as a parameter to virtioCoreR3Init()
     
    10311149
    10321150/**
    1033  * Get the the name of the VM state change associated with the enumeration variable
     1151 * Get name of the VM state change associated with the enumeration variable
    10341152 *
    10351153 * @param enmState       VM state (enumeration value)
     
    10781196/**
    10791197 * Debug assist for any consumer device code
    1080 &
    10811198 * Do a hex dump of memory in guest physical context
    10821199 *
     
    10931210 */
    10941211
    1095 /**
    1096  * Calculate the length of a GCPhys s/g buffer by tallying the size of each segment.
    1097  *
    1098  * @param   pGcSgBuf        Guest Context (GCPhys) S/G buffer to calculate length of
    1099  */
    1100 DECLINLINE(size_t) virtioCoreGCPhysChainCalcBufSize(PCVIRTIOSGBUF pGcSgBuf)
    1101 {
    1102     size_t   cb = 0;
    1103     unsigned i  = pGcSgBuf->cSegs;
    1104     while (i-- > 0)
    1105         cb += pGcSgBuf->paSegs[i].cbSeg;
    1106     return cb;
    1107 }
    1108 
    1109 /**
    1110  * This VirtIO transitional device supports "modern" (rev 1.0+) as well as "legacy" (e.g. < 1.0) VirtIO drivers.
    1111  * Some legacy guest drivers are known to mishandle PCI bus mastering wherein the PCI flavor of GC phys
    1112  * access functions can't be used. The following wrappers select the mem access method based on whether the
    1113  * device is operating in legacy mode or not.
    1114  */
    1115 DECLINLINE(int) virtioCoreGCPhysWrite(PVIRTIOCORE pVirtio, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, void *pvBuf, size_t cbWrite)
    1116 {
    1117     int rc;
    1118     if (virtioCoreIsLegacyMode(pVirtio))
    1119         rc = PDMDevHlpPhysWrite(pDevIns, GCPhys, pvBuf, cbWrite);
    1120     else
    1121         rc = PDMDevHlpPCIPhysWrite(pDevIns, GCPhys, pvBuf, cbWrite);
    1122     return rc;
    1123 }
    1124 
    1125 DECLINLINE(int) virtioCoreGCPhysRead(PVIRTIOCORE pVirtio, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
    1126 {
    1127     int rc;
    1128     if (virtioCoreIsLegacyMode(pVirtio))
    1129         rc = PDMDevHlpPhysRead(pDevIns, GCPhys, pvBuf, cbRead);
    1130     else
    1131         rc = PDMDevHlpPCIPhysRead(pDevIns, GCPhys, pvBuf, cbRead);
    1132     return rc;
    1133 }
    1134 
    11351212/** Misc VM and PDM boilerplate */
    1136 int      virtioCoreR3SaveExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM);
    1137 int      virtioCoreR3LoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM);
     1213int      virtioCoreR3SaveExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t cQueues);
     1214int      virtioCoreR3ModernDeviceLoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uTestVersion, uint32_t cQueues);
     1215int      virtioCoreR3LegacyDeviceLoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uVirtioLegacy_3_1_Beta);
    11381216void     virtioCoreR3VmStateChanged(PVIRTIOCORE pVirtio, VIRTIOVMSTATECHANGED enmState);
    11391217void     virtioCoreR3Term(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC);
     
    11471225 * cb, pv and fWrite are implicit parameters and must be defined by the invoker.
    11481226 */
    1149 
    11501227#ifdef LOG_ENABLED
    11511228
     
    12011278 * the memory described by cb and pv.
    12021279 *
    1203  * cb, pv and fWrite are implicit parameters and must be defined by the invoker.
     1280 * cb, pv and fWrite are implicit parameters and must be defined by invoker.
    12041281 */
    12051282#define VIRTIO_DEV_CONFIG_ACCESS(member, tCfgStruct, uOffsetOfAccess, pCfgStruct) \
     
    12161293/**
    12171294 * Copies bytes into memory described by cb, pv from the specified member field of the config struct.
    1218  * The operation is a nop and logs error if implied parameter fWrite is true.
     1295 * The operation is a NOP, logging an error if an implied parameter, fWrite, is boolean true.
    12191296 *
    12201297 * cb, pv and fWrite are implicit parameters and must be defined by the invoker.
     
    12371314 * the memory described by cb and pv.
    12381315 *
    1239  * cb, pv and fWrite are implicit parameters and must be defined by the invoker.
     1316 * cb, pv and fWrite are implicit parameters and must be defined by invoker.
    12401317 */
    12411318#define VIRTIO_DEV_CONFIG_ACCESS_INDEXED(member, uIdx, tCfgStruct, uOffsetOfAccess, pCfgStruct) \
     
    12541331 * The operation is a nop and logs error if implied parameter fWrite is true.
    12551332 *
    1256  * cb, pv and fWrite are implicit parameters and must be defined by the invoker.
     1333 * cb, pv and fWrite are implicit parameters and must be defined by invoker.
    12571334 */
    12581335#define VIRTIO_DEV_CONFIG_ACCESS_INDEXED_READONLY(member, uidx, tCfgStruct, uOffsetOfAccess, pCfgStruct) \
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette