Changeset 92939 in vbox for trunk/src/VBox/Devices/VirtIO/VirtioCore.h
- Timestamp: Dec 15, 2021 3:51:28 PM
- File: 1 edited
Legend (diff markers used below):
  (two-space indent)   Unmodified
  +                    Added
  -                    Removed
trunk/src/VBox/Devices/VirtIO/VirtioCore.h
r92091 → r92939

  #  define VIRTIO_HEX_DUMP(logLevel, pv, cb, base, title) do { } while (0)
  #endif

+ /** Marks the start of the virtio saved state (just for sanity). */
+ #define VIRTIO_SAVEDSTATE_MARKER            UINT64_C(0x1133557799bbddff)

  /** Pointer to the shared VirtIO state. */
…
  #define VIRTIO_PAGE_SIZE 4096               /**< Page size used by VirtIO specification */

- /* Note: The VirtIO specification, particularly rev. 0.95, and clarified in rev 1.0 for transitional devices,
-         says the page size used for Queue Size calculations is usually 4096 bytes, but dependent on the
-         transport. In an appendix of the 0.95 spec, the 'mmio device', which has not been
-         implemented by the VBox legacy device in VirtualBox, says the guest must report the page size. For now
-         the page size is set to a static 4096, based on the original VBox legacy VirtIO implementation, which
-         tied it to PAGE_SIZE and appears to work (or at least well enough for most practical purposes). */
-
- /** The following virtioCoreGCPhysChain*() functions mimic the functionality of the related RT s/g functions,
-  *  except they work with the data type GCPhys rather than void *
+ /**
+  * @todo Move the following virtioCoreGCPhysChain*() functions, which mimic the functionality of the
+  *       related RT S/G functions, into some common VirtualBox source tree location and out of this code.
+  *
+  * They behave identically to the S/G utilities in the RT library, except they work with the
+  * GCPhys data type specifically instead of void *, to avoid a potentially disastrous mismatch
+  * between sizeof(void *) and sizeof(GCPhys).
+  */
  typedef struct VIRTIOSGSEG                  /**< An S/G entry */
…
  /**
-  * VirtIO buffers are descriptor chains (scatter-gather vectors). Each buffer is described
-  * by the index of its head descriptor, which optionally chains to another descriptor,
-  * and so on.
-  *
-  * Each descriptor ([len, GCPhys] pair) in the chain represents either an OUT segment (e.g. guest-to-host)
-  * or an IN segment (host-to-guest). A VIRTQBUF is created and returned from a call to
-  * virtioCoreR3VirtqAvailBufPeek() or virtioCoreR3VirtqAvailBufGet(). That function consolidates
-  * the VirtIO descriptor chain into a representation where pSgPhysSend is a GCPhys s/g buffer containing
-  * all of the OUT descriptors and pSgPhysReturn is a GCPhys s/g buffer containing all of the IN descriptors
-  * to be filled with data on the host to return to the guest.
+  * VirtIO buffers are descriptor chains (e.g. scatter-gather vectors). A VirtIO buffer is referred to by
+  * the index of its head descriptor. Each descriptor optionally chains to another descriptor, and so on.
+  *
+  * For any given descriptor, each length and GCPhys pair in the chain represents either an OUT segment
+  * (e.g. guest-to-host) or an IN segment (host-to-guest).
+  *
+  * A VIRTQBUF is created and returned from a call to either virtioCoreR3VirtqAvailBufPeek() or
+  * virtioCoreR3VirtqAvailBufGet(). Those functions consolidate the VirtIO descriptor chain into a
+  * single representation where:
+  *
+  *     pSgPhysSend     GCPhys s/g buffer containing all of the (VirtIO) OUT descriptors
+  *     pSgPhysReturn   GCPhys s/g buffer containing all of the (VirtIO) IN descriptors
+  *
+  * The OUT descriptors are data sent from guest to host (dev-specific commands and/or data).
+  * The IN descriptors are to be filled with data (converted to physical) on the host, to be
+  * returned to the guest.
+  */
  typedef struct VIRTQBUF
…
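To make the consolidated-buffer idea concrete, here is a minimal device-side sketch. The exact virtioCoreR3VirtqAvailBufGet() prototype (the ppVirtqBuf/fRemove shape) is assumed from the documentation further below, and uMyVirtq is a hypothetical queue ordinal:

    /* Hypothetical sketch: pull one buffer off the avail ring and size its two sides.
     * The virtioCoreR3VirtqAvailBufGet() prototype used here is an assumption. */
    PVIRTQBUF pVirtqBuf = NULL;
    int rc = virtioCoreR3VirtqAvailBufGet(pDevIns, pVirtio, uMyVirtq, &pVirtqBuf, true /*fRemove*/);
    if (RT_SUCCESS(rc))
    {
        size_t cbOut = virtioCoreGCPhysChainCalcBufSize(pVirtqBuf->pSgPhysSend);   /* guest-to-host data  */
        size_t cbIn  = virtioCoreGCPhysChainCalcBufSize(pVirtqBuf->pSgPhysReturn); /* room for host reply */
        /* ... read the OUT side, fill the IN side, then complete the cycle with
         * virtioCoreR3VirtqUsedBufPut() / virtioCoreR3VirtqUsedRingSync(), and release
         * the VIRTQBUF reference when done ... */
        RT_NOREF(cbOut, cbIn);
    }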
  static const VIRTIO_FEATURES_LIST s_aCoreFeatures[] =
  {
+     { VIRTIO_F_VERSION_1,          " VERSION_1          Guest driver supports VirtIO specification V1.0+ (e.g. \"modern\")\n" },
+     { VIRTIO_F_RING_EVENT_IDX,     " RING_EVENT_IDX     Enables use_event and avail_event fields described in 2.4.7, 2.4.8\n" },
      { VIRTIO_F_RING_INDIRECT_DESC, " RING_INDIRECT_DESC Driver can use descriptors with VIRTQ_DESC_F_INDIRECT flag set\n" },
-     { VIRTIO_F_RING_EVENT_IDX,     " RING_EVENT_IDX     Enables use_event and avail_event fields described in 2.4.7, 2.4.8\n" },
-     { VIRTIO_F_VERSION_1,          " VERSION            Used to detect legacy drivers.\n" },
  };

  #define VIRTIO_DEV_INDEPENDENT_FEATURES_OFFERED ( 0 )   /**< TBD: Add VIRTIO_F_INDIRECT_DESC */
…
      kvirtIoVmStateChangedFor32BitHack = 0x7fffffff
  } VIRTIOVMSTATECHANGED;

  /** @def Virtio Device PCI Capabilities type codes */
…
  typedef struct VIRTQUEUE
  {
-     RTGCPHYS    GCPhysVirtqDesc;    /**< (MMIO) PhysAdr per-Q desc structs    GUEST */
-     RTGCPHYS    GCPhysVirtqAvail;   /**< (MMIO) PhysAdr per-Q avail structs   GUEST */
-     RTGCPHYS    GCPhysVirtqUsed;    /**< (MMIO) PhysAdr per-Q used structs    GUEST */
-     uint16_t    uMsixVector;        /**< (MMIO) Per-queue vector for MSI-X    GUEST */
-     uint16_t    uEnable;            /**< (MMIO) Per-queue enable              GUEST */
-     uint16_t    uNotifyOffset;      /**< (MMIO) per-Q notify offset            HOST */
-     uint16_t    uQueueSize;         /**< (MMIO) Per-queue size          HOST/GUEST */
+     RTGCPHYS    GCPhysVirtqDesc;    /**< (MMIO) Addr of virtq's desc ring     GUEST */
+     RTGCPHYS    GCPhysVirtqAvail;   /**< (MMIO) Addr of virtq's avail ring    GUEST */
+     RTGCPHYS    GCPhysVirtqUsed;    /**< (MMIO) Addr of virtq's used ring     GUEST */
+     uint16_t    uMsixVector;        /**< (MMIO) MSI-X vector                  GUEST */
+     uint16_t    uEnable;            /**< (MMIO) Queue enable flag             GUEST */
+     uint16_t    uNotifyOffset;      /**< (MMIO) Notification offset for queue  HOST */
+     uint16_t    uQueueSize;         /**< (MMIO) Size of queue           HOST/GUEST */
      uint16_t    uAvailIdxShadow;    /**< Consumer's position in avail ring          */
      uint16_t    uUsedIdxShadow;     /**< Consumer's position in used ring           */
…
      char        szName[32];         /**< Dev-specific name of queue                 */
      bool        fUsedRingEvent;     /**< Flags if used idx to notify guest reached  */
-     uint8_t     padding[3];
+     bool        fAttached;          /**< Flags if dev-specific client attached      */
  } VIRTQUEUE, *PVIRTQUEUE;
…
      uint64_t    uDeviceFeatures;          /**< (MMIO) Host features offered          HOST */
      uint64_t    uDriverFeatures;          /**< (MMIO) Host features accepted        GUEST */
+     uint32_t    fDriverFeaturesWritten;   /**< (MMIO) Host features complete tracking     */
      uint32_t    uDeviceFeaturesSelect;    /**< (MMIO) hi/lo select uDeviceFeatures  GUEST */
      uint32_t    uDriverFeaturesSelect;    /**< (MMIO) hi/lo select uDriverFeatures  GUEST */
…
      uint8_t     fMsiSupport;              /**< Flag set if using MSI instead of ISR       */
      uint16_t    uVirtqSelect;             /**< (MMIO) queue selector                GUEST */
-     uint32_t    fLegacyDriver;            /**< Set if guest driver < VirtIO 1.0           */
+     uint32_t    fLegacyDriver;            /**< Set if guest drv < VirtIO 1.0 and allowed  */
+     uint32_t    fOfferLegacy;             /**< Set at init call from dev-specific code    */

      /** @name The locations of the capability structures in PCI config space and the BAR.
…
      /** @} */

      IOMMMIOHANDLE   hMmioPciCap;          /**< MMIO handle of PCI cap. region (#2)        */
      IOMIOPORTHANDLE hLegacyIoPorts;       /**< Handle of legacy I/O port range.           */

  #ifdef VBOX_WITH_STATISTICS
…
      STAMPROFILEADV  StatWriteRC;          /** I/O port and MMIO R3 Write profiling        */
  #endif
      /** @} */
  } VIRTIOCORE;
…
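A device-specific implementation hands the feature-listing API (further below) a table of the same VIRTIO_FEATURES_LIST shape for its own features. A hypothetical virtio-net style fragment, where the VIRTIONET_F_* names are illustrative of the device side rather than declared in this header:

    /* Hypothetical device-specific feature table, mirroring s_aCoreFeatures above. */
    static const VIRTIO_FEATURES_LIST s_aDevSpecificFeatures[] =
    {
        { VIRTIONET_F_MAC,    " MAC      Host supplies a stable MAC address\n" },
        { VIRTIONET_F_STATUS, " STATUS   Device configuration status field available\n" },
    };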
…
  /**
-  * Implementation-specific client callback to report the VirtIO version as modern or legacy.
-  * That's the only meaningful distinction in the VirtIO specification. Beyond that,
-  * versioning is loosely discernible through feature negotiation. There will be two callbacks:
-  * the first indicates the guest driver is considered legacy VirtIO, as it is critical to
-  * assume that initially. A 2nd callback will occur during feature negotiation,
-  * which will indicate the guest is modern if it acknowledges the VIRTIO_F_VERSION_1
-  * feature, or legacy if the feature isn't negotiated. That 2nd callback allows
-  * the device-specific code to configure its behavior in terms of both guest version and features.
-  *
-  * @param pVirtio    Pointer to the shared virtio state.
-  * @param fModern    True if guest driver identified itself as modern (e.g. VirtIO 1.0 featured)
-  */
- DECLCALLBACKMEMBER(void, pfnGuestVersionHandler,(PVIRTIOCORE pVirtio, uint32_t fModern));
+  * Implementation-specific client callback to report that VirtIO feature negotiation is
+  * complete. It should be invoked by the VirtIO core only once.
+  *
+  * @param pVirtio           Pointer to the shared virtio state.
+  * @param fDriverFeatures   Bitmask of features the guest driver has accepted/declined.
+  * @param fLegacy           true if legacy mode was offered; remains set until the guest driver
+  *                          identifies itself as modern (e.g. VirtIO 1.0 featured)
+  */
+ DECLCALLBACKMEMBER(void, pfnFeatureNegotiationComplete, (PVIRTIOCORE pVirtio, uint64_t fDriverFeatures, uint32_t fLegacy));
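A device-specific implementation of this callback might look like the following sketch. MYDEVICE and its field names are hypothetical, and it assumes the VIRTIOCORE state is embedded in the device state as a member named Virtio:

    /* Hypothetical handler: latch the negotiated-feature outcome for later use. */
    static DECLCALLBACK(void) myDevFeatureNegotiationComplete(PVIRTIOCORE pVirtio, uint64_t fDriverFeatures, uint32_t fLegacy)
    {
        PMYDEVICE pThis = RT_FROM_MEMBER(pVirtio, MYDEVICE, Virtio);
        pThis->fHasIndirectDesc = RT_BOOL(fDriverFeatures & VIRTIO_F_RING_INDIRECT_DESC);
        pThis->fIsLegacyGuest   = RT_BOOL(fLegacy);
    }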
  /**
…
  DECLCALLBACKMEMBER(int, pfnDevCapWrite,(PPDMDEVINS pDevIns, uint32_t offCap, const void *pvBuf, uint32_t cbWrite));

  /**
   * When guest-to-host queue notifications are enabled, the guest driver notifies the host
…
  {
      /**
-      * When guest-to-host queue notifications are enabled, the guest driver notifies the host
-      * that the avail queue has buffers, and this callback informs the client.
+      * This callback notifies the device-specific portion of this device implementation (if guest-to-host
+      * queue notifications are enabled) that the guest driver has notified the host (this device)
+      * that the VirtIO "avail" ring of a queue has new s/g buffers added by the guest VirtIO driver.
       *
       * @param pVirtio    Pointer to the shared virtio state.
…
  } VIRTIOCORERC;

  /** @typedef VIRTIOCORECC
   * The instance data for the current context. */
  typedef CTX_SUFF(VIRTIOCORE) VIRTIOCORECC;

  /** @name API for VirtIO parent device
   * @{ */
  /**
…
   * This should be called from PDMDEVREGR3::pfnConstruct.
   *
-  * @param pDevIns    The device instance.
+  * @param pDevIns    Device instance.
   * @param pVirtio    Pointer to the shared virtio state. This
   *                   must be the first member in the shared
…
  int virtioCoreR3Init(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC,
                       PVIRTIOPCIPARAMS pPciParams, const char *pcszInstance,
-                      uint64_t fDevSpecificFeatures, void *pvDevSpecificCfg, uint16_t cbDevSpecificCfg);
+                      uint64_t fDevSpecificFeatures, uint32_t fOfferLegacy, void *pvDevSpecificCfg, uint16_t cbDevSpecificCfg);
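In a device constructor this is wired up roughly as follows; pThis/pThisCC, the PCI parameter block, and the feature mask are hypothetical stand-ins:

    /* Hypothetical pfnConstruct fragment: offer legacy-compatible operation alongside modern. */
    rc = virtioCoreR3Init(pDevIns, &pThis->Virtio, &pThisCC->Virtio, &PciParams, pThis->szInstanceName,
                          fDevSpecificFeatures, 1 /*fOfferLegacy*/, &pThis->Config, sizeof(pThis->Config));
    if (RT_FAILURE(rc))
        return PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: core initialization failed"));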
  /**
   * Initiate orderly reset procedure. This is an exposed API for clients that might need it.
…
   * 'Attaches' the host device-specific implementation's queue state to the host VirtIO core
   * virtqueue management infrastructure, informing the virtio core of the name of the
-  * queue associated with the queue number. uVirtqNbr is used as the 'handle' for virtqueues
-  * in this API (and is opaquely the index into the VirtIO core's array of queue state).
-  *
-  * Virtqueue numbers are VirtIO specification defined (i.e. they are unique within each
-  * VirtIO device type).
+  * queue to associate with the queue number.
+  *
+  * Note: uVirtqNbr (ordinal index) is used as the 'handle' for virtqs in this VirtioCore
+  * implementation's API (as an opaque selector into the VirtIO core's array of queue states).
+  *
+  * Virtqueue numbers are defined device-specifically by the VirtIO specification
+  * (i.e. they are unique within each VirtIO device type), but are in some cases scalable,
+  * so only the pattern of queue numbers is defined by the spec and implementations may
+  * contain a self-determined plurality of queues.
   *
   * @param pVirtio    Pointer to the shared virtio state.
…
  /**
-  * Enables or disables a virtq
-  *
-  * @param pVirtio    Pointer to the shared virtio state.
-  * @param uVirtqNbr  Virtq number
-  * @param fEnable    Flags whether to enable or disable the virtq
-  */
- void virtioCoreVirtqEnable(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr, bool fEnable);
+ /**
+  * Detaches the host device-specific implementation's queue state from the host VirtIO core
+  * virtqueue management infrastructure, informing the VirtIO core that the queue is
+  * not utilized by the device-specific code.
+  *
+  * @param pVirtio    Pointer to the shared virtio state.
+  * @param uVirtqNbr  Virtq number
+  *
+  * @returns VBox status code.
+  */
+ int virtioCoreR3VirtqDetach(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr);
+
+ /**
+  * Checks to see whether the queue is attached to the core.
+  *
+  * @param pVirtio    Pointer to the shared virtio state.
+  * @param uVirtqNbr  Virtq number
+  *
+  * @returns true or false, indicating whether the dev-specific reflection of the
+  * queue is attached to the core.
+  */
+ bool virtioCoreR3VirtqIsAttached(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr);
+
+ /**
+  * Checks to see whether the queue is enabled.
+  *
+  * @param pVirtio    Pointer to the shared virtio state.
+  * @param uVirtqNbr  Virtq number
+  *
+  * @returns true or false, indicating the core queue enable state.
+  * There is no API function to enable the queue, because the actual enabling is handled
+  * by the guest via MMIO.
+  *
+  * NOTE: The guest VirtIO driver's claim over this state is overridden (which violates the VirtIO 1.0
+  * spec in a carefully controlled manner) in the case where the queue MUST be disabled due to observed
+  * control queue corruption (e.g. null GCPhys virtq base addr) while restoring a legacy-only device's
+  * (DevVirtioNet.cpp) saved state, as a way to flag that the queue is unusable-as-saved and must be
+  * removed. That is all handled in the load/save exec logic. A device reset could potentially, depending
+  * on parameters passed from the host VirtIO device to the guest VirtIO driver, result in the guest
+  * re-establishing the queue, except that in that situation the queue's operational state would be valid.
+  */
+ bool virtioCoreR3VirtqIsEnabled(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr);
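Typical device-side lifecycle usage, sketched with hypothetical names; the virtioCoreR3VirtqAttach() prototype is assumed from its description above:

    /* Hypothetical sketch: attach a queue at construction time, gate processing on the guest
     * having enabled it via MMIO, and detach when the dev-specific code stops using it. */
    rc = virtioCoreR3VirtqAttach(&pThis->Virtio, MYDEV_CTRLQ_IDX, "controlq");  /* assumed prototype */
    /* ... later, e.g. in a worker thread: */
    if (   virtioCoreR3VirtqIsAttached(&pThis->Virtio, MYDEV_CTRLQ_IDX)
        && virtioCoreR3VirtqIsEnabled(&pThis->Virtio, MYDEV_CTRLQ_IDX))
        myDevProcessControlQueue(pThis);                                        /* hypothetical worker */
    /* ... and at termination time: */
    virtioCoreR3VirtqDetach(&pThis->Virtio, MYDEV_CTRLQ_IDX);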
  /**
   * Enable or disable notification for the specified queue.
   *
-  * With notification enabled, the guest driver notifies the host device (via MMIO
-  * to the queue notification offset described in VirtIO 1.0, 4.1.4.4 "Notification Structure Layout")
-  * whenever the guest driver adds a new entry to the avail ring of the respective queue.
-  *
-  * Note: In the VirtIO world, the device sets flags in the used ring to communicate to the driver how to
-  * handle notifications for the avail ring, and the driver sets flags in the avail ring to communicate
-  * to the device how to handle sending interrupts for the used ring.
+  * When queue notifications are enabled, the guest VirtIO driver notifies the host VirtIO device
+  * (via MMIO, see VirtIO 1.0, 4.1.4.4 "Notification Structure Layout") whenever the guest driver
+  * adds a new s/g buffer to the "avail" ring of the queue.
+  *
+  * Note: The VirtIO queue layout includes flags the device controls in the "used" ring to inform the
+  * guest driver whether it should notify the host of the guest's buffer additions to the "avail" ring,
+  * and, conversely, the guest driver sets flags in the "avail" ring to communicate to the host device
+  * whether or not to interrupt the guest when it adds buffers to the used ring.
   *
   * @param pVirtio    Pointer to the shared virtio state.
…
  /**
-  * Displays the VirtIO spec-related features offered and their accepted/declined status,
-  * by both the VirtIO core and the dev-specific device code (which invokes this function).
-  * The result is a comprehensive list of available features the VirtIO specification
-  * defines, which ones were actually offered by the device, and which ones were accepted
-  * by the guest driver, thus providing a legible summary view of the configuration
-  * the device is operating with.
+  * Displays a well-formatted, human-readable translation of otherwise inscrutable bitmasks
+  * that embody features defined by the VirtIO specification, indicating: the totality of features
+  * that can be implemented by host and guest, which features were offered by the host, and
+  * which were actually accepted by the guest. It displays a summary view of the device's
+  * finalized operational state (host-guest negotiated architecture) in such a way as to show
+  * which options are available for implementing or enabling.
+  *
+  * The non-device-specific VirtIO features list is managed by the core API (i.e. implied).
+  * Only dev-specific features must be passed as a parameter.
+  *
   * @param pVirtio    Pointer to the shared virtio state.
   * @param pHlp       Pointer to the debug info hlp struct
-  * @param s_aDevSpecificFeatures
-  *                   Features specification lists for device-specific implementation
-  *                   (i.e: net controller, scsi controller ...)
+  * @param s_aDevSpecificFeatures  Dev-specific features (virtio-net, virtio-scsi...)
   * @param cFeatures  Number of features in aDevSpecificFeatures
   */
…
  /*
-  * Debugging-assist feature that displays the state of the VirtIO core code, which includes
+  * Debug-assist utility function to display the state of the VirtIO core code, including
   * an overview of the state of all of the queues.
   *
…
   * This is implemented currently to be invoked by the inheriting device-specific code
-  * (see DevVirtioNet for an example, which receives the debugvm callback directly).
-  * DevVirtioNet lists the available sub-options if no arguments are provided. In that
+  * (see the VirtualBox virtio-net (VirtIO network controller) device implementation
+  * for an example of code that receives the debugvm callback directly).
+  *
+  * DevVirtioNet lists the available sub-options if no arguments are provided. In that
   * example this virtq info related function is invoked hierarchically when virtio-net
   * displays its device-specific queue info.
…
  /**
-  * This function is identical to virtioCoreR3VirtqAvailBufGet(), except it doesn't 'consume'
-  * the buffer from the avail ring of the virtq. The peek operation becomes identical to a get
-  * operation if virtioCoreR3VirtqAvailRingNext() is called to consume the buffer from the avail ring,
-  * at which point virtioCoreR3VirtqUsedBufPut() must be called to complete the roundtrip
-  * transaction by putting the descriptor on the used ring.
+  * This function is identical to virtioCoreR3VirtqAvailBufGet(), *except* it doesn't consume the
+  * peeked buffer from the avail ring of the virtq. The function *becomes* identical to
+  * virtioCoreR3VirtqAvailBufGet() only if virtioCoreR3VirtqAvailRingNext() is invoked to
+  * consume the buf from the queue's avail ring, followed by invocation of virtioCoreR3VirtqUsedBufPut()
+  * to hand the host-processed buffer back to the guest, completing the guest-initiated virtq buffer circuit.
   *
   * @param pDevIns    The device instance.
…
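The peek-then-consume idiom described above looks roughly like this in device code. The peek/next/sync prototypes are assumed from the descriptions in this header, and the myDev*() helpers and pSgReply buffer are illustrative:

    /* Hypothetical sketch of peek-then-consume (prototypes assumed, not verbatim). */
    PVIRTQBUF pVirtqBuf = NULL;
    while (RT_SUCCESS(virtioCoreR3VirtqAvailBufPeek(pDevIns, pVirtio, uVirtqNbr, &pVirtqBuf)))
    {
        if (!myDevCanProcessNow(pThis))                         /* back-pressure check        */
            break;                                              /* buffer stays on avail ring */
        virtioCoreR3VirtqAvailRingNext(pVirtio, uVirtqNbr);     /* peek now behaves as "get"  */
        myDevProcessBuf(pThis, pVirtqBuf);                      /* read OUT side, build reply */
        virtioCoreR3VirtqUsedBufPut(pDevIns, pVirtio, uVirtqNbr, pSgReply, pVirtqBuf, true /*fFence*/);
    }
    virtioCoreR3VirtqUsedRingSync(pDevIns, pVirtio, uVirtqNbr); /* make results visible to guest */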
  /**
   * This function fetches the next buffer (descriptor chain) from the VirtIO "avail" ring of the
-  * indicated queue, and converts the buf's s/g vectors into OUT (e.g. guest-to-host)
+  * indicated queue, separating the buf's s/g vectors into OUT (e.g. guest-to-host)
   * components and IN (host-to-guest) components.
   *
   * The caller is responsible for GCPhys to host virtual memory conversions. If the
   * virtq buffer being peeked at is "consumed", virtioCoreR3VirtqAvailRingNext() must
-  * be called, and in that case virtioCoreR3VirtqUsedBufPut() must be called to
-  * complete the roundtrip virtq transaction.
+  * be called, and after that virtioCoreR3VirtqUsedBufPut() must be called to
+  * complete the buffer transfer cycle with the guest.
   *
   * @param pDevIns    The device instance.
…
  /**
-  * Fetches a specific descriptor chain using the avail ring of the indicated queue and converts the descriptor
-  * chain into its OUT (to device) and IN (to guest) components.
+  * Fetches a specific descriptor chain using the avail ring of the indicated queue and converts
+  * the descriptor chain into its OUT (to device) and IN (to guest) components.
   *
   * The caller is responsible for GCPhys to host virtual memory conversions and *must*
…
  /**
   * Returns data to the guest to complete a transaction initiated by virtioCoreR3VirtqAvailBufGet()
-  * or virtioCoreR3VirtqAvailBufPeek()/virtioCoreR3VirtqBufSync() call pairs to complete each
-  * intervening a roundtrip transaction, ultimately putting each descriptor chain pulled from the
-  * avail ring of a queue onto the used ring of the queue. wherein I/O transactions are always
-  * initiated by the guest and completed by the host. In other words, for the host to send any
-  * data to the guest, the guest must provide buffers, for the host to fill, via the avail ring
-  * of the virtq.
+  * (or a virtioCoreR3VirtqAvailBufPeek()/virtioCoreR3VirtqBufSync() call pair), to complete each
+  * buffer transfer transaction (guest-host buffer cycle), ultimately moving each descriptor chain
+  * from the avail ring of a queue onto the used ring of the queue. Note that VirtIO buffer
+  * transactions are *always* initiated by the guest and completed by the host. In other words,
+  * for the host to send any I/O related data to the guest (and in some cases configuration data),
+  * the guest must provide buffers via the virtq's avail ring, for the host to fill.
   *
   * At some point virtioCoreR3VirtqUsedRingSync() must be called to return data to the guest,
-  * completing all pending virtioCoreR3VirtqAvailBufPut() transactions that have accumulated since
-  * the last call to virtioCoreR3VirtqUsedRingSync()
-  *
-  * @note This does a write-ahead to the used ring of the guest's queue. The data
-  * written won't be seen by the guest until the next call to virtioCoreVirtqUsedRingSync()
+  * completing all pending virtioCoreR3VirtqAvailBufPut() operations that have accumulated since
+  * the last call to virtioCoreR3VirtqUsedRingSync().
+  *
+  * @note This function effectively performs write-ahead to the used ring of the virtq.
+  * Data written won't be seen by the guest until the next call to virtioCoreVirtqUsedRingSync()
   *
   * @param pDevIns    The device instance (for reading).
…
   *                   buffer originally pulled from the queue.
   *
-  * @param fFence     If true, put up a copy fence (memory barrier) after
+  * @param fFence     If true (default), put up a copy-fence (memory barrier) after
   *                   copying to guest phys. mem.
   *
…
  int virtioCoreR3VirtqUsedBufPut(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr, PRTSGBUF pSgVirtReturn,
-                                 PVIRTQBUF pVirtqBuf, bool fFence);
+                                 PVIRTQBUF pVirtqBuf, bool fFence = true);

+ /**
+  * Quicker variant of the same-named function (directly above) that it overloads.
+  * This variant accepts as input a pointer to a buffer and a count instead of an S/G buffer,
+  * and thus doesn't have to copy between two S/G buffers, avoiding some overhead.
+  *
+  * @param pDevIns    The device instance (for reading).
+  * @param pVirtio    Pointer to the shared virtio state.
+  * @param uVirtq     Virtq number
+  * @param cb         Number of bytes to copy to the phys. buf.
+  * @param pv         Virtual mem buf to copy to phys buf.
+  * @param cbEnqueue  How many bytes in packet to enqueue (0 = don't enqueue)
+  * @param fFence     If true (default), put up a copy-fence (memory barrier) after
+  *                   copying to guest phys. mem.
+  *
+  * @returns VBox status code.
+  * @retval  VINF_SUCCESS        Success
+  * @retval  VERR_INVALID_STATE  VirtIO not in ready state
+  * @retval  VERR_NOT_AVAILABLE  Virtq is empty
+  *
+  * @note This function will not release any reference to pVirtqBuf. The
+  *       caller must take care of that.
+  */
+ int virtioCoreR3VirtqUsedBufPut(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq, size_t cb, const void *pv,
+                                 PVIRTQBUF pVirtqBuf, uint32_t cbEnqueue, bool fFence = true);
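For a small, fixed-size reply the flat-buffer overload avoids building an RTSGBUF at all. A hedged sketch, where MYDEV_STATUS_OK is an illustrative device-defined constant:

    /* Hypothetical sketch: return a one-byte status to the guest with the flat-buffer overload. */
    uint8_t uStatus = MYDEV_STATUS_OK;
    rc = virtioCoreR3VirtqUsedBufPut(pDevIns, pVirtio, uVirtqNbr, sizeof(uStatus), &uStatus,
                                     pVirtqBuf, sizeof(uStatus) /*cbEnqueue*/, true /*fFence*/);
    /* The guest sees the data only after the next virtioCoreR3VirtqUsedRingSync() call. */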
  /**
   * Advance the index of the avail ring to the next entry in the specified virtq (see virtioCoreR3VirtqAvailBufPeek())
…
  /**
-  * Checks to see if the guest has acknowledged the device's VIRTIO_F_VERSION_1 feature.
-  * If not, it's presumed to be a VirtIO legacy guest driver. Note that legacy drivers
-  * may start using the device prematurely, as opposed to the rigorously sane protocol
-  * prescribed by the "modern" VirtIO spec. Early access implies a legacy driver.
-  * Therefore legacy mode is the assumption until feature negotiation.
+  * Checks to see if the guest has accepted the host device's VIRTIO_F_VERSION_1 (i.e. "modern")
+  * behavioral modeling, indicating the guest has agreed to comply with the modern VirtIO 1.0+
+  * specification. Otherwise the unavoidable presumption is that the host device is dealing with a
+  * legacy VirtIO guest driver, and thus must be prepared to cope with the less mature architecture
+  * and behaviors from the prototype era of VirtIO. (See comments in the PDM-invoked device
+  * constructor for more information.)
   *
   * @param pVirtio    Pointer to the virtio state.
   */
  int virtioCoreIsLegacyMode(PVIRTIOCORE pVirtio);
+ /**
+  * This VirtIO transitional device supports "modern" (rev 1.0+) as well as "legacy" (e.g. < 1.0) VirtIO drivers.
+  * Some legacy guest drivers are known to mishandle PCI bus mastering, wherein the PCI flavor of GC phys
+  * access functions can't be used. The following wrappers select the memory access method based on whether
+  * the device is operating in legacy mode or not.
+  */
+ DECLINLINE(int) virtioCoreGCPhysWrite(PVIRTIOCORE pVirtio, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, void *pvBuf, size_t cbWrite)
+ {
+     int rc;
+     if (virtioCoreIsLegacyMode(pVirtio))
+         rc = PDMDevHlpPhysWrite(pDevIns, GCPhys, pvBuf, cbWrite);
+     else
+         rc = PDMDevHlpPCIPhysWrite(pDevIns, GCPhys, pvBuf, cbWrite);
+     return rc;
+ }
+
+ DECLINLINE(int) virtioCoreGCPhysRead(PVIRTIOCORE pVirtio, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
+ {
+     int rc;
+     if (virtioCoreIsLegacyMode(pVirtio))
+         rc = PDMDevHlpPhysRead(pDevIns, GCPhys, pvBuf, cbRead);
+     else
+         rc = PDMDevHlpPCIPhysRead(pDevIns, GCPhys, pvBuf, cbRead);
+     return rc;
+ }
+
+ /*
+  * (See comments for the corresponding function in sg.h)
+  */
  DECLINLINE(void) virtioCoreGCPhysChainInit(PVIRTIOSGBUF pGcSgBuf, PVIRTIOSGSEG paSegs, size_t cSegs)
  {
…
  }

+ /*
+  * (See comments for the corresponding function in sg.h)
+  */
  DECLINLINE(RTGCPHYS) virtioCoreGCPhysChainGet(PVIRTIOSGBUF pGcSgBuf, size_t *pcbData)
  {
…
  }

+ /*
+  * (See comments for the corresponding function in sg.h)
+  */
  DECLINLINE(void) virtioCoreGCPhysChainReset(PVIRTIOSGBUF pGcSgBuf)
  {
…
  }

+ /*
+  * (See comments for the corresponding function in sg.h)
+  */
  DECLINLINE(RTGCPHYS) virtioCoreGCPhysChainAdvance(PVIRTIOSGBUF pGcSgBuf, size_t cbAdvance)
  {
…
  }

+ /*
+  * (See comments for the corresponding function in sg.h)
+  */
  DECLINLINE(RTGCPHYS) virtioCoreGCPhysChainGetNextSeg(PVIRTIOSGBUF pGcSgBuf, size_t *pcbSeg)
  {
…
  }

- DECLINLINE(size_t) virtioCoreGCPhysChainCalcBufSize(PVIRTIOSGBUF pGcSgBuf)
+ /**
+  * Calculate the length of a GCPhys s/g buffer by tallying the size of each segment.
+  *
+  * @param pGcSgBuf   Guest Context (GCPhys) S/G buffer to calculate the length of
+  */
+ DECLINLINE(size_t) virtioCoreGCPhysChainCalcBufSize(PCVIRTIOSGBUF pGcSgBuf)
  {
      size_t   cb = 0;
      unsigned i  = pGcSgBuf->cSegs;
      while (i-- > 0)
          cb += pGcSgBuf->paSegs[i].cbSeg;
      return cb;
  }

+ /*
+  * (See comments for the corresponding function in sg.h)
+  *
+  * Unlike virtioCoreGCPhysChainCalcBufSize(), this tallies only the bytes not yet
+  * consumed: the remainder of the current segment plus all segments after it.
+  */
+ DECLINLINE(size_t) virtioCoreGCPhysChainCalcLengthLeft(PVIRTIOSGBUF pGcSgBuf)
+ {
+     size_t   cb = pGcSgBuf->cbSegLeft;
+     unsigned i  = pGcSgBuf->cSegs;
+     while (i-- > pGcSgBuf->idxSeg + 1)
+         cb += pGcSgBuf->paSegs[i].cbSeg;
+     return cb;
+ }
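Putting the legacy-aware wrappers and the chain helpers together, copying a guest-physical chain into a flat host-virtual buffer might be sketched as follows; pSgPhys, pvDst and cbDst are hypothetical locals:

    /* Hypothetical sketch: drain the remainder of a GCPhys chain into a flat host buffer. */
    size_t   cbLeft = RT_MIN(cbDst, virtioCoreGCPhysChainCalcLengthLeft(pSgPhys));
    uint8_t *pbDst  = (uint8_t *)pvDst;
    while (cbLeft)
    {
        size_t   cbSeg  = cbLeft;
        RTGCPHYS GCPhys = virtioCoreGCPhysChainGetNextSeg(pSgPhys, &cbSeg); /* clips cbSeg to segment */
        virtioCoreGCPhysRead(pVirtio, pDevIns, GCPhys, pbDst, cbSeg);       /* legacy-aware phys read */
        pbDst  += cbSeg;
        cbLeft -= cbSeg;
    }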
… … 895 1012 DECLINLINE(void) virtioCoreR3VirqBufFill(PVIRTIOCORE pVirtio, PVIRTQBUF pVirtqBuf, void *pv, size_t cb) 896 1013 { 897 uint8_t *pb = (uint8_t *)pv; 898 size_t cbLim = RT_MIN(pVirtqBuf->cbPhysReturn, cb); 899 while (cbLim) 1014 uint8_t *pvBuf = (uint8_t *)pv; 1015 size_t cbRemain = cb, cbTotal = 0; 1016 PVIRTIOSGBUF pSgPhysReturn = pVirtqBuf->pSgPhysReturn; 1017 while (cbRemain) 900 1018 { 901 size_t cbSeg = cbLim; 902 RTGCPHYS GCPhys = virtioCoreGCPhysChainGetNextSeg(pVirtqBuf->pSgPhysReturn, &cbSeg); 903 PDMDevHlpPCIPhysWrite(pVirtio->pDevInsR3, GCPhys, pb, cbSeg); 904 pb += cbSeg; 905 cbLim -= cbSeg; 906 pVirtqBuf->cbPhysSend -= cbSeg; 1019 uint32_t cbBounded = RT_MIN(pSgPhysReturn->cbSegLeft, cbRemain); 1020 Assert(cbBounded > 0); 1021 virtioCoreGCPhysWrite(pVirtio, CTX_SUFF(pVirtio->pDevIns), (RTGCPHYS)pSgPhysReturn->GCPhysCur, pvBuf, cbBounded); 1022 virtioCoreGCPhysChainAdvance(pSgPhysReturn, cbBounded); 1023 pvBuf += cbBounded; 1024 cbRemain -= cbBounded; 1025 cbTotal += cbBounded; 907 1026 } 908 LogFunc(("Added %d/%d bytes to %s buffer, head idx: %u (%d bytes remain)\n", 909 cb - cbLim, cb, VIRTQNAME(pVirtio, pVirtqBuf->uVirtq), 910 pVirtqBuf->uHeadIdx, pVirtqBuf->cbPhysReturn)); 911 } 912 913 /** 914 * Extract some bytes out of a virtq (s/g) buffer, converting them from GCPhys to virtual memory 915 * 916 * To be performant it is left to the caller to validate the size of the buffer with regard 917 * to data being pulled from it to avoid overruns/underruns. 1027 LogFunc(("Appended %d bytes to guest phys buf [head: %u]. %d bytes unused in buf.)\n", 1028 cbTotal, pVirtqBuf->uHeadIdx, virtioCoreGCPhysChainCalcLengthLeft(pSgPhysReturn))); 1029 } 1030 1031 /** 1032 * Extract some bytes from of a virtq s/g buffer, converting them from GCPhys space to 1033 * to ordinary virtual memory (i.e. making data directly accessible to host device code) 1034 * 1035 * As a performance optimization, it is left to the caller to validate buffer size. 918 1036 * 919 1037 * @param pVirtio Pointer to the shared virtio state. … … 937 1055 LogFunc(("Drained %d/%d bytes from %s buffer, head idx: %u (%d bytes left)\n", 938 1056 cb - cbLim, cb, VIRTQNAME(pVirtio, pVirtqBuf->uVirtq), 939 pVirtqBuf->uHeadIdx, pVirtqBuf->cbPhysSend));1057 pVirtqBuf->uHeadIdx, virtioCoreGCPhysChainCalcLengthLeft(pVirtqBuf->pSgPhysReturn))); 940 1058 } 941 1059 … … 1016 1134 * VirtIO implementation to identify this device's operational configuration after features 1017 1135 * have been negotiated with guest VirtIO driver. Feature negotiation entails host indicating 1018 * to guest which features it supports, then guest accepting among those offeredwhich features1136 * to guest which features it supports, then guest accepting from among the offered, which features 1019 1137 * it will enable. That becomes the agreement between the host and guest. 
  /**
…
   * VirtIO implementation to identify this device's operational configuration after features
   * have been negotiated with the guest VirtIO driver. Feature negotiation entails the host indicating
-  * to the guest which features it supports, then the guest accepting, among those offered, which features
-  * it will enable. That becomes the agreement between the host and guest.
+  * to the guest which features it supports, then the guest accepting, from among the offered, which
+  * features it will enable. That becomes the agreement between the host and guest. The bitmask containing
   * virtio core features plus device-specific features is provided as a parameter to virtioCoreR3Init()
…
  /**
-  * Get the the name of the VM state change associated with the enumeration variable
+  * Get the name of the VM state change associated with the enumeration variable
   *
   * @param enmState   VM state (enumeration value)
…
  /**
   * Debug assist for any consumer device code
-  * &
   * Do a hex dump of memory in guest physical context
   *
…
  [The duplicate definitions of virtioCoreGCPhysChainCalcBufSize() and of the legacy-aware
  virtioCoreGCPhysWrite()/virtioCoreGCPhysRead() wrappers were removed from this point in
  the file; those definitions now appear earlier in the header, as shown above.]

  /** Misc VM and PDM boilerplate */
- int virtioCoreR3SaveExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM);
- int virtioCoreR3LoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM);
+ int virtioCoreR3SaveExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t cQueues);
+ int virtioCoreR3ModernDeviceLoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM, uint32_t uVersion,
+                                      uint32_t uTestVersion, uint32_t cQueues);
+ int virtioCoreR3LegacyDeviceLoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM, uint32_t uVersion,
+                                      uint32_t uVirtioLegacy_3_1_Beta);
  void virtioCoreR3VmStateChanged(PVIRTIOCORE pVirtio, VIRTIOVMSTATECHANGED enmState);
  void virtioCoreR3Term(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC);
…
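Device-specific save/load code delegates to these helpers. A sketch with hypothetical version constants and queue count; the dispatch logic is the device author's choice:

    /* Hypothetical pfnSaveExec fragment for a device with MYDEV_VIRTQ_CNT queues. */
    rc = virtioCoreR3SaveExec(&pThis->Virtio, pDevIns->pHlpR3, pSSM, MYDEV_SAVEDSTATE_VERSION, MYDEV_VIRTQ_CNT);

    /* ... and on the load side, dispatch on the saved-state version read from the stream: */
    if (uVersion >= MYDEV_SAVEDSTATE_VERSION_WITH_CORE)          /* hypothetical cut-over version */
        rc = virtioCoreR3ModernDeviceLoadExec(&pThis->Virtio, pDevIns->pHlpR3, pSSM,
                                              uVersion, MYDEV_SAVEDSTATE_VERSION, MYDEV_VIRTQ_CNT);
    else
        rc = virtioCoreR3LegacyDeviceLoadExec(&pThis->Virtio, pDevIns->pHlpR3, pSSM,
                                              uVersion, MYDEV_LEGACY_3_1_BETA_VERSION);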
  /**
…
   * cb, pv and fWrite are implicit parameters and must be defined by the invoker.
   */
  #ifdef LOG_ENABLED
…
  /**
   * Reads/writes, as specified by fWrite, the specified member field of the config struct to/from
   * the memory described by cb and pv.
   *
   * cb, pv and fWrite are implicit parameters and must be defined by the invoker.
   */
  #define VIRTIO_DEV_CONFIG_ACCESS(member, tCfgStruct, uOffsetOfAccess, pCfgStruct) \
…
  /**
   * Copies bytes into the memory described by cb, pv from the specified member field of the config struct.
-  * The operation is a nop and logs an error if the implied parameter fWrite is true.
+  * The operation is a NOP, logging an error, if the implied parameter fWrite is true.
   *
   * cb, pv and fWrite are implicit parameters and must be defined by the invoker.
…
  /**
   * Reads/writes, as specified by fWrite, the indexed member field of the config struct to/from
   * the memory described by cb and pv.
   *
   * cb, pv and fWrite are implicit parameters and must be defined by the invoker.
   */
  #define VIRTIO_DEV_CONFIG_ACCESS_INDEXED(member, uIdx, tCfgStruct, uOffsetOfAccess, pCfgStruct) \
…
  /**
   * Copies bytes into the memory described by cb, pv from the specified indexed member field of the
   * config struct. The operation is a NOP, logging an error, if the implied parameter fWrite is true.
   *
   * cb, pv and fWrite are implicit parameters and must be defined by the invoker.
   */
  #define VIRTIO_DEV_CONFIG_ACCESS_INDEXED_READONLY(member, uidx, tCfgStruct, uOffsetOfAccess, pCfgStruct) \
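These macros are meant to be expanded inside a device's pfnDevCapRead/pfnDevCapWrite handlers, roughly like this. The MYDEVCONFIG struct and its uMtu field are illustrative, the offset-matching/dispatch details are left to the device author, and the macros expect cb, pv and fWrite to be visible in the surrounding scope:

    /* Hypothetical pfnDevCapRead fragment (names illustrative, not from this header). */
    static DECLCALLBACK(int) myDevCapRead(PPDMDEVINS pDevIns, uint32_t uOffsetOfAccess, void *pv, uint32_t cb)
    {
        PMYDEVICE pThis  = PDMDEVINS_2_DATA(pDevIns, PMYDEVICE);
        bool      fWrite = false;                  /* this is the read handler */
        VIRTIO_DEV_CONFIG_ACCESS(uMtu, MYDEVCONFIG, uOffsetOfAccess, &pThis->Config);
        return VINF_SUCCESS;
    }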