source: vbox/trunk/src/VBox/Devices/VirtIO/Virtio_1_0_impl.h@ 80931

Last change on this file: r80931, checked in by vboxsync, 5 years ago

Storage/DevVirtioSCSI.cpp: Fixed errors that prevented it from building on Windows and tested. See bugref:9440, Comment #93

/* $Id: Virtio_1_0_impl.h 80931 2019-09-22 09:56:37Z vboxsync $ $Revision: 80931 $ $Date: 2019-09-22 09:56:37 +0000 (Sun, 22 Sep 2019) $ $Author: vboxsync $ */
/** @file
 * Virtio_1_0_impl.h - Virtio Declarations
 */

/*
 * Copyright (C) 2009-2019 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#ifndef VBOX_INCLUDED_SRC_VirtIO_Virtio_1_0_impl_h
#define VBOX_INCLUDED_SRC_VirtIO_Virtio_1_0_impl_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

#include "Virtio_1_0.h"

/** @name Saved state versions.
 * The saved state version is changed if either the common part or any of the
 * device-specific parts change. That is, it is perfectly possible that the
 * version of the saved vnet state increases as a result of a change in the
 * vblk structure, for example.
 */
#define VIRTIO_SAVEDSTATE_VERSION 1
/** @} */

#define VIRTIO_F_VERSION_1 RT_BIT_64(32) /**< Required feature bit for 1.0 devices */

#define VIRTIO_F_INDIRECT_DESC RT_BIT_64(28) /**< Allow descs to point to list of descs */
#define VIRTIO_F_EVENT_IDX RT_BIT_64(29) /**< Allow notification disable for n elems */
#define VIRTIO_F_RING_INDIRECT_DESC RT_BIT_64(28) /**< Doc bug: Goes under two names in spec */
#define VIRTIO_F_RING_EVENT_IDX RT_BIT_64(29) /**< Doc bug: Goes under two names in spec */

#define VIRTIO_DEV_INDEPENDENT_FEATURES_OFFERED ( 0 ) /**< TBD: Add VIRTIO_F_INDIRECT_DESC */

#define VIRTIO_ISR_VIRTQ_INTERRUPT RT_BIT_32(0) /**< Virtq interrupt bit of ISR register */
#define VIRTIO_ISR_DEVICE_CONFIG RT_BIT_32(1) /**< Device configuration changed bit of ISR */
#define DEVICE_PCI_VENDOR_ID_VIRTIO 0x1AF4 /**< Guest driver locates dev via (mandatory) */
#define DEVICE_PCI_REVISION_ID_VIRTIO 1 /**< VirtIO 1.0 non-transitional drivers >= 1 */

/** Reserved (*negotiated*) Feature Bits (e.g. device-independent features, VirtIO 1.0 spec, section 6) */

#define VIRTIO_MSI_NO_VECTOR 0xffff /**< Vector value to disable MSI for queue */

/** Device Status field constants (from Virtio 1.0 spec) */
#define VIRTIO_STATUS_ACKNOWLEDGE 0x01 /**< Guest driver: Located this VirtIO device */
#define VIRTIO_STATUS_DRIVER 0x02 /**< Guest driver: Can drive this VirtIO dev. */
#define VIRTIO_STATUS_DRIVER_OK 0x04 /**< Guest driver: Driver set-up and ready */
#define VIRTIO_STATUS_FEATURES_OK 0x08 /**< Guest driver: Feature negotiation done */
#define VIRTIO_STATUS_FAILED 0x80 /**< Guest driver: Fatal error, gave up */
#define VIRTIO_STATUS_DEVICE_NEEDS_RESET 0x40 /**< Device experienced unrecoverable error */

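/*
 * Note: per the VirtIO 1.0 specification, section 3.1, a conforming guest driver sets these
 * status bits in the order ACKNOWLEDGE, DRIVER, FEATURES_OK, DRIVER_OK, so device-side code
 * can treat a set VIRTIO_STATUS_DRIVER_OK bit as "driver initialization complete"; see the
 * DRIVER_OK() convenience macro further below.
 */
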
/** @def Virtio Device PCI Capabilities type codes */
#define VIRTIO_PCI_CAP_COMMON_CFG 1 /**< Common configuration PCI capability ID */
#define VIRTIO_PCI_CAP_NOTIFY_CFG 2 /**< Notification area PCI capability ID */
#define VIRTIO_PCI_CAP_ISR_CFG 3 /**< ISR PCI capability id */
#define VIRTIO_PCI_CAP_DEVICE_CFG 4 /**< Device-specific PCI cfg capability ID */
#define VIRTIO_PCI_CAP_PCI_CFG 5 /**< PCI CFG capability ID */

#define VIRTIO_PCI_CAP_ID_VENDOR 0x09 /**< Vendor-specific PCI CFG Device Cap. ID */

/**
 * The following is the PCI capability struct common to all VirtIO capability types
 */
typedef struct virtio_pci_cap
{
    /* All little-endian */
    uint8_t uCapVndr; /**< Generic PCI field: PCI_CAP_ID_VNDR */
    uint8_t uCapNext; /**< Generic PCI field: next ptr. */
    uint8_t uCapLen; /**< Generic PCI field: capability length */
    uint8_t uCfgType; /**< Identifies the structure. */
    uint8_t uBar; /**< Where to find it. */
    uint8_t uPadding[3]; /**< Pad to full dword. */
    uint32_t uOffset; /**< Offset within bar. (L.E.) */
    uint32_t uLength; /**< Length of struct, in bytes. (L.E.) */
} VIRTIO_PCI_CAP_T, *PVIRTIO_PCI_CAP_T;

/**
 * Local implementation's usage context of a queue (e.g. not part of VirtIO specification)
 */
typedef struct VIRTQSTATE
{
    const char szVirtqName[32]; /**< Dev-specific name of queue */
    uint16_t uAvailIdx; /**< Consumer's position in avail ring */
    uint16_t uUsedIdx; /**< Consumer's position in used ring */
    bool fEventThresholdReached; /**< Don't lose track while queueing ahead */
} VIRTQSTATE, *PVIRTQSTATE;

/**
 * VirtIO 1.0 Capabilities' related MMIO-mapped structs:
 *
 * Note: virtio_pci_device_cap is dev-specific, implemented by client. Definition unknown here.
 */
typedef struct virtio_pci_common_cfg
{
    /* Per device fields */
    uint32_t uDeviceFeaturesSelect; /**< RW (driver selects device features) */
    uint32_t uDeviceFeatures; /**< RO (device reports features to driver) */
    uint32_t uDriverFeaturesSelect; /**< RW (driver selects driver features) */
    uint32_t uDriverFeatures; /**< RW (driver-accepted device features) */
    uint16_t uMsixConfig; /**< RW (driver sets MSI-X config vector) */
    uint16_t uNumQueues; /**< RO (device specifies max queues) */
    uint8_t uDeviceStatus; /**< RW (driver writes device status, 0=reset) */
    uint8_t uConfigGeneration; /**< RO (device changes when changing configs) */

    /* Per virtqueue fields (as determined by uQueueSelect) */
    uint16_t uQueueSelect; /**< RW (selects queue focus for these fields) */
    uint16_t uQueueSize; /**< RW (queue size, 0 - 2^n) */
    uint16_t uQueueMsixVector; /**< RW (driver selects MSI-X queue vector) */
    uint16_t uQueueEnable; /**< RW (driver controls usability of queue) */
    uint16_t uQueueNotifyOff; /**< RO (offset into virtqueue; see spec) */
    uint64_t pGcPhysQueueDesc; /**< RW (driver writes desc table phys addr) */
    uint64_t pGcPhysQueueAvail; /**< RW (driver writes avail ring phys addr) */
    uint64_t pGcPhysQueueUsed; /**< RW (driver writes used ring phys addr) */
} VIRTIO_PCI_COMMON_CFG_T, *PVIRTIO_PCI_COMMON_CFG_T;

typedef struct virtio_pci_notify_cap
{
    struct virtio_pci_cap pciCap; /**< Notification MMIO mapping capability */
    uint32_t uNotifyOffMultiplier; /**< notify_off_multiplier */
} VIRTIO_PCI_NOTIFY_CAP_T, *PVIRTIO_PCI_NOTIFY_CAP_T;

typedef struct virtio_pci_cfg_cap
{
    struct virtio_pci_cap pciCap; /**< Cap. defines the BAR/off/len to access */
    uint8_t uPciCfgData[4]; /**< I/O buf for above cap. */
} VIRTIO_PCI_CFG_CAP_T, *PVIRTIO_PCI_CFG_CAP_T;

/**
 * The core (/common) state of the VirtIO PCI device
 *
 * @implements PDMILEDPORTS
 */
typedef struct VIRTIOSTATE
{
    PDMPCIDEV dev; /**< PCI device */
    char szInstance[16]; /**< Instance name, e.g. "VIRTIOSCSI0" */
    void * pClientContext; /**< Client callback returned on callbacks */

    PPDMDEVINSR3 pDevInsR3; /**< Device instance - R3 */
    PPDMDEVINSR0 pDevInsR0; /**< Device instance - R0 */
    PPDMDEVINSRC pDevInsRC; /**< Device instance - RC */

    RTGCPHYS pGcPhysPciCapBase; /**< Pointer to MMIO mapped capability data */
    RTGCPHYS pGcPhysCommonCfg; /**< Pointer to MMIO mapped capability data */
    RTGCPHYS pGcPhysNotifyCap; /**< Pointer to MMIO mapped capability data */
    RTGCPHYS pGcPhysIsrCap; /**< Pointer to MMIO mapped capability data */
    RTGCPHYS pGcPhysDeviceCap; /**< Pointer to MMIO mapped capability data */

    RTGCPHYS pGcPhysQueueDesc[VIRTQ_MAX_CNT]; /**< (MMIO) PhysAdr per-Q desc structs GUEST */
    RTGCPHYS pGcPhysQueueAvail[VIRTQ_MAX_CNT]; /**< (MMIO) PhysAdr per-Q avail structs GUEST */
    RTGCPHYS pGcPhysQueueUsed[VIRTQ_MAX_CNT]; /**< (MMIO) PhysAdr per-Q used structs GUEST */
    uint16_t uQueueNotifyOff[VIRTQ_MAX_CNT]; /**< (MMIO) per-Q notify offset HOST */
    uint16_t uQueueMsixVector[VIRTQ_MAX_CNT]; /**< (MMIO) Per-queue vector for MSI-X GUEST */
    uint16_t uQueueEnable[VIRTQ_MAX_CNT]; /**< (MMIO) Per-queue enable GUEST */
    uint16_t uQueueSize[VIRTQ_MAX_CNT]; /**< (MMIO) Per-queue size HOST/GUEST */
    uint16_t uQueueSelect; /**< (MMIO) queue selector GUEST */
    uint16_t padding;
    uint64_t uDeviceFeatures; /**< (MMIO) Host features offered HOST */
    uint64_t uDriverFeatures; /**< (MMIO) Host features accepted GUEST */
    uint32_t uDeviceFeaturesSelect; /**< (MMIO) hi/lo select uDeviceFeatures GUEST */
    uint32_t uDriverFeaturesSelect; /**< (MMIO) hi/lo select uDriverFeatures GUEST */
    uint32_t uMsixConfig; /**< (MMIO) MSI-X vector GUEST */
    uint32_t uNumQueues; /**< (MMIO) Actual number of queues GUEST */
    uint8_t uDeviceStatus; /**< (MMIO) Device Status GUEST */
    uint8_t uPrevDeviceStatus; /**< (MMIO) Prev Device Status GUEST */
    uint8_t uConfigGeneration; /**< (MMIO) Device config sequencer HOST */

    VIRTQSTATE virtqState[VIRTQ_MAX_CNT]; /**< Local impl-specific queue context */
    VIRTIOCALLBACKS virtioCallbacks; /**< Callback vectors to client */

    PFNPCICONFIGREAD pfnPciConfigReadOld; /**< Prev rd. cb. intercepting PCI Cfg I/O */
    PFNPCICONFIGWRITE pfnPciConfigWriteOld; /**< Prev wr. cb. intercepting PCI Cfg I/O */

    PVIRTIO_PCI_CFG_CAP_T pPciCfgCap; /**< Pointer to struct in configuration area */
    PVIRTIO_PCI_NOTIFY_CAP_T pNotifyCap; /**< Pointer to struct in configuration area */
    PVIRTIO_PCI_CAP_T pCommonCfgCap; /**< Pointer to struct in configuration area */
    PVIRTIO_PCI_CAP_T pIsrCap; /**< Pointer to struct in configuration area */
    PVIRTIO_PCI_CAP_T pDeviceCap; /**< Pointer to struct in configuration area */

    uint32_t cbDevSpecificCfg; /**< Size of client's dev-specific config data */
    void *pDevSpecificCfg; /**< Pointer to client's struct */
    void *pPrevDevSpecificCfg; /**< Previous read dev-specific cfg of client */
    bool fGenUpdatePending; /**< If set, update cfg gen after driver reads */
    uint8_t uPciCfgDataOff;
    uint8_t uISR; /**< Interrupt Status Register. */

} VIRTIOSTATE, *PVIRTIOSTATE;

/** virtq related flags */
#define VIRTQ_DESC_F_NEXT 1 /**< Indicates this descriptor chains to next */
#define VIRTQ_DESC_F_WRITE 2 /**< Marks buffer as write-only (default ro) */
#define VIRTQ_DESC_F_INDIRECT 4 /**< Buffer is list of buffer descriptors */

#define VIRTQ_USED_F_NO_NOTIFY 1 /**< Dev to Drv: Don't notify when buf added */
#define VIRTQ_AVAIL_F_NO_INTERRUPT 1 /**< Drv to Dev: Don't notify when buf eaten */

/**
 * virtq related structs
 * (struct names follow VirtIO 1.0 spec, typedef use VBox style)
 */
typedef struct virtq_desc
{
    uint64_t pGcPhysBuf; /**< addr GC Phys. address of buffer */
    uint32_t cb; /**< len Buffer length */
    uint16_t fFlags; /**< flags Buffer specific flags */
    uint16_t uDescIdxNext; /**< next Idx set if VIRTIO_DESC_F_NEXT */
} VIRTQ_DESC_T, *PVIRTQ_DESC_T;

typedef struct virtq_avail
{
    uint16_t fFlags; /**< flags avail ring drv to dev flags */
    uint16_t uIdx; /**< idx Index of next free ring slot */
    uint16_t auRing[1]; /**< ring Ring: avail drv to dev bufs */
    uint16_t uUsedEventIdx; /**< used_event (if VIRTQ_USED_F_EVENT_IDX) */
} VIRTQ_AVAIL_T, *PVIRTQ_AVAIL_T;

typedef struct virtq_used_elem
{
    uint32_t uDescIdx; /**< idx Start of used desc chain */
    uint32_t cbElem; /**< len Total len of used desc chain */
} VIRTQ_USED_ELEM_T;

typedef struct virt_used
{
    uint16_t fFlags; /**< flags used ring host-to-guest flags */
    uint16_t uIdx; /**< idx Index of next ring slot */
    VIRTQ_USED_ELEM_T auRing[1]; /**< ring Ring: used dev to drv bufs */
    uint16_t uAvailEventIdx; /**< avail_event if (VIRTQ_USED_F_EVENT_IDX) */
} VIRTQ_USED_T, *PVIRTQ_USED_T;

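/*
 * Illustrative helpers (hypothetical names, added only as a sketch of how the structs above
 * map onto guest memory; they are not used elsewhere in this header). Per the VirtIO 1.0 spec,
 * the guest-physical footprint of a queue of N entries is 16 * N bytes for the descriptor
 * table, 4 + 2 * N (+ 2 for used_event) for the avail ring, and 4 + 8 * N (+ 2 for avail_event)
 * for the used ring, which is why the used_event/avail_event accessors further below index
 * auRing[uQueueSize] to reach the trailing event word.
 */
DECLINLINE(size_t) virtioIllustrativeDescTableSize(uint16_t cEntries)
{
    /* One 16-byte VIRTQ_DESC_T per queue entry. */
    return sizeof(VIRTQ_DESC_T) * cEntries;
}

DECLINLINE(size_t) virtioIllustrativeAvailRingSize(uint16_t cEntries)
{
    /* fFlags + uIdx + ring of 16-bit indices, plus the trailing used_event word. */
    return RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[cEntries]) + sizeof(uint16_t);
}

DECLINLINE(size_t) virtioIllustrativeUsedRingSize(uint16_t cEntries)
{
    /* fFlags + uIdx + ring of 8-byte used elements, plus the trailing avail_event word. */
    return RT_UOFFSETOF_DYN(VIRTQ_USED_T, auRing[cEntries]) + sizeof(uint16_t);
}
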
/**
 * This macro returns true if the implied parameters GCPhysAddr (address) and cb (access length)
 * are within the range of the mapped capability struct specified with the explicit parameters.
 *
 * Actual parameters:
 * @param pGcPhysCapData - [input] Pointer to MMIO mapped capability struct
 * @param pCfgCap - [input] Pointer to capability in PCI configuration area
 * @param fMatched - [output] True if GCPhysAddr is within the physically mapped capability.
 *
 * Implied parameters:
 * @param GCPhysAddr - [input, implied] Physical address accessed (via MMIO callback)
 * @param cb - [input, implied] Number of bytes to access
 */
#define MATCH_VIRTIO_CAP_STRUCT(pGcPhysCapData, pCfgCap, fMatched) \
    bool fMatched = false; \
    if (pGcPhysCapData && pCfgCap && GCPhysAddr >= (RTGCPHYS)pGcPhysCapData \
        && GCPhysAddr < ((RTGCPHYS)pGcPhysCapData + ((PVIRTIO_PCI_CAP_T)pCfgCap)->uLength) \
        && cb <= ((PVIRTIO_PCI_CAP_T)pCfgCap)->uLength) \
        fMatched = true;

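/*
 * Usage sketch (an assumption about the calling context, not code from this file): inside an
 * MMIO access callback where GCPhysAddr and cb are in scope, each capability region can be
 * matched and dispatched roughly like this:
 *
 *     MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysCommonCfg, pVirtio->pCommonCfgCap, fCommonCfg);
 *     MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysIsrCap,    pVirtio->pIsrCap,       fIsr);
 *     if (fCommonCfg)
 *         virtioCommonCfgAccessed(pVirtio, fWrite, GCPhysAddr - pVirtio->pGcPhysCommonCfg, cb, pv);
 *     else if (fIsr && !fWrite)
 *         *(uint8_t *)pv = pVirtio->uISR;
 */
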
/**
 * This macro resolves to boolean true if the implied parameters, uOffset and cb, match the
 * offset and size of a field in the Common Cfg struct (or, for a 64-bit field, if the access
 * covers either 32-bit half as a 32-bit access, as mandated by section 4.1.3.1 of the
 * VirtIO 1.0 specification).
 *
 * @param member - Member of VIRTIO_PCI_COMMON_CFG_T
 * @param uOffset - Implied parameter: Offset into VIRTIO_PCI_COMMON_CFG_T
 * @param cb - Implied parameter: Number of bytes to access
 * @result - true or false
 */
#define MATCH_COMMON_CFG(member) \
    (RT_SIZEOFMEMB(VIRTIO_PCI_COMMON_CFG_T, member) == 8 \
     && (   uOffset == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member) \
         || uOffset == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member) + sizeof(uint32_t)) \
     && cb == sizeof(uint32_t)) \
    || (uOffset == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member) \
        && cb == RT_SIZEOFMEMB(VIRTIO_PCI_COMMON_CFG_T, member))

#define LOG_COMMON_CFG_ACCESS(member) \
    virtioLogMappedIoValue(__FUNCTION__, #member, RT_SIZEOFMEMB(VIRTIO_PCI_COMMON_CFG_T, member), \
                           pv, cb, uIntraOff, fWrite, false, 0);

#define LOG_COMMON_CFG_ACCESS_INDEXED(member, idx) \
    virtioLogMappedIoValue(__FUNCTION__, #member, RT_SIZEOFMEMB(VIRTIO_PCI_COMMON_CFG_T, member), \
                           pv, cb, uIntraOff, fWrite, true, idx);

#define COMMON_CFG_ACCESSOR(member) \
    do \
    { \
        uint32_t uIntraOff = uOffset - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member); \
        if (fWrite) \
            memcpy(((char *)&pVirtio->member) + uIntraOff, (const char *)pv, cb); \
        else \
            memcpy((char *)pv, (const char *)(((char *)&pVirtio->member) + uIntraOff), cb); \
        LOG_COMMON_CFG_ACCESS(member); \
    } while(0)

#define COMMON_CFG_ACCESSOR_INDEXED(member, idx) \
    do \
    { \
        uint32_t uIntraOff = uOffset - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member); \
        if (fWrite) \
            memcpy(((char *)(pVirtio->member + idx)) + uIntraOff, (const char *)pv, cb); \
        else \
            memcpy((char *)pv, (const char *)(((char *)(pVirtio->member + idx)) + uIntraOff), cb); \
        LOG_COMMON_CFG_ACCESS_INDEXED(member, idx); \
    } while(0)

#define COMMON_CFG_ACCESSOR_READONLY(member) \
    do \
    { \
        uint32_t uIntraOff = uOffset - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member); \
        if (fWrite) \
            LogFunc(("Guest attempted to write readonly virtio_pci_common_cfg.%s\n", #member)); \
        else \
        { \
            memcpy((char *)pv, (const char *)(((char *)&pVirtio->member) + uIntraOff), cb); \
            LOG_COMMON_CFG_ACCESS(member); \
        } \
    } while(0)

#define COMMON_CFG_ACCESSOR_INDEXED_READONLY(member, idx) \
    do \
    { \
        uint32_t uIntraOff = uOffset - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member); \
        if (fWrite) \
            LogFunc(("Guest attempted to write readonly virtio_pci_common_cfg.%s[%d]\n", #member, idx)); \
        else \
        { \
            memcpy((char *)pv, ((char *)(pVirtio->member + idx)) + uIntraOff, cb); \
            LOG_COMMON_CFG_ACCESS_INDEXED(member, idx); \
        } \
    } while(0)

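/*
 * Dispatch sketch (an assumption about how the macros above combine, not code from this file):
 * virtioCommonCfgAccessed() is expected to pair MATCH_COMMON_CFG() with the corresponding
 * accessor, with uOffset, cb, fWrite, pv and pVirtio in scope, e.g.:
 *
 *     if (MATCH_COMMON_CFG(uDeviceFeaturesSelect))
 *         COMMON_CFG_ACCESSOR(uDeviceFeaturesSelect);
 *     else if (MATCH_COMMON_CFG(uNumQueues))
 *         COMMON_CFG_ACCESSOR_READONLY(uNumQueues);
 *     else if (MATCH_COMMON_CFG(uQueueSize))
 *         COMMON_CFG_ACCESSOR_INDEXED(uQueueSize, pVirtio->uQueueSelect);
 *     else
 *         LogFunc(("Bad guest access to virtio_pci_common_cfg: uOffset=%d, cb=%d\n", (int)uOffset, (int)cb));
 */
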
#define DRIVER_OK(pVirtio) (pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK)

/**
 * Internal queue operations
 */

static int      virtqIsEventNeeded       (uint16_t uEventIdx, uint16_t uDescIdxNew, uint16_t uDescIdxOld);
static bool     virtqIsEmpty             (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static void     virtioReadDesc           (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t uDescIdx, PVIRTQ_DESC_T pDesc);
static uint16_t virtioReadAvailDescIdx   (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t availIdx);
static uint16_t virtioReadAvailRingIdx   (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static uint16_t virtioReadAvailFlags     (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static uint16_t virtioReadAvailUsedEvent (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static void     virtioWriteUsedElem      (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t usedIdx, uint32_t uDescIdx, uint32_t uLen);
static void     virtioWriteUsedRingIdx   (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint16_t uDescIdx);
static uint16_t virtioReadUsedRingIdx    (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static uint16_t virtioReadUsedFlags      (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static void     virtioWriteUsedFlags     (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t fFlags);
static uint16_t virtioReadUsedAvailEvent (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static void     virtioWriteUsedAvailEvent(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t uAvailEventIdx);


DECLINLINE(int) virtqIsEventNeeded(uint16_t uEventIdx, uint16_t uDescIdxNew, uint16_t uDescIdxOld)
{
    return (uint16_t)(uDescIdxNew - uEventIdx - 1) < (uint16_t)(uDescIdxNew - uDescIdxOld);
}

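/*
 * Worked example (illustrative values, not from this file): with uEventIdx = 5, uDescIdxOld = 5
 * and uDescIdxNew = 7, the test is (uint16_t)(7 - 5 - 1) = 1 < (uint16_t)(7 - 5) = 2, i.e. true:
 * an event is needed because uEventIdx falls within the window of newly processed indices
 * (uDescIdxOld .. uDescIdxNew - 1). The uint16_t casts keep the comparison correct across
 * 16-bit index wrap-around, as required for VIRTIO_F_EVENT_IDX notification suppression.
 */
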
DECLINLINE(bool) virtqIsEmpty(PVIRTIOSTATE pVirtio, uint16_t qIdx)
{
    return virtioReadAvailRingIdx(pVirtio, qIdx) == pVirtio->virtqState[qIdx].uAvailIdx;
}

/**
 * Accessor for virtq descriptor
 */
DECLINLINE(void) virtioReadDesc(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t uDescIdx, PVIRTQ_DESC_T pDesc)
{
    //Log(("%s virtioQueueReadDesc: ring=%p idx=%u\n", INSTANCE(pState), pVirtQ, idx));
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->pGcPhysQueueDesc[qIdx]
                        + sizeof(VIRTQ_DESC_T) * (uDescIdx % pVirtio->uQueueSize[qIdx]),
                      pDesc, sizeof(VIRTQ_DESC_T));
}

/**
 * Accessors for virtq avail ring
 */
DECLINLINE(uint16_t) virtioReadAvailDescIdx(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t availIdx)
{
    uint16_t uDescIdx;
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->pGcPhysQueueAvail[qIdx]
                        + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[availIdx % pVirtio->uQueueSize[qIdx]]),
                      &uDescIdx, sizeof(uDescIdx));
    return uDescIdx;
}

DECLINLINE(uint16_t) virtioReadAvailRingIdx(PVIRTIOSTATE pVirtio, uint16_t qIdx)
{
    uint16_t uIdx = 0;
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->pGcPhysQueueAvail[qIdx] + RT_UOFFSETOF(VIRTQ_AVAIL_T, uIdx),
                      &uIdx, sizeof(uIdx));
    return uIdx;
}

DECLINLINE(uint16_t) virtioReadAvailFlags(PVIRTIOSTATE pVirtio, uint16_t qIdx)
{
    uint16_t fFlags;
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->pGcPhysQueueAvail[qIdx] + RT_UOFFSETOF(VIRTQ_AVAIL_T, fFlags),
                      &fFlags, sizeof(fFlags));
    return fFlags;
}

DECLINLINE(uint16_t) virtioReadAvailUsedEvent(PVIRTIOSTATE pVirtio, uint16_t qIdx)
{
    uint16_t uUsedEventIdx;
    /** VirtIO 1.0 uUsedEventIdx (used_event) immediately follows ring */
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->pGcPhysQueueAvail[qIdx]
                        + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[pVirtio->uQueueSize[qIdx]]),
                      &uUsedEventIdx, sizeof(uUsedEventIdx));
    return uUsedEventIdx;
}

/**
 * Accessors for virtq used ring
 */
DECLINLINE(void) virtioWriteUsedElem(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t usedIdx, uint32_t uDescIdx, uint32_t uLen)
{
    VIRTQ_USED_ELEM_T elem = { uDescIdx, uLen };
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPCIPhysWrite(pVirtio->CTX_SUFF(pDevIns),
                          pVirtio->pGcPhysQueueUsed[qIdx]
                            + RT_UOFFSETOF_DYN(VIRTQ_USED_T, auRing[usedIdx % pVirtio->uQueueSize[qIdx]]),
                          &elem, sizeof(elem));
}

DECLINLINE(void) virtioWriteUsedRingIdx(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint16_t uIdx)
{
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPCIPhysWrite(pVirtio->CTX_SUFF(pDevIns),
                          pVirtio->pGcPhysQueueUsed[qIdx] + RT_UOFFSETOF(VIRTQ_USED_T, uIdx),
                          &uIdx, sizeof(uIdx));
}

DECLINLINE(uint16_t) virtioReadUsedRingIdx(PVIRTIOSTATE pVirtio, uint16_t qIdx)
{
    uint16_t uIdx;
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->pGcPhysQueueUsed[qIdx] + RT_UOFFSETOF(VIRTQ_USED_T, uIdx),
                      &uIdx, sizeof(uIdx));
    return uIdx;
}

DECLINLINE(uint16_t) virtioReadUsedFlags(PVIRTIOSTATE pVirtio, uint16_t qIdx)
{
    uint16_t fFlags;
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->pGcPhysQueueUsed[qIdx] + RT_UOFFSETOF(VIRTQ_USED_T, fFlags),
                      &fFlags, sizeof(fFlags));
    return fFlags;
}

DECLINLINE(void) virtioWriteUsedFlags(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t fFlags)
{
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPCIPhysWrite(pVirtio->CTX_SUFF(pDevIns),
                          pVirtio->pGcPhysQueueUsed[qIdx] + RT_UOFFSETOF(VIRTQ_USED_T, fFlags),
                          &fFlags, sizeof(fFlags));
}

DECLINLINE(uint16_t) virtioReadUsedAvailEvent(PVIRTIOSTATE pVirtio, uint16_t qIdx)
{
    uint16_t uAvailEventIdx;
    /** VirtIO 1.0 uAvailEventIdx (avail_event) immediately follows ring */
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->pGcPhysQueueUsed[qIdx]
                        + RT_UOFFSETOF_DYN(VIRTQ_USED_T, auRing[pVirtio->uQueueSize[qIdx]]),
                      &uAvailEventIdx, sizeof(uAvailEventIdx));
    return uAvailEventIdx;
}

DECLINLINE(void) virtioWriteUsedAvailEvent(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t uAvailEventIdx)
{
    /** VirtIO 1.0 uAvailEventIdx (avail_event) immediately follows ring */
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPCIPhysWrite(pVirtio->CTX_SUFF(pDevIns),
                          pVirtio->pGcPhysQueueUsed[qIdx]
                            + RT_UOFFSETOF_DYN(VIRTQ_USED_T, auRing[pVirtio->uQueueSize[qIdx]]),
                          &uAvailEventIdx, sizeof(uAvailEventIdx));
}


/**
 * Makes the MMIO-mapped Virtio uDeviceStatus registers non-cryptic
 */
DECLINLINE(void) virtioLogDeviceStatus(uint8_t status)
{
    if (status == 0)
        Log6(("RESET"));
    else
    {
        int primed = 0;
        if (status & VIRTIO_STATUS_ACKNOWLEDGE)
            Log6(("ACKNOWLEDGE", primed++));
        if (status & VIRTIO_STATUS_DRIVER)
            Log6(("%sDRIVER", primed++ ? " | " : ""));
        if (status & VIRTIO_STATUS_FEATURES_OK)
            Log6(("%sFEATURES_OK", primed++ ? " | " : ""));
        if (status & VIRTIO_STATUS_DRIVER_OK)
            Log6(("%sDRIVER_OK", primed++ ? " | " : ""));
        if (status & VIRTIO_STATUS_FAILED)
            Log6(("%sFAILED", primed++ ? " | " : ""));
        if (status & VIRTIO_STATUS_DEVICE_NEEDS_RESET)
            Log6(("%sNEEDS_RESET", primed++ ? " | " : ""));
        (void)primed;
    }
}

static int      virtioCommonCfgAccessed (PVIRTIOSTATE pVirtio, int fWrite, off_t uOffset, unsigned cb, void const *pv);
static void     virtioResetQueue        (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static void     virtioNotifyGuestDriver (PVIRTIOSTATE pVirtio, uint16_t qIdx, bool fForce);
static int      virtioRaiseInterrupt    (PVIRTIOSTATE pVirtio, uint8_t uCause, bool fForce);
static void     virtioLowerInterrupt    (PVIRTIOSTATE pVirtio);
static void     virtioQueueNotified     (PVIRTIOSTATE pVirtio, uint16_t qidx, uint16_t uDescIdx);
static void     virtioGuestResetted     (PVIRTIOSTATE pVirtio);

static DECLCALLBACK(int) virtioR3SaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM);
static DECLCALLBACK(int) virtioR3LoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
static DECLCALLBACK(int) virtioR3LoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM);
static DECLCALLBACK(int) virtioR3LiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass);

#endif /* !VBOX_INCLUDED_SRC_VirtIO_Virtio_1_0_impl_h */