VirtualBox

source: vbox/trunk/src/VBox/Devices/VirtIO/Virtio_1_0_impl.h@ 80383

Last change on this file since 80383 was 80383, checked in by vboxsync, 5 years ago

Storage/DevVirtioSCSI.cpp: Got notification and initial worker thread scheme implemented. Structured queue transition into device code, and reading and parsing controlq header and request queue header. See bugref:9440 Comment 56 for more information.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 30.4 KB
Line 
1/* $Id: Virtio_1_0_impl.h 80383 2019-08-22 07:25:38Z vboxsync $ $Revision: 80383 $ $Date: 2019-08-22 07:25:38 +0000 (Thu, 22 Aug 2019) $ $Author: vboxsync $ */
2/** @file
3 * Virtio_1_0_impl.h - Virtio Declarations
4 */
5
6/*
7 * Copyright (C) 2009-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifndef VBOX_INCLUDED_SRC_VirtIO_Virtio_1_0_impl_h
19#define VBOX_INCLUDED_SRC_VirtIO_Virtio_1_0_impl_h
20#ifndef RT_WITHOUT_PRAGMA_ONCE
21# pragma once
22#endif
23
24#include "Virtio_1_0.h"
25
/** @name Saved state versions.
 * The saved state version is changed if either common or any of specific
 * parts are changed. That is, it is perfectly possible that the version
 * of saved vnet state will increase as a result of change in vblk structure
 * for example.
 * @{ */
#define VIRTIO_SAVEDSTATE_VERSION               1
/** @} */

#define VIRTIO_F_VERSION_1                      RT_BIT_64(32)  /**< Required feature bit for 1.0 devices      */

#define VIRTIO_F_INDIRECT_DESC                  RT_BIT_64(28)  /**< Allow descs to point to list of descs     */
#define VIRTIO_F_EVENT_IDX                      RT_BIT_64(29)  /**< Allow notification disable for n elems    */
#define VIRTIO_F_RING_INDIRECT_DESC             RT_BIT_64(28)  /**< Doc bug: Goes under two names in spec     */
#define VIRTIO_F_RING_EVENT_IDX                 RT_BIT_64(29)  /**< Doc bug: Goes under two names in spec     */

/** Device-independent feature set this implementation offers to the guest driver. */
#define VIRTIO_DEV_INDEPENDENT_FEATURES_OFFERED ( VIRTIO_F_EVENT_IDX ) /**< TBD: Add VIRTIO_F_INDIRECT_DESC    */

#define VIRTIO_ISR_VIRTQ_INTERRUPT              RT_BIT_32(0)   /**< Virtq interrupt bit of ISR register       */
#define VIRTIO_ISR_DEVICE_CONFIG                RT_BIT_32(1)   /**< Device configuration changed bit of ISR   */
#define DEVICE_PCI_VENDOR_ID_VIRTIO             0x1AF4         /**< Guest driver locates dev via (mandatory)  */
#define DEVICE_PCI_REVISION_ID_VIRTIO           1              /**< VirtIO 1.0 non-transitional drivers >= 1  */

/** Reserved (*negotiated*) Feature Bits (e.g. device independent features, VirtIO 1.0 spec, section 6) */

#define VIRTIO_MSI_NO_VECTOR                    0xffff         /**< Vector value to disable MSI for queue     */

/** @name Device Status field constants (from Virtio 1.0 spec)
 * @{ */
#define VIRTIO_STATUS_ACKNOWLEDGE               0x01           /**< Guest driver: Located this VirtIO device  */
#define VIRTIO_STATUS_DRIVER                    0x02           /**< Guest driver: Can drive this VirtIO dev.  */
#define VIRTIO_STATUS_DRIVER_OK                 0x04           /**< Guest driver: Driver set-up and ready     */
#define VIRTIO_STATUS_FEATURES_OK               0x08           /**< Guest driver: Feature negotiation done    */
#define VIRTIO_STATUS_FAILED                    0x80           /**< Guest driver: Fatal error, gave up        */
#define VIRTIO_STATUS_DEVICE_NEEDS_RESET        0x40           /**< Device experienced unrecoverable error    */
/** @} */

/** @name Virtio Device PCI Capabilities type codes ("cfg_type" in the spec)
 * @{ */
#define VIRTIO_PCI_CAP_COMMON_CFG               1              /**< Common configuration PCI capability ID    */
#define VIRTIO_PCI_CAP_NOTIFY_CFG               2              /**< Notification area PCI capability ID       */
#define VIRTIO_PCI_CAP_ISR_CFG                  3              /**< ISR PCI capability id                     */
#define VIRTIO_PCI_CAP_DEVICE_CFG               4              /**< Device-specific PCI cfg capability ID     */
#define VIRTIO_PCI_CAP_PCI_CFG                  5              /**< PCI CFG capability ID                     */
/** @} */

#define VIRTIO_PCI_CAP_ID_VENDOR                0x09           /**< Vendor-specific PCI CFG Device Cap. ID    */
69
/**
 * The following is the PCI capability struct common to all VirtIO capability types.
 * Capabilities of this layout are chained into the device's PCI capability list;
 * uCfgType (one of VIRTIO_PCI_CAP_*_CFG) identifies which structure the
 * BAR/offset/length triple locates.
 */
typedef struct virtio_pci_cap
{
    /* All little-endian */
    uint8_t   uCapVndr;                     /**< Generic PCI field: PCI_CAP_ID_VNDR        */
    uint8_t   uCapNext;                     /**< Generic PCI field: next ptr.              */
    uint8_t   uCapLen;                      /**< Generic PCI field: capability length      */
    uint8_t   uCfgType;                     /**< Identifies the structure.                 */
    uint8_t   uBar;                         /**< Where to find it.                         */
    uint8_t   uPadding[3];                  /**< Pad to full dword.                        */
    uint32_t  uOffset;                      /**< Offset within bar.  (L.E.)                */
    uint32_t  uLength;                      /**< Length of struct, in bytes. (L.E.)        */
} VIRTIO_PCI_CAP_T, *PVIRTIO_PCI_CAP_T;
85
/**
 * Translation of the descriptor chain associated with one element of virtq avail ring into its
 * IN and OUT components and represented as respective arrays of SG segments.
 *
 * NOTE(review): with two VIRTQ_MAX_SIZE segment arrays inline this struct is large;
 * VIRTQ_PROXY_T refers to it through a pointer (pDescChain) rather than embedding it.
 */
typedef struct VIRTQ_DESC_CHAIN             /**< Describes a single queue element          */
{
    RTSGSEG   aSegsIn[VIRTQ_MAX_SIZE];      /**< List of segments to write to guest        */
    RTSGSEG   aSegsOut[VIRTQ_MAX_SIZE];     /**< List of segments read from guest          */
    uint32_t  uHeadIdx;                     /**< Index at head desc (source of seg arrays) */
    uint32_t  cSegsIn;                      /**< Count of segments in aSegsIn[]            */
    uint32_t  cSegsOut;                     /**< Count of segments in aSegsOut[]           */
} VIRTQ_DESC_CHAIN_T, *PVIRTQ_DESC_CHAIN_T;
98
/**
 * Local implementation's usage context of a queue (e.g. not part of VirtIO specification).
 * One instance per queue lives in VIRTIOSTATE::virtqProxy[].
 */
typedef struct VIRTQ_PROXY
{
    RTSGBUF             inSgBuf;                  /**< host-to-guest buffers                     */
    RTSGBUF             outSgBuf;                 /**< guest-to-host buffers                     */
    const char          szVirtqName[32];          /**< Dev-specific name of queue.
                                                   *   NOTE(review): const array member prevents
                                                   *   whole-struct assignment — confirm intent  */
    uint16_t            uAvailIdx;                /**< Consumer's position in avail ring         */
    uint16_t            uUsedIdx;                 /**< Consumer's position in used ring          */
    bool                fEventThresholdReached;   /**< Don't lose track while queueing ahead     */
    PVIRTQ_DESC_CHAIN_T pDescChain;               /**< Per-queue s/g data.                       */
} VIRTQ_PROXY_T, *PVIRTQ_PROXY_T;
112
/**
 * VirtIO 1.0 Capabilities' related MMIO-mapped structs:
 *
 * Note: virtio_pci_device_cap is dev-specific, implemented by client. Definition unknown here.
 */
typedef struct virtio_pci_common_cfg
{
    /* Per device fields */
    uint32_t  uDeviceFeaturesSelect;        /**< RW (driver selects device features)       */
    uint32_t  uDeviceFeatures;              /**< RO (device reports features to driver)    */
    uint32_t  uDriverFeaturesSelect;        /**< RW (driver selects driver features)       */
    uint32_t  uDriverFeatures;              /**< RW (driver-accepted device features)      */
    uint16_t  uMsixConfig;                  /**< RW (driver sets MSI-X config vector)      */
    uint16_t  uNumQueues;                   /**< RO (device specifies max queues)          */
    uint8_t   uDeviceStatus;                /**< RW (driver writes device status, 0=reset) */
    uint8_t   uConfigGeneration;            /**< RO (device changes when changing configs) */

    /* Per virtqueue fields (as determined by uQueueSelect) */
    uint16_t  uQueueSelect;                 /**< RW (selects queue focus for these fields) */
    uint16_t  uQueueSize;                   /**< RW (queue size, 0 - 2^n)                  */
    uint16_t  uQueueMsixVector;             /**< RW (driver selects MSI-X queue vector)    */
    uint16_t  uQueueEnable;                 /**< RW (driver controls usability of queue)   */
    uint16_t  uQueueNotifyOff;              /**< RO (offset into virtqueue; see spec)      */
    uint64_t  pGcPhysQueueDesc;             /**< RW (driver writes desc table phys addr)   */
    uint64_t  pGcPhysQueueAvail;            /**< RW (driver writes avail ring phys addr)   */
    uint64_t  pGcPhysQueueUsed;             /**< RW (driver writes used ring phys addr)    */
} VIRTIO_PCI_COMMON_CFG_T, *PVIRTIO_PCI_COMMON_CFG_T;
140
/** Notify capability: the generic cap plus the per-queue notify-offset multiplier. */
typedef struct virtio_pci_notify_cap
{
    struct virtio_pci_cap pciCap;           /**< Notification MMIO mapping capability      */
    uint32_t uNotifyOffMultiplier;          /**< notify_off_multiplier                     */
} VIRTIO_PCI_NOTIFY_CAP_T, *PVIRTIO_PCI_NOTIFY_CAP_T;
146
/** PCI_CFG capability: alternate access window into a BAR via PCI config space. */
typedef struct virtio_pci_cfg_cap
{
    struct virtio_pci_cap pciCap;           /**< Cap. defines the BAR/off/len to access    */
    uint8_t uPciCfgData[4];                 /**< I/O buf for above cap.                    */
} VIRTIO_PCI_CFG_CAP_T, *PVIRTIO_PCI_CFG_CAP_T;
152
/**
 * The core (/common) state of the VirtIO PCI device
 *
 * @implements PDMILEDPORTS
 */
typedef struct VIRTIOSTATE
{
    PDMPCIDEV                  dev;                              /**< PCI device                                */
    char                       szInstance[16];                   /**< Instance name, e.g. "VIRTIOSCSI0"         */
    void *                     pClientContext;                   /**< Client callback returned on callbacks     */

    PPDMDEVINSR3               pDevInsR3;                        /**< Device instance - R3                      */
    PPDMDEVINSR0               pDevInsR0;                        /**< Device instance - R0                      */
    PPDMDEVINSRC               pDevInsRC;                        /**< Device instance - RC                      */

    RTGCPHYS                   pGcPhysPciCapBase;                /**< Pointer to MMIO mapped capability data    */
    RTGCPHYS                   pGcPhysCommonCfg;                 /**< Pointer to MMIO mapped capability data    */
    RTGCPHYS                   pGcPhysNotifyCap;                 /**< Pointer to MMIO mapped capability data    */
    RTGCPHYS                   pGcPhysIsrCap;                    /**< Pointer to MMIO mapped capability data    */
    RTGCPHYS                   pGcPhysDeviceCap;                 /**< Pointer to MMIO mapped capability data    */

    RTGCPHYS                   pGcPhysQueueDesc[VIRTQ_MAX_CNT];  /**< (MMIO) PhysAdr per-Q desc structs   GUEST */
    RTGCPHYS                   pGcPhysQueueAvail[VIRTQ_MAX_CNT]; /**< (MMIO) PhysAdr per-Q avail structs  GUEST */
    RTGCPHYS                   pGcPhysQueueUsed[VIRTQ_MAX_CNT];  /**< (MMIO) PhysAdr per-Q used structs   GUEST */
    uint16_t                   uQueueNotifyOff[VIRTQ_MAX_CNT];   /**< (MMIO) per-Q notify offset           HOST */
    uint16_t                   uQueueMsixVector[VIRTQ_MAX_CNT];  /**< (MMIO) Per-queue vector for MSI-X   GUEST */
    uint16_t                   uQueueEnable[VIRTQ_MAX_CNT];      /**< (MMIO) Per-queue enable             GUEST */
    uint16_t                   uQueueSize[VIRTQ_MAX_CNT];        /**< (MMIO) Per-queue size          HOST/GUEST */
    uint16_t                   uQueueSelect;                     /**< (MMIO) queue selector               GUEST */
    uint16_t                   padding;                          /**< Alignment filler (no semantics)           */
    uint64_t                   uDeviceFeatures;                  /**< (MMIO) Host features offered         HOST */
    uint64_t                   uDriverFeatures;                  /**< (MMIO) Host features accepted       GUEST */
    uint32_t                   uDeviceFeaturesSelect;            /**< (MMIO) hi/lo select uDeviceFeatures GUEST */
    uint32_t                   uDriverFeaturesSelect;            /**< (MMIO) hi/lo select uDriverFeatures GUEST */
    uint32_t                   uMsixConfig;                      /**< (MMIO) MSI-X vector                 GUEST */
    uint32_t                   uNumQueues;                       /**< (MMIO) Actual number of queues      GUEST */
    uint8_t                    uDeviceStatus;                    /**< (MMIO) Device Status                GUEST */
    uint8_t                    uPrevDeviceStatus;                /**< (MMIO) Prev Device Status           GUEST */
    uint8_t                    uConfigGeneration;                /**< (MMIO) Device config sequencer       HOST */

    VIRTQ_PROXY_T              virtqProxy[VIRTQ_MAX_CNT];        /**< Local impl-specific queue context         */
    VIRTIOCALLBACKS            virtioCallbacks;                  /**< Callback vectors to client                */

    PFNPCICONFIGREAD           pfnPciConfigReadOld;              /**< Prev rd. cb. intercepting PCI Cfg I/O     */
    PFNPCICONFIGWRITE          pfnPciConfigWriteOld;             /**< Prev wr. cb. intercepting PCI Cfg I/O     */

    PVIRTIO_PCI_CFG_CAP_T      pPciCfgCap;                       /**< Pointer to struct in configuration area   */
    PVIRTIO_PCI_NOTIFY_CAP_T   pNotifyCap;                       /**< Pointer to struct in configuration area   */
    PVIRTIO_PCI_CAP_T          pCommonCfgCap;                    /**< Pointer to struct in configuration area   */
    PVIRTIO_PCI_CAP_T          pIsrCap;                          /**< Pointer to struct in configuration area   */
    PVIRTIO_PCI_CAP_T          pDeviceCap;                       /**< Pointer to struct in configuration area   */

    uint32_t                   cbDevSpecificCfg;                 /**< Size of client's dev-specific config data */
    void                      *pDevSpecificCfg;                  /**< Pointer to client's struct                */
    void                      *pPrevDevSpecificCfg;              /**< Previous read dev-specific cfg of client  */
    bool                       fGenUpdatePending;                /**< If set, update cfg gen after driver reads */
    uint8_t                    uPciCfgDataOff;                   /**< presumably offset of uPciCfgData window in
                                                                  *   the PCI_CFG capability — TODO confirm     */
    uint8_t                    uISR;                             /**< Interrupt Status Register.                */

} VIRTIOSTATE, *PVIRTIOSTATE;
213
/** @name virtq related flags
 * @{ */
#define VIRTQ_DESC_F_NEXT                   1   /**< Indicates this descriptor chains to next  */
#define VIRTQ_DESC_F_WRITE                  2   /**< Marks buffer as write-only (default ro)   */
#define VIRTQ_DESC_F_INDIRECT               4   /**< Buffer is list of buffer descriptors      */

#define VIRTQ_USED_F_NO_NOTIFY              1   /**< Dev to Drv: Don't notify when buf added   */
#define VIRTQ_AVAIL_F_NO_INTERRUPT          1   /**< Drv to Dev: Don't notify when buf eaten   */
/** @} */
221
/**
 * virtq related structs
 * (struct names follow VirtIO 1.0 spec, typedef use VBox style)
 */
typedef struct virtq_desc
{
    uint64_t  pGcPhysBuf;                   /**< addr       GC Phys. address of buffer     */
    uint32_t  cb;                           /**< len        Buffer length                  */
    uint16_t  fFlags;                       /**< flags      Buffer specific flags          */
    uint16_t  uDescIdxNext;                 /**< next       Idx set if VIRTIO_DESC_F_NEXT  */
} VIRTQ_DESC_T, *PVIRTQ_DESC_T;
233
typedef struct virtq_avail
{
    uint16_t  fFlags;                       /**< flags      avail ring drv to dev flags    */
    uint16_t  uDescIdx;                     /**< idx        Index of next free ring slot   */
    uint16_t  auRing[1];                    /**< ring       Ring: avail drv to dev bufs.
                                             *   Placeholder [1]; real length is the queue
                                             *   size — accessors use RT_UOFFSETOF_DYN     */
    uint16_t  uUsedEventIdx;                /**< used_event (if VIRTQ_USED_F_EVENT_IDX).
                                             *   Declared offset only valid for a 1-entry
                                             *   ring; located dynamically in practice     */
} VIRTQ_AVAIL_T, *PVIRTQ_AVAIL_T;
241
typedef struct virtq_used_elem
{
    uint32_t  uDescIdx;                     /**< idx        Start of used desc chain       */
    uint32_t  cbElem;                       /**< len        Total len of used desc chain   */
} VIRTQ_USED_ELEM_T;
247
/* NOTE(review): struct tag "virt_used" looks like a typo for "virtq_used"; the
 * typedef names below are what the rest of the code uses, so the tag is harmless. */
typedef struct virt_used
{
    uint16_t  fFlags;                       /**< flags      used ring host-to-guest flags  */
    uint16_t  uDescIdx;                     /**< idx        Index of next ring slot        */
    VIRTQ_USED_ELEM_T auRing[1];            /**< ring       Ring: used dev to drv bufs.
                                             *   Placeholder [1]; see VIRTQ_AVAIL_T note   */
    uint16_t  uAvailEventIdx;               /**< avail_event if (VIRTQ_USED_F_EVENT_IDX)   */
} VIRTQ_USED_T, *PVIRTQ_USED_T;
255
/**
 * This macro declares a local bool variable (named by the fMatched argument) set to
 * true if the implied physical address and access length fall entirely within the
 * MMIO-mapped capability struct.
 *
 * Actual parameters:
 * @param pGcPhysCapData - [input]  Pointer to MMIO mapped capability struct
 * @param pCfgCap        - [input]  Pointer to capability in PCI configuration area
 * @param fMatched       - [output] Name of the bool variable this macro declares
 *
 * Implied parameters:
 * @param GCPhysAddr     - [input, implied] Physical address accessed (via MMIO callback)
 * @param cb             - [input, implied] Number of bytes to access
 */
#define MATCH_VIRTIO_CAP_STRUCT(pGcPhysCapData, pCfgCap, fMatched) \
        bool fMatched = (pGcPhysCapData) && (pCfgCap) \
            && GCPhysAddr >= (RTGCPHYS)(pGcPhysCapData) \
            && GCPhysAddr <  ((RTGCPHYS)(pGcPhysCapData) + ((PVIRTIO_PCI_CAP_T)(pCfgCap))->uLength) \
            && cb <= ((PVIRTIO_PCI_CAP_T)(pCfgCap))->uLength;
274
/**
 * This macro resolves to boolean true if uOffset matches a field offset and size exactly,
 * (or if it is a 64-bit field, if it accesses either 32-bit part as a 32-bit access)
 * This is mandated by section 4.1.3.1 of the VirtIO 1.0 specification.
 *
 * The entire expansion is parenthesized (macro-hygiene fix: the previous unparenthesized
 * "A || B" form mis-bound when the macro was combined with && / || / ?: at the use site).
 *
 * @param member  - Member of VIRTIO_PCI_COMMON_CFG_T
 * @param uOffset - Implied parameter: Offset into VIRTIO_PCI_COMMON_CFG_T
 * @param cb      - Implied parameter: Number of bytes to access
 * @result        - true or false
 */
#define MATCH_COMMON_CFG(member) \
    (   (   RT_SIZEOFMEMB(VIRTIO_PCI_COMMON_CFG_T, member) == 8 \
         && (   uOffset == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member) \
             || uOffset == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member) + sizeof(uint32_t)) \
         && cb == sizeof(uint32_t)) \
     || (   uOffset == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member) \
         && cb == RT_SIZEOFMEMB(VIRTIO_PCI_COMMON_CFG_T, member)))
292
/** Log a guest access to a common-cfg field (implied params: pv, cb, uIntraOff, fWrite). */
#define LOG_COMMON_CFG_ACCESS(member) \
            virtioLogMappedIoValue(__FUNCTION__, #member, RT_SIZEOFMEMB(VIRTIO_PCI_COMMON_CFG_T, member), \
                                   pv, cb, uIntraOff, fWrite, false, 0);

/** Same as LOG_COMMON_CFG_ACCESS, tagging the log line with an array index. */
#define LOG_COMMON_CFG_ACCESS_INDEXED(member, idx) \
            virtioLogMappedIoValue(__FUNCTION__, #member, RT_SIZEOFMEMB(VIRTIO_PCI_COMMON_CFG_T, member), \
                                   pv, cb, uIntraOff, fWrite, true, idx);
300
/** Read/write accessor for a scalar VIRTIOSTATE member mirroring a common-cfg field.
 *  Implied params: uOffset, fWrite, pv, cb, pVirtio.  Supports partial-width accesses
 *  via uIntraOff.  NOTE(review): brace-block macro (not do/while) because the enclosing
 *  uses rely on plain statement context — confirm before restructuring. */
#define COMMON_CFG_ACCESSOR(member) \
    { \
        uint32_t uIntraOff = uOffset - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member); \
        if (fWrite) \
            memcpy(((char *)&pVirtio->member) + uIntraOff, (const char *)pv, cb); \
        else \
            memcpy((char *)pv, (const char *)(((char *)&pVirtio->member) + uIntraOff), cb); \
        LOG_COMMON_CFG_ACCESS(member); \
    }

/** As COMMON_CFG_ACCESSOR, but for per-queue array members selected by idx. */
#define COMMON_CFG_ACCESSOR_INDEXED(member, idx) \
    { \
        uint32_t uIntraOff = uOffset - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member); \
        if (fWrite) \
            memcpy(((char *)(pVirtio->member + idx)) + uIntraOff, (const char *)pv, cb); \
        else \
            memcpy((char *)pv, (const char *)(((char *)(pVirtio->member + idx)) + uIntraOff), cb); \
        LOG_COMMON_CFG_ACCESS_INDEXED(member, idx); \
    }
320
/** Read-only variant of COMMON_CFG_ACCESSOR: guest writes are logged and dropped. */
#define COMMON_CFG_ACCESSOR_READONLY(member) \
    { \
        uint32_t uIntraOff = uOffset - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member); \
        if (fWrite) \
            LogFunc(("Guest attempted to write readonly virtio_pci_common_cfg.%s\n", #member)); \
        else \
        { \
            memcpy((char *)pv, (const char *)(((char *)&pVirtio->member) + uIntraOff), cb); \
            LOG_COMMON_CFG_ACCESS(member); \
        } \
    }

/** Read-only variant of COMMON_CFG_ACCESSOR_INDEXED: guest writes logged and dropped. */
#define COMMON_CFG_ACCESSOR_INDEXED_READONLY(member, idx) \
    { \
        uint32_t uIntraOff = uOffset - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member); \
        if (fWrite) \
            LogFunc(("Guest attempted to write readonly virtio_pci_common_cfg.%s[%d]\n", #member, idx)); \
        else \
        { \
            memcpy((char *)pv, ((char *)(pVirtio->member + idx)) + uIntraOff, cb); \
            LOG_COMMON_CFG_ACCESS_INDEXED(member, idx); \
        } \
    }
344
345#define DRIVER_OK(pVirtio) (pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK)
346
/**
 * Internal queue operations (forward declarations).
 *
 * NOTE(review): declared 'static' but defined below with DECLINLINE(); this matches
 * only if DECLINLINE expands to 'static inline' in this build — confirm.
 */

static int      vqIsEventNeeded        (uint16_t uEventIdx, uint16_t uDescIdxNew, uint16_t uDescIdxOld);
static bool     vqIsEmpty              (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static void     vqReadDesc             (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t uDescIdx, PVIRTQ_DESC_T pDesc);
static uint16_t vqReadAvailRingDescIdx (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t availIdx);
static uint16_t vqReadAvailDescIdx     (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static uint16_t vqReadAvailFlags       (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static uint16_t vqReadAvailUsedEvent   (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static void     vqWriteUsedElem        (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t usedIdx, uint32_t uDescIdx, uint32_t uLen);
static void     vqWriteUsedRingDescIdx (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint16_t uDescIdx);
static uint16_t vqReadUsedDescIdx      (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static uint16_t vqReadUsedFlags        (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static void     vqWriteUsedFlags       (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t fFlags);
static uint16_t vqReadUsedAvailEvent   (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static void     vqWriteUsedAvailEvent  (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t uAvailEventIdx);
365
366DECLINLINE(int) vqIsEventNeeded(uint16_t uEventIdx, uint16_t uDescIdxNew, uint16_t uDescIdxOld)
367{
368 return (uint16_t)(uDescIdxNew - uEventIdx - 1) < (uint16_t)(uDescIdxNew - uDescIdxOld);
369}
370
371DECLINLINE(bool) vqIsEmpty(PVIRTIOSTATE pVirtio, uint16_t qIdx)
372{
373 return vqReadAvailDescIdx(pVirtio, qIdx) == pVirtio->virtqProxy->uAvailIdx;
374}
375
376/**
377 * Accessor for virtq descriptor
378 */
379DECLINLINE(void) vqReadDesc(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t uDescIdx, PVIRTQ_DESC_T pDesc)
380{
381 //Log(("%s virtioQueueReadDesc: ring=%p idx=%u\n", INSTANCE(pState), pVirtQ, idx));
382 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
383 PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
384 pVirtio->pGcPhysQueueDesc[qIdx]
385 + sizeof(VIRTQ_DESC_T) * (uDescIdx % pVirtio->uQueueSize[qIdx]),
386 pDesc, sizeof(VIRTQ_DESC_T));
387}
388
389/**
390 * Accessors for virtq avail ring
391 */
392DECLINLINE(uint16_t) vqReadAvailRingDescIdx(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t availIdx)
393{
394 uint16_t uDescIdx;
395 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
396 PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
397 pVirtio->pGcPhysQueueAvail[qIdx]
398 + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[availIdx % pVirtio->uQueueSize[qIdx]]),
399 &uDescIdx, sizeof(uDescIdx));
400 return uDescIdx;
401}
402
403DECLINLINE(uint16_t) vqReadAvailDescIdx(PVIRTIOSTATE pVirtio, uint16_t qIdx)
404{
405 uint16_t uDescIdx;
406 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
407 PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
408 pVirtio->pGcPhysQueueAvail[qIdx] + RT_UOFFSETOF(VIRTQ_AVAIL_T, uDescIdx),
409 &uDescIdx, sizeof(uDescIdx));
410 return uDescIdx;
411}
412
413DECLINLINE(uint16_t) vqReadAvailFlags(PVIRTIOSTATE pVirtio, uint16_t qIdx)
414{
415 uint16_t fFlags;
416 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
417 PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
418 pVirtio->pGcPhysQueueAvail[qIdx] + RT_UOFFSETOF(VIRTQ_AVAIL_T, fFlags),
419 &fFlags, sizeof(fFlags));
420 return fFlags;
421}
422
423DECLINLINE(uint16_t) vqReadAvailUsedEvent(PVIRTIOSTATE pVirtio, uint16_t qIdx)
424{
425 uint16_t uUsedEventIdx;
426 /** VirtIO 1.0 uUsedEventIdx (used_event) immediately follows ring */
427 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
428 PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
429 + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[pVirtio->uQueueSize[qIdx]]),
430 &uUsedEventIdx, sizeof(uUsedEventIdx));
431 return uUsedEventIdx;
432}
433
434/**
435 * Accessors for virtq used ring
436 */
437DECLINLINE(void) vqWriteUsedElem(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t usedIdx, uint32_t uDescIdx, uint32_t uLen)
438{
439 VIRTQ_USED_ELEM_T elem = { uDescIdx, uLen };
440 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
441 PDMDevHlpPCIPhysWrite(pVirtio->CTX_SUFF(pDevIns),
442 pVirtio->pGcPhysQueueUsed[qIdx]
443 + RT_UOFFSETOF_DYN(VIRTQ_USED_T, auRing[usedIdx % pVirtio->uQueueSize[qIdx]]),
444 &elem, sizeof(elem));
445}
446
447DECLINLINE(void) vqWriteUsedRingDescIdx(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint16_t uDescIdx)
448{
449 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
450 PDMDevHlpPCIPhysWrite(pVirtio->CTX_SUFF(pDevIns),
451 pVirtio->pGcPhysQueueUsed[qIdx] + RT_UOFFSETOF(VIRTQ_USED_T, uDescIdx),
452 &uDescIdx, sizeof(uDescIdx));
453}
454
455DECLINLINE(uint16_t)vqReadUsedDescIdx(PVIRTIOSTATE pVirtio, uint16_t qIdx)
456{
457 uint16_t uDescIdx;
458 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
459 PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
460 pVirtio->pGcPhysQueueUsed[qIdx] + RT_UOFFSETOF(VIRTQ_USED_T, uDescIdx),
461 &uDescIdx, sizeof(uDescIdx));
462 return uDescIdx;
463}
464
465DECLINLINE(uint16_t) vqReadUsedFlags(PVIRTIOSTATE pVirtio, uint16_t qIdx)
466{
467 uint16_t fFlags;
468 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
469 PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
470 pVirtio->pGcPhysQueueUsed[qIdx] + RT_UOFFSETOF(VIRTQ_USED_T, fFlags),
471 &fFlags, sizeof(fFlags));
472 return fFlags;
473}
474
475DECLINLINE(void) vqWriteUsedFlags(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t fFlags)
476{
477 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
478 PDMDevHlpPCIPhysWrite(pVirtio->CTX_SUFF(pDevIns),
479 pVirtio->pGcPhysQueueUsed[qIdx] + RT_UOFFSETOF(VIRTQ_USED_T, fFlags),
480 &fFlags, sizeof(fFlags));
481}
482
483DECLINLINE(uint16_t) vqReadUsedAvailEvent(PVIRTIOSTATE pVirtio, uint16_t qIdx)
484{
485 uint16_t uAvailEventIdx;
486 /** VirtIO 1.0 uAvailEventIdx (avail_event) immediately follows ring */
487 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
488 PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
489 + RT_UOFFSETOF_DYN(VIRTQ_USED_T, auRing[pVirtio->uQueueSize[qIdx]]),
490 &uAvailEventIdx, sizeof(uAvailEventIdx));
491 return uAvailEventIdx;
492}
493
494DECLINLINE(void) vqWriteUsedAvailEvent(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t uAvailEventIdx)
495{
496 /** VirtIO 1.0 uAvailEventIdx (avail_event) immediately follows ring */
497 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
498 PDMDevHlpPCIPhysWrite(pVirtio->CTX_SUFF(pDevIns),
499 pVirtio->pGcPhysQueueUsed[qIdx]
500 + RT_UOFFSETOF_DYN(VIRTQ_USED_T, auRing[pVirtio->uQueueSize[qIdx]]),
501 &uAvailEventIdx, sizeof(uAvailEventIdx));
502}
503
504/**
505 * Makes the MMIO-mapped Virtio uDeviceStatus registers non-cryptic */
506DECLINLINE(void) virtioLogDeviceStatus( uint8_t status)
507{
508 if (status == 0)
509 Log(("RESET"));
510 else
511 {
512 int primed = 0;
513 if (status & VIRTIO_STATUS_ACKNOWLEDGE)
514 Log(("ACKNOWLEDGE", primed++));
515 if (status & VIRTIO_STATUS_DRIVER)
516 Log(("%sDRIVER", primed++ ? " | " : ""));
517 if (status & VIRTIO_STATUS_FEATURES_OK)
518 Log(("%sFEATURES_OK", primed++ ? " | " : ""));
519 if (status & VIRTIO_STATUS_DRIVER_OK)
520 Log(("%sDRIVER_OK", primed++ ? " | " : ""));
521 if (status & VIRTIO_STATUS_FAILED)
522 Log(("%sFAILED", primed++ ? " | " : ""));
523 if (status & VIRTIO_STATUS_DEVICE_NEEDS_RESET)
524 Log(("%sNEEDS_RESET", primed++ ? " | " : ""));
525 }
526}
527
/* Internal device operations (forward declarations; defined in the .cpp). */
static void virtioResetQueue       (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static void vqNotifyDriver         (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static int  virtioRaiseInterrupt   (PVIRTIOSTATE pVirtio, uint8_t uCause);
static void virtioLowerInterrupt   (PVIRTIOSTATE pVirtio);
static void virtioQueueNotified    (PVIRTIOSTATE pVirtio, uint16_t qidx, uint16_t uDescIdx);
static int  virtioCommonCfgAccessed(PVIRTIOSTATE pVirtio, int fWrite, off_t uOffset, unsigned cb, void const *pv);
static void virtioGuestResetted    (PVIRTIOSTATE pVirtio);

/* PDM saved-state (SSM) callbacks. */
static DECLCALLBACK(int) virtioR3SaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM);
static DECLCALLBACK(int) virtioR3LoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
static DECLCALLBACK(int) virtioR3LoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM);
static DECLCALLBACK(int) virtioR3LiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass);
540
541#endif /* !VBOX_INCLUDED_SRC_VirtIO_Virtio_1_0_impl_h */
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette