VirtualBox

source: vbox/trunk/src/VBox/Devices/VirtIO/Virtio_1_0_impl.h@ 80596

Last change on this file since 80596 was 80596, checked in by vboxsync, 6 years ago

Storage/DevVirtioSCSI.cpp: Added code to incorporate aeichner's changes (see bugref:9440, Comment #72). Tested new response status code and discovered it's more accurate to trust sense over rcReq values. Also changed some logging levels to reinstate the option to get hexdumps (with some filtering)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 30.5 KB
Line 
1/* $Id: Virtio_1_0_impl.h 80596 2019-09-05 07:12:46Z vboxsync $ $Revision: 80596 $ $Date: 2019-09-05 07:12:46 +0000 (Thu, 05 Sep 2019) $ $Author: vboxsync $ */
2/** @file
3 * Virtio_1_0_impl.h - Virtio Declarations
4 */
5
6/*
7 * Copyright (C) 2009-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifndef VBOX_INCLUDED_SRC_VirtIO_Virtio_1_0_impl_h
19#define VBOX_INCLUDED_SRC_VirtIO_Virtio_1_0_impl_h
20#ifndef RT_WITHOUT_PRAGMA_ONCE
21# pragma once
22#endif
23
24#include "Virtio_1_0.h"
25
/** @name Saved state versions.
 * The saved state version is changed if either common or any of specific
 * parts are changed. That is, it is perfectly possible that the version
 * of saved vnet state will increase as a result of change in vblk structure
 * for example.
 * @{ */
#define VIRTIO_SAVEDSTATE_VERSION                   1
/** @} */

#define VIRTIO_F_VERSION_1                  RT_BIT_64(32)  /**< Required feature bit for 1.0 devices      */

#define VIRTIO_F_INDIRECT_DESC              RT_BIT_64(28)  /**< Allow descs to point to list of descs     */
#define VIRTIO_F_EVENT_IDX                  RT_BIT_64(29)  /**< Allow notification disable for n elems    */
#define VIRTIO_F_RING_INDIRECT_DESC         RT_BIT_64(28)  /**< Doc bug: Goes under two names in spec     */
#define VIRTIO_F_RING_EVENT_IDX             RT_BIT_64(29)  /**< Doc bug: Goes under two names in spec     */

#define VIRTIO_DEV_INDEPENDENT_FEATURES_OFFERED ( 0 )      /**< TBD: Add VIRTIO_F_INDIRECT_DESC           */
43
#define VIRTIO_ISR_VIRTQ_INTERRUPT          RT_BIT_32(0)   /**< Virtq interrupt bit of ISR register       */
#define VIRTIO_ISR_DEVICE_CONFIG            RT_BIT_32(1)   /**< Device configuration changed bit of ISR   */
#define DEVICE_PCI_VENDOR_ID_VIRTIO         0x1AF4         /**< Guest driver locates dev via (mandatory)  */
#define DEVICE_PCI_REVISION_ID_VIRTIO       1              /**< VirtIO 1.0 non-transitional drivers >= 1  */

/** Reserved (*negotiated*) Feature Bits (e.g. device independent features, VirtIO 1.0 spec, section 6) */

#define VIRTIO_MSI_NO_VECTOR                0xffff         /**< Vector value to disable MSI for queue     */

/** @name Device Status field constants (from Virtio 1.0 spec)
 * @{ */
#define VIRTIO_STATUS_ACKNOWLEDGE           0x01           /**< Guest driver: Located this VirtIO device  */
#define VIRTIO_STATUS_DRIVER                0x02           /**< Guest driver: Can drive this VirtIO dev.  */
#define VIRTIO_STATUS_DRIVER_OK             0x04           /**< Guest driver: Driver set-up and ready     */
#define VIRTIO_STATUS_FEATURES_OK           0x08           /**< Guest driver: Feature negotiation done    */
#define VIRTIO_STATUS_FAILED                0x80           /**< Guest driver: Fatal error, gave up        */
#define VIRTIO_STATUS_DEVICE_NEEDS_RESET    0x40           /**< Device experienced unrecoverable error    */
/** @} */

/** @name Virtio Device PCI Capabilities type codes (stored in virtio_pci_cap's uCfgType field)
 * @{ */
#define VIRTIO_PCI_CAP_COMMON_CFG           1              /**< Common configuration PCI capability ID    */
#define VIRTIO_PCI_CAP_NOTIFY_CFG           2              /**< Notification area PCI capability ID       */
#define VIRTIO_PCI_CAP_ISR_CFG              3              /**< ISR PCI capability id                     */
#define VIRTIO_PCI_CAP_DEVICE_CFG           4              /**< Device-specific PCI cfg capability ID     */
#define VIRTIO_PCI_CAP_PCI_CFG              5              /**< PCI CFG capability ID                     */
/** @} */

#define VIRTIO_PCI_CAP_ID_VENDOR            0x09           /**< Vendor-specific PCI CFG Device Cap. ID    */
69
/**
 * The following is the PCI capability struct common to all VirtIO capability types.
 * (Layout per the VirtIO 1.0 spec's "virtio_pci_cap"; all multi-byte fields little-endian.)
 */
typedef struct virtio_pci_cap
{
    /* All little-endian */
    uint8_t   uCapVndr;                            /**< Generic PCI field: PCI_CAP_ID_VNDR        */
    uint8_t   uCapNext;                            /**< Generic PCI field: next ptr.              */
    uint8_t   uCapLen;                             /**< Generic PCI field: capability length      */
    uint8_t   uCfgType;                            /**< Identifies the structure.                 */
    uint8_t   uBar;                                /**< Where to find it.                         */
    uint8_t   uPadding[3];                         /**< Pad to full dword.                        */
    uint32_t  uOffset;                             /**< Offset within bar.  (L.E.)                */
    uint32_t  uLength;                             /**< Length of struct, in bytes. (L.E.)        */
} VIRTIO_PCI_CAP_T, *PVIRTIO_PCI_CAP_T;
85
/**
 * IN/OUT descriptor chain associated with one element of the virtq avail ring,
 * represented as respective arrays of SG segments: aSegsIn for data the device
 * writes back to the guest, aSegsOut for data read from the guest.
 */
typedef struct VIRTQ_DESC_CHAIN                    /**< Describes a single queue element          */
{
    RTSGSEG  aSegsIn[VIRTQ_MAX_SIZE];              /**< List of segments to write to guest        */
    RTSGSEG  aSegsOut[VIRTQ_MAX_SIZE];             /**< List of segments read from guest          */
    uint32_t uHeadIdx;                             /**< Index at head desc (source of seg arrays) */
    uint32_t cSegsIn;                              /**< Count of segments in aSegsIn[]            */
    uint32_t cSegsOut;                             /**< Count of segments in aSegsOut[]           */
} VIRTQ_DESC_CHAIN_T, *PVIRTQ_DESC_CHAIN_T;
98
/**
 * Local implementation's usage context of a queue (e.g. not part of VirtIO specification).
 * One instance exists per virtqueue (see VIRTIOSTATE::virtqProxy[]).
 */
typedef struct VIRTQ_PROXY
{
    RTSGBUF             inSgBuf;                   /**< host-to-guest buffers                     */
    RTSGBUF             outSgBuf;                  /**< guest-to-host buffers                     */
    const char          szVirtqName[32];           /**< Dev-specific name of queue.
                                                    *   NOTE(review): a const array member makes the
                                                    *   struct non-assignable and the name writable
                                                    *   only via cast/memcpy — presumably filled once
                                                    *   at queue attach; confirm before changing.   */
    uint16_t            uAvailIdx;                 /**< Consumer's position in avail ring         */
    uint16_t            uUsedIdx;                  /**< Consumer's position in used ring          */
    bool                fEventThresholdReached;    /**< Don't lose track while queueing ahead     */
    PVIRTQ_DESC_CHAIN_T pDescChain;                /**< Per-queue s/g data.                       */
} VIRTQ_PROXY_T, *PVIRTQ_PROXY_T;
112
/**
 * VirtIO 1.0 Capabilities' related MMIO-mapped structs:
 *
 * Note: virtio_pci_device_cap is dev-specific, implemented by client. Definition unknown here.
 */
typedef struct virtio_pci_common_cfg
{
    /* Per device fields */
    uint32_t  uDeviceFeaturesSelect;               /**< RW (driver selects device features)       */
    uint32_t  uDeviceFeatures;                     /**< RO (device reports features to driver)    */
    uint32_t  uDriverFeaturesSelect;               /**< RW (driver selects driver features)       */
    uint32_t  uDriverFeatures;                     /**< RW (driver-accepted device features)      */
    uint16_t  uMsixConfig;                         /**< RW (driver sets MSI-X config vector)      */
    uint16_t  uNumQueues;                          /**< RO (device specifies max queues)          */
    uint8_t   uDeviceStatus;                       /**< RW (driver writes device status, 0=reset) */
    uint8_t   uConfigGeneration;                   /**< RO (device changes when changing configs) */

    /* Per virtqueue fields (as determined by uQueueSelect) */
    uint16_t  uQueueSelect;                        /**< RW (selects queue focus for these fields) */
    uint16_t  uQueueSize;                          /**< RW (queue size, 0 - 2^n)                  */
    uint16_t  uQueueMsixVector;                    /**< RW (driver selects MSI-X queue vector)    */
    uint16_t  uQueueEnable;                        /**< RW (driver controls usability of queue)   */
    uint16_t  uQueueNotifyOff;                     /**< RO (offset into virtqueue; see spec)      */
    uint64_t  pGcPhysQueueDesc;                    /**< RW (driver writes desc table phys addr)   */
    uint64_t  pGcPhysQueueAvail;                   /**< RW (driver writes avail ring phys addr)   */
    uint64_t  pGcPhysQueueUsed;                    /**< RW (driver writes used ring phys addr)    */
} VIRTIO_PCI_COMMON_CFG_T, *PVIRTIO_PCI_COMMON_CFG_T;
140
/** Notification capability: generic cap header plus the notify-offset multiplier. */
typedef struct virtio_pci_notify_cap
{
    struct virtio_pci_cap pciCap;                  /**< Notification MMIO mapping capability      */
    uint32_t uNotifyOffMultiplier;                 /**< notify_off_multiplier                     */
} VIRTIO_PCI_NOTIFY_CAP_T, *PVIRTIO_PCI_NOTIFY_CAP_T;
146
/** PCI-config-space access capability: generic cap header plus a 4-byte data window. */
typedef struct virtio_pci_cfg_cap
{
    struct virtio_pci_cap pciCap;                  /**< Cap. defines the BAR/off/len to access    */
    uint8_t uPciCfgData[4];                        /**< I/O buf for above cap.                    */
} VIRTIO_PCI_CFG_CAP_T, *PVIRTIO_PCI_CFG_CAP_T;
152
/**
 * The core (/common) state of the VirtIO PCI device
 *
 * @implements  PDMILEDPORTS
 */
typedef struct VIRTIOSTATE
{
    PDMPCIDEV                  dev;                               /**< PCI device                                */
    char                       szInstance[16];                    /**< Instance name, e.g. "VIRTIOSCSI0"         */
    void *                     pClientContext;                    /**< Client callback returned on callbacks     */

    PPDMDEVINSR3               pDevInsR3;                         /**< Device instance - R3                      */
    PPDMDEVINSR0               pDevInsR0;                         /**< Device instance - R0                      */
    PPDMDEVINSRC               pDevInsRC;                         /**< Device instance - RC                      */

    RTGCPHYS                   pGcPhysPciCapBase;                 /**< Pointer to MMIO mapped capability data    */
    RTGCPHYS                   pGcPhysCommonCfg;                  /**< Pointer to MMIO mapped capability data    */
    RTGCPHYS                   pGcPhysNotifyCap;                  /**< Pointer to MMIO mapped capability data    */
    RTGCPHYS                   pGcPhysIsrCap;                     /**< Pointer to MMIO mapped capability data    */
    RTGCPHYS                   pGcPhysDeviceCap;                  /**< Pointer to MMIO mapped capability data    */

    RTGCPHYS                   pGcPhysQueueDesc[VIRTQ_MAX_CNT];   /**< (MMIO) PhysAdr per-Q desc structs   GUEST */
    RTGCPHYS                   pGcPhysQueueAvail[VIRTQ_MAX_CNT];  /**< (MMIO) PhysAdr per-Q avail structs  GUEST */
    RTGCPHYS                   pGcPhysQueueUsed[VIRTQ_MAX_CNT];   /**< (MMIO) PhysAdr per-Q used structs   GUEST */
    uint16_t                   uQueueNotifyOff[VIRTQ_MAX_CNT];    /**< (MMIO) per-Q notify offset           HOST */
    uint16_t                   uQueueMsixVector[VIRTQ_MAX_CNT];   /**< (MMIO) Per-queue vector for MSI-X   GUEST */
    uint16_t                   uQueueEnable[VIRTQ_MAX_CNT];       /**< (MMIO) Per-queue enable             GUEST */
    uint16_t                   uQueueSize[VIRTQ_MAX_CNT];         /**< (MMIO) Per-queue size          HOST/GUEST */
    uint16_t                   uQueueSelect;                      /**< (MMIO) queue selector               GUEST */
    uint16_t                   padding;                           /* Explicit alignment filler before uint64_t   */
    uint64_t                   uDeviceFeatures;                   /**< (MMIO) Host features offered         HOST */
    uint64_t                   uDriverFeatures;                   /**< (MMIO) Host features accepted       GUEST */
    uint32_t                   uDeviceFeaturesSelect;             /**< (MMIO) hi/lo select uDeviceFeatures GUEST */
    uint32_t                   uDriverFeaturesSelect;             /**< (MMIO) hi/lo select uDriverFeatures GUEST */
    uint32_t                   uMsixConfig;                       /**< (MMIO) MSI-X vector                 GUEST */
    uint32_t                   uNumQueues;                        /**< (MMIO) Actual number of queues      GUEST */
    uint8_t                    uDeviceStatus;                     /**< (MMIO) Device Status                GUEST */
    uint8_t                    uPrevDeviceStatus;                 /**< (MMIO) Prev Device Status           GUEST */
    uint8_t                    uConfigGeneration;                 /**< (MMIO) Device config sequencer      HOST  */

    VIRTQ_PROXY_T              virtqProxy[VIRTQ_MAX_CNT];         /**< Local impl-specific queue context         */
    VIRTIOCALLBACKS            virtioCallbacks;                   /**< Callback vectors to client                */

    PFNPCICONFIGREAD           pfnPciConfigReadOld;               /**< Prev rd. cb. intercepting PCI Cfg I/O     */
    PFNPCICONFIGWRITE          pfnPciConfigWriteOld;              /**< Prev wr. cb. intercepting PCI Cfg I/O     */

    PVIRTIO_PCI_CFG_CAP_T      pPciCfgCap;                        /**< Pointer to struct in configuration area   */
    PVIRTIO_PCI_NOTIFY_CAP_T   pNotifyCap;                        /**< Pointer to struct in configuration area   */
    PVIRTIO_PCI_CAP_T          pCommonCfgCap;                     /**< Pointer to struct in configuration area   */
    PVIRTIO_PCI_CAP_T          pIsrCap;                           /**< Pointer to struct in configuration area   */
    PVIRTIO_PCI_CAP_T          pDeviceCap;                        /**< Pointer to struct in configuration area   */

    uint32_t                   cbDevSpecificCfg;                  /**< Size of client's dev-specific config data */
    void                      *pDevSpecificCfg;                   /**< Pointer to client's struct                */
    void                      *pPrevDevSpecificCfg;               /**< Previous read dev-specific cfg of client  */
    bool                       fGenUpdatePending;                 /**< If set, update cfg gen after driver reads */
    uint8_t                    uPciCfgDataOff;                    /**< Offset into VIRTIO_PCI_CFG_CAP_T window   */
    uint8_t                    uISR;                              /**< Interrupt Status Register.                */

} VIRTIOSTATE, *PVIRTIOSTATE;
213
/** @name virtq related flags (values per the VirtIO 1.0 spec)
 * @{ */
#define VIRTQ_DESC_F_NEXT                   1      /**< Indicates this descriptor chains to next  */
#define VIRTQ_DESC_F_WRITE                  2      /**< Marks buffer as write-only (default ro)   */
#define VIRTQ_DESC_F_INDIRECT               4      /**< Buffer is list of buffer descriptors      */

#define VIRTQ_USED_F_NO_NOTIFY              1      /**< Dev to Drv: Don't notify when buf added   */
#define VIRTQ_AVAIL_F_NO_INTERRUPT          1      /**< Drv to Dev: Don't notify when buf eaten   */
/** @} */
221
/**
 * virtq related structs
 * (struct names follow VirtIO 1.0 spec, typedef use VBox style)
 */
typedef struct virtq_desc
{
    uint64_t  pGcPhysBuf;                          /**< addr       GC Phys. address of buffer     */
    uint32_t  cb;                                  /**< len        Buffer length                  */
    uint16_t  fFlags;                              /**< flags      Buffer specific flags          */
    uint16_t  uDescIdxNext;                        /**< next       Idx set if VIRTIO_DESC_F_NEXT  */
} VIRTQ_DESC_T, *PVIRTQ_DESC_T;
233
typedef struct virtq_avail
{
    uint16_t  fFlags;                              /**< flags      avail ring drv to dev flags    */
    uint16_t  uIdx;                                /**< idx        Index of next free ring slot   */
    uint16_t  auRing[1];                           /**< ring       Ring: avail drv to dev bufs;
                                                    *   declared [1] — actual length is the queue
                                                    *   size, so this struct is a layout template */
    uint16_t  uUsedEventIdx;                       /**< used_event (if VIRTQ_USED_F_EVENT_IDX);
                                                    *   NOTE: this fixed offset is never used —
                                                    *   accessors compute its real position past
                                                    *   the variable-length ring (see
                                                    *   virtioReadAvailUsedEvent)                 */
} VIRTQ_AVAIL_T, *PVIRTQ_AVAIL_T;
241
/** One entry of the used ring: head descriptor index and total bytes written. */
typedef struct virtq_used_elem
{
    uint32_t  uDescIdx;                            /**< idx        Start of used desc chain       */
    uint32_t  cbElem;                              /**< len        Total len of used desc chain   */
} VIRTQ_USED_ELEM_T;
247
/* NOTE(review): struct tag reads "virt_used" (missing 'q'), unlike the spec name
 * "virtq_used" — only the VIRTQ_USED_T typedef appears to be used here, but confirm
 * no other file references the tag before renaming. */
typedef struct virt_used
{
    uint16_t  fFlags;                              /**< flags      used ring host-to-guest flags  */
    uint16_t  uIdx;                                /**< idx        Index of next ring slot        */
    VIRTQ_USED_ELEM_T auRing[1];                   /**< ring       Ring: used dev to drv bufs;
                                                    *   declared [1] as a layout template         */
    uint16_t  uAvailEventIdx;                      /**< avail_event if (VIRTQ_USED_F_EVENT_IDX);
                                                    *   fixed offset unused — accessors compute
                                                    *   the real position past the ring (see
                                                    *   virtioReadUsedAvailEvent)                 */
} VIRTQ_USED_T, *PVIRTQ_USED_T;
255
/**
 * This macro (a statement sequence, not an expression) declares and sets a local bool that
 * is true if the implied physical address and access length fall within the MMIO-mapped
 * capability struct.
 *
 * Actual Parameters:
 * @param pGcPhysCapData - [input]  Physical address of MMIO-mapped capability struct
 * @param pCfgCap        - [input]  Pointer to capability in PCI configuration area
 * @param fMatched       - [output] Name of the bool this macro DECLARES; true if GCPhysAddr
 *                         is within the physically mapped capability and cb fits within it.
 *
 * Implied parameters:
 * @param GCPhysAddr - [input, implied]  Physical address accessed (via MMIO callback)
 * @param cb         - [input, implied]  Number of bytes to access
 */
#define MATCH_VIRTIO_CAP_STRUCT(pGcPhysCapData, pCfgCap, fMatched) \
        bool fMatched = false; \
        if (pGcPhysCapData && pCfgCap && GCPhysAddr >= (RTGCPHYS)pGcPhysCapData \
            && GCPhysAddr < ((RTGCPHYS)pGcPhysCapData + ((PVIRTIO_PCI_CAP_T)pCfgCap)->uLength) \
            && cb <= ((PVIRTIO_PCI_CAP_T)pCfgCap)->uLength) \
                fMatched = true;
274
/**
 * This macro resolves to boolean true if uOffset matches a field offset and size exactly,
 * (or if it is a 64-bit field, if it accesses either 32-bit part as a 32-bit access)
 * This is mandated by section 4.1.3.1 of the VirtIO 1.0 specification)
 *
 * Fix: the whole expansion is now wrapped in parentheses. Previously it ended in a bare
 * `... || (...)` expression, so an expansion site combining it with a higher-precedence
 * operator (e.g. `MATCH_COMMON_CFG(x) && y` or `!MATCH_COMMON_CFG(x)`) would bind to only
 * part of the condition.
 *
 * @param member  - Member of VIRTIO_PCI_COMMON_CFG_T
 * @param uOffset - Implied parameter: Offset into VIRTIO_PCI_COMMON_CFG_T
 * @param cb      - Implied parameter: Number of bytes to access
 * @result        - true or false
 */
#define MATCH_COMMON_CFG(member) \
    (   (   RT_SIZEOFMEMB(VIRTIO_PCI_COMMON_CFG_T, member) == 8 \
         && (   uOffset == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member) \
             || uOffset == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member) + sizeof(uint32_t)) \
         && cb == sizeof(uint32_t)) \
     || (   uOffset == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member) \
         && cb == RT_SIZEOFMEMB(VIRTIO_PCI_COMMON_CFG_T, member)))
292
/**
 * Logs a guest access to a virtio_pci_common_cfg member. Implied parameters in scope at the
 * expansion site: pv (data ptr), cb (access size), uIntraOff (offset within member), fWrite
 * (direction). NOTE(review): the expansion itself ends in ';', so call sites that append
 * another ';' produce an extra empty statement — harmless inside a brace block, but this
 * macro is not safe in an unbraced if/else; confirm call sites before restructuring.
 */
#define LOG_COMMON_CFG_ACCESS(member) \
            virtioLogMappedIoValue(__FUNCTION__, #member, RT_SIZEOFMEMB(VIRTIO_PCI_COMMON_CFG_T, member), \
                                   pv, cb, uIntraOff, fWrite, false, 0);

/** Same as LOG_COMMON_CFG_ACCESS, but for an array member — also logs the element index. */
#define LOG_COMMON_CFG_ACCESS_INDEXED(member, idx) \
            virtioLogMappedIoValue(__FUNCTION__, #member, RT_SIZEOFMEMB(VIRTIO_PCI_COMMON_CFG_T, member), \
                                   pv, cb, uIntraOff, fWrite, true, idx);

/**
 * Copies cb bytes of the guest access at uOffset between the guest buffer pv and the
 * matching member of the device's config mirror (pVirtio->member), direction per fWrite,
 * then logs the access. NOTE(review): expands to a bare brace block rather than the
 * do { ... } while (0) idiom; safe as a stand-alone statement but a trailing ';' before
 * an `else` would not compile — verify call sites before converting.
 */
#define COMMON_CFG_ACCESSOR(member) \
    { \
        uint32_t uIntraOff = uOffset - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member); \
        if (fWrite) \
            memcpy(((char *)&pVirtio->member) + uIntraOff, (const char *)pv, cb); \
        else \
            memcpy((char *)pv, (const char *)(((char *)&pVirtio->member) + uIntraOff), cb); \
        LOG_COMMON_CFG_ACCESS(member); \
    }

/** As COMMON_CFG_ACCESSOR, but for element idx of an array member. */
#define COMMON_CFG_ACCESSOR_INDEXED(member, idx) \
    { \
        uint32_t uIntraOff = uOffset - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member); \
        if (fWrite) \
            memcpy(((char *)(pVirtio->member + idx)) + uIntraOff, (const char *)pv, cb); \
        else \
            memcpy((char *)pv, (const char *)(((char *)(pVirtio->member + idx)) + uIntraOff), cb); \
        LOG_COMMON_CFG_ACCESS_INDEXED(member, idx); \
    }

/** As COMMON_CFG_ACCESSOR, but rejects (and logs) guest writes to a read-only member. */
#define COMMON_CFG_ACCESSOR_READONLY(member) \
    { \
        uint32_t uIntraOff = uOffset - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member); \
        if (fWrite) \
            LogFunc(("Guest attempted to write readonly virtio_pci_common_cfg.%s\n", #member)); \
        else \
        { \
            memcpy((char *)pv, (const char *)(((char *)&pVirtio->member) + uIntraOff), cb); \
            LOG_COMMON_CFG_ACCESS(member); \
        } \
    }

/** As COMMON_CFG_ACCESSOR_READONLY, but for element idx of a read-only array member. */
#define COMMON_CFG_ACCESSOR_INDEXED_READONLY(member, idx) \
    { \
        uint32_t uIntraOff = uOffset - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member); \
        if (fWrite) \
            LogFunc(("Guest attempted to write readonly virtio_pci_common_cfg.%s[%d]\n", #member, idx)); \
        else \
        { \
            memcpy((char *)pv, ((char *)(pVirtio->member + idx)) + uIntraOff, cb); \
            LOG_COMMON_CFG_ACCESS_INDEXED(member, idx); \
        } \
    }
344
/** True when the guest driver has written DRIVER_OK into the device status register.
 * Fix: the macro parameter is now parenthesized so an argument like `&state->virtio`
 * expands correctly regardless of operator precedence. */
#define DRIVER_OK(pVirtio) ((pVirtio)->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK)
346
/**
 * Internal queue operations — forward declarations.
 * (Definitions are the DECLINLINE accessors below; each wraps a single guest-physical
 * read or write of one virtq field.)
 */

static int      virtqIsEventNeeded        (uint16_t uEventIdx, uint16_t uDescIdxNew, uint16_t uDescIdxOld);
static bool     virtqIsEmpty              (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static void     virtioReadDesc            (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t uDescIdx, PVIRTQ_DESC_T pDesc);
static uint16_t virtioReadAvailDescIdx    (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t availIdx);
static uint16_t virtioReadAvailRingIdx    (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static uint16_t virtioReadAvailFlags      (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static uint16_t virtioReadAvailUsedEvent  (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static void     virtioWriteUsedElem       (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t usedIdx, uint32_t uDescIdx, uint32_t uLen);
static void     virtioWriteUsedRingIdx    (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint16_t uDescIdx);
static uint16_t virtioReadUsedRingIdx     (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static uint16_t virtioReadUsedFlags       (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static void     virtioWriteUsedFlags      (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t fFlags);
static uint16_t virtioReadUsedAvailEvent  (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static void     virtioWriteUsedAvailEvent (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t uAvailEventIdx);

366
367DECLINLINE(int) virtqIsEventNeeded(uint16_t uEventIdx, uint16_t uDescIdxNew, uint16_t uDescIdxOld)
368{
369 return (uint16_t)(uDescIdxNew - uEventIdx - 1) < (uint16_t)(uDescIdxNew - uDescIdxOld);
370}
371
372DECLINLINE(bool) virtqIsEmpty(PVIRTIOSTATE pVirtio, uint16_t qIdx)
373{
374 return virtioReadAvailRingIdx(pVirtio, qIdx) == pVirtio->virtqProxy[qIdx].uAvailIdx;
375}
376
/**
 * Accessor for virtq descriptor: reads descriptor uDescIdx of queue qIdx from guest
 * physical memory into *pDesc. The index is wrapped modulo the queue size, so callers
 * may pass a monotonically increasing index.
 */
DECLINLINE(void) virtioReadDesc(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t uDescIdx, PVIRTQ_DESC_T pDesc)
{
    //Log(("%s virtioQueueReadDesc: ring=%p idx=%u\n", INSTANCE(pState), pVirtQ, idx));
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->pGcPhysQueueDesc[qIdx]
                    + sizeof(VIRTQ_DESC_T) * (uDescIdx % pVirtio->uQueueSize[qIdx]),
                      pDesc, sizeof(VIRTQ_DESC_T));
}
389
/**
 * Accessors for virtq avail ring.
 *
 * Reads the descriptor index stored in avail ring slot availIdx (wrapped modulo the
 * queue size) of queue qIdx from guest physical memory.
 */
DECLINLINE(uint16_t) virtioReadAvailDescIdx(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t availIdx)
{
    uint16_t uDescIdx;
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->pGcPhysQueueAvail[qIdx]
                    + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[availIdx % pVirtio->uQueueSize[qIdx]]),
                      &uDescIdx, sizeof(uDescIdx));
    return uDescIdx;
}
403
/** Reads the avail ring's producer index (idx) of queue qIdx from guest physical memory. */
DECLINLINE(uint16_t) virtioReadAvailRingIdx(PVIRTIOSTATE pVirtio, uint16_t qIdx)
{
    uint16_t uIdx = 0;
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->pGcPhysQueueAvail[qIdx] + RT_UOFFSETOF(VIRTQ_AVAIL_T, uIdx),
                      &uIdx, sizeof(uIdx));
    return uIdx;
}
413
/** Reads the avail ring's flags field (e.g. VIRTQ_AVAIL_F_NO_INTERRUPT) of queue qIdx from guest memory. */
DECLINLINE(uint16_t) virtioReadAvailFlags(PVIRTIOSTATE pVirtio, uint16_t qIdx)
{
    uint16_t fFlags;
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->pGcPhysQueueAvail[qIdx] + RT_UOFFSETOF(VIRTQ_AVAIL_T, fFlags),
                      &fFlags, sizeof(fFlags));
    return fFlags;
}
423
/** Reads the avail ring's used_event field of queue qIdx from guest memory. */
DECLINLINE(uint16_t) virtioReadAvailUsedEvent(PVIRTIOSTATE pVirtio, uint16_t qIdx)
{
    uint16_t uUsedEventIdx;
    /* VirtIO 1.0 uUsedEventIdx (used_event) immediately follows the ring, hence the
     * dynamically computed offset past auRing[queue size] rather than the fixed
     * (placeholder) field offset in VIRTQ_AVAIL_T. */
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->pGcPhysQueueAvail[qIdx]
                    + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[pVirtio->uQueueSize[qIdx]]),
                      &uUsedEventIdx, sizeof(uUsedEventIdx));
    return uUsedEventIdx;
}
435
/**
 * Accessors for virtq used ring.
 *
 * Writes a used-ring element {uDescIdx, uLen} into slot usedIdx (wrapped modulo the
 * queue size) of queue qIdx in guest physical memory.
 */
DECLINLINE(void) virtioWriteUsedElem(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t usedIdx, uint32_t uDescIdx, uint32_t uLen)
{
    VIRTQ_USED_ELEM_T elem = { uDescIdx,  uLen };
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPCIPhysWrite(pVirtio->CTX_SUFF(pDevIns),
                          pVirtio->pGcPhysQueueUsed[qIdx]
                        + RT_UOFFSETOF_DYN(VIRTQ_USED_T, auRing[usedIdx % pVirtio->uQueueSize[qIdx]]),
                          &elem, sizeof(elem));
}
448
/** Writes the used ring's producer index (idx) of queue qIdx to guest physical memory. */
DECLINLINE(void) virtioWriteUsedRingIdx(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint16_t uIdx)
{
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPCIPhysWrite(pVirtio->CTX_SUFF(pDevIns),
                          pVirtio->pGcPhysQueueUsed[qIdx] + RT_UOFFSETOF(VIRTQ_USED_T, uIdx),
                          &uIdx, sizeof(uIdx));
}
456
/** Reads back the used ring's producer index (idx) of queue qIdx from guest physical memory. */
DECLINLINE(uint16_t)virtioReadUsedRingIdx(PVIRTIOSTATE pVirtio, uint16_t qIdx)
{
    uint16_t uIdx;
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->pGcPhysQueueUsed[qIdx] + RT_UOFFSETOF(VIRTQ_USED_T, uIdx),
                      &uIdx, sizeof(uIdx));
    return uIdx;
}
466
/** Reads the used ring's flags field (e.g. VIRTQ_USED_F_NO_NOTIFY) of queue qIdx from guest memory. */
DECLINLINE(uint16_t) virtioReadUsedFlags(PVIRTIOSTATE pVirtio, uint16_t qIdx)
{
    uint16_t fFlags;
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->pGcPhysQueueUsed[qIdx] + RT_UOFFSETOF(VIRTQ_USED_T, fFlags),
                      &fFlags, sizeof(fFlags));
    return fFlags;
}
476
477DECLINLINE(void) virtioWriteUsedFlags(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t fFlags)
478{
479 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
480 PDMDevHlpPCIPhysWrite(pVirtio->CTX_SUFF(pDevIns),
481 pVirtio->pGcPhysQueueUsed[qIdx] + RT_UOFFSETOF(VIRTQ_USED_T, fFlags),
482 &fFlags, sizeof(fFlags));
483}
484
/** Reads the used ring's avail_event field of queue qIdx from guest memory. */
DECLINLINE(uint16_t) virtioReadUsedAvailEvent(PVIRTIOSTATE pVirtio, uint16_t qIdx)
{
    uint16_t uAvailEventIdx;
    /* VirtIO 1.0 uAvailEventIdx (avail_event) immediately follows the ring, hence the
     * dynamically computed offset past auRing[queue size] rather than the fixed
     * (placeholder) field offset in VIRTQ_USED_T. */
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->pGcPhysQueueUsed[qIdx]
                    + RT_UOFFSETOF_DYN(VIRTQ_USED_T, auRing[pVirtio->uQueueSize[qIdx]]),
                      &uAvailEventIdx, sizeof(uAvailEventIdx));
    return uAvailEventIdx;
}
496
497DECLINLINE(void) virtioWriteUsedAvailEvent(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t uAvailEventIdx)
498{
499 /** VirtIO 1.0 uAvailEventIdx (avail_event) immediately follows ring */
500 AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
501 PDMDevHlpPCIPhysWrite(pVirtio->CTX_SUFF(pDevIns),
502 pVirtio->pGcPhysQueueUsed[qIdx]
503 + RT_UOFFSETOF_DYN(VIRTQ_USED_T, auRing[pVirtio->uQueueSize[qIdx]]),
504 &uAvailEventIdx, sizeof(uAvailEventIdx));
505}
506
507
508/**
509 * Makes the MMIO-mapped Virtio uDeviceStatus registers non-cryptic */
510DECLINLINE(void) virtioLogDeviceStatus( uint8_t status)
511{
512 if (status == 0)
513 Log6(("RESET"));
514 else
515 {
516 int primed = 0;
517 if (status & VIRTIO_STATUS_ACKNOWLEDGE)
518 Log6(("ACKNOWLEDGE", primed++));
519 if (status & VIRTIO_STATUS_DRIVER)
520 Log6(("%sDRIVER", primed++ ? " | " : ""));
521 if (status & VIRTIO_STATUS_FEATURES_OK)
522 Log6(("%sFEATURES_OK", primed++ ? " | " : ""));
523 if (status & VIRTIO_STATUS_DRIVER_OK)
524 Log6(("%sDRIVER_OK", primed++ ? " | " : ""));
525 if (status & VIRTIO_STATUS_FAILED)
526 Log6(("%sFAILED", primed++ ? " | " : ""));
527 if (status & VIRTIO_STATUS_DEVICE_NEEDS_RESET)
528 Log6(("%sNEEDS_RESET", primed++ ? " | " : ""));
529 }
530}
531
/**
 * Forward declarations: internal device operations and saved-state (SSM) callbacks,
 * defined in the corresponding .cpp.
 */
static void virtioResetQueue              (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static void virtioNotifyGuestDriver       (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static int  virtioRaiseInterrupt          (PVIRTIOSTATE pVirtio, uint8_t uCause);
static void virtioLowerInterrupt          (PVIRTIOSTATE pVirtio);
static void virtioQueueNotified           (PVIRTIOSTATE pVirtio, uint16_t qidx, uint16_t uDescIdx);
static int  virtioCommonCfgAccessed       (PVIRTIOSTATE pVirtio, int fWrite, off_t uOffset, unsigned cb, void const *pv);
static void virtioGuestResetted           (PVIRTIOSTATE pVirtio);

static DECLCALLBACK(int) virtioR3SaveExec (PPDMDEVINS pDevIns, PSSMHANDLE pSSM);
static DECLCALLBACK(int) virtioR3LoadExec (PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
static DECLCALLBACK(int) virtioR3LoadDone (PPDMDEVINS pDevIns, PSSMHANDLE pSSM);
static DECLCALLBACK(int) virtioR3LiveExec (PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass);

544
545#endif /* !VBOX_INCLUDED_SRC_VirtIO_Virtio_1_0_impl_h */
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette