VirtualBox

source: vbox/trunk/src/VBox/Devices/VirtIO/Virtio_1_0_impl.h@ 80692

Last change on this file since 80692 was 80683, checked in by vboxsync, 5 years ago

Storage:DevVirtioSCSI.cpp: suspend/resume/reset implemented and seems to be working. See bugref:9440, Comment #84

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 30.6 KB
/* $Id: Virtio_1_0_impl.h 80683 2019-09-09 19:57:50Z vboxsync $ $Revision: 80683 $ $Date: 2019-09-09 19:57:50 +0000 (Mon, 09 Sep 2019) $ $Author: vboxsync $ */
/** @file
 * Virtio_1_0_impl.h - Virtio Declarations
 */

/*
 * Copyright (C) 2009-2019 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#ifndef VBOX_INCLUDED_SRC_VirtIO_Virtio_1_0_impl_h
#define VBOX_INCLUDED_SRC_VirtIO_Virtio_1_0_impl_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

#include "Virtio_1_0.h"

/** @name Saved state versions.
 * The saved state version is changed if either the common part or any of the
 * device-specific parts change. That is, it is perfectly possible that the
 * version of the saved vnet state will increase as a result of a change in the
 * vblk structure, for example.
 */
#define VIRTIO_SAVEDSTATE_VERSION           1
/** @} */

#define VIRTIO_F_VERSION_1                  RT_BIT_64(32)  /**< Required feature bit for 1.0 devices      */

#define VIRTIO_F_INDIRECT_DESC              RT_BIT_64(28)  /**< Allow descs to point to list of descs     */
#define VIRTIO_F_EVENT_IDX                  RT_BIT_64(29)  /**< Allow notification disable for n elems    */
#define VIRTIO_F_RING_INDIRECT_DESC         RT_BIT_64(28)  /**< Doc bug: Goes under two names in spec     */
#define VIRTIO_F_RING_EVENT_IDX             RT_BIT_64(29)  /**< Doc bug: Goes under two names in spec     */

#define VIRTIO_DEV_INDEPENDENT_FEATURES_OFFERED ( 0 )      /**< TBD: Add VIRTIO_F_INDIRECT_DESC           */

#define VIRTIO_ISR_VIRTQ_INTERRUPT          RT_BIT_32(0)   /**< Virtq interrupt bit of ISR register       */
#define VIRTIO_ISR_DEVICE_CONFIG            RT_BIT_32(1)   /**< Device configuration changed bit of ISR   */
#define DEVICE_PCI_VENDOR_ID_VIRTIO         0x1AF4         /**< Guest driver locates dev via (mandatory)  */
#define DEVICE_PCI_REVISION_ID_VIRTIO       1              /**< VirtIO 1.0 non-transitional drivers >= 1  */

/** Reserved (*negotiated*) Feature Bits (i.e. device-independent features, VirtIO 1.0 spec, section 6) */

#define VIRTIO_MSI_NO_VECTOR                0xffff         /**< Vector value to disable MSI for queue     */

/** Device Status field constants (from Virtio 1.0 spec) */
#define VIRTIO_STATUS_ACKNOWLEDGE           0x01           /**< Guest driver: Located this VirtIO device  */
#define VIRTIO_STATUS_DRIVER                0x02           /**< Guest driver: Can drive this VirtIO dev.  */
#define VIRTIO_STATUS_DRIVER_OK             0x04           /**< Guest driver: Driver set-up and ready     */
#define VIRTIO_STATUS_FEATURES_OK           0x08           /**< Guest driver: Feature negotiation done    */
#define VIRTIO_STATUS_FAILED                0x80           /**< Guest driver: Fatal error, gave up        */
#define VIRTIO_STATUS_DEVICE_NEEDS_RESET    0x40           /**< Device experienced unrecoverable error    */

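/*
 * Illustrative sketch (not part of the original header): how a device-side check for a
 * fully initialized guest driver might look, using the status bits above. The helper
 * name is hypothetical; the real code gates queue operation on VIRTIO_STATUS_DRIVER_OK
 * via the DRIVER_OK() macro defined further below.
 */
DECLINLINE(bool) virtioExampleIsDriverFullyInitialized(uint8_t uDeviceStatus)
{
    /* FEATURES_OK and DRIVER_OK are both set once negotiation and driver setup completed */
    return    (uDeviceStatus & VIRTIO_STATUS_FEATURES_OK)
           && (uDeviceStatus & VIRTIO_STATUS_DRIVER_OK);
}
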
/** @def Virtio Device PCI Capabilities type codes */
#define VIRTIO_PCI_CAP_COMMON_CFG           1              /**< Common configuration PCI capability ID    */
#define VIRTIO_PCI_CAP_NOTIFY_CFG           2              /**< Notification area PCI capability ID       */
#define VIRTIO_PCI_CAP_ISR_CFG              3              /**< ISR PCI capability id                     */
#define VIRTIO_PCI_CAP_DEVICE_CFG           4              /**< Device-specific PCI cfg capability ID     */
#define VIRTIO_PCI_CAP_PCI_CFG              5              /**< PCI CFG capability ID                     */

#define VIRTIO_PCI_CAP_ID_VENDOR            0x09           /**< Vendor-specific PCI CFG Device Cap. ID    */

/**
 * The following is the PCI capability struct common to all VirtIO capability types
 */
typedef struct virtio_pci_cap
{
    /* All little-endian */
    uint8_t   uCapVndr;                                    /**< Generic PCI field: PCI_CAP_ID_VNDR        */
    uint8_t   uCapNext;                                    /**< Generic PCI field: next ptr.              */
    uint8_t   uCapLen;                                     /**< Generic PCI field: capability length      */
    uint8_t   uCfgType;                                    /**< Identifies the structure.                 */
    uint8_t   uBar;                                        /**< Where to find it.                         */
    uint8_t   uPadding[3];                                 /**< Pad to full dword.                        */
    uint32_t  uOffset;                                     /**< Offset within bar.  (L.E.)                */
    uint32_t  uLength;                                     /**< Length of struct, in bytes. (L.E.)        */
} VIRTIO_PCI_CAP_T, *PVIRTIO_PCI_CAP_T;

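/*
 * Illustrative sketch (not part of the original header): how a device implementation
 * might fill in one of these capability records when laying out its PCI config space.
 * The helper name and the BAR/offset/length values are hypothetical.
 */
DECLINLINE(void) virtioExampleFillCommonCfgCap(PVIRTIO_PCI_CAP_T pCap, uint8_t uCapNext)
{
    pCap->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;     /* vendor-specific PCI capability ID            */
    pCap->uCapNext = uCapNext;                     /* link to the next capability (0 = end of list) */
    pCap->uCapLen  = sizeof(VIRTIO_PCI_CAP_T);     /* generic header only, no trailing fields       */
    pCap->uCfgType = VIRTIO_PCI_CAP_COMMON_CFG;    /* this record describes the common config       */
    pCap->uBar     = 2;                            /* hypothetical BAR hosting the MMIO regions     */
    pCap->uOffset  = 0x0;                          /* hypothetical offset of the region in the BAR  */
    pCap->uLength  = 0x38;                         /* hypothetical length of the mapped region      */
}
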
/**
 * IN/OUT descriptor chain associated with one element of the virtq avail ring,
 * represented as respective arrays of SG segments.
 */
typedef struct VIRTQ_DESC_CHAIN                            /**< Describes a single queue element          */
{
    RTSGSEG  aSegsIn[VIRTQ_MAX_SIZE];                      /**< List of segments to write to guest        */
    RTSGSEG  aSegsOut[VIRTQ_MAX_SIZE];                     /**< List of segments read from guest          */
    uint32_t uHeadIdx;                                     /**< Index at head desc (source of seg arrays) */
    uint32_t cSegsIn;                                      /**< Count of segments in aSegsIn[]            */
    uint32_t cSegsOut;                                     /**< Count of segments in aSegsOut[]           */
} VIRTQ_DESC_CHAIN_T, *PVIRTQ_DESC_CHAIN_T;

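/*
 * Illustrative sketch (not part of the original header): totalling the guest-to-host
 * ("out") payload of a populated descriptor chain. The helper name is hypothetical;
 * device code would typically hand these segments to an RTSGBUF instead.
 */
DECLINLINE(size_t) virtioExampleDescChainOutBytes(PVIRTQ_DESC_CHAIN_T pDescChain)
{
    size_t cb = 0;
    for (uint32_t i = 0; i < pDescChain->cSegsOut; i++)
        cb += pDescChain->aSegsOut[i].cbSeg;               /* RTSGSEG::cbSeg is the segment length */
    return cb;
}
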
/**
 * Local implementation's usage context of a queue (i.e. not part of VirtIO specification)
 */
typedef struct VIRTQ_PROXY
{
    RTSGBUF  inSgBuf;                                      /**< host-to-guest buffers                     */
    RTSGBUF  outSgBuf;                                     /**< guest-to-host buffers                     */
    const char szVirtqName[32];                            /**< Dev-specific name of queue                */
    uint16_t uAvailIdx;                                    /**< Consumer's position in avail ring         */
    uint16_t uUsedIdx;                                     /**< Consumer's position in used ring          */
    bool     fEventThresholdReached;                       /**< Don't lose track while queueing ahead     */
    PVIRTQ_DESC_CHAIN_T pDescChain;                        /**< Per-queue s/g data.                       */
} VIRTQ_PROXY_T, *PVIRTQ_PROXY_T;

/**
 * VirtIO 1.0 Capabilities' related MMIO-mapped structs:
 *
 * Note: virtio_pci_device_cap is dev-specific, implemented by client. Definition unknown here.
 */
typedef struct virtio_pci_common_cfg
{
    /* Per device fields */
    uint32_t uDeviceFeaturesSelect;                        /**< RW (driver selects device features)       */
    uint32_t uDeviceFeatures;                              /**< RO (device reports features to driver)    */
    uint32_t uDriverFeaturesSelect;                        /**< RW (driver selects driver features)       */
    uint32_t uDriverFeatures;                              /**< RW (driver-accepted device features)      */
    uint16_t uMsixConfig;                                  /**< RW (driver sets MSI-X config vector)      */
    uint16_t uNumQueues;                                   /**< RO (device specifies max queues)          */
    uint8_t  uDeviceStatus;                                /**< RW (driver writes device status, 0=reset) */
    uint8_t  uConfigGeneration;                            /**< RO (device changes when changing configs) */

    /* Per virtqueue fields (as determined by uQueueSelect) */
    uint16_t uQueueSelect;                                 /**< RW (selects queue focus for these fields) */
    uint16_t uQueueSize;                                   /**< RW (queue size, 0 - 2^n)                  */
    uint16_t uQueueMsixVector;                             /**< RW (driver selects MSI-X queue vector)    */
    uint16_t uQueueEnable;                                 /**< RW (driver controls usability of queue)   */
    uint16_t uQueueNotifyOff;                              /**< RO (offset into virtqueue; see spec)      */
    uint64_t pGcPhysQueueDesc;                             /**< RW (driver writes desc table phys addr)   */
    uint64_t pGcPhysQueueAvail;                            /**< RW (driver writes avail ring phys addr)   */
    uint64_t pGcPhysQueueUsed;                             /**< RW (driver writes used ring phys addr)    */
} VIRTIO_PCI_COMMON_CFG_T, *PVIRTIO_PCI_COMMON_CFG_T;

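/*
 * Illustrative sketch (not part of the original header): how the device's 64-bit
 * feature word maps onto the 32-bit uDeviceFeaturesSelect / uDeviceFeatures window
 * above. The helper name is hypothetical.
 */
DECLINLINE(uint32_t) virtioExampleFeatureWindow(uint64_t uFeatures64, uint32_t uSelect)
{
    /* Select 0 exposes bits 0..31, select 1 exposes bits 32..63 (e.g. VIRTIO_F_VERSION_1) */
    return uSelect == 0 ? (uint32_t)uFeatures64
                        : (uint32_t)(uFeatures64 >> 32);
}
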
typedef struct virtio_pci_notify_cap
{
    struct virtio_pci_cap pciCap;                          /**< Notification MMIO mapping capability      */
    uint32_t uNotifyOffMultiplier;                         /**< notify_off_multiplier                     */
} VIRTIO_PCI_NOTIFY_CAP_T, *PVIRTIO_PCI_NOTIFY_CAP_T;

typedef struct virtio_pci_cfg_cap
{
    struct virtio_pci_cap pciCap;                          /**< Cap. defines the BAR/off/len to access    */
    uint8_t uPciCfgData[4];                                /**< I/O buf for above cap.                    */
} VIRTIO_PCI_CFG_CAP_T, *PVIRTIO_PCI_CFG_CAP_T;

/**
 * The core (/common) state of the VirtIO PCI device
 *
 * @implements  PDMILEDPORTS
 */
typedef struct VIRTIOSTATE
{
    PDMPCIDEV                 dev;                            /**< PCI device                                */
    char                      szInstance[16];                 /**< Instance name, e.g. "VIRTIOSCSI0"         */
    void *                    pClientContext;                 /**< Client callback returned on callbacks     */

    PPDMDEVINSR3              pDevInsR3;                      /**< Device instance - R3                      */
    PPDMDEVINSR0              pDevInsR0;                      /**< Device instance - R0                      */
    PPDMDEVINSRC              pDevInsRC;                      /**< Device instance - RC                      */

    RTGCPHYS                  pGcPhysPciCapBase;              /**< Pointer to MMIO mapped capability data    */
    RTGCPHYS                  pGcPhysCommonCfg;               /**< Pointer to MMIO mapped capability data    */
    RTGCPHYS                  pGcPhysNotifyCap;               /**< Pointer to MMIO mapped capability data    */
    RTGCPHYS                  pGcPhysIsrCap;                  /**< Pointer to MMIO mapped capability data    */
    RTGCPHYS                  pGcPhysDeviceCap;               /**< Pointer to MMIO mapped capability data    */

    RTGCPHYS                  pGcPhysQueueDesc[VIRTQ_MAX_CNT];  /**< (MMIO) PhysAdr per-Q desc structs   GUEST */
    RTGCPHYS                  pGcPhysQueueAvail[VIRTQ_MAX_CNT]; /**< (MMIO) PhysAdr per-Q avail structs  GUEST */
    RTGCPHYS                  pGcPhysQueueUsed[VIRTQ_MAX_CNT];  /**< (MMIO) PhysAdr per-Q used structs   GUEST */
    uint16_t                  uQueueNotifyOff[VIRTQ_MAX_CNT];   /**< (MMIO) per-Q notify offset           HOST */
    uint16_t                  uQueueMsixVector[VIRTQ_MAX_CNT];  /**< (MMIO) Per-queue vector for MSI-X   GUEST */
    uint16_t                  uQueueEnable[VIRTQ_MAX_CNT];      /**< (MMIO) Per-queue enable             GUEST */
    uint16_t                  uQueueSize[VIRTQ_MAX_CNT];        /**< (MMIO) Per-queue size          HOST/GUEST */
    uint16_t                  uQueueSelect;                     /**< (MMIO) queue selector               GUEST */
    uint16_t                  padding;
    uint64_t                  uDeviceFeatures;                  /**< (MMIO) Host features offered         HOST */
    uint64_t                  uDriverFeatures;                  /**< (MMIO) Host features accepted       GUEST */
    uint32_t                  uDeviceFeaturesSelect;            /**< (MMIO) hi/lo select uDeviceFeatures GUEST */
    uint32_t                  uDriverFeaturesSelect;            /**< (MMIO) hi/lo select uDriverFeatures GUEST */
    uint32_t                  uMsixConfig;                      /**< (MMIO) MSI-X vector                 GUEST */
    uint32_t                  uNumQueues;                       /**< (MMIO) Actual number of queues      GUEST */
    uint8_t                   uDeviceStatus;                    /**< (MMIO) Device Status                GUEST */
    uint8_t                   uPrevDeviceStatus;                /**< (MMIO) Prev Device Status           GUEST */
    uint8_t                   uConfigGeneration;                /**< (MMIO) Device config sequencer      HOST  */

    VIRTQ_PROXY_T             virtqProxy[VIRTQ_MAX_CNT];        /**< Local impl-specific queue context         */
    VIRTIOCALLBACKS           virtioCallbacks;                  /**< Callback vectors to client                */

    PFNPCICONFIGREAD          pfnPciConfigReadOld;              /**< Prev rd. cb. intercepting PCI Cfg I/O     */
    PFNPCICONFIGWRITE         pfnPciConfigWriteOld;             /**< Prev wr. cb. intercepting PCI Cfg I/O     */

    PVIRTIO_PCI_CFG_CAP_T     pPciCfgCap;                       /**< Pointer to struct in configuration area   */
    PVIRTIO_PCI_NOTIFY_CAP_T  pNotifyCap;                       /**< Pointer to struct in configuration area   */
    PVIRTIO_PCI_CAP_T         pCommonCfgCap;                    /**< Pointer to struct in configuration area   */
    PVIRTIO_PCI_CAP_T         pIsrCap;                          /**< Pointer to struct in configuration area   */
    PVIRTIO_PCI_CAP_T         pDeviceCap;                       /**< Pointer to struct in configuration area   */

    uint32_t                  cbDevSpecificCfg;                 /**< Size of client's dev-specific config data */
    void                     *pDevSpecificCfg;                  /**< Pointer to client's struct                */
    void                     *pPrevDevSpecificCfg;              /**< Previous read dev-specific cfg of client  */
    bool                      fGenUpdatePending;                /**< If set, update cfg gen after driver reads */
    uint8_t                   uPciCfgDataOff;
    uint8_t                   uISR;                             /**< Interrupt Status Register.                */

} VIRTIOSTATE, *PVIRTIOSTATE;

/** virtq related flags */
#define VIRTQ_DESC_F_NEXT                   1              /**< Indicates this descriptor chains to next  */
#define VIRTQ_DESC_F_WRITE                  2              /**< Marks buffer as write-only (default ro)   */
#define VIRTQ_DESC_F_INDIRECT               4              /**< Buffer is list of buffer descriptors      */

#define VIRTQ_USED_F_NO_NOTIFY              1              /**< Dev to Drv: Don't notify when buf added   */
#define VIRTQ_AVAIL_F_NO_INTERRUPT          1              /**< Drv to Dev: Don't notify when buf eaten   */

/**
 * virtq related structs
 * (struct names follow VirtIO 1.0 spec, typedefs use VBox style)
 */
typedef struct virtq_desc
{
    uint64_t  pGcPhysBuf;                                  /**< addr       GC Phys. address of buffer     */
    uint32_t  cb;                                          /**< len        Buffer length                  */
    uint16_t  fFlags;                                      /**< flags      Buffer specific flags          */
    uint16_t  uDescIdxNext;                                /**< next       Idx set if VIRTQ_DESC_F_NEXT   */
} VIRTQ_DESC_T, *PVIRTQ_DESC_T;

typedef struct virtq_avail
{
    uint16_t  fFlags;                                      /**< flags      avail ring drv to dev flags    */
    uint16_t  uIdx;                                        /**< idx        Index of next free ring slot   */
    uint16_t  auRing[1];                                   /**< ring       Ring: avail drv to dev bufs    */
    uint16_t  uUsedEventIdx;                               /**< used_event (if VIRTIO_F_EVENT_IDX)        */
} VIRTQ_AVAIL_T, *PVIRTQ_AVAIL_T;

typedef struct virtq_used_elem
{
    uint32_t  uDescIdx;                                    /**< idx        Start of used desc chain       */
    uint32_t  cbElem;                                      /**< len        Total len of used desc chain   */
} VIRTQ_USED_ELEM_T;

typedef struct virtq_used
{
    uint16_t  fFlags;                                      /**< flags      used ring host-to-guest flags  */
    uint16_t  uIdx;                                        /**< idx        Index of next ring slot        */
    VIRTQ_USED_ELEM_T auRing[1];                           /**< ring       Ring: used dev to drv bufs     */
    uint16_t  uAvailEventIdx;                              /**< avail_event (if VIRTIO_F_EVENT_IDX)       */
} VIRTQ_USED_T, *PVIRTQ_USED_T;

/**
 * This macro sets fMatched to true if the physical address and access length fall
 * within the MMIO-mapped capability struct.
 *
 * Actual parameters:
 *     @param pGcPhysCapData - [input]  Pointer to MMIO mapped capability struct
 *     @param pCfgCap        - [input]  Pointer to capability in PCI configuration area
 *     @param fMatched       - [output] True if GCPhysAddr is within the physically mapped capability.
 *
 * Implied parameters:
 *     @param GCPhysAddr     - [input, implied] Physical address accessed (via MMIO callback)
 *     @param cb             - [input, implied] Number of bytes to access
 */
#define MATCH_VIRTIO_CAP_STRUCT(pGcPhysCapData, pCfgCap, fMatched) \
        bool fMatched = false; \
        if (   pGcPhysCapData && pCfgCap \
            && GCPhysAddr >= (RTGCPHYS)pGcPhysCapData \
            && GCPhysAddr <  ((RTGCPHYS)pGcPhysCapData + ((PVIRTIO_PCI_CAP_T)pCfgCap)->uLength) \
            && cb <= ((PVIRTIO_PCI_CAP_T)pCfgCap)->uLength) \
                fMatched = true;

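/*
 * Illustrative sketch (not part of the original header): wrapping the macro to test
 * whether an MMIO access (GCPhysAddr, cb) falls inside the mapped common-cfg capability
 * region. The helper name is hypothetical; the real MMIO dispatch presumably expands
 * the macro directly, once per capability region.
 */
DECLINLINE(bool) virtioExampleIsCommonCfgAccess(PVIRTIOSTATE pVirtio, RTGCPHYS GCPhysAddr, unsigned cb)
{
    MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysCommonCfg, pVirtio->pCommonCfgCap, fCommonCfg);
    return fCommonCfg;
}
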
/**
 * This macro resolves to boolean true if uOffset matches a field's offset and size exactly,
 * or, for a 64-bit field, if the access covers either 32-bit half as a 32-bit access
 * (as mandated by section 4.1.3.1 of the VirtIO 1.0 specification).
 *
 * @param member  - Member of VIRTIO_PCI_COMMON_CFG_T
 * @param uOffset - Implied parameter: Offset into VIRTIO_PCI_COMMON_CFG_T
 * @param cb      - Implied parameter: Number of bytes to access
 * @result        - true or false
 */
#define MATCH_COMMON_CFG(member) \
        (   (   RT_SIZEOFMEMB(VIRTIO_PCI_COMMON_CFG_T, member) == 8 \
             && (   uOffset == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member) \
                 || uOffset == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member) + sizeof(uint32_t)) \
             && cb == sizeof(uint32_t)) \
         || (   uOffset == RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member) \
             && cb == RT_SIZEOFMEMB(VIRTIO_PCI_COMMON_CFG_T, member)))

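/*
 * Worked example (illustrative, not part of the original header): for the 64-bit member
 * pGcPhysQueueDesc, MATCH_COMMON_CFG(pGcPhysQueueDesc) is true for a 4-byte access at the
 * member's offset (low half), a 4-byte access at that offset + 4 (high half), or an
 * 8-byte access at the member's offset; any other (uOffset, cb) combination is rejected.
 */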
#define LOG_COMMON_CFG_ACCESS(member) \
            virtioLogMappedIoValue(__FUNCTION__, #member, RT_SIZEOFMEMB(VIRTIO_PCI_COMMON_CFG_T, member), \
                                   pv, cb, uIntraOff, fWrite, false, 0);

#define LOG_COMMON_CFG_ACCESS_INDEXED(member, idx) \
            virtioLogMappedIoValue(__FUNCTION__, #member, RT_SIZEOFMEMB(VIRTIO_PCI_COMMON_CFG_T, member), \
                                   pv, cb, uIntraOff, fWrite, true, idx);

#define COMMON_CFG_ACCESSOR(member) \
    do \
    { \
        uint32_t uIntraOff = uOffset - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member); \
        if (fWrite) \
            memcpy(((char *)&pVirtio->member) + uIntraOff, (const char *)pv, cb); \
        else \
            memcpy((char *)pv, (const char *)(((char *)&pVirtio->member) + uIntraOff), cb); \
        LOG_COMMON_CFG_ACCESS(member); \
    } while(0)

#define COMMON_CFG_ACCESSOR_INDEXED(member, idx) \
    do \
    { \
        uint32_t uIntraOff = uOffset - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member); \
        if (fWrite) \
            memcpy(((char *)(pVirtio->member + idx)) + uIntraOff, (const char *)pv, cb); \
        else \
            memcpy((char *)pv, (const char *)(((char *)(pVirtio->member + idx)) + uIntraOff), cb); \
        LOG_COMMON_CFG_ACCESS_INDEXED(member, idx); \
    } while(0)

#define COMMON_CFG_ACCESSOR_READONLY(member) \
    do \
    { \
        uint32_t uIntraOff = uOffset - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member); \
        if (fWrite) \
            LogFunc(("Guest attempted to write readonly virtio_pci_common_cfg.%s\n", #member)); \
        else \
        { \
            memcpy((char *)pv, (const char *)(((char *)&pVirtio->member) + uIntraOff), cb); \
            LOG_COMMON_CFG_ACCESS(member); \
        } \
    } while(0)

#define COMMON_CFG_ACCESSOR_INDEXED_READONLY(member, idx) \
    do \
    { \
        uint32_t uIntraOff = uOffset - RT_OFFSETOF(VIRTIO_PCI_COMMON_CFG_T, member); \
        if (fWrite) \
            LogFunc(("Guest attempted to write readonly virtio_pci_common_cfg.%s[%d]\n", #member, idx)); \
        else \
        { \
            memcpy((char *)pv, ((char *)(pVirtio->member + idx)) + uIntraOff, cb); \
            LOG_COMMON_CFG_ACCESS_INDEXED(member, idx); \
        } \
    } while(0)

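/*
 * Illustrative sketch (not part of the original header): how the matcher and accessor
 * macros above are intended to combine when handling a guest access to the common
 * configuration region. The function name is hypothetical; the real handler
 * (virtioCommonCfgAccessed, declared below) presumably follows this pattern for each
 * field of VIRTIO_PCI_COMMON_CFG_T.
 */
#if 0 /* example only */
static int virtioExampleCommonCfgAccess(PVIRTIOSTATE pVirtio, int fWrite, off_t uOffset, unsigned cb, void const *pv)
{
    if (MATCH_COMMON_CFG(uNumQueues))
        COMMON_CFG_ACCESSOR_READONLY(uNumQueues);               /* device-owned field: reads allowed, writes logged   */
    else if (MATCH_COMMON_CFG(uQueueSelect))
        COMMON_CFG_ACCESSOR(uQueueSelect);                      /* driver-writable scalar field                       */
    else if (MATCH_COMMON_CFG(uQueueSize))
        COMMON_CFG_ACCESSOR_INDEXED(uQueueSize, pVirtio->uQueueSelect); /* per-queue field, indexed by queue selector */
    else
        LogFunc(("Bad access to common config: uOffset=%d cb=%d\n", (int)uOffset, (int)cb));
    return VINF_SUCCESS;
}
#endif
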
#define DRIVER_OK(pVirtio) (pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK)

/**
 * Internal queue operations
 */

static int      virtqIsEventNeeded        (uint16_t uEventIdx, uint16_t uDescIdxNew, uint16_t uDescIdxOld);
static bool     virtqIsEmpty              (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static void     virtioReadDesc            (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t uDescIdx, PVIRTQ_DESC_T pDesc);
static uint16_t virtioReadAvailDescIdx    (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t availIdx);
static uint16_t virtioReadAvailRingIdx    (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static uint16_t virtioReadAvailFlags      (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static uint16_t virtioReadAvailUsedEvent  (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static void     virtioWriteUsedElem       (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t usedIdx, uint32_t uDescIdx, uint32_t uLen);
static void     virtioWriteUsedRingIdx    (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint16_t uDescIdx);
static uint16_t virtioReadUsedRingIdx     (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static uint16_t virtioReadUsedFlags       (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static void     virtioWriteUsedFlags      (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t fFlags);
static uint16_t virtioReadUsedAvailEvent  (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static void     virtioWriteUsedAvailEvent (PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t uAvailEventIdx);


DECLINLINE(int) virtqIsEventNeeded(uint16_t uEventIdx, uint16_t uDescIdxNew, uint16_t uDescIdxOld)
{
    return (uint16_t)(uDescIdxNew - uEventIdx - 1) < (uint16_t)(uDescIdxNew - uDescIdxOld);
}

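/*
 * Worked example (illustrative, not part of the original header): with uEventIdx = 5,
 * uDescIdxOld = 5 and uDescIdxNew = 6, the expression compares (6 - 5 - 1) = 0 against
 * (6 - 5) = 1, so an event is needed exactly when the new index has just moved past the
 * driver's requested event index. The uint16_t casts keep the comparison correct across
 * ring-index wrap-around (e.g. uDescIdxNew = 2 with uDescIdxOld = 0xFFFE).
 */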
DECLINLINE(bool) virtqIsEmpty(PVIRTIOSTATE pVirtio, uint16_t qIdx)
{
    return virtioReadAvailRingIdx(pVirtio, qIdx) == pVirtio->virtqProxy[qIdx].uAvailIdx;
}

/**
 * Accessor for virtq descriptor
 */
DECLINLINE(void) virtioReadDesc(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t uDescIdx, PVIRTQ_DESC_T pDesc)
{
    //Log(("%s virtioQueueReadDesc: ring=%p idx=%u\n", INSTANCE(pState), pVirtQ, idx));
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->pGcPhysQueueDesc[qIdx]
                    + sizeof(VIRTQ_DESC_T) * (uDescIdx % pVirtio->uQueueSize[qIdx]),
                      pDesc, sizeof(VIRTQ_DESC_T));
}

/**
 * Accessors for virtq avail ring
 */
DECLINLINE(uint16_t) virtioReadAvailDescIdx(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t availIdx)
{
    uint16_t uDescIdx;
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->pGcPhysQueueAvail[qIdx]
                    + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[availIdx % pVirtio->uQueueSize[qIdx]]),
                      &uDescIdx, sizeof(uDescIdx));
    return uDescIdx;
}

DECLINLINE(uint16_t) virtioReadAvailRingIdx(PVIRTIOSTATE pVirtio, uint16_t qIdx)
{
    uint16_t uIdx = 0;
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->pGcPhysQueueAvail[qIdx] + RT_UOFFSETOF(VIRTQ_AVAIL_T, uIdx),
                      &uIdx, sizeof(uIdx));
    return uIdx;
}

DECLINLINE(uint16_t) virtioReadAvailFlags(PVIRTIOSTATE pVirtio, uint16_t qIdx)
{
    uint16_t fFlags;
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->pGcPhysQueueAvail[qIdx] + RT_UOFFSETOF(VIRTQ_AVAIL_T, fFlags),
                      &fFlags, sizeof(fFlags));
    return fFlags;
}

DECLINLINE(uint16_t) virtioReadAvailUsedEvent(PVIRTIOSTATE pVirtio, uint16_t qIdx)
{
    uint16_t uUsedEventIdx;
    /* VirtIO 1.0 uUsedEventIdx (used_event) immediately follows ring */
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->pGcPhysQueueAvail[qIdx]
                    + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[pVirtio->uQueueSize[qIdx]]),
                      &uUsedEventIdx, sizeof(uUsedEventIdx));
    return uUsedEventIdx;
}

/**
 * Accessors for virtq used ring
 */
DECLINLINE(void) virtioWriteUsedElem(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t usedIdx, uint32_t uDescIdx, uint32_t uLen)
{
    VIRTQ_USED_ELEM_T elem = { uDescIdx, uLen };
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPCIPhysWrite(pVirtio->CTX_SUFF(pDevIns),
                          pVirtio->pGcPhysQueueUsed[qIdx]
                        + RT_UOFFSETOF_DYN(VIRTQ_USED_T, auRing[usedIdx % pVirtio->uQueueSize[qIdx]]),
                          &elem, sizeof(elem));
}

DECLINLINE(void) virtioWriteUsedRingIdx(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint16_t uIdx)
{
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPCIPhysWrite(pVirtio->CTX_SUFF(pDevIns),
                          pVirtio->pGcPhysQueueUsed[qIdx] + RT_UOFFSETOF(VIRTQ_USED_T, uIdx),
                          &uIdx, sizeof(uIdx));
}

DECLINLINE(uint16_t) virtioReadUsedRingIdx(PVIRTIOSTATE pVirtio, uint16_t qIdx)
{
    uint16_t uIdx;
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->pGcPhysQueueUsed[qIdx] + RT_UOFFSETOF(VIRTQ_USED_T, uIdx),
                      &uIdx, sizeof(uIdx));
    return uIdx;
}

DECLINLINE(uint16_t) virtioReadUsedFlags(PVIRTIOSTATE pVirtio, uint16_t qIdx)
{
    uint16_t fFlags;
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->pGcPhysQueueUsed[qIdx] + RT_UOFFSETOF(VIRTQ_USED_T, fFlags),
                      &fFlags, sizeof(fFlags));
    return fFlags;
}

DECLINLINE(void) virtioWriteUsedFlags(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t fFlags)
{
    uint16_t fFlags16 = (uint16_t)fFlags; /* VIRTQ_USED_T::fFlags is 16 bits; don't overwrite the adjacent uIdx */
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPCIPhysWrite(pVirtio->CTX_SUFF(pDevIns),
                          pVirtio->pGcPhysQueueUsed[qIdx] + RT_UOFFSETOF(VIRTQ_USED_T, fFlags),
                          &fFlags16, sizeof(fFlags16));
}

DECLINLINE(uint16_t) virtioReadUsedAvailEvent(PVIRTIOSTATE pVirtio, uint16_t qIdx)
{
    uint16_t uAvailEventIdx;
    /* VirtIO 1.0 uAvailEventIdx (avail_event) immediately follows ring */
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPhysRead(pVirtio->CTX_SUFF(pDevIns),
                      pVirtio->pGcPhysQueueUsed[qIdx]
                    + RT_UOFFSETOF_DYN(VIRTQ_USED_T, auRing[pVirtio->uQueueSize[qIdx]]),
                      &uAvailEventIdx, sizeof(uAvailEventIdx));
    return uAvailEventIdx;
}

DECLINLINE(void) virtioWriteUsedAvailEvent(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint32_t uAvailEventIdx)
{
    uint16_t uAvailEventIdx16 = (uint16_t)uAvailEventIdx; /* avail_event is a 16-bit field in the used ring */
    /* VirtIO 1.0 uAvailEventIdx (avail_event) immediately follows ring */
    AssertMsg(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    PDMDevHlpPCIPhysWrite(pVirtio->CTX_SUFF(pDevIns),
                          pVirtio->pGcPhysQueueUsed[qIdx]
                        + RT_UOFFSETOF_DYN(VIRTQ_USED_T, auRing[pVirtio->uQueueSize[qIdx]]),
                          &uAvailEventIdx16, sizeof(uAvailEventIdx16));
}


/**
 * Makes the MMIO-mapped Virtio uDeviceStatus register non-cryptic
 */
DECLINLINE(void) virtioLogDeviceStatus(uint8_t status)
{
    if (status == 0)
        Log6(("RESET"));
    else
    {
        int primed = 0;
        if (status & VIRTIO_STATUS_ACKNOWLEDGE)
            Log6(("%sACKNOWLEDGE", primed++ ? " | " : ""));
        if (status & VIRTIO_STATUS_DRIVER)
            Log6(("%sDRIVER", primed++ ? " | " : ""));
        if (status & VIRTIO_STATUS_FEATURES_OK)
            Log6(("%sFEATURES_OK", primed++ ? " | " : ""));
        if (status & VIRTIO_STATUS_DRIVER_OK)
            Log6(("%sDRIVER_OK", primed++ ? " | " : ""));
        if (status & VIRTIO_STATUS_FAILED)
            Log6(("%sFAILED", primed++ ? " | " : ""));
        if (status & VIRTIO_STATUS_DEVICE_NEEDS_RESET)
            Log6(("%sNEEDS_RESET", primed++ ? " | " : ""));
    }
}

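/*
 * Example (illustrative, not part of the original header): a status byte of 0x0F
 * (ACKNOWLEDGE | DRIVER | DRIVER_OK | FEATURES_OK) is logged by the function above as
 * "ACKNOWLEDGE | DRIVER | FEATURES_OK | DRIVER_OK", and a status of 0 as "RESET".
 */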
static void     virtioResetQueue          (PVIRTIOSTATE pVirtio, uint16_t qIdx);
static void     virtioNotifyGuestDriver   (PVIRTIOSTATE pVirtio, uint16_t qIdx, bool fForce);
static int      virtioRaiseInterrupt      (PVIRTIOSTATE pVirtio, uint8_t uCause, bool fForce);
static void     virtioLowerInterrupt      (PVIRTIOSTATE pVirtio);
static void     virtioQueueNotified       (PVIRTIOSTATE pVirtio, uint16_t qidx, uint16_t uDescIdx);
static int      virtioCommonCfgAccessed   (PVIRTIOSTATE pVirtio, int fWrite, off_t uOffset, unsigned cb, void const *pv);
static void     virtioGuestResetted       (PVIRTIOSTATE pVirtio);

static DECLCALLBACK(int) virtioR3SaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM);
static DECLCALLBACK(int) virtioR3LoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
static DECLCALLBACK(int) virtioR3LoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM);
static DECLCALLBACK(int) virtioR3LiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass);

#endif /* !VBOX_INCLUDED_SRC_VirtIO_Virtio_1_0_impl_h */