VirtualBox

source: vbox/trunk/src/VBox/Devices/VirtIO/Virtio_1_0.cpp@ 80528

Last change on this file since 80528 was 80528, checked in by vboxsync, 5 years ago

Storage/DevVirtioSCSI.cpp: This is being integrated to stop burns. I pasted in and modified some hexdump code I wrote at Sun many years ago, and apparently my editor isn't smart enough to apply the tab and CRLF filters it does when things are entered via keystrokes

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 60.1 KB
Line 
1/* $Id: Virtio_1_0.cpp 80528 2019-09-01 22:28:39Z vboxsync $ */
2/** @file
3 * Virtio_1_0 - Virtio Common (PCI, feature & config mgt, queue mgt & proxy, notification mgt)
4 */
5
6/*
7 * Copyright (C) 2009-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEV_VIRTIO
23
24#include <VBox/log.h>
25#include <iprt/param.h>
26#include <iprt/assert.h>
27#include <iprt/uuid.h>
28#include <iprt/mem.h>
29#include <iprt/assert.h>
30#include <iprt/sg.h>
31#include <VBox/vmm/pdmdev.h>
32#include "Virtio_1_0_impl.h"
33#include "Virtio_1_0.h"
34
35#define INSTANCE(pVirtio) pVirtio->szInstance
36#define QUEUENAME(qIdx) (pVirtio->virtqProxy[qIdx].szVirtqName)
37#define CBQUEUENAME(qIdx) RTStrNLen(QUEUENAME(qIdx), sizeof(QUEUENAME(qIdx)))
38/**
39
40 * Formats the logging of a memory-mapped I/O input or output value
41 *
42 * @param pszFunc - To avoid displaying this function's name via __FUNCTION__ or Log2Func()
43 * @param pszMember - Name of struct member
44 * @param pv - Pointer to value
45 * @param cb - Size of value
46 * @param uOffset - Offset into member where value starts
47 * @param fWrite - True if write I/O
48 * @param fHasIndex - True if the member is indexed
49 * @param idx - The index, if fHasIndex is true
50 */
51void virtioLogMappedIoValue(const char *pszFunc, const char *pszMember, size_t uMemberSize,
52 const void *pv, uint32_t cb, uint32_t uOffset, bool fWrite,
53 bool fHasIndex, uint32_t idx)
54{
55
56#define FMTHEX(fmtout, val, cNybbles) \
57 fmtout[cNybbles] = '\0'; \
58 for (uint8_t i = 0; i < cNybbles; i++) \
59 fmtout[(cNybbles - i) - 1] = "0123456789abcdef"[(val >> (i * 4)) & 0xf];
60
61#define MAX_STRING 64
62 char pszIdx[MAX_STRING] = { 0 };
63 char pszDepiction[MAX_STRING] = { 0 };
64 char pszFormattedVal[MAX_STRING] = { 0 };
65 if (fHasIndex)
66 RTStrPrintf(pszIdx, sizeof(pszIdx), "[%d]", idx);
67 if (cb == 1 || cb == 2 || cb == 4 || cb == 8)
68 {
69 /* manually padding with 0's instead of \b due to different impl of %x precision than printf() */
70 uint64_t val = 0;
71 memcpy((char *)&val, pv, cb);
72 FMTHEX(pszFormattedVal, val, cb * 2);
73 if (uOffset != 0 || cb != uMemberSize) /* display bounds if partial member access */
74 RTStrPrintf(pszDepiction, sizeof(pszDepiction), "%s%s[%d:%d]",
75 pszMember, pszIdx, uOffset, uOffset + cb - 1);
76 else
77 RTStrPrintf(pszDepiction, sizeof(pszDepiction), "%s%s", pszMember, pszIdx);
78 RTStrPrintf(pszDepiction, sizeof(pszDepiction), "%-30s", pszDepiction);
79 uint32_t first = 0;
80 for (uint8_t i = 0; i < sizeof(pszDepiction); i++)
81 if (pszDepiction[i] == ' ' && first++)
82 pszDepiction[i] = '.';
83 Log3Func(("%s: Guest %s %s 0x%s\n",
84 pszFunc, fWrite ? "wrote" : "read ", pszDepiction, pszFormattedVal));
85 }
86 else /* odd number or oversized access, ... log inline hex-dump style */
87 {
88 Log3Func(("%s: Guest %s %s%s[%d:%d]: %.*Rhxs\n",
89 pszFunc, fWrite ? "wrote" : "read ", pszMember,
90 pszIdx, uOffset, uOffset + cb, cb, pv));
91 }
92}
93
94/**
95 * See API comments in header file for description
96 */
97int virtioQueueAttach(VIRTIOHANDLE hVirtio, uint16_t qIdx, const char *pcszName)
98{
99 LogFunc(("%s\n", pcszName));
100 PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)hVirtio;
101 PVIRTQ_PROXY_T pVirtqProxy = &(pVirtio->virtqProxy[qIdx]);
102 pVirtqProxy->pDescChain = (PVIRTQ_DESC_CHAIN_T)RTMemAllocZ(sizeof(VIRTQ_DESC_CHAIN_T));
103 if (!pVirtqProxy->pDescChain)
104 {
105 Log(("Out of memory!"));
106 return VERR_NO_MEMORY;
107 }
108 pVirtqProxy->uAvailIdx = 0;
109 pVirtqProxy->uUsedIdx = 0;
110 pVirtqProxy->fEventThresholdReached = false;
111 RTStrCopy((char *)pVirtqProxy->szVirtqName, sizeof(pVirtqProxy->szVirtqName), pcszName);
112 return VINF_SUCCESS;
113
114}
115
116/**
117 * See API comments in header file for description
118 */
119const char *virtioQueueGetName(VIRTIOHANDLE hVirtio, uint16_t qIdx)
120{
121 PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)hVirtio;
122 AssertMsgReturn(DRIVER_OK(pVirtio) && pVirtio->uQueueEnable[qIdx],
123 ("Guest driver not in ready state.\n"), "<null>");
124
125 return (const char *)((PVIRTIOSTATE)hVirtio)->virtqProxy[qIdx].szVirtqName;
126}
127
128/**
129 * See API comments in header file for description
130 */
131int virtioQueueSkip(VIRTIOHANDLE hVirtio, uint16_t qIdx)
132{
133 Assert(qIdx < sizeof(VIRTQ_PROXY_T));
134
135 PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)hVirtio;
136 PVIRTQ_PROXY_T pVirtqProxy = &pVirtio->virtqProxy[qIdx];
137
138 AssertMsgReturn(DRIVER_OK(pVirtio) && pVirtio->uQueueEnable[qIdx],
139 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
140
141 if (virtioQueueIsEmpty(pVirtio, qIdx))
142 return VERR_NOT_AVAILABLE;
143
144 Log2Func(("%s avail_idx=%u\n", pVirtqProxy->szVirtqName, pVirtqProxy->uAvailIdx));
145 pVirtqProxy->uAvailIdx++;
146
147 return VINF_SUCCESS;
148}
149
150
151/**
152 * See API comments in header file for description
153 */
154uint64_t virtioGetNegotiatedFeatures(VIRTIOHANDLE hVirtio)
155{
156 PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)hVirtio;
157 return pVirtio->uDriverFeatures;
158}
159
160
161/**
162 * See API comments in header file for description
163 */
164bool virtioQueueIsEmpty(VIRTIOHANDLE hVirtio, uint16_t qIdx)
165{
166 PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)hVirtio;
167 if (!(pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK))
168 return true;
169 return virtqIsEmpty(pVirtio, qIdx);
170}
171
172/**
173 * See API comments in header file for description
174 */
175int virtioQueuePeek(VIRTIOHANDLE hVirtio, uint16_t qIdx, PPRTSGBUF ppInSegs, PPRTSGBUF ppOutSegs)
176{
177 return virtioQueueGet(hVirtio, qIdx, false /* fRemove */, ppInSegs, ppOutSegs);
178}
179
/**
 * Fetches the descriptor chain at the current avail-ring position and exposes it
 * to the caller as a pair of S/G buffers (device-writable "in" segments vs.
 * device-readable "out" segments). See API comments in header file for description.
 */
int virtioQueueGet(VIRTIOHANDLE hVirtio, uint16_t qIdx, bool fRemove,
                   PPRTSGBUF ppInSegs, PPRTSGBUF ppOutSegs)
{
    PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)hVirtio;
    PVIRTQ_PROXY_T pVirtqProxy = &pVirtio->virtqProxy[qIdx];
    PVIRTQ_DESC_CHAIN_T pDescChain = pVirtqProxy->pDescChain;

    AssertMsgReturn(DRIVER_OK(pVirtio) && pVirtio->uQueueEnable[qIdx],
                    ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);

    if (virtqIsEmpty(pVirtio, qIdx))
        return VERR_NOT_AVAILABLE;

    /* Start a fresh chain description in the per-queue working area. */
    pDescChain->cSegsIn = pDescChain->cSegsOut = 0;

    pDescChain->uHeadIdx = virtioReadAvailDescIdx(pVirtio, qIdx, pVirtqProxy->uAvailIdx);
    uint16_t uDescIdx = pDescChain->uHeadIdx;

    Log3Func(("%s DESC CHAIN: (head) desc_idx=%u [avail_idx=%u]\n",
              pVirtqProxy->szVirtqName, pDescChain->uHeadIdx, pVirtqProxy->uAvailIdx));

    /* fRemove = consume: advance our shadow avail index past this entry. */
    if (fRemove)
        pVirtqProxy->uAvailIdx++;

    VIRTQ_DESC_T desc;
    do
    {
        RTSGSEG *pSeg;

        /*
         * Malicious or inept guests may go beyond aSegsIn or aSegsOut boundaries by linking
         * several descriptors into a loop. Since there is no legitimate way to get a sequences of
         * linked descriptors exceeding the total number of descriptors in the ring (see @bugref{8620}),
         * the following aborts I/O if breach and employs a simple log throttling algorithm to notify.
         */
        if (pDescChain->cSegsIn + pDescChain->cSegsOut >= VIRTQ_MAX_SIZE)
        {
            static volatile uint32_t s_cMessages = 0;
            static volatile uint32_t s_cThreshold = 1;
            if (ASMAtomicIncU32(&s_cMessages) == ASMAtomicReadU32(&s_cThreshold))
            {
                LogRel(("Too many linked descriptors; "
                        "check if the guest arranges descriptors in a loop.\n"));
                if (ASMAtomicReadU32(&s_cMessages) != 1)
                    LogRel(("(the above error has occured %u times so far)\n",
                            ASMAtomicReadU32(&s_cMessages)));
                /* Exponential back-off: only log the 1st, 10th, 100th, ... occurrence. */
                ASMAtomicWriteU32(&s_cThreshold, ASMAtomicReadU32(&s_cThreshold) * 10);
            }
            break;
        }
        RT_UNTRUSTED_VALIDATED_FENCE();

        virtioReadDesc(pVirtio, qIdx, uDescIdx, &desc);

        /* VIRTQ_DESC_F_WRITE set => device-writable buffer, i.e. an "in" segment. */
        if (desc.fFlags & VIRTQ_DESC_F_WRITE)
        {
            Log3Func(("%s IN  desc_idx=%u seg=%u addr=%RGp cb=%u\n",
                      QUEUENAME(qIdx), uDescIdx, pDescChain->cSegsIn, desc.pGcPhysBuf, desc.cb));

            pSeg = &(pDescChain->aSegsIn[pDescChain->cSegsIn++]);
        }
        else
        {
            Log3Func(("%s OUT desc_idx=%u seg=%u addr=%RGp cb=%u\n",
                      QUEUENAME(qIdx), uDescIdx, pDescChain->cSegsOut, desc.pGcPhysBuf, desc.cb));
            pSeg = &(pDescChain->aSegsOut[pDescChain->cSegsOut++]);
        }

        /* NOTE(review): a guest-physical address is stored in the (virtual-memory)
         * pvSeg field here; consumers appear to translate it later — confirm. */
        pSeg->pvSeg = (void *)desc.pGcPhysBuf;
        pSeg->cbSeg = desc.cb;

        uDescIdx = desc.uDescIdxNext;
    } while (desc.fFlags & VIRTQ_DESC_F_NEXT);

    /* Wrap the collected segment arrays in the per-queue S/G buffer objects. */
    RTSgBufInit(&pVirtqProxy->inSgBuf, (PCRTSGSEG)&pDescChain->aSegsIn, pDescChain->cSegsIn);
    RTSgBufInit(&pVirtqProxy->outSgBuf,(PCRTSGSEG)&pDescChain->aSegsOut, pDescChain->cSegsOut);

    if (ppInSegs)
        *ppInSegs = &pVirtqProxy->inSgBuf;
    if (ppOutSegs)
        *ppOutSegs = &pVirtqProxy->outSgBuf;

    Log3Func(("%s -- segs out: %u, segs in: %u --\n",
              pVirtqProxy->szVirtqName, pDescChain->cSegsOut, pDescChain->cSegsIn));

    return VINF_SUCCESS;
}
271
272 /** See API comments in header file prototype for description */
273int virtioQueuePut(VIRTIOHANDLE hVirtio, uint16_t qIdx, PRTSGBUF pSgBuf, bool fFence)
274{
275
276 Assert(qIdx < sizeof(VIRTQ_PROXY_T));
277
278 PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)hVirtio;
279 PVIRTQ_PROXY_T pVirtqProxy = &pVirtio->virtqProxy[qIdx];
280 PVIRTQ_DESC_CHAIN_T pDescChain = pVirtqProxy->pDescChain;
281
282 AssertMsgReturn(DRIVER_OK(pVirtio) && pVirtio->uQueueEnable[qIdx],
283 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
284 /**
285 * Copy caller's virtual memory sg buffer to physical memory
286 */
287 PRTSGBUF pBufSrc = pSgBuf;
288 PRTSGBUF pBufDst = &pVirtqProxy->inSgBuf;
289
290 size_t cbRemain = RTSgBufCalcTotalLength(pBufSrc);
291 uint16_t uUsedIdx = virtioReadUsedRingIdx(pVirtio, qIdx);
292 Log3Func(("Copying client data to %s, desc chain (head desc_idx %d)\n",
293 QUEUENAME(qIdx), uUsedIdx));
294
295 while (cbRemain)
296 {
297 uint64_t dstSgStart = (uint64_t)pBufDst->paSegs[pBufDst->idxSeg].pvSeg;
298 uint64_t dstSgLen = (uint64_t)pBufDst->paSegs[pBufDst->idxSeg].cbSeg;
299 uint64_t dstSgCur = (uint64_t)pBufDst->pvSegCur;
300 size_t cbCopy = RT_MIN(pBufSrc->cbSegLeft, dstSgLen - (dstSgCur - dstSgStart));
301 PDMDevHlpPhysWrite(pVirtio->CTX_SUFF(pDevIns),
302 (RTGCPHYS)pBufDst->pvSegCur, pBufSrc->pvSegCur, cbCopy);
303 RTSgBufAdvance(pBufSrc, cbCopy);
304 RTSgBufAdvance(pBufDst, cbCopy);
305 cbRemain -= cbCopy;
306 }
307
308 if (fFence)
309 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
310
311 /** TBD to avoid wasting cycles how do we wrap this in test for Log2* enabled? */
312 size_t cbInSgBuf = RTSgBufCalcTotalLength(pBufDst);
313 size_t cbWritten = cbInSgBuf - RTSgBufCalcLengthLeft(pBufDst);
314
315
316 /** If this write-ahead crosses threshold where the driver wants to get an event flag it */
317 if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
318 if (pVirtqProxy->uUsedIdx == virtioReadAvailUsedEvent(pVirtio, qIdx))
319 pVirtqProxy->fEventThresholdReached = true;
320
321 /**
322 * Place used buffer's descriptor in used ring but don't update used ring's slot index.
323 * That will be done with a subsequent client call to virtioQueueSync() */
324 virtioWriteUsedElem(pVirtio, qIdx,
325 pVirtqProxy->uUsedIdx++,
326 pDescChain->uHeadIdx,
327 pDescChain->cSegsIn);
328 Log3Func(("Copied %u bytes to %u byte buffer\n Write ahead used_idx=%d, %s used_idx=%d\n",
329 cbWritten, cbInSgBuf, pVirtqProxy->uUsedIdx, QUEUENAME(qIdx), uUsedIdx));
330 return VINF_SUCCESS;
331}
332
333/**
334 * See API comments in header file for description
335 */
336int virtioQueueSync(VIRTIOHANDLE hVirtio, uint16_t qIdx)
337{
338 Assert(qIdx < sizeof(VIRTQ_PROXY_T));
339
340 PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)hVirtio;
341 PVIRTQ_PROXY_T pVirtqProxy = &pVirtio->virtqProxy[qIdx];
342
343 AssertMsgReturn(DRIVER_OK(pVirtio) && pVirtio->uQueueEnable[qIdx],
344 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
345
346 uint16_t uIdx = virtioReadUsedRingIdx(pVirtio, qIdx);
347 Log3Func(("Updating %s used_idx from %u to %u\n",
348 QUEUENAME(qIdx), uIdx, pVirtqProxy->uUsedIdx));
349
350 virtioWriteUsedRingIdx(pVirtio, qIdx, pVirtqProxy->uUsedIdx);
351 virtioNotifyGuestDriver(pVirtio, qIdx);
352
353 return VINF_SUCCESS;
354}
355
356/**
357 * See API comments in header file for description
358 */
359static void virtioQueueNotified(PVIRTIOSTATE pVirtio, uint16_t qIdx, uint16_t uNotifyIdx)
360{
361 Assert(uNotifyIdx == qIdx);
362
363 PVIRTQ_PROXY_T pVirtqProxy = &pVirtio->virtqProxy[qIdx];
364 Log3Func(("%s\n", pVirtqProxy->szVirtqName));
365
366
367 /** Inform client */
368 pVirtio->virtioCallbacks.pfnVirtioQueueNotified((VIRTIOHANDLE)pVirtio, pVirtio->pClientContext, qIdx);
369}
370
/**
 * Trigger MSI-X or INT# interrupt to notify guest of data added to used ring of
 * the specified virtq, depending on the interrupt configuration of the device
 * and depending on negotiated and realtime constraints flagged by the guest driver.
 * See VirtIO 1.0 specification (section 2.4.7).
 */
static void virtioNotifyGuestDriver(PVIRTIOSTATE pVirtio, uint16_t qIdx)
{
    PVIRTQ_PROXY_T pVirtqProxy = &pVirtio->virtqProxy[qIdx];

    AssertMsgReturnVoid(DRIVER_OK(pVirtio) && pVirtio->uQueueEnable[qIdx],
                        ("Guest driver not in ready state.\n"));

    /* No MSI-X vector assigned for this queue: use the INT#/ISR path. */
    if (pVirtio->uQueueMsixVector[qIdx] == VIRTIO_MSI_NO_VECTOR)
    {
        if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
        {
            /* EVENT_IDX negotiated: only interrupt once the threshold recorded
             * by virtioQueuePut() has been crossed. */
            if (pVirtqProxy->fEventThresholdReached)
            {
                virtioRaiseInterrupt(pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT);
                pVirtqProxy->fEventThresholdReached = false;
                return;
            }
            Log3Func(("...skipping interrupt: VIRTIO_F_EVENT_IDX set but threshold not reached\n"));
        }
        else
        {
            /* If guest driver hasn't suppressed interrupts, do so.
             * NOTE(review): this tests the avail-ring constant VIRTQ_AVAIL_F_NO_INTERRUPT
             * against flags fetched via virtioReadUsedFlags() — verify the intended
             * ring (avail vs. used) is being read here. */
            if (!(virtioReadUsedFlags(pVirtio, qIdx) & VIRTQ_AVAIL_F_NO_INTERRUPT))
            {
                virtioRaiseInterrupt(pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT);
                return;
            }
            Log3Func(("...skipping interrupt. Guest flagged VIRTQ_AVAIL_F_NO_INTERRUPT for queue\n"));

        }
    }
    else
    {
        /* TBD, do MSI notification if criteria met */
    }
}
413
414/**
415 * NOTE: The consumer (PDM device) must call this function to 'forward' a relocation call.
416 *
417 * Device relocation callback.
418 *
419 * When this callback is called the device instance data, and if the
420 * device have a GC component, is being relocated, or/and the selectors
421 * have been changed. The device must use the chance to perform the
422 * necessary pointer relocations and data updates.
423 *
424 * Before the GC code is executed the first time, this function will be
425 * called with a 0 delta so GC pointer calculations can be one in one place.
426 *
427 * @param pDevIns Pointer to the device instance.
428 * @param offDelta The relocation delta relative to the old location.
429 *
430 * @remark A relocation CANNOT fail.
431 */
432void virtioRelocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
433{
434 RT_NOREF(offDelta);
435 PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);
436 LogFunc(("\n"));
437
438 pVirtio->pDevInsR3 = pDevIns;
439 pVirtio->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
440 pVirtio->pDevInsR0 = PDMDEVINS_2_R0PTR(pDevIns);
441}
442
443/**
444 * Raise interrupt.
445 *
446 * @param pVirtio The device state structure.
447 * @param uCause Interrupt cause bit mask to set in PCI ISR port.
448 */
449static int virtioRaiseInterrupt(PVIRTIOSTATE pVirtio, uint8_t uCause)
450{
451 if (uCause == VIRTIO_ISR_VIRTQ_INTERRUPT)
452 Log3Func(("reason: buffer added to 'used' ring.\n"));
453 else
454 if (uCause == VIRTIO_ISR_DEVICE_CONFIG)
455 Log3Func(("reason: device config change\n"));
456
457 pVirtio->uISR |= uCause;
458 PDMDevHlpPCISetIrq(pVirtio->CTX_SUFF(pDevIns), 0, 1);
459 return VINF_SUCCESS;
460}
461
462/**
463 * Lower interrupt. (Called when guest reads ISR)
464 *
465 * @param pVirtio The device state structure.
466 */
467static void virtioLowerInterrupt(PVIRTIOSTATE pVirtio)
468{
469 PDMDevHlpPCISetIrq(pVirtio->CTX_SUFF(pDevIns), 0, 0);
470}
471
472static void virtioResetQueue(PVIRTIOSTATE pVirtio, uint16_t qIdx)
473{
474 PVIRTQ_PROXY_T pVirtQ = &pVirtio->virtqProxy[qIdx];
475 pVirtQ->uAvailIdx = 0;
476 pVirtQ->uUsedIdx = 0;
477 pVirtio->uQueueEnable[qIdx] = false;
478 pVirtio->uQueueSize[qIdx] = VIRTQ_MAX_SIZE;
479 pVirtio->uQueueNotifyOff[qIdx] = qIdx;
480}
481
482
483static void virtioResetDevice(PVIRTIOSTATE pVirtio)
484{
485 Log2Func(("\n"));
486 pVirtio->uDeviceFeaturesSelect = 0;
487 pVirtio->uDriverFeaturesSelect = 0;
488 pVirtio->uConfigGeneration = 0;
489 pVirtio->uDeviceStatus = 0;
490 pVirtio->uISR = 0;
491
492#ifndef MSIX_SUPPORT
493 /** This is required by VirtIO 1.0 specification, section 4.1.5.1.2 */
494 pVirtio->uMsixConfig = VIRTIO_MSI_NO_VECTOR;
495 for (int i = 0; i < VIRTQ_MAX_CNT; i++)
496 pVirtio->uQueueMsixVector[i] = VIRTIO_MSI_NO_VECTOR;
497#endif
498
499 pVirtio->uNumQueues = VIRTQ_MAX_CNT;
500 for (uint16_t qIdx = 0; qIdx < pVirtio->uNumQueues; qIdx++)
501 virtioResetQueue(pVirtio, qIdx);
502}
503
504/**
505 * Initiate orderly reset procedure.
506 * Invoked by client to reset the device and driver (see VirtIO 1.0 section 2.1.1/2.1.2)
507 */
508void virtioResetAll(VIRTIOHANDLE hVirtio)
509{
510 LogFunc(("VIRTIO RESET REQUESTED!!!\n"));
511 PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)hVirtio;
512 pVirtio->uDeviceStatus |= VIRTIO_STATUS_DEVICE_NEEDS_RESET;
513 if (pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK)
514 {
515 pVirtio->fGenUpdatePending = true;
516 virtioRaiseInterrupt(pVirtio, VIRTIO_ISR_DEVICE_CONFIG);
517 }
518}
519
520/**
521 * Invoked by this implementation when guest driver resets the device.
522 * The driver itself will not reset until the device has read the status change.
523 */
524static void virtioGuestResetted(PVIRTIOSTATE pVirtio)
525{
526 LogFunc(("Guest reset the device\n"));
527
528 /** Let the client know */
529 pVirtio->virtioCallbacks.pfnVirtioStatusChanged((VIRTIOHANDLE)pVirtio, pVirtio->pClientContext, false);
530 virtioResetDevice(pVirtio);
531}
532
/**
 * Handle accesses to Common Configuration capability
 *
 * @returns VBox status code
 *
 * @param pVirtio   Virtio instance state
 * @param fWrite    If write access (otherwise read access)
 * @param uOffset   Offset within the common configuration structure
 * @param cb        Number of bytes to read or write
 * @param pv        Pointer to location to write to or read from
 */
static int virtioCommonCfgAccessed(PVIRTIOSTATE pVirtio, int fWrite, off_t uOffset, unsigned cb, void const *pv)
{
    int rc = VINF_SUCCESS;
    uint64_t val;
    /* NOTE: MATCH_COMMON_CFG() matches uOffset/cb against the named member's layout, and
     * LOG_COMMON_CFG_ACCESS() implicitly uses the local uIntraOff (see impl header) —
     * which is why uIntraOff is declared/adjusted but never read directly below. */
    if (MATCH_COMMON_CFG(uDeviceFeatures))
    {
        if (fWrite) /* Guest WRITE pCommonCfg>uDeviceFeatures */
            LogFunc(("Guest attempted to write readonly virtio_pci_common_cfg.device_feature\n"));
        else /* Guest READ pCommonCfg->uDeviceFeatures */
        {
            uint32_t uIntraOff = uOffset - RT_UOFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uDeviceFeatures);
            /* The 64-bit feature set is exposed as two 32-bit windows chosen by the selector. */
            switch(pVirtio->uDeviceFeaturesSelect)
            {
                case 0:
                    val = pVirtio->uDeviceFeatures & 0xffffffff;
                    memcpy((void *)pv, (const void *)&val, cb);
                    LOG_COMMON_CFG_ACCESS(uDeviceFeatures);
                    break;
                case 1:
                    val = (pVirtio->uDeviceFeatures >> 32) & 0xffffffff;
                    uIntraOff += 4;
                    memcpy((void *)pv, (const void *)&val, cb);
                    LOG_COMMON_CFG_ACCESS(uDeviceFeatures);
                    break;
                default:
                    LogFunc(("Guest read uDeviceFeatures with out of range selector (%d), returning 0\n",
                             pVirtio->uDeviceFeaturesSelect));
                    return VERR_ACCESS_DENIED;
            }
        }
    }
    else if (MATCH_COMMON_CFG(uDriverFeatures))
    {
        if (fWrite) /* Guest WRITE pCommonCfg->udriverFeatures */
        {
            uint32_t uIntraOff = uOffset - RT_UOFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uDriverFeatures);
            switch(pVirtio->uDriverFeaturesSelect)
            {
                case 0:
                    /* Selector 0: low 32 bits of the accepted feature set. */
                    memcpy(&pVirtio->uDriverFeatures, pv, cb);
                    LOG_COMMON_CFG_ACCESS(uDriverFeatures);
                    break;
                case 1:
                    /* Selector 1: high 32 bits. */
                    memcpy(((char *)&pVirtio->uDriverFeatures) + sizeof(uint32_t), pv, cb);
                    uIntraOff += 4;
                    LOG_COMMON_CFG_ACCESS(uDriverFeatures);
                    break;
                default:
                    LogFunc(("Guest wrote uDriverFeatures with out of range selector (%d), returning 0\n",
                             pVirtio->uDriverFeaturesSelect));
                    return VERR_ACCESS_DENIED;
            }
        }
        else /* Guest READ pCommonCfg->udriverFeatures */
        {
            uint32_t uIntraOff = uOffset - RT_UOFFSETOF(VIRTIO_PCI_COMMON_CFG_T, uDriverFeatures);
            switch(pVirtio->uDriverFeaturesSelect)
            {
                case 0:
                    val = pVirtio->uDriverFeatures & 0xffffffff;
                    memcpy((void *)pv, (const void *)&val, cb);
                    LOG_COMMON_CFG_ACCESS(uDriverFeatures);
                    break;
                case 1:
                    val = (pVirtio->uDriverFeatures >> 32) & 0xffffffff;
                    uIntraOff += 4;
                    memcpy((void *)pv, (const void *)&val, cb);
                    LOG_COMMON_CFG_ACCESS(uDriverFeatures);
                    break;
                default:
                    LogFunc(("Guest read uDriverFeatures with out of range selector (%d), returning 0\n",
                             pVirtio->uDriverFeaturesSelect));
                    return VERR_ACCESS_DENIED;
            }
        }
    }
    else if (MATCH_COMMON_CFG(uNumQueues))
    {
        if (fWrite)
        {
            Log2Func(("Guest attempted to write readonly virtio_pci_common_cfg.num_queues\n"));
            return VERR_ACCESS_DENIED;
        }
        else
        {
            uint32_t uIntraOff = 0;
            *(uint16_t *)pv = VIRTQ_MAX_CNT;
            LOG_COMMON_CFG_ACCESS(uNumQueues);
        }
    }
    else if (MATCH_COMMON_CFG(uDeviceStatus))
    {
        if (fWrite) /* Guest WRITE pCommonCfg->uDeviceStatus */
        {
            pVirtio->uDeviceStatus = *(uint8_t *)pv;
            Log3Func(("Guest wrote uDeviceStatus ................ ("));
            virtioLogDeviceStatus(pVirtio->uDeviceStatus);
            Log3((")\n"));
            /* Writing 0 is the guest's reset request (VirtIO 1.0 section 2.1.2). */
            if (pVirtio->uDeviceStatus == 0)
                virtioGuestResetted(pVirtio);
            /*
             * Notify client only if status actually changed from last time.
             */
            bool fOkayNow = pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK;
            bool fWasOkay = pVirtio->uPrevDeviceStatus & VIRTIO_STATUS_DRIVER_OK;
            if ((fOkayNow && !fWasOkay) || (!fOkayNow && fWasOkay))
                pVirtio->virtioCallbacks.pfnVirtioStatusChanged((VIRTIOHANDLE)pVirtio, pVirtio->pClientContext,
                        pVirtio->uDeviceStatus & VIRTIO_STATUS_DRIVER_OK);
            pVirtio->uPrevDeviceStatus = pVirtio->uDeviceStatus;
        }
        else /* Guest READ pCommonCfg->uDeviceStatus */
        {
            Log3Func(("Guest read uDeviceStatus ................ ("));
            /* NOTE(review): read path stores 32 bits while the write path consumes
             * 8 — confirm cb handling/intent for this register. */
            *(uint32_t *)pv = pVirtio->uDeviceStatus;
            virtioLogDeviceStatus(pVirtio->uDeviceStatus);
            Log3((")\n"));
        }
    }
    /* The remaining registers are handled by generated accessor macros
     * (read/write, read-only, and queue-indexed variants). */
    else if (MATCH_COMMON_CFG(uMsixConfig))
    {
        COMMON_CFG_ACCESSOR(uMsixConfig);
    }
    else if (MATCH_COMMON_CFG(uDeviceFeaturesSelect))
    {
        COMMON_CFG_ACCESSOR(uDeviceFeaturesSelect);
    }
    else if (MATCH_COMMON_CFG(uDriverFeaturesSelect))
    {
        COMMON_CFG_ACCESSOR(uDriverFeaturesSelect);
    }
    else if (MATCH_COMMON_CFG(uConfigGeneration))
    {
        COMMON_CFG_ACCESSOR_READONLY(uConfigGeneration);
    }
    else if (MATCH_COMMON_CFG(uQueueSelect))
    {
        COMMON_CFG_ACCESSOR(uQueueSelect);
    }
    else if (MATCH_COMMON_CFG(uQueueSize))
    {
        COMMON_CFG_ACCESSOR_INDEXED(uQueueSize, pVirtio->uQueueSelect);
    }
    else if (MATCH_COMMON_CFG(uQueueMsixVector))
    {
        COMMON_CFG_ACCESSOR_INDEXED(uQueueMsixVector, pVirtio->uQueueSelect);
    }
    else if (MATCH_COMMON_CFG(uQueueEnable))
    {
        COMMON_CFG_ACCESSOR_INDEXED(uQueueEnable, pVirtio->uQueueSelect);
    }
    else if (MATCH_COMMON_CFG(uQueueNotifyOff))
    {
        COMMON_CFG_ACCESSOR_INDEXED_READONLY(uQueueNotifyOff, pVirtio->uQueueSelect);
    }
    else if (MATCH_COMMON_CFG(pGcPhysQueueDesc))
    {
        COMMON_CFG_ACCESSOR_INDEXED(pGcPhysQueueDesc, pVirtio->uQueueSelect);
    }
    else if (MATCH_COMMON_CFG(pGcPhysQueueAvail))
    {
        COMMON_CFG_ACCESSOR_INDEXED(pGcPhysQueueAvail, pVirtio->uQueueSelect);
    }
    else if (MATCH_COMMON_CFG(pGcPhysQueueUsed))
    {
        COMMON_CFG_ACCESSOR_INDEXED(pGcPhysQueueUsed, pVirtio->uQueueSelect);
    }
    else
    {
        Log2Func(("Bad guest %s access to virtio_pci_common_cfg: uOffset=%d, cb=%d\n",
                  fWrite ? "write" : "read ", uOffset, cb));
        rc = VERR_ACCESS_DENIED;
    }
    return rc;
}
717
718/**
719 * Memory mapped I/O Handler for PCI Capabilities read operations.
720 *
721 * @returns VBox status code.
722 *
723 * @param pDevIns The device instance.
724 * @param pvUser User argument.
725 * @param GCPhysAddr Physical address (in GC) where the read starts.
726 * @param pv Where to store the result.
727 * @param cb Number of bytes read.
728 */
729PDMBOTHCBDECL(int) virtioR3MmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
730{
731 RT_NOREF(pvUser);
732 PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);
733 int rc = VINF_SUCCESS;
734
735 MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysDeviceCap, pVirtio->pDeviceCap, fDevSpecific);
736 MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysCommonCfg, pVirtio->pCommonCfgCap, fCommonCfg);
737 MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysIsrCap, pVirtio->pIsrCap, fIsr);
738
739 if (fDevSpecific)
740 {
741 uint32_t uOffset = GCPhysAddr - pVirtio->pGcPhysDeviceCap;
742 /**
743 * Callback to client to manage device-specific configuration.
744 */
745 rc = pVirtio->virtioCallbacks.pfnVirtioDevCapRead(pDevIns, uOffset, pv, cb);
746
747 /**
748 * Additionally, anytime any part of the device-specific configuration (which our client maintains)
749 * is READ it needs to be checked to see if it changed since the last time any part was read, in
750 * order to maintain the config generation (see VirtIO 1.0 spec, section 4.1.4.3.1)
751 */
752 bool fDevSpecificFieldChanged = !!memcmp((char *)pVirtio->pDevSpecificCfg + uOffset,
753 (char *)pVirtio->pPrevDevSpecificCfg + uOffset, cb);
754
755 memcpy(pVirtio->pPrevDevSpecificCfg, pVirtio->pDevSpecificCfg, pVirtio->cbDevSpecificCfg);
756
757 if (pVirtio->fGenUpdatePending || fDevSpecificFieldChanged)
758 {
759 ++pVirtio->uConfigGeneration;
760 Log3Func(("Bumped cfg. generation to %d because %s%s\n",
761 pVirtio->uConfigGeneration,
762 fDevSpecificFieldChanged ? "<dev cfg changed> " : "",
763 pVirtio->fGenUpdatePending ? "<update was pending>" : ""));
764 pVirtio->fGenUpdatePending = false;
765 }
766 }
767 else
768 if (fCommonCfg)
769 {
770 uint32_t uOffset = GCPhysAddr - pVirtio->pGcPhysCommonCfg;
771 virtioCommonCfgAccessed(pVirtio, 0 /* fWrite */, uOffset, cb, pv);
772 }
773 else
774 if (fIsr && cb == sizeof(uint8_t))
775 {
776 *(uint8_t *)pv = pVirtio->uISR;
777 Log3Func(("Read and clear ISR\n"));
778 pVirtio->uISR = 0; /** VirtIO specification requires reads of ISR to clear it */
779 virtioLowerInterrupt(pVirtio);
780 }
781 else {
782 LogFunc(("Bad read access to mapped capabilities region:\n"
783 " pVirtio=%#p GCPhysAddr=%RGp cb=%u\n",
784 pVirtio, GCPhysAddr, pv, cb, pv, cb));
785 }
786 return rc;
787}
/**
 * Memory mapped I/O Handler for PCI Capabilities write operations.
 *
 * @returns VBox status code.
 *
 * @param pDevIns     The device instance.
 * @param pvUser      User argument.
 * @param GCPhysAddr  Physical address (in GC) where the write starts.
 * @param pv          Where to fetch the result.
 * @param cb          Number of bytes to write.
 */
PDMBOTHCBDECL(int) virtioR3MmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
{
    RT_NOREF(pvUser);
    PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);
    int rc = VINF_SUCCESS;

    /* Classify the access by which capability window GCPhysAddr falls into. */
    MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysDeviceCap, pVirtio->pDeviceCap, fDevSpecific);
    MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysCommonCfg, pVirtio->pCommonCfgCap, fCommonCfg);
    MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysIsrCap, pVirtio->pIsrCap, fIsr);
    MATCH_VIRTIO_CAP_STRUCT(pVirtio->pGcPhysNotifyCap, pVirtio->pNotifyCap, fNotify);

    if (fDevSpecific)
    {
        uint32_t uOffset = GCPhysAddr - pVirtio->pGcPhysDeviceCap;
        /*
         * Pass this MMIO write access back to the client to handle
         */
        rc = pVirtio->virtioCallbacks.pfnVirtioDevCapWrite(pDevIns, uOffset, pv, cb);
    }
    else
    if (fCommonCfg)
    {
        uint32_t uOffset = GCPhysAddr - pVirtio->pGcPhysCommonCfg;
        virtioCommonCfgAccessed(pVirtio, 1 /* fWrite */, uOffset, cb, pv);
    }
    else
    if (fIsr && cb == sizeof(uint8_t))
    {
        /* NOTE(review): guest writes to ISR are accepted verbatim here; the spec
         * treats ISR as read-to-clear — confirm honoring writes is intended. */
        pVirtio->uISR = *(uint8_t *)pv;
        Log3Func(("Setting uISR = 0x%02x (virtq interrupt: %d, dev confg interrupt: %d)\n",
                  pVirtio->uISR & 0xff,
                  pVirtio->uISR & VIRTIO_ISR_VIRTQ_INTERRUPT,
                  !!(pVirtio->uISR & VIRTIO_ISR_DEVICE_CONFIG)));
    }
    else
    /* This *should* be guest driver dropping index of a new descriptor in avail ring */
    if (fNotify && cb == sizeof(uint16_t))
    {
        /* Each queue owns a VIRTIO_NOTIFY_OFFSET_MULTIPLIER-sized slot in the notify window. */
        uint32_t uNotifyBaseOffset = GCPhysAddr - pVirtio->pGcPhysNotifyCap;
        uint16_t qIdx = uNotifyBaseOffset / VIRTIO_NOTIFY_OFFSET_MULTIPLIER;
        uint16_t uAvailDescIdx = *(uint16_t *)pv;
        virtioQueueNotified(pVirtio, qIdx, uAvailDescIdx);
    }
    else
    {
        Log2Func(("Bad write access to mapped capabilities region:\n"
                  " pVirtio=%#p GCPhysAddr=%RGp pv=%#p{%.*Rhxs} cb=%u\n",
                  pVirtio, GCPhysAddr, pv, cb, pv, cb));
    }
    return rc;
}
850
851/**
852 * @callback_method_impl{FNPCIIOREGIONMAP}
853 */
854static DECLCALLBACK(int) virtioR3Map(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t iRegion,
855 RTGCPHYS GCPhysAddress, RTGCPHYS cb, PCIADDRESSSPACE enmType)
856{
857 RT_NOREF3(pPciDev, iRegion, enmType);
858 PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);
859 int rc = VINF_SUCCESS;
860
861 Assert(cb >= 32);
862
863 if (iRegion == VIRTIOSCSI_REGION_PCI_CAP)
864 {
865 /* We use the assigned size here, because we currently only support page aligned MMIO ranges. */
866 rc = PDMDevHlpMMIORegister(pDevIns, GCPhysAddress, cb, NULL /*pvUser*/,
867 IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU,
868 virtioR3MmioWrite, virtioR3MmioRead,
869 "virtio-scsi MMIO");
870
871 if (RT_FAILURE(rc))
872 {
873 Log2Func(("virtio: PCI Capabilities failed to map GCPhysAddr=%RGp cb=%RGp, region=%d\n",
874 GCPhysAddress, cb, iRegion));
875 return rc;
876 }
877 Log2Func(("virtio: PCI Capabilities mapped at GCPhysAddr=%RGp cb=%RGp, region=%d\n",
878 GCPhysAddress, cb, iRegion));
879 pVirtio->pGcPhysPciCapBase = GCPhysAddress;
880 pVirtio->pGcPhysCommonCfg = GCPhysAddress + pVirtio->pCommonCfgCap->uOffset;
881 pVirtio->pGcPhysNotifyCap = GCPhysAddress + pVirtio->pNotifyCap->pciCap.uOffset;
882 pVirtio->pGcPhysIsrCap = GCPhysAddress + pVirtio->pIsrCap->uOffset;
883 if (pVirtio->pPrevDevSpecificCfg)
884 pVirtio->pGcPhysDeviceCap = GCPhysAddress + pVirtio->pDeviceCap->uOffset;
885 }
886 return rc;
887}
888
/**
 * Callback function for reading from the PCI configuration space.
 *
 * @returns The register value.
 * @param pDevIns   Pointer to the device instance the PCI device
 *                  belongs to.
 * @param pPciDev   Pointer to PCI device. Use pPciDev->pDevIns to get the device instance.
 * @param uAddress  The configuration space register address. [0..4096]
 * @param cb        The register size. [1,2,4]
 *
 * @remarks Called with the PDM lock held. The device lock is NOT take because
 *          that is very likely be a lock order violation.
 */
static DECLCALLBACK(uint32_t) virtioPciConfigRead(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
                                                  uint32_t uAddress, unsigned cb)
{
    PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);

    /* NOTE(review): uAddress (a config-space offset) is compared to the host
     * virtual address of the uPciCfgData member — confirm this matching scheme
     * is intentional and not meant to be the member's config-space offset. */
    if (uAddress == (uint64_t)&pVirtio->pPciCfgCap->uPciCfgData)
    {
        /* VirtIO 1.0 spec section 4.1.4.7 describes a required alternative access capability
         * whereby the guest driver can specify a bar, offset, and length via the PCI configuration space
         * (the virtio_pci_cfg_cap capability), and access data items. */
        uint32_t uLength = pVirtio->pPciCfgCap->pciCap.uLength;
        uint32_t uOffset = pVirtio->pPciCfgCap->pciCap.uOffset;
        uint8_t uBar = pVirtio->pPciCfgCap->pciCap.uBar;
        uint32_t pv = 0;   /* value accumulator — receives up to 4 bytes from the MMIO read */
        if (uBar == VIRTIOSCSI_REGION_PCI_CAP)
            (void)virtioR3MmioRead(pDevIns, NULL, (RTGCPHYS)((uint32_t)pVirtio->pGcPhysPciCapBase + uOffset),
                                   &pv, uLength);
        else
        {
            Log2Func(("Guest read virtio_pci_cfg_cap.pci_cfg_data using unconfigured BAR. Ignoring"));
            return 0;
        }
        Log2Func(("virtio: Guest read virtio_pci_cfg_cap.pci_cfg_data, bar=%d, offset=%d, length=%d, result=%d\n",
                  uBar, uOffset, uLength, pv));
        return pv;
    }
    /* Anything else: chain to the saved original PCI config read handler. */
    return pVirtio->pfnPciConfigReadOld(pDevIns, pPciDev, uAddress, cb);
}
930
931/**
932 * Callback function for writing to the PCI configuration space.
933 *
934 * @returns VINF_SUCCESS or PDMDevHlpDBGFStop status.
935 *
936 * @param pDevIns Pointer to the device instance the PCI device
937 * belongs to.
938 * @param pPciDev Pointer to PCI device. Use pPciDev->pDevIns to get the device instance.
939 * @param uAddress The configuration space register address. [0..4096]
940 * @param u32Value The value that's being written. The number of bits actually used from
941 * this value is determined by the cb parameter.
942 * @param cb The register size. [1,2,4]
943 *
944 * @remarks Called with the PDM lock held. The device lock is NOT take because
945 * that is very likely be a lock order violation.
946 */
947static DECLCALLBACK(VBOXSTRICTRC) virtioPciConfigWrite(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
948 uint32_t uAddress, uint32_t u32Value, unsigned cb)
949{
950 PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);
951
952 if (uAddress == pVirtio->uPciCfgDataOff)
953 {
954 /* VirtIO 1.0 spec section 4.1.4.7 describes a required alternative access capability
955 * whereby the guest driver can specify a bar, offset, and length via the PCI configuration space
956 * (the virtio_pci_cfg_cap capability), and access data items. */
957 uint32_t uLength = pVirtio->pPciCfgCap->pciCap.uLength;
958 uint32_t uOffset = pVirtio->pPciCfgCap->pciCap.uOffset;
959 uint8_t uBar = pVirtio->pPciCfgCap->pciCap.uBar;
960 if (uBar == VIRTIOSCSI_REGION_PCI_CAP)
961 (void)virtioR3MmioWrite(pDevIns, NULL, (RTGCPHYS)((uint32_t)pVirtio->pGcPhysPciCapBase + uOffset),
962 (void *)&u32Value, uLength);
963 else
964 {
965 Log2Func(("Guest wrote virtio_pci_cfg_cap.pci_cfg_data using unconfigured BAR. Ignoring"));
966 return VINF_SUCCESS;
967 }
968 Log2Func(("Guest wrote virtio_pci_cfg_cap.pci_cfg_data, bar=%d, offset=%x, length=%x, value=%d\n",
969 uBar, uOffset, uLength, u32Value));
970 return VINF_SUCCESS;
971 }
972 return pVirtio->pfnPciConfigWriteOld(pDevIns, pPciDev, uAddress, u32Value, cb);
973}
974
975/**
976 * Get VirtIO accepted host-side features
977 *
978 * @returns feature bits selected or 0 if selector out of range.
979 *
980 * @param pState Virtio state
981 */
982uint64_t virtioGetAcceptedFeatures(PVIRTIOSTATE pVirtio)
983{
984 return pVirtio->uDriverFeatures;
985}
986
987/**
988 * Destruct PCI-related part of device.
989 *
990 * We need to free non-VM resources only.
991 *
992 * @returns VBox status code.
993 * @param pState The device state structure.
994 */
995int virtioDestruct(PVIRTIOSTATE pVirtio)
996{
997 RT_NOREF(pVirtio);
998 Log(("%s Destroying PCI instance\n", INSTANCE(pVirtio)));
999 return VINF_SUCCESS;
1000}
1001
1002/**
1003 * Setup PCI device controller and Virtio state
1004 *
1005 * @param pDevIns Device instance data
1006 * @param pClientContext Opaque client context (such as state struct, ...)
1007 * @param pVirtio Device State
1008 * @param pPciParams Values to populate industry standard PCI Configuration Space data structure
1009 * @param pcszInstance Device instance name (format-specifier)
1010 * @param uDevSpecificFeatures VirtIO device-specific features offered by client
1011 * @param devCapReadCallback Client handler to call upon guest read to device specific capabilities.
1012 * @param devCapWriteCallback Client handler to call upon guest write to device specific capabilities.
1013 * @param devStatusChangedCallback Client handler to call for major device status changes
1014 * @param queueNotifiedCallback Client handler for guest-to-host notifications that avail queue has ring data
1015 * @param ssmLiveExecCallback Client handler for SSM live exec
1016 * @param ssmSaveExecCallback Client handler for SSM save exec
1017 * @param ssmLoadExecCallback Client handler for SSM load exec
1018 * @param ssmLoadDoneCallback Client handler for SSM load done
1019 * @param cbDevSpecificCfg Size of virtio_pci_device_cap device-specific struct
1020 * @param pDevSpecificCfg Address of client's dev-specific configuration struct.
1021 */
1022int virtioConstruct(PPDMDEVINS pDevIns,
1023 void *pClientContext,
1024 VIRTIOHANDLE *phVirtio,
1025 PVIRTIOPCIPARAMS pPciParams,
1026 const char *pcszInstance,
1027 uint64_t uDevSpecificFeatures,
1028 PFNVIRTIODEVCAPREAD devCapReadCallback,
1029 PFNVIRTIODEVCAPWRITE devCapWriteCallback,
1030 PFNVIRTIOSTATUSCHANGED devStatusChangedCallback,
1031 PFNVIRTIOQUEUENOTIFIED queueNotifiedCallback,
1032 PFNSSMDEVLIVEEXEC ssmLiveExecCallback,
1033 PFNSSMDEVSAVEEXEC ssmSaveExecCallback,
1034 PFNSSMDEVLOADEXEC ssmLoadExecCallback,
1035 PFNSSMDEVLOADDONE ssmLoadDoneCallback,
1036 uint16_t cbDevSpecificCfg,
1037 void *pDevSpecificCfg)
1038{
1039
1040 int rc = VINF_SUCCESS;
1041
1042
1043 PVIRTIOSTATE pVirtio = (PVIRTIOSTATE)RTMemAllocZ(sizeof(VIRTIOSTATE));
1044 if (!pVirtio)
1045 {
1046 PDMDEV_SET_ERROR(pDevIns, VERR_NO_MEMORY, N_("virtio: out of memory"));
1047 return VERR_NO_MEMORY;
1048 }
1049
1050 pVirtio->pClientContext = pClientContext;
1051
1052 /**
1053 * The host features offered include both device-specific features
1054 * and reserved feature bits (device independent)
1055 */
1056 pVirtio->uDeviceFeatures = VIRTIO_F_VERSION_1
1057 | VIRTIO_DEV_INDEPENDENT_FEATURES_OFFERED
1058 | uDevSpecificFeatures;
1059
1060 RTStrCopy(pVirtio->szInstance, sizeof(pVirtio->szInstance), pcszInstance);
1061
1062 pVirtio->pDevInsR3 = pDevIns;
1063 pVirtio->pDevInsR0 = PDMDEVINS_2_R0PTR(pDevIns);
1064 pVirtio->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
1065 pVirtio->uDeviceStatus = 0;
1066 pVirtio->cbDevSpecificCfg = cbDevSpecificCfg;
1067 pVirtio->pDevSpecificCfg = pDevSpecificCfg;
1068
1069 pVirtio->pPrevDevSpecificCfg = RTMemAllocZ(cbDevSpecificCfg);
1070 if (!pVirtio->pPrevDevSpecificCfg)
1071 {
1072 RTMemFree(pVirtio);
1073 PDMDEV_SET_ERROR(pDevIns, VERR_NO_MEMORY, N_("virtio: out of memory"));
1074 return VERR_NO_MEMORY;
1075 }
1076
1077 memcpy(pVirtio->pPrevDevSpecificCfg, pVirtio->pDevSpecificCfg, cbDevSpecificCfg);
1078 pVirtio->virtioCallbacks.pfnVirtioDevCapRead = devCapReadCallback;
1079 pVirtio->virtioCallbacks.pfnVirtioDevCapWrite = devCapWriteCallback;
1080 pVirtio->virtioCallbacks.pfnVirtioStatusChanged = devStatusChangedCallback;
1081 pVirtio->virtioCallbacks.pfnVirtioQueueNotified = queueNotifiedCallback;
1082 pVirtio->virtioCallbacks.pfnSSMDevLiveExec = ssmLiveExecCallback;
1083 pVirtio->virtioCallbacks.pfnSSMDevSaveExec = ssmSaveExecCallback;
1084 pVirtio->virtioCallbacks.pfnSSMDevLoadExec = ssmLoadExecCallback;
1085 pVirtio->virtioCallbacks.pfnSSMDevLoadDone = ssmLoadDoneCallback;
1086
1087
1088 /* Set PCI config registers (assume 32-bit mode) */
1089 PCIDevSetRevisionId (&pVirtio->dev, DEVICE_PCI_REVISION_ID_VIRTIO);
1090 PCIDevSetVendorId (&pVirtio->dev, DEVICE_PCI_VENDOR_ID_VIRTIO);
1091 PCIDevSetSubSystemVendorId (&pVirtio->dev, DEVICE_PCI_VENDOR_ID_VIRTIO);
1092 PCIDevSetDeviceId (&pVirtio->dev, pPciParams->uDeviceId);
1093 PCIDevSetClassBase (&pVirtio->dev, pPciParams->uClassBase);
1094 PCIDevSetClassSub (&pVirtio->dev, pPciParams->uClassSub);
1095 PCIDevSetClassProg (&pVirtio->dev, pPciParams->uClassProg);
1096 PCIDevSetSubSystemId (&pVirtio->dev, pPciParams->uSubsystemId);
1097 PCIDevSetInterruptLine (&pVirtio->dev, pPciParams->uInterruptLine);
1098 PCIDevSetInterruptPin (&pVirtio->dev, pPciParams->uInterruptPin);
1099
1100 /* Register PCI device */
1101 rc = PDMDevHlpPCIRegister(pDevIns, &pVirtio->dev);
1102 if (RT_FAILURE(rc))
1103 {
1104 RTMemFree(pVirtio);
1105 return PDMDEV_SET_ERROR(pDevIns, rc,
1106 N_("virtio: cannot register PCI Device")); /* can we put params in this error? */
1107 }
1108
1109 rc = PDMDevHlpSSMRegisterEx(pDevIns, VIRTIO_SAVEDSTATE_VERSION, sizeof(*pVirtio), NULL,
1110 NULL, virtioR3LiveExec, NULL, NULL, virtioR3SaveExec, NULL,
1111 NULL, virtioR3LoadExec, virtioR3LoadDone);
1112 if (RT_FAILURE(rc))
1113 {
1114 RTMemFree(pVirtio);
1115 return PDMDEV_SET_ERROR(pDevIns, rc,
1116 N_("virtio: cannot register SSM callbacks"));
1117 }
1118
1119 PDMDevHlpPCISetConfigCallbacks(pDevIns, &pVirtio->dev,
1120 virtioPciConfigRead, &pVirtio->pfnPciConfigReadOld,
1121 virtioPciConfigWrite, &pVirtio->pfnPciConfigWriteOld);
1122
1123
1124 /** Construct & map PCI vendor-specific capabilities for virtio host negotiation with guest driver */
1125
1126#if 0 && defined(VBOX_WITH_MSI_DEVICES) /* T.B.D. */
1127 uint8_t fMsiSupport = true;
1128#else
1129 uint8_t fMsiSupport = false;
1130#endif
1131
1132 /** The following capability mapped via VirtIO 1.0: struct virtio_pci_cfg_cap (VIRTIO_PCI_CFG_CAP_T)
1133 * as a mandatory but suboptimal alternative interface to host device capabilities, facilitating
1134 * access the memory of any BAR. If the guest uses it (the VirtIO driver on Linux doesn't),
1135 * Unlike Common, Notify, ISR and Device capabilities, it is accessed directly via PCI Config region.
1136 * therefore does not contribute to the capabilities region (BAR) the other capabilities use.
1137 */
1138#define CFGADDR2IDX(addr) ((uint64_t)addr - (uint64_t)&pVirtio->dev.abConfig)
1139
1140 PVIRTIO_PCI_CAP_T pCfg;
1141 uint32_t cbRegion = 0;
1142
1143 /* Common capability (VirtIO 1.0 spec, section 4.1.4.3) */
1144 pCfg = (PVIRTIO_PCI_CAP_T)&pVirtio->dev.abConfig[0x40];
1145 pCfg->uCfgType = VIRTIO_PCI_CAP_COMMON_CFG;
1146 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1147 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
1148 pCfg->uCapNext = CFGADDR2IDX(pCfg) + pCfg->uCapLen;
1149 pCfg->uBar = VIRTIOSCSI_REGION_PCI_CAP;
1150 pCfg->uOffset = RT_ALIGN_32(0, 4); /* reminder, in case someone changes offset */
1151 pCfg->uLength = sizeof(VIRTIO_PCI_COMMON_CFG_T);
1152 cbRegion += pCfg->uLength;
1153 pVirtio->pCommonCfgCap = pCfg;
1154
1155 /**
1156 * Notify capability (VirtIO 1.0 spec, section 4.1.4.4). Note: uLength is based the choice
1157 * of this implementation that each queue's uQueueNotifyOff is set equal to (QueueSelect) ordinal
1158 * value of the queue */
1159 pCfg = (PVIRTIO_PCI_CAP_T)&pVirtio->dev.abConfig[pCfg->uCapNext];
1160 pCfg->uCfgType = VIRTIO_PCI_CAP_NOTIFY_CFG;
1161 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1162 pCfg->uCapLen = sizeof(VIRTIO_PCI_NOTIFY_CAP_T);
1163 pCfg->uCapNext = CFGADDR2IDX(pCfg) + pCfg->uCapLen;
1164 pCfg->uBar = VIRTIOSCSI_REGION_PCI_CAP;
1165 pCfg->uOffset = pVirtio->pCommonCfgCap->uOffset + pVirtio->pCommonCfgCap->uLength;
1166 pCfg->uOffset = RT_ALIGN_32(pCfg->uOffset, 2);
1167 pCfg->uLength = VIRTQ_MAX_CNT * VIRTIO_NOTIFY_OFFSET_MULTIPLIER + 2; /* will change in VirtIO 1.1 */
1168 cbRegion += pCfg->uLength;
1169 pVirtio->pNotifyCap = (PVIRTIO_PCI_NOTIFY_CAP_T)pCfg;
1170 pVirtio->pNotifyCap->uNotifyOffMultiplier = VIRTIO_NOTIFY_OFFSET_MULTIPLIER;
1171
1172 /** ISR capability (VirtIO 1.0 spec, section 4.1.4.5)
1173 *
1174 * VirtIO 1.0 spec says 8-bit, unaligned in MMIO space. Example/diagram
1175 * of spec shows it as a 32-bit field with upper bits 'reserved'
1176 * Will take spec words more literally than the diagram for now.
1177 */
1178 pCfg = (PVIRTIO_PCI_CAP_T)&pVirtio->dev.abConfig[pCfg->uCapNext];
1179 pCfg->uCfgType = VIRTIO_PCI_CAP_ISR_CFG;
1180 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1181 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
1182 pCfg->uCapNext = CFGADDR2IDX(pCfg) + pCfg->uCapLen;
1183 pCfg->uBar = VIRTIOSCSI_REGION_PCI_CAP;
1184 pCfg->uOffset = pVirtio->pNotifyCap->pciCap.uOffset + pVirtio->pNotifyCap->pciCap.uLength;
1185 pCfg->uLength = sizeof(uint8_t);
1186 cbRegion += pCfg->uLength;
1187 pVirtio->pIsrCap = pCfg;
1188
1189 /** PCI Cfg capability (VirtIO 1.0 spec, section 4.1.4.7)
1190 * This capability doesn't get page-MMIO mapped. Instead uBar, uOffset and uLength are intercepted
1191 * by trapping PCI configuration I/O and get modulated by consumers to locate fetch and read/write
1192 * values from any region. NOTE: The linux driver not only doesn't use this feature, and will not
1193 * even list it as present if uLength isn't non-zero and 4-byte-aligned as the linux driver is
1194 * initializing. */
1195
1196 pCfg = (PVIRTIO_PCI_CAP_T)&pVirtio->dev.abConfig[pCfg->uCapNext];
1197 pCfg->uCfgType = VIRTIO_PCI_CAP_PCI_CFG;
1198 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1199 pCfg->uCapLen = sizeof(VIRTIO_PCI_CFG_CAP_T);
1200 pCfg->uCapNext = (fMsiSupport || pVirtio->pDevSpecificCfg) ? CFGADDR2IDX(pCfg) + pCfg->uCapLen : 0;
1201 pCfg->uBar = 0;
1202 pCfg->uOffset = 0;
1203 pCfg->uLength = 0;
1204 cbRegion += pCfg->uLength;
1205 pVirtio->pPciCfgCap = (PVIRTIO_PCI_CFG_CAP_T)pCfg;
1206
1207 if (pVirtio->pDevSpecificCfg)
1208 {
1209 /** Following capability (via VirtIO 1.0, section 4.1.4.6). Client defines the
1210 * device-specific config fields struct and passes size to this constructor */
1211 pCfg = (PVIRTIO_PCI_CAP_T)&pVirtio->dev.abConfig[pCfg->uCapNext];
1212 pCfg->uCfgType = VIRTIO_PCI_CAP_DEVICE_CFG;
1213 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1214 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
1215 pCfg->uCapNext = fMsiSupport ? CFGADDR2IDX(pCfg) + pCfg->uCapLen : 0;
1216 pCfg->uBar = VIRTIOSCSI_REGION_PCI_CAP;
1217 pCfg->uOffset = pVirtio->pIsrCap->uOffset + pVirtio->pIsrCap->uLength;
1218 pCfg->uOffset = RT_ALIGN_32(pCfg->uOffset, 4);
1219 pCfg->uLength = cbDevSpecificCfg;
1220 cbRegion += pCfg->uLength;
1221 pVirtio->pDeviceCap = pCfg;
1222 }
1223
1224 /** Set offset to first capability and enable PCI dev capabilities */
1225 PCIDevSetCapabilityList (&pVirtio->dev, 0x40);
1226 PCIDevSetStatus (&pVirtio->dev, VBOX_PCI_STATUS_CAP_LIST);
1227
1228 if (fMsiSupport)
1229 {
1230 PDMMSIREG aMsiReg;
1231 RT_ZERO(aMsiReg);
1232 aMsiReg.iMsixCapOffset = pCfg->uCapNext;
1233 aMsiReg.iMsixNextOffset = 0;
1234 aMsiReg.iMsixBar = 0;
1235 aMsiReg.cMsixVectors = 1;
1236 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &aMsiReg); /* see MsixR3init() */
1237 if (RT_FAILURE (rc))
1238 /** The following is moot, we need to flag no MSI-X support */
1239 PCIDevSetCapabilityList(&pVirtio->dev, 0x40);
1240 }
1241
1242 /** Linux drivers/virtio/virtio_pci_modern.c tries to map at least a page for the
1243 * 'unknown' device-specific capability without querying the capability to figure
1244 * out size, so pad with an extra page */
1245
1246 rc = PDMDevHlpPCIIORegionRegister(pDevIns, VIRTIOSCSI_REGION_PCI_CAP, RT_ALIGN_32(cbRegion + 0x1000, 0x1000),
1247 PCI_ADDRESS_SPACE_MEM, virtioR3Map);
1248 if (RT_FAILURE(rc))
1249 {
1250 RTMemFree(pVirtio->pPrevDevSpecificCfg);
1251 RTMemFree(pVirtio);
1252 return PDMDEV_SET_ERROR(pDevIns, rc,
1253 N_("virtio: cannot register PCI Capabilities address space"));
1254 }
1255 *phVirtio = (VIRTIOHANDLE)pVirtio;
1256 return rc;
1257}
1258
1259#ifdef VBOX_DEVICE_STRUCT_TESTCASE
1260# define virtioDumpState(x, s) do {} while (0)
1261#else
1262# ifdef DEBUG
1263
1264static void virtioDumpState(PVIRTIOSTATE pVirtio, const char *pcszCaller)
1265{
1266 Log2Func(("(called from %s)\n"
1267 " uDeviceFeatures = 0x%08x\n uDriverFeatures = 0x%08x\n"
1268 " uDeviceFeaturesSelect = 0x%04x\n uGuestFeaturesSelect = 0x%04x\n"
1269 " uDeviceStatus = 0x%02x\n uConfigGeneration = 0x%02x\n"
1270 " uQueueSelect = 0x%04x\n uNumQueues = 0x%04x\n"
1271 " uISR = 0x%02x\n fGenUpdatePending = 0x%02x\n"
1272 " uPciCfgDataOff = 0x%02x\n pGcPhysPciCapBase = %RGp\n"
1273 " pGcPhysCommonCfg = %RGp\n pGcPhysNotifyCap = %RGp\n"
1274 " pGcPhysIsrCap = %RGp\n pGcPhysDeviceCap = %RGp\n"
1275 " pDevSpecificCap = %p\n cbDevSpecificCap = 0x%04x\n"
1276 " pfnVirtioStatusChanged = %p\n pfnVirtioQueueNotified = %p\n"
1277 " pfnVirtioDevCapRead = %p\n pfnVirtioDevCapWrite = %p\n"
1278 " pfnSSMDevLiveExec = %p\n pfnSSMDevSaveExec = %p\n"
1279 " pfnSSMDevLoadExec = %p\n pfnSSMDevLoadDone = %p\n"
1280 " pfnPciConfigReadOld = %p\n pfnPciConfigWriteOld = %p\n",
1281 pcszCaller ? pcszCaller : "<unspecified>",
1282 pVirtio->uDeviceFeatures, pVirtio->uDriverFeatures, pVirtio->uDeviceFeaturesSelect,
1283 pVirtio->uDriverFeaturesSelect, pVirtio->uDeviceStatus, pVirtio->uConfigGeneration,
1284 pVirtio->uQueueSelect, pVirtio->uNumQueues, pVirtio->uISR, pVirtio->fGenUpdatePending,
1285 pVirtio->uPciCfgDataOff, pVirtio->pGcPhysPciCapBase, pVirtio->pGcPhysCommonCfg,
1286 pVirtio->pGcPhysNotifyCap, pVirtio->pGcPhysIsrCap, pVirtio->pGcPhysDeviceCap,
1287 pVirtio->pDevSpecificCfg, pVirtio->cbDevSpecificCfg, pVirtio->virtioCallbacks.pfnVirtioStatusChanged,
1288 pVirtio->virtioCallbacks.pfnVirtioQueueNotified, pVirtio->virtioCallbacks.pfnVirtioDevCapRead,
1289 pVirtio->virtioCallbacks.pfnVirtioDevCapWrite, pVirtio->virtioCallbacks.pfnSSMDevLiveExec,
1290 pVirtio->virtioCallbacks.pfnSSMDevSaveExec, pVirtio->virtioCallbacks.pfnSSMDevLoadExec,
1291 pVirtio->virtioCallbacks.pfnSSMDevLoadDone, pVirtio->pfnPciConfigReadOld,
1292 pVirtio->pfnPciConfigWriteOld
1293 ));
1294
1295 for (uint16_t i = 0; i < pVirtio->uNumQueues; i++)
1296 {
1297 Log2Func(("%s queue:\n",
1298 " virtqProxy[%u].uAvailIdx = %u\n virtqProxy[%u].uUsedIdx = %u\n"
1299 " uQueueSize[%u] = %u\n uQueueNotifyOff[%u] = %04x\n"
1300 " uQueueMsixVector[%u] = %04x\n uQueueEnable[%u] = %04x\n"
1301 " pGcPhysQueueDesc[%u] = %RGp\n pGcPhysQueueAvail[%u] = %RGp\n"
1302 " pGcPhysQueueUsed[%u] = %RGp\n",
1303 i, pVirtio->virtqProxy[i].szVirtqName, i, pVirtio->virtqProxy[i].uAvailIdx,
1304 i, pVirtio->virtqProxy[i].uUsedIdx, i, pVirtio->uQueueSize[i],
1305 i, pVirtio->uQueueNotifyOff[i],i, pVirtio->uQueueMsixVector[i],
1306 i, pVirtio->uQueueEnable[i], i, pVirtio->pGcPhysQueueDesc[i],
1307 i, pVirtio->pGcPhysQueueAvail[i], i, pVirtio->pGcPhysQueueUsed[i]
1308 ));
1309 }
1310}
1311# endif
1312#endif
1313
1314#ifdef IN_RING3
1315
 /** @callback_method_impl{FNSSMDEVSAVEEXEC}
  *
  * Serializes the virtio core state to the saved-state stream, then chains to
  * the client's save-exec callback for device-specific state.
  *
  * NOTE(review): the intermediate rc from each SSMR3Put* is overwritten by the
  * next call; only the client callback's status is actually returned.  SSM
  * latches stream errors in the handle, but this is worth confirming.
  *
  * The put order below MUST exactly match the get order in virtioR3LoadExec. */
static DECLCALLBACK(int) virtioR3SaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
{
    PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);

    int rc = VINF_SUCCESS;
    virtioDumpState(pVirtio, "virtioSaveExec");

    /* Scalar device state. */
    rc = SSMR3PutBool(pSSM, pVirtio->fGenUpdatePending);
    rc = SSMR3PutU8(pSSM, pVirtio->uDeviceStatus);
    rc = SSMR3PutU8(pSSM, pVirtio->uConfigGeneration);
    rc = SSMR3PutU8(pSSM, pVirtio->uPciCfgDataOff);
    rc = SSMR3PutU8(pSSM, pVirtio->uISR);
    rc = SSMR3PutU16(pSSM, pVirtio->uQueueSelect);
    rc = SSMR3PutU32(pSSM, pVirtio->uDeviceFeaturesSelect);
    rc = SSMR3PutU32(pSSM, pVirtio->uDriverFeaturesSelect);
    rc = SSMR3PutU32(pSSM, pVirtio->uNumQueues);
    rc = SSMR3PutU32(pSSM, pVirtio->cbDevSpecificCfg);
    rc = SSMR3PutU64(pSSM, pVirtio->uDeviceFeatures);
    rc = SSMR3PutU64(pSSM, pVirtio->uDriverFeatures);
    /* NOTE(review): the following entries store host-process pointers (config struct,
     * callbacks, chained PCI config handlers) in the saved state; they cannot be valid
     * in a different process/run — presumably placeholders, verify against load side. */
    rc = SSMR3PutU64(pSSM, (uint64_t)pVirtio->pDevSpecificCfg);
    rc = SSMR3PutU64(pSSM, (uint64_t)pVirtio->virtioCallbacks.pfnVirtioStatusChanged);
    rc = SSMR3PutU64(pSSM, (uint64_t)pVirtio->virtioCallbacks.pfnVirtioQueueNotified);
    rc = SSMR3PutU64(pSSM, (uint64_t)pVirtio->virtioCallbacks.pfnVirtioDevCapRead);
    rc = SSMR3PutU64(pSSM, (uint64_t)pVirtio->virtioCallbacks.pfnVirtioDevCapWrite);
    rc = SSMR3PutU64(pSSM, (uint64_t)pVirtio->virtioCallbacks.pfnSSMDevLiveExec);
    rc = SSMR3PutU64(pSSM, (uint64_t)pVirtio->virtioCallbacks.pfnSSMDevSaveExec);
    rc = SSMR3PutU64(pSSM, (uint64_t)pVirtio->virtioCallbacks.pfnSSMDevLoadExec);
    rc = SSMR3PutU64(pSSM, (uint64_t)pVirtio->virtioCallbacks.pfnSSMDevLoadDone);
    rc = SSMR3PutU64(pSSM, (uint64_t)pVirtio->pfnPciConfigReadOld);
    rc = SSMR3PutU64(pSSM, (uint64_t)pVirtio->pfnPciConfigWriteOld);
    /* Guest-physical addresses of the mapped capability structures. */
    rc = SSMR3PutGCPhys(pSSM, pVirtio->pGcPhysCommonCfg);
    rc = SSMR3PutGCPhys(pSSM, pVirtio->pGcPhysNotifyCap);
    rc = SSMR3PutGCPhys(pSSM, pVirtio->pGcPhysIsrCap);
    rc = SSMR3PutGCPhys(pSSM, pVirtio->pGcPhysDeviceCap);
    rc = SSMR3PutGCPhys(pSSM, pVirtio->pGcPhysPciCapBase);

    /* Per-queue state (ring addresses, indices, name). */
    for (uint16_t i = 0; i < pVirtio->uNumQueues; i++)
    {
        rc = SSMR3PutGCPhys64(pSSM, pVirtio->pGcPhysQueueDesc[i]);
        rc = SSMR3PutGCPhys64(pSSM, pVirtio->pGcPhysQueueAvail[i]);
        rc = SSMR3PutGCPhys64(pSSM, pVirtio->pGcPhysQueueUsed[i]);
        rc = SSMR3PutU16(pSSM, pVirtio->uQueueNotifyOff[i]);
        rc = SSMR3PutU16(pSSM, pVirtio->uQueueMsixVector[i]);
        rc = SSMR3PutU16(pSSM, pVirtio->uQueueEnable[i]);
        rc = SSMR3PutU16(pSSM, pVirtio->uQueueSize[i]);
        rc = SSMR3PutU16(pSSM, pVirtio->virtqProxy[i].uAvailIdx);
        rc = SSMR3PutU16(pSSM, pVirtio->virtqProxy[i].uUsedIdx);
        rc = SSMR3PutMem(pSSM, pVirtio->virtqProxy[i].szVirtqName, 32);
    }

    /* Chain to the client's device-specific save. */
    rc = pVirtio->virtioCallbacks.pfnSSMDevSaveExec(pDevIns, pSSM);
    return rc;
}
1370
 /** @callback_method_impl{FNSSMDEVLOADEXEC}
  *
  * Restores the virtio core state from the saved-state stream (final pass only),
  * then chains to the client's load-exec callback for device-specific state.
  *
  * The get order below MUST exactly match the put order in virtioR3SaveExec. */
static DECLCALLBACK(int) virtioR3LoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    RT_NOREF(uVersion);

    PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);

    int rc = VINF_SUCCESS;
    virtioDumpState(pVirtio, "virtioLoadExec");

    /* Core state is only stored in the final pass (live passes carry client data only). */
    if (uPass == SSM_PASS_FINAL)
    {
        /* NOTE(review): as in virtioR3SaveExec, each intermediate rc is overwritten by
         * the next SSMR3Get* call; stream errors are presumably latched in pSSM — confirm. */
        rc = SSMR3GetBool(pSSM, &pVirtio->fGenUpdatePending);
        rc = SSMR3GetU8(pSSM, &pVirtio->uDeviceStatus);
        rc = SSMR3GetU8(pSSM, &pVirtio->uConfigGeneration);
        rc = SSMR3GetU8(pSSM, &pVirtio->uPciCfgDataOff);
        rc = SSMR3GetU8(pSSM, &pVirtio->uISR);
        rc = SSMR3GetU16(pSSM, &pVirtio->uQueueSelect);
        rc = SSMR3GetU32(pSSM, &pVirtio->uDeviceFeaturesSelect);
        rc = SSMR3GetU32(pSSM, &pVirtio->uDriverFeaturesSelect);
        rc = SSMR3GetU32(pSSM, &pVirtio->uNumQueues);
        rc = SSMR3GetU32(pSSM, &pVirtio->cbDevSpecificCfg);
        rc = SSMR3GetU64(pSSM, &pVirtio->uDeviceFeatures);
        rc = SSMR3GetU64(pSSM, &pVirtio->uDriverFeatures);
        /* NOTE(review): these read host-process pointers (config struct, callbacks,
         * chained PCI config handlers) straight from the stream into live state.  Such
         * pointers are not valid across a save/restore into a new process — verify this
         * is intentional / that they are re-established by the device after load. */
        rc = SSMR3GetU64(pSSM, (uint64_t *)&pVirtio->pDevSpecificCfg);
        rc = SSMR3GetU64(pSSM, (uint64_t *)&pVirtio->virtioCallbacks.pfnVirtioStatusChanged);
        rc = SSMR3GetU64(pSSM, (uint64_t *)&pVirtio->virtioCallbacks.pfnVirtioQueueNotified);
        rc = SSMR3GetU64(pSSM, (uint64_t *)&pVirtio->virtioCallbacks.pfnVirtioDevCapRead);
        rc = SSMR3GetU64(pSSM, (uint64_t *)&pVirtio->virtioCallbacks.pfnVirtioDevCapWrite);
        rc = SSMR3GetU64(pSSM, (uint64_t *)&pVirtio->virtioCallbacks.pfnSSMDevLiveExec);
        rc = SSMR3GetU64(pSSM, (uint64_t *)&pVirtio->virtioCallbacks.pfnSSMDevSaveExec);
        rc = SSMR3GetU64(pSSM, (uint64_t *)&pVirtio->virtioCallbacks.pfnSSMDevLoadExec);
        rc = SSMR3GetU64(pSSM, (uint64_t *)&pVirtio->virtioCallbacks.pfnSSMDevLoadDone);
        rc = SSMR3GetU64(pSSM, (uint64_t *)&pVirtio->pfnPciConfigReadOld);
        rc = SSMR3GetU64(pSSM, (uint64_t *)&pVirtio->pfnPciConfigWriteOld);
        /* Guest-physical addresses of the mapped capability structures. */
        rc = SSMR3GetGCPhys(pSSM, &pVirtio->pGcPhysCommonCfg);
        rc = SSMR3GetGCPhys(pSSM, &pVirtio->pGcPhysNotifyCap);
        rc = SSMR3GetGCPhys(pSSM, &pVirtio->pGcPhysIsrCap);
        rc = SSMR3GetGCPhys(pSSM, &pVirtio->pGcPhysDeviceCap);
        rc = SSMR3GetGCPhys(pSSM, &pVirtio->pGcPhysPciCapBase);

        /* Per-queue state (ring addresses, indices, name); count comes from the
         * uNumQueues value just read above. */
        for (uint16_t i = 0; i < pVirtio->uNumQueues; i++)
        {
            rc = SSMR3GetGCPhys64(pSSM, &pVirtio->pGcPhysQueueDesc[i]);
            rc = SSMR3GetGCPhys64(pSSM, &pVirtio->pGcPhysQueueAvail[i]);
            rc = SSMR3GetGCPhys64(pSSM, &pVirtio->pGcPhysQueueUsed[i]);
            rc = SSMR3GetU16(pSSM, &pVirtio->uQueueNotifyOff[i]);
            rc = SSMR3GetU16(pSSM, &pVirtio->uQueueMsixVector[i]);
            rc = SSMR3GetU16(pSSM, &pVirtio->uQueueEnable[i]);
            rc = SSMR3GetU16(pSSM, &pVirtio->uQueueSize[i]);
            rc = SSMR3GetU16(pSSM, &pVirtio->virtqProxy[i].uAvailIdx);
            rc = SSMR3GetU16(pSSM, &pVirtio->virtqProxy[i].uUsedIdx);
            rc = SSMR3GetMem(pSSM, (void *)&pVirtio->virtqProxy[i].szVirtqName, 32);
        }
    }

    /* Chain to the client's device-specific load (runs in every pass). */
    rc = pVirtio->virtioCallbacks.pfnSSMDevLoadExec(pDevIns, pSSM, uVersion, uPass);

    return rc;
}
1431
1432/** @callback_method_impl{FNSSMDEVLOADDONE} */
1433static DECLCALLBACK(int) virtioR3LoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
1434{
1435 PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);
1436
1437 int rc = VINF_SUCCESS;
1438 virtioDumpState(pVirtio, "virtioLoadDone");
1439
1440 rc = pVirtio->virtioCallbacks.pfnSSMDevLoadDone(pDevIns, pSSM);
1441
1442 return rc;
1443}
1444
1445/** @callback_method_impl{FNSSMDEVLIVEEXEC} */
1446static DECLCALLBACK(int) virtioR3LiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass)
1447{
1448 PVIRTIOSTATE pVirtio = *PDMINS_2_DATA(pDevIns, PVIRTIOSTATE *);
1449
1450 int rc = VINF_SUCCESS;
1451 virtioDumpState(pVirtio, "virtioLiveExec");
1452
1453 rc = pVirtio->virtioCallbacks.pfnSSMDevLiveExec(pDevIns, pSSM, uPass);
1454
1455 return rc;
1456
1457}
1458
1459
1460#endif /* IN_RING3 */
1461
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette