VirtualBox

source: vbox/trunk/src/VBox/Devices/Bus/MsiCommon.cpp@63845

/* $Id: MsiCommon.cpp 63562 2016-08-16 14:04:03Z vboxsync $ */
/** @file
 * MSI support routines
 */

/*
 * Copyright (C) 2010-2016 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */
#define LOG_GROUP LOG_GROUP_DEV_PCI
/* Hack to get PCIDEVICEINT declared at the right point - include "PCIInternal.h". */
#define PCI_INCLUDE_PRIVATE
#include <VBox/pci.h>
#include <VBox/msi.h>
#include <VBox/vmm/pdmdev.h>
#include <VBox/log.h>

#include "MsiCommon.h"

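/* Reads the 16-bit MSI Message Control register of the capability. For
   passthrough devices in ring-3 the read is forwarded to the device's
   config-read callback so the value reflects the real hardware. */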
DECLINLINE(uint16_t) msiGetMessageControl(PPCIDEVICE pDev)
{
    uint32_t idxMessageControl = pDev->Int.s.u8MsiCapOffset + VBOX_MSI_CAP_MESSAGE_CONTROL;
#ifdef IN_RING3
    if (pciDevIsPassthrough(pDev)) {
        return pDev->Int.s.pfnConfigRead(pDev, idxMessageControl, 2);
    }
#endif
    return PCIDevGetWord(pDev, idxMessageControl);
}

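/* True if the device advertises the 64-bit message address capability. */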
DECLINLINE(bool) msiIs64Bit(PPCIDEVICE pDev)
{
    return pciDevIsMsi64Capable(pDev);
}

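/* Returns a pointer to the per-vector mask bits in the device config space,
   or NULL when the capability is too short to contain them. */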
DECLINLINE(uint32_t*) msiGetMaskBits(PPCIDEVICE pDev)
{
    uint8_t iOff = msiIs64Bit(pDev) ? VBOX_MSI_CAP_MASK_BITS_64 : VBOX_MSI_CAP_MASK_BITS_32;
    /* passthrough devices may have no masked/pending support */
    if (iOff >= pDev->Int.s.u8MsiCapSize)
        return NULL;
    iOff += pDev->Int.s.u8MsiCapOffset;
    return (uint32_t*)(pDev->config + iOff);
}

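/* Returns a pointer to the pending bits in the device config space,
   or NULL when the capability is too short to contain them. */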
DECLINLINE(uint32_t*) msiGetPendingBits(PPCIDEVICE pDev)
{
    uint8_t iOff = msiIs64Bit(pDev) ? VBOX_MSI_CAP_PENDING_BITS_64 : VBOX_MSI_CAP_PENDING_BITS_32;
    /* passthrough devices may have no masked/pending support */
    if (iOff >= pDev->Int.s.u8MsiCapSize)
        return NULL;
    iOff += pDev->Int.s.u8MsiCapOffset;
    return (uint32_t*)(pDev->config + iOff);
}

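/* True if the guest has set the MSI Enable bit in Message Control. */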
DECLINLINE(bool) msiIsEnabled(PPCIDEVICE pDev)
{
    return (msiGetMessageControl(pDev) & VBOX_PCI_MSI_FLAGS_ENABLE) != 0;
}

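/* Multiple Message Enable field (bits 6:4 of Message Control): log2 of the
   number of vectors the guest has enabled. */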
DECLINLINE(uint8_t) msiGetMme(PPCIDEVICE pDev)
{
    return (msiGetMessageControl(pDev) & VBOX_PCI_MSI_FLAGS_QSIZE) >> 4;
}

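/* Assembles the guest-programmed message address, combining the low and high
   dwords for 64-bit capable devices. */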
DECLINLINE(RTGCPHYS) msiGetMsiAddress(PPCIDEVICE pDev)
{
    if (msiIs64Bit(pDev))
    {
        uint32_t lo = PCIDevGetDWord(pDev, pDev->Int.s.u8MsiCapOffset + VBOX_MSI_CAP_MESSAGE_ADDRESS_LO);
        uint32_t hi = PCIDevGetDWord(pDev, pDev->Int.s.u8MsiCapOffset + VBOX_MSI_CAP_MESSAGE_ADDRESS_HI);
        return RT_MAKE_U64(lo, hi);
    }
    else
    {
        return PCIDevGetDWord(pDev, pDev->Int.s.u8MsiCapOffset + VBOX_MSI_CAP_MESSAGE_ADDRESS_32);
    }
}

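/* Builds the message data for a vector: the guest-programmed data word with
   the vector number encoded in its lowest MME bits. */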
DECLINLINE(uint32_t) msiGetMsiData(PPCIDEVICE pDev, int32_t iVector)
{
    int16_t  iOff = msiIs64Bit(pDev) ? VBOX_MSI_CAP_MESSAGE_DATA_64 : VBOX_MSI_CAP_MESSAGE_DATA_32;
    uint16_t lo = PCIDevGetWord(pDev, pDev->Int.s.u8MsiCapOffset + iOff);

    // vector encoding into lower bits of message data
    uint8_t  bits = msiGetMme(pDev);
    uint16_t uMask = ((1 << bits) - 1);
    lo &= ~uMask;
    lo |= iVector & uMask;

    return RT_MAKE_U32(lo, 0);
}

#ifdef IN_RING3

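/* Edge detectors for a single config byte update: did the given bit go from
   set to cleared, or from cleared to set? */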
DECLINLINE(bool) msiBitJustCleared(uint32_t uOldValue,
                                   uint32_t uNewValue,
                                   uint32_t uMask)
{
    return (!!(uOldValue & uMask) && !(uNewValue & uMask));
}

DECLINLINE(bool) msiBitJustSet(uint32_t uOldValue,
                               uint32_t uNewValue,
                               uint32_t uMask)
{
    return (!(uOldValue & uMask) && !!(uNewValue & uMask));
}

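/* Handles a guest write to the MSI capability, one byte at a time: read-only
   bits of Message Control are preserved, and when a mask bit is cleared while
   the corresponding vector has a message pending, that message is delivered
   immediately via MsiNotify(). */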
void MsiPciConfigWrite(PPDMDEVINS pDevIns, PCPDMPCIHLP pPciHlp, PPCIDEVICE pDev,
                       uint32_t u32Address, uint32_t val, unsigned len)
{
    int32_t iOff = u32Address - pDev->Int.s.u8MsiCapOffset;
    Assert(iOff >= 0 && (pciDevIsMsiCapable(pDev) && iOff < pDev->Int.s.u8MsiCapSize));

    Log2(("MsiPciConfigWrite: %d <- %x (%d)\n", iOff, val, len));

    uint32_t uAddr = u32Address;
    bool f64Bit = msiIs64Bit(pDev);

    for (uint32_t i = 0; i < len; i++)
    {
        uint32_t reg = i + iOff;
        uint8_t u8Val = (uint8_t)val;
        switch (reg)
        {
            case 0: /* Capability ID, ro */
            case 1: /* Next pointer, ro */
                break;
            case VBOX_MSI_CAP_MESSAGE_CONTROL:
                /* don't change read-only bits: 1-3,7 */
                u8Val &= UINT8_C(~0x8e);
                pDev->config[uAddr] = u8Val | (pDev->config[uAddr] & UINT8_C(0x8e));
                break;
            case VBOX_MSI_CAP_MESSAGE_CONTROL + 1:
                /* don't change read-only bit 8, and reserved 9-15 */
                break;
            default:
                if (pDev->config[uAddr] != u8Val)
                {
                    int32_t maskUpdated = -1;

                    /* If we're enabling masked vector, and have pending messages
                       for this vector, we have to send this message now */
                    if (    !f64Bit
                         && (reg >= VBOX_MSI_CAP_MASK_BITS_32)
                         && (reg < VBOX_MSI_CAP_MASK_BITS_32 + 4)
                       )
                    {
                        maskUpdated = reg - VBOX_MSI_CAP_MASK_BITS_32;
                    }
                    if (    f64Bit
                         && (reg >= VBOX_MSI_CAP_MASK_BITS_64)
                         && (reg < VBOX_MSI_CAP_MASK_BITS_64 + 4)
                       )
                    {
                        maskUpdated = reg - VBOX_MSI_CAP_MASK_BITS_64;
                    }

                    if (maskUpdated != -1 && msiIsEnabled(pDev))
                    {
                        uint32_t* puPending = msiGetPendingBits(pDev);
                        for (int iBitNum = 0; iBitNum < 8; iBitNum++)
                        {
                            int32_t iBit = 1 << iBitNum;
                            uint32_t uVector = maskUpdated*8 + iBitNum;

                            if (msiBitJustCleared(pDev->config[uAddr], u8Val, iBit))
                            {
                                Log(("msi: mask updated bit %d@%x (%d)\n", iBitNum, uAddr, maskUpdated));

                                /* To ensure that we're no longer masked */
                                pDev->config[uAddr] &= ~iBit;
                                if ((*puPending & (1 << uVector)) != 0)
                                {
                                    Log(("msi: notify earlier masked pending vector: %d\n", uVector));
                                    MsiNotify(pDevIns, pPciHlp, pDev, uVector, PDM_IRQ_LEVEL_HIGH, 0 /*uTagSrc*/);
                                }
                            }
                            if (msiBitJustSet(pDev->config[uAddr], u8Val, iBit))
                            {
                                Log(("msi: mask vector: %d\n", uVector));
                            }
                        }
                    }

                    pDev->config[uAddr] = u8Val;
                }
        }
        uAddr++;
        val >>= 8;
    }
}

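/* Reads from the MSI capability are served straight from the cached config
   space; only the access size needs to be honoured. */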
uint32_t MsiPciConfigRead(PPDMDEVINS pDevIns, PPCIDEVICE pDev, uint32_t u32Address, unsigned len)
{
    RT_NOREF1(pDevIns);
#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
    int32_t off = u32Address - pDev->Int.s.u8MsiCapOffset;
    Assert(off >= 0 && (pciDevIsMsiCapable(pDev) && off < pDev->Int.s.u8MsiCapSize));
#endif
    uint32_t rv = 0;

    switch (len)
    {
        case 1:
            rv = PCIDevGetByte(pDev, u32Address);
            break;
        case 2:
            rv = PCIDevGetWord(pDev, u32Address);
            break;
        case 4:
            rv = PCIDevGetDWord(pDev, u32Address);
            break;
        default:
            Assert(false);
    }

    Log2(("MsiPciConfigRead: %d (%d) -> %x\n", off, len, rv));

    return rv;
}

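/* Sets up the MSI capability in the device's config space from the
   registration data: computes the Multiple Message Capable value, writes the
   capability header and Message Control word, and clears the mask/pending
   bits. Passthrough devices keep the capability of the real hardware.

   A minimal usage sketch (the concrete values below are made up for
   illustration; devices normally register MSI through the PDM device helpers
   rather than calling this directly):

       PDMMSIREG MsiReg;
       RT_ZERO(MsiReg);
       MsiReg.cMsiVectors    = 1;      // one vector
       MsiReg.iMsiCapOffset  = 0x80;   // hypothetical capability offset
       MsiReg.iMsiNextOffset = 0x00;   // end of capability list
       MsiReg.fMsi64bit      = true;   // advertise 64-bit addressing
       int rc = MsiInit(pPciDev, &MsiReg);
*/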
int MsiInit(PPCIDEVICE pDev, PPDMMSIREG pMsiReg)
{
    if (pMsiReg->cMsiVectors == 0)
        return VINF_SUCCESS;

    /* XXX: done in pcirawAnalyzePciCaps() */
    if (pciDevIsPassthrough(pDev))
        return VINF_SUCCESS;

    uint16_t cVectors    = pMsiReg->cMsiVectors;
    uint8_t  iCapOffset  = pMsiReg->iMsiCapOffset;
    uint8_t  iNextOffset = pMsiReg->iMsiNextOffset;
    bool     f64bit      = pMsiReg->fMsi64bit;
    uint16_t iFlags      = 0;
    int      iMmc;

    /* Compute multiple-message capable bitfield */
    for (iMmc = 0; iMmc < 6; iMmc++)
    {
        if ((1 << iMmc) >= cVectors)
            break;
    }

    if ((cVectors > VBOX_MSI_MAX_ENTRIES) || (1 << iMmc) < cVectors)
        return VERR_TOO_MUCH_DATA;

    Assert(iCapOffset != 0 && iCapOffset < 0xff && iNextOffset < 0xff);

    /* We always support per-vector masking */
    iFlags |= VBOX_PCI_MSI_FLAGS_MASKBIT | iMmc;
    if (f64bit)
        iFlags |= VBOX_PCI_MSI_FLAGS_64BIT;
    /* How many vectors we're capable of */
    iFlags |= iMmc;

    pDev->Int.s.u8MsiCapOffset = iCapOffset;
    pDev->Int.s.u8MsiCapSize   = f64bit ? VBOX_MSI_CAP_SIZE_64 : VBOX_MSI_CAP_SIZE_32;

    PCIDevSetByte(pDev, iCapOffset + 0, VBOX_PCI_CAP_ID_MSI);
    PCIDevSetByte(pDev, iCapOffset + 1, iNextOffset); /* next */
    PCIDevSetWord(pDev, iCapOffset + VBOX_MSI_CAP_MESSAGE_CONTROL, iFlags);

    *msiGetMaskBits(pDev)    = 0;
    *msiGetPendingBits(pDev) = 0;

    pciDevSetMsiCapable(pDev);

    return VINF_SUCCESS;
}

#endif /* IN_RING3 */


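/* Checks whether the device has an MSI capability and the guest has enabled it. */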
bool MsiIsEnabled(PPCIDEVICE pDev)
{
    return pciDevIsMsiCapable(pDev) && msiIsEnabled(pDev);
}

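/* Raises an MSI for the given vector: level-down transitions are ignored, a
   masked vector is only marked pending, otherwise the guest-programmed
   address/data pair is sent to the I/O APIC via the PCI helper callbacks. */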
void MsiNotify(PPDMDEVINS pDevIns, PCPDMPCIHLP pPciHlp, PPCIDEVICE pDev, int iVector, int iLevel, uint32_t uTagSrc)
{
    AssertMsg(msiIsEnabled(pDev), ("Must be enabled to use that"));

    uint32_t uMask;
    uint32_t *puPending = msiGetPendingBits(pDev);
    if (puPending)
    {
        uint32_t *puMask = msiGetMaskBits(pDev);
        AssertPtr(puMask);
        uMask = *puMask;
        LogFlow(("MsiNotify: %d pending=%x mask=%x\n", iVector, *puPending, uMask));
    }
    else
    {
        uMask = 0;
        LogFlow(("MsiNotify: %d\n", iVector));
    }

    /* We only trigger MSI on level up */
    if ((iLevel & PDM_IRQ_LEVEL_HIGH) == 0)
    {
        /** @todo maybe clear pending interrupts on level down? */
#if 0
        if (puPending)
        {
            *puPending &= ~(1<<iVector);
            LogFlow(("msi: clear pending %d, now %x\n", iVector, *puPending));
        }
#endif
        return;
    }

    if ((uMask & (1<<iVector)) != 0)
    {
        *puPending |= (1<<iVector);
        LogFlow(("msi: %d is masked, mark pending, now %x\n", iVector, *puPending));
        return;
    }

    RTGCPHYS GCAddr = msiGetMsiAddress(pDev);
    uint32_t u32Value = msiGetMsiData(pDev, iVector);

    if (puPending)
        *puPending &= ~(1<<iVector);

    Assert(pPciHlp->pfnIoApicSendMsi != NULL);
    pPciHlp->pfnIoApicSendMsi(pDevIns, GCAddr, u32Value, uTagSrc);
}