Changeset 49507 in vbox

Timestamp: Nov 15, 2013 3:20:59 PM
Location:  trunk
Files:     5 edited
Legend: unchanged context lines carry no prefix, added lines are prefixed with "+", removed lines with "-"; a lone "…" marks unchanged code omitted between hunks.
trunk/include/VBox/VBoxVideo.h
r49474 → r49507

 /* allocation paging fill request */
 #define VBOXCMDVBVA_OPTYPE_PAGING_FILL 5
+/* same as VBOXCMDVBVA_OPTYPE_NOP, but contains VBOXCMDVBVA_HDR data */
+#define VBOXCMDVBVA_OPTYPE_NOPCMD 6
+
 /* nop - is a one-bit command. The buffer size to skip is determined by VBVA buffer size */
 #define VBOXCMDVBVA_OPTYPE_NOP 0x80
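The new VBOXCMDVBVA_OPTYPE_NOPCMD differs from the existing VBOXCMDVBVA_OPTYPE_NOP in that it carries a full VBOXCMDVBVA_HDR, so the host has a result field to complete rather than a bare filler byte to skip. A minimal illustrative sketch of how a host-side parser tells the two apart, mirroring the vboxVDMACrCmdPreprocess hunk in DevVGA_VDMA.cpp below (the helper name exampleHandleNop is hypothetical, not part of the changeset):

    /* Illustrative only: skip a bare NOP, complete a header-carrying NOPCMD.
     * PVBOXCMDVBVA_HDR and its u8OpCode/i8Result fields are used exactly as in the changeset. */
    static int exampleHandleNop(uint8_t *pu8Cmd)
    {
        if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
            return VINF_EOF;                    /* plain filler: nothing to complete, just skip the record */

        PVBOXCMDVBVA_HDR pCmd = (PVBOXCMDVBVA_HDR)pu8Cmd;
        if (pCmd->u8OpCode == VBOXCMDVBVA_OPTYPE_NOPCMD)
        {
            pCmd->i8Result = 0;                 /* header-carrying nop: report success and skip it */
            return VINF_EOF;
        }
        return VINF_SUCCESS;                    /* a real command: hand it on for processing */
    }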
trunk/src/VBox/Devices/Graphics/DevVGA.cpp
r49420 → r49507

 #endif
 
+    vboxCmdVBVACmdTimer(pThis);
 }
trunk/src/VBox/Devices/Graphics/DevVGA.h
r49434 → r49507

 # endif /* VBOX_WITH_VDMA */
 
+int vboxCmdVBVAEnable(PVGASTATE pVGAState, VBVABUFFER *pVBVA);
+int vboxCmdVBVADisable(PVGASTATE pVGAState);
+int vboxCmdVBVACmdSubmit(PVGASTATE pVGAState);
+int vboxCmdVBVACmdFlush(PVGASTATE pVGAState);
+void vboxCmdVBVACmdTimer(PVGASTATE pVGAState);
+
 #endif /* VBOX_WITH_HGSMI */
trunk/src/VBox/Devices/Graphics/DevVGA_VBVA.cpp
r49484 → r49507

 }
 
+void VBVARaiseIrqNoWait(PVGASTATE pVGAState, uint32_t fFlags)
+{
+    PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
+    PDMCritSectEnter(&pVGAState->CritSect, VERR_SEM_BUSY);
+
+    HGSMISetHostGuestFlags(pVGAState->pHGSMI, HGSMIHOSTFLAGS_IRQ | fFlags);
+    PDMDevHlpPCISetIrqNoWait(pDevIns, 0, PDM_IRQ_LEVEL_HIGH);
+
+    PDMCritSectLeave(&pVGAState->CritSect);
+}
+
 /*
  *
…
         case VBVA_CMDVBVA_ENABLE:
+        {
+            if (cbBuffer < sizeof (VBVAENABLE))
+            {
+                rc = VERR_INVALID_PARAMETER;
+                break;
+            }
+
+            VBVAENABLE *pEnable = (VBVAENABLE *)pvBuffer;
+
+            if ((pEnable->u32Flags & (VBVA_F_ENABLE | VBVA_F_DISABLE)) == VBVA_F_ENABLE)
+            {
+                uint32_t u32Offset = pEnable->u32Offset;
+                VBVABUFFER *pVBVA = (VBVABUFFER *)HGSMIOffsetToPointerHost(pIns, u32Offset);
+
+                if (pVBVA)
+                    rc = vboxCmdVBVAEnable(pVGAState, pVBVA);
+                else
+                {
+                    LogRel(("Invalid VBVABUFFER offset 0x%x!!!\n",
+                            pEnable->u32Offset));
+                    rc = VERR_INVALID_PARAMETER;
+                }
+            }
+            else if ((pEnable->u32Flags & (VBVA_F_ENABLE | VBVA_F_DISABLE)) == VBVA_F_DISABLE)
+            {
+                rc = vboxCmdVBVADisable(pVGAState);
+            }
+            else
+            {
+                LogRel(("Invalid VBVA_ENABLE flags 0x%x!!!\n", pEnable->u32Flags));
+                rc = VERR_INVALID_PARAMETER;
+            }
+
+            pEnable->i32Result = rc;
+            break;
+        }
         case VBVA_CMDVBVA_SUBMIT:
+        {
+            rc = vboxCmdVBVACmdSubmit(pVGAState);
+            break;
+        }
         case VBVA_CMDVBVA_FLUSH:
         {
-            /* implement */
-        } break;
+            rc = vboxCmdVBVACmdFlush(pVGAState);
+            break;
+        }
         case VBVA_SCANLINE_CFG:
         {
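The VBVA_CMDVBVA_ENABLE handler accepts exactly one of VBVA_F_ENABLE or VBVA_F_DISABLE in u32Flags, resolves u32Offset to the guest-allocated VBVABUFFER through HGSMI, and stores the status back into i32Result. A small illustrative sketch of how a guest-side submitter might fill the request so that it passes those checks (exampleFillCmdVbvaEnable is a hypothetical helper, not part of the changeset):

    /* Illustrative only: build a VBVAENABLE request matching the host-side checks above. */
    static void exampleFillCmdVbvaEnable(VBVAENABLE *pReq, uint32_t offVbvaBuffer, bool fEnable)
    {
        /* the host rejects any flag combination other than exactly ENABLE or exactly DISABLE */
        pReq->u32Flags  = fEnable ? VBVA_F_ENABLE : VBVA_F_DISABLE;
        /* HGSMI offset of the VBVABUFFER; only meaningful for the enable case */
        pReq->u32Offset = offVbvaBuffer;
        /* the handler overwrites this with the actual status code on completion */
        pReq->i32Result = VERR_NOT_SUPPORTED;
    }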
trunk/src/VBox/Devices/Graphics/DevVGA_VDMA.cpp
r49474 → r49507

 #include <VBox/VBoxVideo3D.h>
 
+#ifdef DEBUG_misha
+#define WARN_BP() do { AssertFailed(); } while (0)
+#else
+#define WARN_BP() do { } while (0)
+#endif
+#define WARN(_msg) do { \
+        LogRel(_msg); \
+        WARN_BP(); \
+    } while (0)
+
 #ifdef VBOX_VDMA_WITH_WORKERTHREAD
 typedef enum
…
 #endif
 
+
+/* state transformations:
+ *
+ *   submitter   |    processor
+ *     STOPPED
+ *        |
+ *        |
+ *        >
+ *    LISTENING   --->  PROCESSING
+ *        ^          _/
+ *        |        _/
+ *        |      _/
+ *        |    _/
+ *        |  _/
+ *        | _/
+ *        |/
+ *       < >
+ *     PAUSED
+ *
+ * */
+#define VBVAEXHOSTCONTEXT_STATE_STOPPED    0
+#define VBVAEXHOSTCONTEXT_STATE_LISTENING  1
+#define VBVAEXHOSTCONTEXT_STATE_PROCESSING 2
+#define VBVAEXHOSTCONTEXT_STATE_PAUSED     3
+
+typedef struct VBVAEXHOSTCONTEXT
+{
+    VBVABUFFER *pVBVA;
+    uint32_t cbCurData;
+    volatile uint32_t u32State;
+    volatile uint32_t u32Pause;
+    volatile uint32_t u32cOtherCommands;
+} VBVAEXHOSTCONTEXT;
+
+/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
+ * but can be called together with the VBoxVBVAExHS** (submitter) functions, except Init/Start/Term apparently.
+ * They may only be called by the processor, i.e. the entity that acquired the processor state by a direct or
+ * indirect call to VBoxVBVAExHSCheckCommands; see the more detailed comments at the function definitions. */
+static bool VBoxVBVAExHPCmdCheckRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva);
+static int VBoxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd);
+
+/* VBoxVBVAExHS**, i.e. submitter functions, can be called concurrently with themselves as well as with the
+ * other VBoxVBVAEx** functions, except Init/Start/Term apparently. */
+static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva);
+
+static void VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva);
+static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA);
+static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva);
+static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva);
+static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM);
+static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version);
+
 typedef struct VBOXVDMAHOST
 {
     PHGSMIINSTANCE pHgsmi;
     PVGASTATE pVGAState;
+    VBVAEXHOSTCONTEXT CmdVbva;
 #ifdef VBOX_VDMA_WITH_WATCHDOG
     PTMTIMERR3 WatchDogTimer;
…
 }
 
+static void vboxVDMACrCmdNotifyPerform(struct VBOXVDMAHOST *pVdma)
+{
+    PVGASTATE pVGAState = pVdma->pVGAState;
+    pVGAState->pDrv->pfnCrCmdNotifyCmds(pVGAState->pDrv);
+}
+
+/*
+ * @returns
+ *
+ */
+static int vboxVDMACrCmdPreprocess(struct VBOXVDMAHOST *pVdma, uint8_t* pu8Cmd, uint32_t cbCmd)
+{
+    if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
+        return VINF_EOF;
+
+    PVBOXCMDVBVA_HDR pCmd = (PVBOXCMDVBVA_HDR)pu8Cmd;
+
+    /* check if the command is cancelled */
+    if (!ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_IN_PROGRESS, VBOXCMDVBVA_STATE_SUBMITTED))
+    {
+        Assert(pCmd->u8State == VBOXCMDVBVA_STATE_CANCELLED);
+        return VINF_EOF;
+    }
+
+    /* some commands can be handled right away */
+    switch (pCmd->u8OpCode)
+    {
+        case VBOXCMDVBVA_OPTYPE_NOPCMD:
+            pCmd->i8Result = 0;
+            return VINF_EOF;
+        default:
+            return VINF_SUCCESS;
+    }
+}
+
 static DECLCALLBACK(int) vboxVDMACrCmdCltCmdGet(HVBOXCRCMDCLT hClt, PVBOXCMDVBVA_HDR *ppNextCmd, uint32_t *pcbNextCmd)
 {
-    return VERR_NOT_IMPLEMENTED;
+    struct VBOXVDMAHOST *pVdma = hClt;
+
+    VBoxVBVAExHPCmdCheckRelease(&pVdma->CmdVbva);
+
+    uint32_t cbCmd;
+    uint8_t *pu8Cmd;
+
+    for (;;)
+    {
+        int rc = VBoxVBVAExHPCmdGet(&pVdma->CmdVbva, &pu8Cmd, &cbCmd);
+        switch (rc)
+        {
+            case VINF_SUCCESS:
+            {
+                rc = vboxVDMACrCmdPreprocess(pVdma, pu8Cmd, cbCmd);
+                switch (rc)
+                {
+                    case VINF_SUCCESS:
+                        *ppNextCmd = (PVBOXCMDVBVA_HDR)pu8Cmd;
+                        *pcbNextCmd = cbCmd;
+                        return VINF_SUCCESS;
+                    case VINF_EOF:
+                        continue;
+                    default:
+                        Assert(!RT_FAILURE(rc));
+                        return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
+                }
+                break;
+            }
+            case VINF_EOF:
+                return VINF_EOF;
+            case VINF_PERMISSION_DENIED:
+                /* processing was paused, the processing state was released, only VBoxVBVAExHS*** calls are now allowed */
+                return VINF_EOF;
+            case VINF_INTERRUPTED:
+                /* command processing was interrupted, the processor state remains set, the client can process other commands */
+                vboxVDMACrCmdNotifyPerform(pVdma);
+                return VINF_EOF;
+            default:
+                Assert(!RT_FAILURE(rc));
+                return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
+        }
+    }
+
+    WARN(("Warning: vboxVDMACrCmdCltCmdGet unexpected state\n"));
+    return VERR_INTERNAL_ERROR;
 }
 
…
 #endif
     pVGAState->pVdma = pVdma;
+    VBoxVBVAExHSInit(&pVdma->CmdVbva);
 #ifdef VBOX_WITH_CRHGSMI
     int rcIgnored = vboxVDMACrCtlHgsmiSetup(pVdma); NOREF(rcIgnored); /** @todo is this ignoring intentional? */
…
     AssertBreakpoint();
 #endif
+    VBoxVBVAExHSTerm(&pVdma->CmdVbva);
     RTMemFree(pVdma);
     return VINF_SUCCESS;
…
 #endif
 }
+
+/**/
+static int vboxVBVAExHSProcessorAcquire(struct VBVAEXHOSTCONTEXT *pCmdVbva)
+{
+    Assert(pCmdVbva->u32State != VBVAEXHOSTCONTEXT_STATE_STOPPED);
+
+    uint32_t oldState;
+    if (!ASMAtomicReadU32(&pCmdVbva->u32Pause))
+    {
+        if (ASMAtomicCmpXchgExU32(&pCmdVbva->u32State, VBVAEXHOSTCONTEXT_STATE_PROCESSING, VBVAEXHOSTCONTEXT_STATE_LISTENING, &oldState))
+            return VINF_SUCCESS;
+        return oldState == VBVAEXHOSTCONTEXT_STATE_PROCESSING ? VERR_SEM_BUSY : VERR_INVALID_STATE;
+    }
+    return VERR_INVALID_STATE;
+}
+
+static bool vboxVBVAExHPCheckPause(struct VBVAEXHOSTCONTEXT *pCmdVbva)
+{
+    Assert(pCmdVbva->u32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
+
+    if (!ASMAtomicReadU32(&pCmdVbva->u32Pause))
+        return false;
+
+    ASMAtomicWriteU32(&pCmdVbva->u32State, VBVAEXHOSTCONTEXT_STATE_PAUSED);
+    return true;
+}
+
+static bool vboxVBVAExHPCheckOtherCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
+{
+    Assert(pCmdVbva->u32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
+
+    return !!ASMAtomicUoReadU32(&pCmdVbva->u32cOtherCommands);
+}
+
+static void vboxVBVAExHPProcessorRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
+{
+    Assert(pCmdVbva->u32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
+
+    if (!vboxVBVAExHPCheckPause(pCmdVbva))
+        ASMAtomicWriteU32(&pCmdVbva->u32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
+    else
+        ASMAtomicWriteU32(&pCmdVbva->u32State, VBVAEXHOSTCONTEXT_STATE_PAUSED);
+}
+
+static void vboxVBVAExHPHgEventSet(struct VBVAEXHOSTCONTEXT *pCmdVbva)
+{
+    Assert(pCmdVbva->u32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
+
+    ASMAtomicOrU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, VBVA_F_STATE_PROCESSING);
+}
+
+static void vboxVBVAExHPHgEventClear(struct VBVAEXHOSTCONTEXT *pCmdVbva)
+{
+    Assert(pCmdVbva->u32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
+
+    ASMAtomicAndU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, ~VBVA_F_STATE_PROCESSING);
+}
+
+static bool vboxVBVAExHPCmdCheckRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
+{
+    if (!pCmdVbva->cbCurData)
+        return false;
+
+    VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
+    pVBVA->off32Data = (pVBVA->off32Data + pCmdVbva->cbCurData) % pVBVA->cbData;
+
+    pVBVA->indexRecordFirst = (pVBVA->indexRecordFirst + 1) % RT_ELEMENTS(pVBVA->aRecords);
+
+    pCmdVbva->cbCurData = 0;
+
+    return true;
+}
+
+static int vboxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
+{
+    Assert(pCmdVbva->u32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
+
+    VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
+
+    uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
+    uint32_t indexRecordFree = pVBVA->indexRecordFree;
+
+    Log(("first = %d, free = %d\n",
+         indexRecordFirst, indexRecordFree));
+
+    if (indexRecordFirst == indexRecordFree)
+    {
+        /* No records to process. Return without assigning output variables. */
+        return VINF_EOF;
+    }
+
+    uint32_t cbRecordCurrent = ASMAtomicReadU32(&pVBVA->aRecords[indexRecordFirst].cbRecord);
+
+    uint32_t cbRecord = cbRecordCurrent & ~VBVA_F_RECORD_PARTIAL;
+
+    /* A new record needs to be processed. */
+    if (cbRecordCurrent & VBVA_F_RECORD_PARTIAL)
+    {
+        Assert(cbRecord == 0);
+        /* the record is still being recorded, try again */
+        return VINF_TRY_AGAIN;
+    }
+
+    if (!cbRecord)
+    {
+        /* the record is still being recorded, try again */
+        return VINF_TRY_AGAIN;
+    }
+
+    /* we should not get partial commands here actually */
+    Assert(cbRecord);
+
+    /* The size of the largest contiguous chunk in the ring buffer. */
+    uint32_t u32BytesTillBoundary = pVBVA->cbData - pVBVA->off32Data;
+
+    /* The pointer to data in the ring buffer. */
+    uint8_t *pSrc = &pVBVA->au8Data[pVBVA->off32Data];
+
+    /* Fetch or point the data. */
+    if (u32BytesTillBoundary >= cbRecord)
+    {
+        /* The command does not cross the buffer boundary. Return the address in the buffer. */
+        *ppCmd = pSrc;
+        *pcbCmd = cbRecord;
+        pCmdVbva->cbCurData = cbRecord;
+        return VINF_SUCCESS;
+    }
+
+    LogRel(("CmdVbva: cross-boundary writes unsupported\n"));
+    return VERR_INVALID_STATE;
+}
+
+/* Resumes command processing
+ * @returns - same as VBoxVBVAExHSCheckCommands
+ */
+static int vboxVBVAExHSResume(struct VBVAEXHOSTCONTEXT *pCmdVbva)
+{
+    Assert(pCmdVbva->u32State != VBVAEXHOSTCONTEXT_STATE_STOPPED);
+
+    ASMAtomicWriteU32(&pCmdVbva->u32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
+
+    return VBoxVBVAExHSCheckCommands(pCmdVbva);
+}
+
+/* Pauses command processing. This makes the processor stop command processing and release the processing state.
+ * To resume command processing, vboxVBVAExHSResume must be called. */
+static void vboxVBVAExHSPause(struct VBVAEXHOSTCONTEXT *pCmdVbva)
+{
+    Assert(pCmdVbva->u32State != VBVAEXHOSTCONTEXT_STATE_STOPPED);
+
+    Assert(!pCmdVbva->u32Pause);
+
+    ASMAtomicWriteU32(&pCmdVbva->u32Pause, 1);
+
+    for (;;)
+    {
+        if (ASMAtomicCmpXchgU32(&pCmdVbva->u32State, VBVAEXHOSTCONTEXT_STATE_PAUSED, VBVAEXHOSTCONTEXT_STATE_LISTENING))
+            break;
+
+        if (ASMAtomicReadU32(&pCmdVbva->u32State) == VBVAEXHOSTCONTEXT_STATE_PAUSED)
+            break;
+
+        RTThreadSleep(2);
+    }
+
+    pCmdVbva->u32Pause = 0;
+}
+
+/* Releases (completes) the command previously acquired by VBoxVBVAExHPCmdGet.
+ * For convenience it can be called when no command is currently acquired;
+ * in that case it does nothing and returns false.
+ * Returns true if a completion notification is needed. */
+static bool VBoxVBVAExHPCmdCheckRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
+{
+    Assert(pCmdVbva->u32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
+
+    return vboxVBVAExHPCmdCheckRelease(pCmdVbva);
+}
+
+/*
+ * @returns
+ *   VINF_SUCCESS - a new command was obtained
+ *   VINF_EOF - the processor has completed all commands and released the processing state, only VBoxVBVAExHS*** calls are now allowed
+ *   VINF_PERMISSION_DENIED - processing was paused, the processing state was released, only VBoxVBVAExHS*** calls are now allowed
+ *   VINF_INTERRUPTED - command processing was interrupted, the processor state remains set; the client can process other commands
+ *                      and call VBoxVBVAExHPCmdGet again for further processing
+ *   VERR_** - an error happened, most likely the guest corrupted the VBVA data
+ *
+ */
+static int VBoxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
+{
+    Assert(pCmdVbva->u32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
+
+    for (;;)
+    {
+        if (vboxVBVAExHPCheckPause(pCmdVbva))
+            return VINF_PERMISSION_DENIED;
+        if (vboxVBVAExHPCheckOtherCommands(pCmdVbva))
+            return VINF_INTERRUPTED;
+
+        int rc = vboxVBVAExHPCmdGet(pCmdVbva, ppCmd, pcbCmd);
+        switch (rc)
+        {
+            case VINF_SUCCESS:
+                return VINF_SUCCESS;
+            case VINF_EOF:
+                vboxVBVAExHPHgEventClear(pCmdVbva);
+                vboxVBVAExHPProcessorRelease(pCmdVbva);
+                /* we need to prevent racing between us clearing the flag and the command check/submission thread, i.e.
+                 * 1. we check the queue -> and it is empty
+                 * 2. the submitter adds a command to the queue
+                 * 3. the submitter checks the "processing" state -> and it is true, thus it does not submit a notification
+                 * 4. we clear the "processing" state
+                 * 5. -> here we need to re-check the queue state to ensure we do not miss the notification for the above command
+                 * 6. if the queue appears to be not empty, set the "processing" state back to "true"
+                 */
+                if (VBoxVBVAExHSCheckCommands(pCmdVbva) == VINF_SUCCESS)
+                    continue;
+                return VINF_EOF;
+            case VINF_TRY_AGAIN:
+                RTThreadSleep(1);
+                continue;
+            default:
+                /* this is something really unexpected, i.e. most likely the guest has written something incorrect to the VBVA buffer */
+                if (RT_FAILURE(rc))
+                    return rc;
+
+                WARN(("Warning: vboxVBVAExHPCmdGet returned unexpected success status %d\n", rc));
+                return VERR_INTERNAL_ERROR;
+        }
+    }
+
+    WARN(("Warning: VBoxVBVAExHPCmdGet unexpected state\n"));
+    return VERR_INTERNAL_ERROR;
+}
+
+/* Checks whether new commands are ready for processing
+ * @returns
+ *   VINF_SUCCESS - there are commands in the queue, and the calling thread is now the processor (i.e. typically it would delegate processing to a worker thread)
+ *   VINF_EOF - no commands in the queue
+ *   VINF_ALREADY_INITIALIZED - another thread is already processing the commands
+ *   VERR_INVALID_STATE - the VBVA is paused or pausing */
+static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
+{
+    if (ASMAtomicUoReadU32(&pCmdVbva->u32State) == VBVAEXHOSTCONTEXT_STATE_STOPPED)
+        return VINF_EOF;
+
+    int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
+    if (RT_SUCCESS(rc))
+    {
+        /* we are the processor now */
+        VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
+
+        uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
+        uint32_t indexRecordFree = pVBVA->indexRecordFree;
+
+        if (indexRecordFirst != indexRecordFree)
+        {
+            vboxVBVAExHPHgEventSet(pCmdVbva);
+            return VINF_SUCCESS;
+        }
+
+        vboxVBVAExHPProcessorRelease(pCmdVbva);
+        return VINF_EOF;
+    }
+    if (rc == VERR_SEM_BUSY)
+        return VINF_ALREADY_INITIALIZED;
+    Assert(rc == VERR_INVALID_STATE);
+    return VERR_INVALID_STATE;
+}
+
+static void VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva)
+{
+    memset(pCmdVbva, 0, sizeof (*pCmdVbva));
+}
+
+static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA)
+{
+    if (ASMAtomicUoReadU32(&pCmdVbva->u32State) != VBVAEXHOSTCONTEXT_STATE_STOPPED)
+        return VINF_ALREADY_INITIALIZED;
+
+    pCmdVbva->pVBVA = pVBVA;
+    pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
+    ASMAtomicWriteU32(&pCmdVbva->u32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
+    return VINF_SUCCESS;
+}
+
+static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
+{
+    if (ASMAtomicUoReadU32(&pCmdVbva->u32State) == VBVAEXHOSTCONTEXT_STATE_STOPPED)
+        return VINF_SUCCESS;
+
+    /* ensure no commands are pending and no one tries to submit them */
+    int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
+    if (RT_SUCCESS(rc))
+    {
+        pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
+        memset(pCmdVbva, 0, sizeof (*pCmdVbva));
+        return VINF_SUCCESS;
+    }
+    return VERR_INVALID_STATE;
+}
+
+static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva)
+{
+    /* ensure the processor is stopped */
+    if (ASMAtomicUoReadU32(&pCmdVbva->u32State) == VBVAEXHOSTCONTEXT_STATE_STOPPED)
+        return;
+
+    /* ensure no one tries to submit commands */
+    vboxVBVAExHSPause(pCmdVbva);
+    pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
+    memset(pCmdVbva, 0, sizeof (*pCmdVbva));
+}
+
+/* Saves state
+ * @returns - same as VBoxVBVAExHSCheckCommands, or a failure on save state failure
+ */
+static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
+{
+    int rc;
+    if (ASMAtomicUoReadU32(&pCmdVbva->u32State) != VBVAEXHOSTCONTEXT_STATE_STOPPED)
+    {
+        vboxVBVAExHSPause(pCmdVbva);
+        rc = SSMR3PutU32(pSSM, (uint32_t)(((uint8_t*)pCmdVbva->pVBVA) - pu8VramBase));
+        AssertRCReturn(rc, rc);
+        return vboxVBVAExHSResume(pCmdVbva);
+    }
+
+    rc = SSMR3PutU32(pSSM, 0xffffffff);
+    AssertRCReturn(rc, rc);
+
+    return VINF_EOF;
+}
+
+/* Loads state
+ * @returns - same as VBoxVBVAExHSCheckCommands, or a failure on load state failure
+ */
+static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
+{
+    uint32_t u32;
+    int rc = SSMR3GetU32(pSSM, &u32);
+    AssertRCReturn(rc, rc);
+    if (u32 != 0xffffffff)
+    {
+        VBVABUFFER *pVBVA = (VBVABUFFER*)(pu8VramBase + u32);
+        rc = VBoxVBVAExHSEnable(pCmdVbva, pVBVA);
+        AssertRCReturn(rc, rc);
+        return VBoxVBVAExHSCheckCommands(pCmdVbva);
+    }
+
+    return VINF_EOF;
+}
+
+int vboxCmdVBVAEnable(PVGASTATE pVGAState, VBVABUFFER *pVBVA)
+{
+    struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
+    return VBoxVBVAExHSEnable(&pVdma->CmdVbva, pVBVA);
+}
+
+int vboxCmdVBVADisable(PVGASTATE pVGAState)
+{
+    struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
+    return VBoxVBVAExHSDisable(&pVdma->CmdVbva);
+}
+
+static int vboxCmdVBVACmdSubmitPerform(PVGASTATE pVGAState)
+{
+    struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
+    int rc = VBoxVBVAExHSCheckCommands(&pVdma->CmdVbva);
+    switch (rc)
+    {
+        case VINF_SUCCESS:
+            return pVGAState->pDrv->pfnCrCmdNotifyCmds(pVGAState->pDrv);
+        case VINF_ALREADY_INITIALIZED:
+        case VINF_EOF:
+        case VERR_INVALID_STATE:
+            return VINF_SUCCESS;
+        default:
+            Assert(!RT_FAILURE(rc));
+            return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
+    }
+}
+
+int vboxCmdVBVACmdSubmit(PVGASTATE pVGAState)
+{
+    return vboxCmdVBVACmdSubmitPerform(pVGAState);
+}
+
+int vboxCmdVBVACmdFlush(PVGASTATE pVGAState)
+{
+    return vboxCmdVBVACmdSubmitPerform(pVGAState);
+}
+
+void vboxCmdVBVACmdTimer(PVGASTATE pVGAState)
+{
+    vboxCmdVBVACmdSubmitPerform(pVGAState);
+}
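Taken together, the new VBVAEXHOSTCONTEXT API splits work between submitters (VBoxVBVAExHS**) and a single processor (VBoxVBVAExHP**): whichever thread gets VINF_SUCCESS from VBoxVBVAExHSCheckCommands owns the PROCESSING state and is the only one allowed to fetch and complete commands until it releases that state. A minimal illustrative sketch of that flow, distilled from vboxVDMACrCmdCltCmdGet and the return-code contract documented above (exampleProcessCmdVbva is a hypothetical caller, not part of the changeset):

    /* Illustrative only: become the processor and drain the command queue. */
    static void exampleProcessCmdVbva(VBVAEXHOSTCONTEXT *pCmdVbva)
    {
        /* Only the thread that gets VINF_SUCCESS here holds the PROCESSING state
         * and may call the VBoxVBVAExHP** functions. */
        if (VBoxVBVAExHSCheckCommands(pCmdVbva) != VINF_SUCCESS)
            return;

        for (;;)
        {
            /* complete the previously fetched command, if any */
            VBoxVBVAExHPCmdCheckRelease(pCmdVbva);

            uint8_t *pu8Cmd;
            uint32_t cbCmd;
            int rc = VBoxVBVAExHPCmdGet(pCmdVbva, &pu8Cmd, &cbCmd);
            if (rc == VINF_SUCCESS)
            {
                /* ... hand pu8Cmd/cbCmd to the command handler here ... */
                continue;
            }

            /* VINF_EOF and VINF_PERMISSION_DENIED mean the processing state has already been
             * released; VINF_INTERRUPTED keeps it set, so a real caller would switch to the
             * "other" commands and come back, as vboxVDMACrCmdCltCmdGet does. */
            break;
        }
    }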