Changeset 75520 in vbox for trunk/src/VBox/Devices
- Timestamp: Nov 16, 2018, 4:06:57 PM
- svn:sync-xref-src-repo-rev: 126678
- Location: trunk/src/VBox/Devices/VMMDev
- Files: 4 edited
trunk/src/VBox/Devices/VMMDev/VMMDev.cpp
--- r75500
+++ r75520
@@ -1776 +1776 @@
  * @param   tsArrival       The STAM_GET_TS() value when the request arrived.
  */
-static int vmmdevReqHandler_HGCMCall(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr, RTGCPHYS GCPhysReqHdr, uint64_t tsArrival)
+static int vmmdevReqHandler_HGCMCall(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr, RTGCPHYS GCPhysReqHdr,
+                                     uint64_t tsArrival, PVMMDEVREQLOCK *ppLock)
 {
     VMMDevHGCMCall *pReq = (VMMDevHGCMCall *)pReqHdr;
@@ -1786 +1787 @@
     Log2(("%.*Rhxd\n", pReq->header.header.size, pReq));
 
-    return vmmdevHGCMCall(pThis, pReq, pReq->header.header.size, GCPhysReqHdr, pReq->header.header.requestType, tsArrival);
+    return vmmdevHGCMCall(pThis, pReq, pReq->header.header.size, GCPhysReqHdr, pReq->header.header.requestType,
+                          tsArrival, ppLock);
 }
 
@@ -2552 +2554 @@
 
 /**
+ * Sets request status to VINF_HGCM_ASYNC_EXECUTE.
+ *
+ * @param   pThis           The VMM device instance data.
+ * @param   GCPhysReqHdr    The guest physical address of the request.
+ * @param   ppLock          Pointer to the request locking info.  NULL if not
+ *                          locked.
+ */
+DECLINLINE(void) vmmdevReqHdrSetHgcmAsyncExecute(PVMMDEV pThis, RTGCPHYS GCPhysReqHdr, PVMMDEVREQLOCK pLock)
+{
+    if (pLock)
+        ((VMMDevRequestHeader volatile *)pLock->pvReq)->rc = VINF_HGCM_ASYNC_EXECUTE;
+    else
+    {
+        int32_t rcReq = VINF_HGCM_ASYNC_EXECUTE;
+        PDMDevHlpPhysWrite(pThis->pDevIns, GCPhysReqHdr + RT_UOFFSETOF(VMMDevRequestHeader, rc), &rcReq, sizeof(rcReq));
+    }
+}
+
+
+/** @name VMMDEVREQDISP_POST_F_XXX - post dispatcher optimizations.
+ * @{ */
+#define VMMDEVREQDISP_POST_F_NO_WRITE_OUT   RT_BIT_32(0)
+/** @} */
+
+
+/**
  * Dispatch the request to the appropriate handler function.
  *
@@ -2560 +2588 @@
  *                          HGCM).
  * @param   tsArrival       The STAM_GET_TS() value when the request arrived.
- * @param   pfDelayedUnlock Where to indicate whether the critical section exit
- *                          needs to be delayed till after the request has been
- *                          written back.  This is a HGCM kludge, see critsect
- *                          work in hgcmCompletedWorker for more details.
+ * @param   pfPostOptimize  HGCM optimizations, VMMDEVREQDISP_POST_F_XXX.
  */
 static int vmmdevReqDispatcher(PVMMDEV pThis, VMMDevRequestHeader *pReqHdr, RTGCPHYS GCPhysReqHdr,
-                               uint64_t tsArrival, bool *pfDelayedUnlock)
+                               uint64_t tsArrival, uint32_t *pfPostOptimize, PVMMDEVREQLOCK *ppLock)
 {
     int rcRet = VINF_SUCCESS;
-    *pfDelayedUnlock = false;
+    Assert(*pfPostOptimize == 0);
 
     switch (pReqHdr->requestType)
@@ -2679 +2704 @@
 #ifdef VBOX_WITH_HGCM
         case VMMDevReq_HGCMConnect:
+            vmmdevReqHdrSetHgcmAsyncExecute(pThis, GCPhysReqHdr, *ppLock);
             pReqHdr->rc = vmmdevReqHandler_HGCMConnect(pThis, pReqHdr, GCPhysReqHdr);
-            *pfDelayedUnlock = true;
+            Assert(pReqHdr->rc == VINF_HGCM_ASYNC_EXECUTE || RT_FAILURE_NP(pReqHdr->rc));
+            if (RT_SUCCESS(pReqHdr->rc))
+                *pfPostOptimize |= VMMDEVREQDISP_POST_F_NO_WRITE_OUT;
             break;
 
         case VMMDevReq_HGCMDisconnect:
+            vmmdevReqHdrSetHgcmAsyncExecute(pThis, GCPhysReqHdr, *ppLock);
             pReqHdr->rc = vmmdevReqHandler_HGCMDisconnect(pThis, pReqHdr, GCPhysReqHdr);
-            *pfDelayedUnlock = true;
+            Assert(pReqHdr->rc == VINF_HGCM_ASYNC_EXECUTE || RT_FAILURE_NP(pReqHdr->rc));
+            if (RT_SUCCESS(pReqHdr->rc))
+                *pfPostOptimize |= VMMDEVREQDISP_POST_F_NO_WRITE_OUT;
             break;
 
@@ -2694 +2725 @@
         case VMMDevReq_HGCMCall:
# endif /* VBOX_WITH_64_BITS_GUESTS */
-            pReqHdr->rc = vmmdevReqHandler_HGCMCall(pThis, pReqHdr, GCPhysReqHdr, tsArrival);
-            *pfDelayedUnlock = true;
+            vmmdevReqHdrSetHgcmAsyncExecute(pThis, GCPhysReqHdr, *ppLock);
+            pReqHdr->rc = vmmdevReqHandler_HGCMCall(pThis, pReqHdr, GCPhysReqHdr, tsArrival, ppLock);
+            Assert(pReqHdr->rc == VINF_HGCM_ASYNC_EXECUTE || RT_FAILURE_NP(pReqHdr->rc));
+            if (RT_SUCCESS(pReqHdr->rc))
+                *pfPostOptimize |= VMMDEVREQDISP_POST_F_NO_WRITE_OUT;
             break;
 
         case VMMDevReq_HGCMCancel:
             pReqHdr->rc = vmmdevReqHandler_HGCMCancel(pThis, pReqHdr, GCPhysReqHdr);
-            *pfDelayedUnlock = true;
             break;
 
@@ -2870 +2903 @@
 
     int rcRet = VINF_SUCCESS;
-    bool fDelayedUnlock = false;
     VMMDevRequestHeader *pRequestHeader = NULL;
 
@@ -2898 +2930 @@
         {
             memcpy(pRequestHeader, &requestHeader, sizeof(VMMDevRequestHeader));
-            size_t cbLeft = requestHeader.size - sizeof(VMMDevRequestHeader);
+
+            VMMDEVREQLOCK  Lock   = { NULL, { NULL, NULL } };
+            PVMMDEVREQLOCK pLock  = NULL;
+            size_t         cbLeft = requestHeader.size - sizeof(VMMDevRequestHeader);
             if (cbLeft)
-                PDMDevHlpPhysRead(pDevIns,
-                                  (RTGCPHYS)u32 + sizeof(VMMDevRequestHeader),
-                                  (uint8_t *)pRequestHeader + sizeof(VMMDevRequestHeader),
-                                  cbLeft);
-
+            {
+#if 1
+                RT_NOREF_PV(Lock);
+#else
+                if (   (   requestHeader.requestType == VMMDevReq_HGCMCall32
+                        || requestHeader.requestType == VMMDevReq_HGCMCall64)
+                    && ((u32 + requestHeader.size) >> X86_PAGE_SHIFT) == (u32 >> X86_PAGE_SHIFT)
+                    && RT_SUCCESS(PDMDevHlpPhysGCPhys2CCPtr(pDevIns, u32, 0 /*fFlags*/, &Lock.pvReq, &Lock.Lock)) )
+                {
+                    memcpy((uint8_t *)pRequestHeader + sizeof(VMMDevRequestHeader),
+                           (uint8_t *)Lock.pvReq + sizeof(VMMDevRequestHeader), cbLeft);
+                    pLock = &Lock;
+                }
+                else
+#endif
+                    PDMDevHlpPhysRead(pDevIns,
+                                      (RTGCPHYS)u32 + sizeof(VMMDevRequestHeader),
+                                      (uint8_t *)pRequestHeader + sizeof(VMMDevRequestHeader),
+                                      cbLeft);
+            }
+
+            uint32_t fPostOptimize = 0;
             PDMCritSectEnter(&pThis->CritSect, VERR_IGNORED);
-            rcRet = vmmdevReqDispatcher(pThis, pRequestHeader, u32, tsArrival, &fDelayedUnlock);
-            if (!fDelayedUnlock)
-                PDMCritSectLeave(&pThis->CritSect);
+            rcRet = vmmdevReqDispatcher(pThis, pRequestHeader, u32, tsArrival, &fPostOptimize, &pLock);
+            PDMCritSectLeave(&pThis->CritSect);
+
+            /*
+             * Write the result back to guest memory (unless it is a locked HGCM call).
+             */
+            if (!(fPostOptimize & VMMDEVREQDISP_POST_F_NO_WRITE_OUT))
+            {
+                if (pLock)
+                    memcpy(pLock->pvReq, pRequestHeader, pRequestHeader->size);
+                else
+                    PDMDevHlpPhysWrite(pDevIns, u32, pRequestHeader, pRequestHeader->size);
+            }
+
+            RTMemFree(pRequestHeader);
+            return rcRet;
         }
-        else
-        {
-            Log(("VMMDev: RTMemAlloc failed!\n"));
-            requestHeader.rc = VERR_NO_MEMORY;
-        }
+
+        Log(("VMMDev: RTMemAlloc failed!\n"));
+        requestHeader.rc = VERR_NO_MEMORY;
     }
     else
@@ -2930 +2993 @@
 
     /*
-     * Write the result back to guest memory
+     * Write the result back to guest memory.
      */
-    if (pRequestHeader)
-    {
-        PDMDevHlpPhysWrite(pDevIns, u32, pRequestHeader, pRequestHeader->size);
-        if (fDelayedUnlock)
-            PDMCritSectLeave(&pThis->CritSect);
-        RTMemFree(pRequestHeader);
-    }
-    else
-    {
-        /* early error case; write back header only */
-        PDMDevHlpPhysWrite(pDevIns, u32, &requestHeader, sizeof(requestHeader));
-        Assert(!fDelayedUnlock);
-    }
+    PDMDevHlpPhysWrite(pDevIns, u32, &requestHeader, sizeof(requestHeader));
 
     return rcRet;
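In short, the old fDelayedUnlock/delayed-critsect-exit kludge is gone: the request header's rc is set to VINF_HGCM_ASYNC_EXECUTE before an asynchronous HGCM handler runs, and a successful handler reports VMMDEVREQDISP_POST_F_NO_WRITE_OUT so the generic write-back no longer races with the HGCM completion path. A condensed sketch of the tail of vmmdevRequestHandler after this change, using only the helpers visible in the hunks above; request validation, allocation and the non-HGCM cases are left out:

    /* Sketch, condensed from the hunks above -- not the complete function. */
    uint32_t fPostOptimize = 0;                          /* VMMDEVREQDISP_POST_F_XXX */
    PDMCritSectEnter(&pThis->CritSect, VERR_IGNORED);
    rcRet = vmmdevReqDispatcher(pThis, pRequestHeader, u32 /*GCPhysReqHdr*/, tsArrival, &fPostOptimize, &pLock);
    PDMCritSectLeave(&pThis->CritSect);                  /* the critsect is always left here now */

    if (!(fPostOptimize & VMMDEVREQDISP_POST_F_NO_WRITE_OUT))   /* async HGCM: completion writes back later */
    {
        if (pLock)                                       /* request page mapped: plain memcpy */
            memcpy(pLock->pvReq, pRequestHeader, pRequestHeader->size);
        else                                             /* otherwise go through PGM as before */
            PDMDevHlpPhysWrite(pDevIns, u32, pRequestHeader, pRequestHeader->size);
    }
    RTMemFree(pRequestHeader);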
trunk/src/VBox/Devices/VMMDev/VMMDevHGCM.cpp
--- r75519
+++ r75520
@@ -140 +140 @@
     VMMDevRequestType enmRequestType;
 
+    /** Pointer to the locked request, NULL if not locked. */
+    void *pvReqLocked;
+    /** The PGM lock for GCPhys if pvReqLocked is not NULL. */
+    PGMPAGEMAPLOCK ReqMapLock;
+
     /** The STAM_GET_TS() value when the request arrived. */
     uint64_t tsArrival;
@@ -236 +241 @@
 /** Deallocate VBOXHGCMCMD memory.
  *
+ * @param   pThis           The VMMDev instance data.
  * @param   pCmd            Command to deallocate.
  */
-static void vmmdevHGCMCmdFree(PVBOXHGCMCMD pCmd)
+static void vmmdevHGCMCmdFree(PVMMDEV pThis, PVBOXHGCMCMD pCmd)
 {
     if (pCmd)
@@ -259 +265 @@
                         RTMemFree(pGuestParm->u.ptr.paPages);
             }
+        }
+
+        if (pCmd->pvReqLocked)
+        {
+            PDMDevHlpPhysReleasePageMappingLock(pThis->pDevIns, &pCmd->ReqMapLock);
+            pCmd->pvReqLocked = NULL;
         }
 
@@ -869 +881 @@
  */
 int vmmdevHGCMCall(PVMMDEV pThis, const VMMDevHGCMCall *pHGCMCall, uint32_t cbHGCMCall, RTGCPHYS GCPhys,
-                   VMMDevRequestType enmRequestType, uint64_t tsArrival)
+                   VMMDevRequestType enmRequestType, uint64_t tsArrival, PVMMDEVREQLOCK *ppLock)
 {
     LogFunc(("client id = %d, function = %d, cParms = %d, enmRequestType = %d\n",
@@ -895 +907 @@
     {
         pCmd->tsArrival = tsArrival;
+        PVMMDEVREQLOCK pLock = *ppLock;
+        if (pLock)
+        {
+            pCmd->ReqMapLock  = pLock->Lock;
+            pCmd->pvReqLocked = pLock->pvReq;
+            *ppLock = NULL;
+        }
+
         rc = vmmdevHGCMCallFetchGuestParms(pThis, pCmd, pHGCMCall, cbHGCMCall, enmRequestType, cbHGCMParmStruct);
         if (RT_SUCCESS(rc))
@@ -912 +932 @@
         if (RT_SUCCESS(rc))
         {
+            Assert(rc == VINF_HGCM_ASYNC_EXECUTE);
+
             /*
              * Done. Just update statistics and return.
@@ -930 +952 @@
             }
         }
-        vmmdevHGCMCmdFree(pCmd);
+        vmmdevHGCMCmdFree(pThis, pCmd);
     }
     return rc;
@@ -1100 +1122 @@
     if (RT_LIKELY(!pCmd->fCancelled))
     {
-        /** @todo r=bird: Given that we involve the heap and call PGM three times, we
-         *        would most likely be better off locking the buffer memory here.  If
-         *        nothing else, it will avoid taking the PGM lock once. */
-
-        VMMDevHGCMRequestHeader *pHeader = (VMMDevHGCMRequestHeader *)RTMemAlloc(pCmd->cbRequest);
-        if (pHeader)
+        if (!pCmd->pvReqLocked)
         {
             /*
-             * Enter and leave the critical section here so we make sure
-             * vmmdevRequestHandler has completed before we read & write
-             * the request. (This isn't 100% optimal, but it solves the
-             * 3.0 blocker.)
+             * Request is not locked:
              */
-            /** @todo It would be faster if this interface would use MMIO2 memory and we
-             *        didn't have to mess around with PDMDevHlpPhysRead/Write.  We're
-             *        reading the header 3 times now and writing the request back twice. */
-
-            PDMCritSectEnter(&pThis->CritSect, VERR_SEM_BUSY);
-            PDMCritSectLeave(&pThis->CritSect);
-
-            /*
-             * Read the request from the guest memory for updating.
-             * The request data is not be used for anything but checking the request type.
-             */
-            PDMDevHlpPhysRead(pThis->pDevIns, pCmd->GCPhys, pHeader, pCmd->cbRequest);
-            RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
+            VMMDevHGCMRequestHeader *pHeader = (VMMDevHGCMRequestHeader *)RTMemAlloc(pCmd->cbRequest);
+            if (pHeader)
+            {
+                /*
+                 * Read the request from the guest memory for updating.
+                 * The request data is not be used for anything but checking the request type.
+                 */
+                PDMDevHlpPhysRead(pThis->pDevIns, pCmd->GCPhys, pHeader, pCmd->cbRequest);
+                RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
+
+                /* Verify the request type. This is the only field which is used from the guest memory. */
+                const VMMDevRequestType enmRequestType = pHeader->header.requestType;
+                if (   enmRequestType == pCmd->enmRequestType
+                    || enmRequestType == VMMDevReq_HGCMCancel)
+                {
+                    RT_UNTRUSTED_VALIDATED_FENCE();
+
+                    /*
+                     * Update parameters and data buffers.
+                     */
+                    switch (enmRequestType)
+                    {
+#ifdef VBOX_WITH_64_BITS_GUESTS
+                        case VMMDevReq_HGCMCall64:
+                        case VMMDevReq_HGCMCall32:
+#else
+                        case VMMDevReq_HGCMCall:
+#endif
+                        {
+                            VMMDevHGCMCall *pHGCMCall = (VMMDevHGCMCall *)pHeader;
+                            rc = vmmdevHGCMCompleteCallRequest(pThis, pCmd, pHGCMCall);
+#ifdef VBOX_WITH_DTRACE
+                            idFunction = pCmd->u.call.u32Function;
+                            idClient   = pCmd->u.call.u32ClientID;
+#endif
+                            break;
+                        }
+
+                        case VMMDevReq_HGCMConnect:
+                        {
+                            /* save the client id in the guest request packet */
+                            VMMDevHGCMConnect *pHGCMConnect = (VMMDevHGCMConnect *)pHeader;
+                            pHGCMConnect->u32ClientID = pCmd->u.connect.u32ClientID;
+                            break;
+                        }
+
+                        default:
+                            /* make compiler happy */
+                            break;
+                    }
+                }
+                else
+                {
+                    /* Guest has changed the command type. */
+                    LogRelMax(50, ("VMMDEV: Invalid HGCM command: pCmd->enmCmdType = 0x%08X, pHeader->header.requestType = 0x%08X\n",
+                                   pCmd->enmCmdType, pHeader->header.requestType));
+
+                    ASSERT_GUEST_FAILED_STMT(rc = VERR_INVALID_PARAMETER);
+                }
+
+                /* Setup return code for the guest. */
+                if (RT_SUCCESS(rc))
+                    pHeader->result = result;
+                else
+                    pHeader->result = rc;
+
+                /* First write back the request. */
+                PDMDevHlpPhysWrite(pThis->pDevIns, pCmd->GCPhys, pHeader, pCmd->cbRequest);
+
+                /* Mark request as processed. */
+                pHeader->fu32Flags |= VBOX_HGCM_REQ_DONE;
+
+                /* Second write the flags to mark the request as processed. */
+                PDMDevHlpPhysWrite(pThis->pDevIns, pCmd->GCPhys + RT_UOFFSETOF(VMMDevHGCMRequestHeader, fu32Flags),
+                                   &pHeader->fu32Flags, sizeof(pHeader->fu32Flags));
+
+                /* Now, when the command was removed from the internal list, notify the guest. */
+                VMMDevNotifyGuest(pThis, VMMDEV_EVENT_HGCM);
+
+                RTMemFree(pHeader);
+            }
+            else
+            {
+                LogRelMax(10, ("VMMDev: Failed to allocate %u bytes for HGCM request completion!!!\n", pCmd->cbRequest));
+            }
+        }
+        /*
+         * Request was locked:
+         */
+        else
+        {
+            VMMDevHGCMRequestHeader volatile *pHeader = (VMMDevHGCMRequestHeader volatile *)pCmd->pvReqLocked;
 
             /* Verify the request type. This is the only field which is used from the guest memory. */
@@ -1183 +1276 @@
                 pHeader->result = rc;
 
-            /* First write back the request. */
-            PDMDevHlpPhysWrite(pThis->pDevIns, pCmd->GCPhys, pHeader, pCmd->cbRequest);
-
             /* Mark request as processed. */
-            pHeader->fu32Flags |= VBOX_HGCM_REQ_DONE;
-
-            /* Second write the flags to mark the request as processed. */
-            PDMDevHlpPhysWrite(pThis->pDevIns, pCmd->GCPhys + RT_UOFFSETOF(VMMDevHGCMRequestHeader, fu32Flags),
-                               &pHeader->fu32Flags, sizeof(pHeader->fu32Flags));
+            ASMAtomicOrU32(&pHeader->fu32Flags, VBOX_HGCM_REQ_DONE);
 
             /* Now, when the command was removed from the internal list, notify the guest. */
             VMMDevNotifyGuest(pThis, VMMDEV_EVENT_HGCM);
-
-            RTMemFree(pHeader);
-        }
-        else
-        {
-            LogRelMax(10, ("VMMDev: Failed to allocate %u bytes for HGCM request completion!!!\n", pCmd->cbRequest));
         }
     }
@@ -1216 +1296 @@
     /* Deallocate the command memory. */
     VBOXDD_HGCMCALL_COMPLETED_DONE(pCmd, idFunction, idClient, result);
-    vmmdevHGCMCmdFree(pCmd);
+    vmmdevHGCMCmdFree(pThis, pCmd);
 
 #ifndef VBOX_WITHOUT_RELEASE_STATISTICS
@@ -1773 +1853 @@
         *ppRestoredCmd = pCmd;
     else
-        vmmdevHGCMCmdFree(pCmd);
+        vmmdevHGCMCmdFree(pThis, pCmd);
 
     return rc;
@@ -1885 +1965 @@
          */
         VMMDevHGCMRequestHeader *pReqHdr = (VMMDevHGCMRequestHeader *)RTMemAlloc(pCmd->cbRequest);
-        AssertBreakStmt(pReqHdr, vmmdevHGCMCmdFree(pCmd); rcFunc = VERR_NO_MEMORY);
+        AssertBreakStmt(pReqHdr, vmmdevHGCMCmdFree(pThis, pCmd); rcFunc = VERR_NO_MEMORY);
 
         PDMDevHlpPhysRead(pThis->pDevIns, pCmd->GCPhys, pReqHdr, pCmd->cbRequest);
@@ -1905 +1985 @@
         {
             Assert(pCmd != pRestoredCmd); /* vmmdevHGCMRestoreCommand must allocate restored command. */
-            vmmdevHGCMCmdFree(pCmd);
+            vmmdevHGCMCmdFree(pThis, pCmd);
             pCmd = pRestoredCmd;
         }
@@ -1978 +2058 @@
 
         /* Deallocate the command memory. */
-        vmmdevHGCMCmdFree(pCmd);
+        vmmdevHGCMCmdFree(pThis, pCmd);
     }
 
@@ -1989 +2069 @@
     {
         RTListNodeRemove(&pCmd->node);
-        vmmdevHGCMCmdFree(pCmd);
+        vmmdevHGCMCmdFree(pThis, pCmd);
     }
 }
@@ -2008 +2088 @@
     {
         vmmdevHGCMRemoveCommand(pThis, pCmd);
-        vmmdevHGCMCmdFree(pCmd);
-    }
-}
+        vmmdevHGCMCmdFree(pThis, pCmd);
+    }
+}
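With the pvReqLocked/ReqMapLock pair stored in the command, hgcmCompletedWorker gains a second completion branch that avoids the RTMemAlloc shadow copy and the two PDMDevHlpPhysWrite calls of the unlocked path. Roughly, the locked branch added above boils down to the following sketch (request-type validation and the parameter write-back via vmmdevHGCMCompleteCallRequest are omitted here):

    /* Sketch of the locked completion branch, condensed from the hunks above. */
    VMMDevHGCMRequestHeader volatile *pHeader = (VMMDevHGCMRequestHeader volatile *)pCmd->pvReqLocked;

    /* Set the return code for the guest directly in the mapped request. */
    pHeader->result = RT_SUCCESS(rc) ? result : rc;

    /* Mark the request as processed with a single atomic update; this replaces the full
       request write-back plus the second flags-only PDMDevHlpPhysWrite of the unlocked path. */
    ASMAtomicOrU32(&pHeader->fu32Flags, VBOX_HGCM_REQ_DONE);

    /* Raise the HGCM event so the guest re-examines the request. */
    VMMDevNotifyGuest(pThis, VMMDEV_EVENT_HGCM);

    /* The page mapping lock itself is released later in vmmdevHGCMCmdFree(pThis, pCmd). */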
trunk/src/VBox/Devices/VMMDev/VMMDevHGCM.h
--- r75500
+++ r75520
@@ -25 +25 @@
 int vmmdevHGCMDisconnect(VMMDevState *pVMMDevState, const VMMDevHGCMDisconnect *pHGCMDisconnect, RTGCPHYS GCPtr);
 int vmmdevHGCMCall(VMMDevState *pVMMDevState, const VMMDevHGCMCall *pHGCMCall, uint32_t cbHGCMCall, RTGCPHYS GCPtr,
-                   VMMDevRequestType enmRequestType, uint64_t tsArrival);
+                   VMMDevRequestType enmRequestType, uint64_t tsArrival, PVMMDEVREQLOCK *ppLock);
 int vmmdevHGCMCancel(VMMDevState *pVMMDevState, const VMMDevHGCMCancel *pHGCMCancel, RTGCPHYS GCPtr);
 int vmmdevHGCMCancel2(VMMDevState *pVMMDevState, RTGCPHYS GCPtr);
trunk/src/VBox/Devices/VMMDev/VMMDevState.h
--- r75500
+++ r75520
@@ -31 +31 @@
 
 #define VMMDEV_WITH_ALT_TIMESYNC
+
+/** Request locking structure (HGCM optimization). */
+typedef struct VMMDEVREQLOCK
+{
+    void          *pvReq;
+    PGMPAGEMAPLOCK Lock;
+} VMMDEVREQLOCK;
+/** Pointer to a request lock structure. */
+typedef VMMDEVREQLOCK *PVMMDEVREQLOCK;
 
 typedef struct DISPLAYCHANGEREQUEST
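VMMDEVREQLOCK simply pairs a ring-3 mapping of the guest request page with the PGM page-mapping lock that keeps the mapping valid. A minimal sketch of the intended lifecycle, pieced together from the mapping code in VMMDev.cpp (still compiled out via "#if 1" in this changeset) and the release in vmmdevHGCMCmdFree; GCPhysReq is a placeholder name for the request's guest-physical address:

    /* Sketch only: the mapping branch is disabled via "#if 1" in this changeset. */
    VMMDEVREQLOCK Lock = { NULL, { NULL, NULL } };
    if (RT_SUCCESS(PDMDevHlpPhysGCPhys2CCPtr(pDevIns, GCPhysReq, 0 /*fFlags*/, &Lock.pvReq, &Lock.Lock)))
    {
        /* Lock.pvReq now points straight at the request in guest RAM, so the device can
           read and update it with plain memory accesses instead of PDMDevHlpPhysRead/Write. */
        ((VMMDevRequestHeader volatile *)Lock.pvReq)->rc = VINF_HGCM_ASYNC_EXECUTE;

        /* A synchronous request would release the mapping right after the write-back; an
           asynchronous HGCM call instead hands it over to the command (pvReqLocked/ReqMapLock),
           and vmmdevHGCMCmdFree releases it once the command is completed or destroyed. */
        PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock.Lock);
    }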