Changeset 76146 in vbox for trunk/src/VBox/Additions
Timestamp:  Dec 11, 2018 5:22:18 AM
Location:   trunk/src/VBox/Additions/os2/VBoxSF
Files:      4 edited
Legend:
    In the diffs below, unchanged context lines are unprefixed, added lines are
    prefixed with "+", and removed lines are prefixed with "-".
trunk/src/VBox/Additions/os2/VBoxSF/VBoxSF.cpp
--- r76143
+++ r76146
@@ -506 +506 @@
     {
         pDst->u16Length = (uint16_t)RTUtf16Len(pDst->String.utf16) * (uint16_t)sizeof(RTUTF16);
+        Assert(pDst->u16Length < pDst->u16Size);
+        pDst->u16Size   = pDst->u16Length + (uint16_t)sizeof(RTUTF16); /* (limit how much is copied to the host) */
         *ppStr = pDst;
         return NO_ERROR;
@@ -512 +514 @@
 
     /*
-     * This shouldn't happen, but just in case we try again with
-     * twice the buffer size.
+     * This shouldn't happen, but just in case we try again with twice
+     * the buffer size.
      */
     if (rc == 0x20412 /*ULS_BUFFERFULL*/)
@@ -524 +526 @@
     {
         pDst->u16Length = (uint16_t)RTUtf16Len(pDst->String.utf16) * (uint16_t)sizeof(RTUTF16);
+        Assert(pDst->u16Length < pDst->u16Size);
+        pDst->u16Size   = pDst->u16Length + (uint16_t)sizeof(RTUTF16);
         *ppStr = pDst;
         return NO_ERROR;
@@ -572 +576 @@
     {
         RT_BZERO(pvBuf, offStrInBuf);
-
         PSHFLSTRING pDst = (PSHFLSTRING)((uint8_t *)pvBuf + offStrInBuf);
+
         APIRET rc = KernStrToUcs(NULL, &pDst->String.utf16[0], (char *)pszFolderPath, cchSrc + 4, cchSrc);
         if (rc == NO_ERROR)
         {
             pDst->u16Length = (uint16_t)RTUtf16Len(pDst->String.utf16) * (uint16_t)sizeof(RTUTF16);
-            pDst->u16Size   = (uint16_t)((cchSrc + 4) * sizeof(RTUTF16));
-            Assert(pDst->u16Length < pDst->u16Size);
+            Assert(pDst->u16Length < (cchSrc + 4) * sizeof(RTUTF16));
+            pDst->u16Size   = pDst->u16Length + (uint16_t)sizeof(RTUTF16); /* (limit how much is copied to the host) */
             *ppvBuf = pvBuf;
             return NO_ERROR;
@@ -585 +589 @@
         VbglR0PhysHeapFree(pvBuf);
 
-#if 0
         /*
-         * This shouldn't happen, but just in case we try again with
-         * twice the buffer size.
+         * This shouldn't happen, but just in case we try again with twice
+         * the buffer size.
          */
         if (rc == 0x20412 /*ULS_BUFFERFULL*/)
         {
-            pDst = vboxSfOs2StrAlloc((cchSrc + 16) * 2);
-            if (pDst)
-            {
+            pvBuf = VbglR0PhysHeapAlloc(offStrInBuf + SHFLSTRING_HEADER_SIZE + (cchSrc + 16) * sizeof(RTUTF16) * 2);
+            if (pvBuf)
+            {
+                RT_BZERO(pvBuf, offStrInBuf);
+                pDst = (PSHFLSTRING)((uint8_t *)pvBuf + offStrInBuf);
+
                 rc = KernStrToUcs(NULL, pDst->String.utf16, (char *)pszFolderPath, (cchSrc + 16) * 2, cchSrc);
                 if (rc == NO_ERROR)
                 {
                     pDst->u16Length = (uint16_t)RTUtf16Len(pDst->String.utf16) * (uint16_t)sizeof(RTUTF16);
-                    *ppStr = pDst;
+                    Assert(pDst->u16Length < (cchSrc + 16) * 2 * sizeof(RTUTF16));
+                    pDst->u16Size   = pDst->u16Length + (uint16_t)sizeof(RTUTF16);
+                    *ppvBuf = pvBuf;
                     return NO_ERROR;
@@ -607 +615 @@
                 }
             }
             else
-#endif
                 LogRel(("vboxSfOs2ConvertPath: KernStrToUcs returns %#x for %.*Rhxs\n", rc, cchSrc, pszFolderPath));
     }
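The net effect of the VBoxSF.cpp change: u16Size is trimmed from the full allocation down to the converted length plus one UTF-16 terminator, so the host copies only the used part of the string buffer. A minimal sketch of the resulting SHFLSTRING invariant (the helper name is illustrative, not part of the source):

    /* Sketch only: the SHFLSTRING sizing invariant this change establishes.
       shflStringTrimSize() is a hypothetical name used for illustration. */
    static void shflStringTrimSize(PSHFLSTRING pStr)
    {
        /* u16Length: payload in bytes, excluding the terminator. */
        pStr->u16Length = (uint16_t)(RTUtf16Len(pStr->String.utf16) * sizeof(RTUTF16));
        /* u16Size: bytes the host may copy, i.e. payload plus terminator. */
        pStr->u16Size   = pStr->u16Length + (uint16_t)sizeof(RTUTF16);
    }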
trunk/src/VBox/Additions/os2/VBoxSF/VBoxSFFile.cpp
--- r76143
+++ r76146
@@ -40 +40 @@
 #include <iprt/assert.h>
 #include <iprt/mem.h>
+
+
+/*********************************************************************************************************************************
+*   Structures and Typedefs                                                                                                      *
+*********************************************************************************************************************************/
+/** A preallocated buffer. */
+typedef struct
+{
+    RTCCPHYS        PhysAddr;
+    void           *pvBuf;
+    bool volatile   fBusy;
+} VBOXSFOS2BUF;
+
+
+/*********************************************************************************************************************************
+*   Global Variables                                                                                                             *
+*********************************************************************************************************************************/
+/** Buffer spinlock. */
+static SpinLock_t   g_BufferLock;
+/** 64KB buffers. */
+static VBOXSFOS2BUF g_aBigBuffers[4];
+
+
+
+/**
+ * Initialize file buffers.
+ */
+void vboxSfOs2InitFileBuffers(void)
+{
+    KernAllocSpinLock(&g_BufferLock);
+
+    for (uint32_t i = 0; i < RT_ELEMENTS(g_aBigBuffers); i++)
+    {
+        g_aBigBuffers[i].pvBuf = RTMemContAlloc(&g_aBigBuffers[i].PhysAddr, _64K);
+        g_aBigBuffers[i].fBusy = g_aBigBuffers[i].pvBuf == NULL;
+    }
+}
+
+
+/**
+ * Allocates a big buffer.
+ * @returns Pointer to buffer on success, NULL on failure.
+ * @param   pPhysAddr   The physical address of the buffer.
+ */
+DECLINLINE(void *) vboxSfOs2AllocBigBuffer(RTGCPHYS *pPhysAddr)
+{
+    KernAcquireSpinLock(&g_BufferLock);
+    for (uint32_t i = 0; i < RT_ELEMENTS(g_aBigBuffers); i++)
+        if (!g_aBigBuffers[i].fBusy)
+        {
+            g_aBigBuffers[i].fBusy = true;
+            KernReleaseSpinLock(&g_BufferLock);
+
+            *pPhysAddr = g_aBigBuffers[i].PhysAddr;
+            return g_aBigBuffers[i].pvBuf;
+        }
+    KernReleaseSpinLock(&g_BufferLock);
+    *pPhysAddr = NIL_RTGCPHYS;
+    return NULL;
+}
+
+
+/**
+ * Frees a big buffer.
+ * @param   pvBuf       The address of the buffer to be freed.
+ */
+DECLINLINE(void) vboxSfOs2FreeBigBuffer(void *pvBuf)
+{
+    Assert(pvBuf);
+    KernAcquireSpinLock(&g_BufferLock);
+    for (uint32_t i = 0; i < RT_ELEMENTS(g_aBigBuffers); i++)
+        if (g_aBigBuffers[i].pvBuf == pvBuf)
+        {
+            Assert(g_aBigBuffers[i].fBusy);
+            g_aBigBuffers[i].fBusy = false;
+            KernReleaseSpinLock(&g_BufferLock);
+            return;
+        }
+    KernReleaseSpinLock(&g_BufferLock);
+    AssertFailed();
+}
@@ -986 +1067 @@
 
 
+/**
+ * Convert KernVMLock page list to HGCM page list.
+ *
+ * The trouble is that it combines pages.
+ */
+static void vboxSfOs2ConvertPageList(KernPageList_t volatile *paSrc, RTGCPHYS64 volatile *paDst, ULONG cSrc, uint32_t cDst)
+{
+    LogFlow(("vboxSfOs2ConvertPageList: %d vs %d\n", cSrc, cDst));
+
+    /* If the lists have identical lengths, the job is easy. */
+    if (cSrc == cDst)
+        for (uint32_t i = 0; i < cSrc; i++)
+            paDst[i] &= ~(uint32_t)PAGE_OFFSET_MASK;
+    else
+    {
+        Assert(cSrc <= cDst);
+        Assert(cSrc > 0);
+
+        /*
+         * We have fewer source entries than destination pages, so something needs
+         * expanding.  The fact that the first and last pages might be partial ones
+         * makes this more interesting.  We have to do it backwards, of course.
+         */
+
+        /* Deal with the partial page stuff first. */
+        paSrc[0].Size += paSrc[0].Addr & PAGE_OFFSET_MASK;
+        paSrc[0].Addr &= ~(ULONG)PAGE_OFFSET_MASK;
+        paSrc[cSrc - 1].Size = RT_ALIGN_32(paSrc[cSrc - 1].Size, PAGE_SIZE);
+
+        /* Then go do work on the conversion. */
+        uint32_t iDst = cDst;
+        uint32_t iSrc = cSrc;
+        while (iSrc-- > 0)
+        {
+            ULONG cbSrc    = paSrc[iSrc].Size;
+            ULONG uAddrSrc = paSrc[iSrc].Addr + cbSrc;
+            Assert(!(cbSrc & PAGE_OFFSET_MASK));
+            Assert(!(uAddrSrc & PAGE_OFFSET_MASK));
+            while (cbSrc > 0)
+            {
+                uAddrSrc      -= PAGE_SIZE;
+                Assert(iDst > 0);
+                paDst[--iDst]  = uAddrSrc;
+                cbSrc         -= PAGE_SIZE;
+            }
+        }
+        Assert(iDst == 0);
+    }
+}
+
+
+/**
+ * Helper for FS32_READ.
+ */
+DECLINLINE(uint32_t) vboxSfOs2ReadFinalize(PSFFSI pSfFsi, uint64_t offRead, uint32_t cbActual)
+{
+    pSfFsi->sfi_positionl = offRead + cbActual;
+    if ((uint64_t)pSfFsi->sfi_sizel < offRead + cbActual)
+        pSfFsi->sfi_sizel = offRead + cbActual;
+    pSfFsi->sfi_tstamp   |= ST_SREAD | ST_PREAD;
+    return cbActual;
+}
+
+
 extern "C" APIRET APIENTRY
 FS32_READ(PSFFSI pSfFsi, PVBOXSFSYFI pSfFsd, PVOID pvData, PULONG pcb, ULONG fIoFlags)
@@ -992 +1137 @@
 
     /*
-     * Validate input.
+     * Validate and extract input.
      */
     AssertReturn(pSfFsd->u32Magic == VBOXSFSYFI_MAGIC, ERROR_SYS_INTERNAL);
@@ -1002 +1147 @@
     RT_NOREF(pFolder);
 
-    /*
-     * If the read request is small enough, go thru a temporary buffer to
-     * avoid locking/unlocking user memory.
-     */
     uint64_t const offRead  = pSfFsi->sfi_positionl;
     uint32_t const cbToRead = *pcb;
     uint32_t       cbActual = cbToRead;
-#if 0 /** @todo debug some other day. */
-    if (cbToRead <= _8K - ALLOC_HDR_SIZE - RT_UOFFSETOF(VBOXSFREADEMBEDDEDREQ, abData[0]))
-    {
-        size_t cbReq = RT_UOFFSETOF(VBOXSFREADEMBEDDEDREQ, abData[0]) + RT_ALIGN_32(cbToRead, 4);
+
+    /*
+     * We'll try embedded buffers for reads smaller than ~2KB if we get
+     * a heap block that's entirely within one page so the host can lock it
+     * and avoid bouncing it off the heap on completion.
+     */
+    if (cbToRead <= _2K)
+    {
+        size_t cbReq = RT_UOFFSETOF(VBOXSFREADEMBEDDEDREQ, abData[0]) + cbToRead;
         VBOXSFREADEMBEDDEDREQ *pReq = (VBOXSFREADEMBEDDEDREQ *)VbglR0PhysHeapAlloc(cbReq);
-        if ( pReq != NULL)
-        {
-            RT_BZERO(pReq, cbReq);
-
+        if (   pReq != NULL
+            && (   PAGE_SIZE - (PAGE_OFFSET_MASK & (uintptr_t)pReq) >= cbReq
+                || cbToRead == 0))
+        {
             APIRET rc;
             int vrc = vboxSfOs2HostReqReadEmbedded(pFolder, pReq, pSfFsd->hHostFile, offRead, cbToRead);
@@ -1027 +1173 @@
             if (rc == NO_ERROR)
             {
-                *pcb = cbActual;
-                pSfFsi->sfi_positionl = offRead + cbActual;
-                if ((uint64_t)pSfFsi->sfi_sizel < offRead + cbActual)
-                    pSfFsi->sfi_sizel = offRead + cbActual;
-                pSfFsi->sfi_tstamp |= ST_SREAD | ST_PREAD;
-                LogFlow(("FS32_READ: returns; cbActual=%#x sfi_positionl=%RI64 [copy]\n", cbActual, pSfFsi->sfi_positionl));
+                *pcb = vboxSfOs2ReadFinalize(pSfFsi, offRead, cbActual);
+                LogFlow(("FS32_READ: returns; cbActual=%#x sfi_positionl=%RI64 [embedded]\n", cbActual, pSfFsi->sfi_positionl));
             }
         }
         else
         {
-            Log(("FS32_READ: vboxSfOs2HostReqReadEmbedded(off=%#RU64,cb=%#x) -> %Rrc [copy]\n", offRead, cbToRead, vrc));
+            Log(("FS32_READ: vboxSfOs2HostReqReadEmbedded(off=%#RU64,cb=%#x) -> %Rrc [embedded]\n", offRead, cbToRead, vrc));
             rc = ERROR_BAD_NET_RESP;
         }
@@ -1043 +1185 @@
             return rc;
         }
-    }
-#endif
-
-
-    /*
-     * Do the read directly on the buffer, Vbgl will do the locking for us.
-     */
-    int vrc = VbglR0SfRead(&g_SfClient, &pFolder->hHostFolder, pSfFsd->hHostFile,
-                           offRead, &cbActual, (uint8_t *)pvData, false /*fLocked*/);
-    if (RT_SUCCESS(vrc))
-    {
-        AssertStmt(cbActual <= cbToRead, cbActual = cbToRead);
-        *pcb = cbActual;
-        pSfFsi->sfi_positionl = offRead + cbActual;
-        if ((uint64_t)pSfFsi->sfi_sizel < offRead + cbActual)
-            pSfFsi->sfi_sizel = offRead + cbActual;
-        pSfFsi->sfi_tstamp |= ST_SREAD | ST_PREAD;
-        LogFlow(("FS32_READ: returns; cbActual=%#x sfi_positionl=%RI64 [direct]\n", cbActual, pSfFsi->sfi_positionl));
-        return NO_ERROR;
-    }
-    Log(("FS32_READ: VbglR0SfRead(off=%#RU64,cb=%#x) -> %Rrc [direct]\n", offRead, cbToRead, vrc));
+        if (pReq)
+            VbglR0PhysHeapFree(pReq);
+    }
+
+    /*
+     * Whatever we do now we're going to use a page list request.
+     * So, allocate one with sufficient space to cover the whole buffer.
+     */
+    uint32_t cPages = ((cbToRead + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
+    VBOXSFREADPGLSTREQ *pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[cPages]));
+    if (pReq)
+    { /* likely */ }
+    else
+    {
+        LogRel(("FS32_READ: Out of memory for page list request (%u pages)\n", cPages));
+        return ERROR_NOT_ENOUGH_MEMORY;
+    }
+
+    /*
+     * If the request is 16KB or smaller, we try to bounce it off the
+     * physical heap (slab size is 64KB).  For requests up to 64KB we try to use
+     * one of a handful of preallocated big buffers rather than the phys heap.
+     */
+    if (cbToRead <= _64K)
+    {
+        RTGCPHYS GCPhys;
+        void    *pvBuf = NULL;
+        if (cbToRead <= _16K)
+        {
+            pvBuf  = VbglR0PhysHeapAlloc(cbToRead);
+            GCPhys = pvBuf ? VbglR0PhysHeapGetPhysAddr(pvBuf) : NIL_RTGCPHYS;
+        }
+        else
+            pvBuf = vboxSfOs2AllocBigBuffer(&GCPhys);
+        if (pvBuf)
+        {
+            pReq->PgLst.offFirstPage = (uint16_t)GCPhys & (uint16_t)PAGE_OFFSET_MASK;
+            cPages = (cbToRead + ((uint16_t)GCPhys & (uint16_t)PAGE_OFFSET_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+            GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
+            for (uint32_t i = 0; i < cPages; i++, GCPhys += PAGE_SIZE)
+                pReq->PgLst.aPages[i] = GCPhys;
+
+            APIRET rc;
+            int vrc = vboxSfOs2HostReqReadPgLst(pFolder, pReq, pSfFsd->hHostFile, offRead, cbToRead, cPages);
+            if (RT_SUCCESS(vrc))
+            {
+                cbActual = pReq->Parms.cb32Read.u.value32;
+                AssertStmt(cbActual <= cbToRead, cbActual = cbToRead);
+                rc = KernCopyOut(pvData, pvBuf, cbActual);
+                if (rc == NO_ERROR)
+                {
+                    *pcb = vboxSfOs2ReadFinalize(pSfFsi, offRead, cbActual);
+                    LogFlow(("FS32_READ: returns; cbActual=%#x sfi_positionl=%RI64 [bounced]\n", cbActual, pSfFsi->sfi_positionl));
+                }
+            }
+            else
+            {
+                Log(("FS32_READ: vboxSfOs2HostReqReadPgLst(off=%#RU64,cb=%#x) -> %Rrc [bounced]\n", offRead, cbToRead, vrc));
+                rc = ERROR_BAD_NET_RESP;
+            }
+
+            if (cbToRead <= _16K)
+                VbglR0PhysHeapFree(pvBuf);
+            else
+                vboxSfOs2FreeBigBuffer(pvBuf);
+            VbglR0PhysHeapFree(pReq);
+            return rc;
+        }
+    }
+
+    /*
+     * We couldn't use a bounce buffer for it, so lock the buffer pages.
+     */
+    KernVMLock_t Lock;
+    ULONG        cPagesRet;
+    AssertCompile(sizeof(KernPageList_t) == sizeof(pReq->PgLst.aPages[0]));
+    APIRET rc = KernVMLock(VMDHL_LONG | VMDHL_WRITE, (void *)pvData, cbToRead, &Lock,
+                           (KernPageList_t *)&pReq->PgLst.aPages[0], &cPagesRet);
+    if (rc == NO_ERROR)
+    {
+        pReq->PgLst.offFirstPage = (uint16_t)(uintptr_t)pvData & (uint16_t)PAGE_OFFSET_MASK;
+        cPages = (cbToRead + ((uint16_t)(uintptr_t)pvData & (uint16_t)PAGE_OFFSET_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+        vboxSfOs2ConvertPageList((KernPageList_t volatile *)&pReq->PgLst.aPages[0], &pReq->PgLst.aPages[0], cPagesRet, cPages);
+
+        int vrc = vboxSfOs2HostReqReadPgLst(pFolder, pReq, pSfFsd->hHostFile, offRead, cbToRead, cPages);
+        if (RT_SUCCESS(vrc))
+        {
+            cbActual = pReq->Parms.cb32Read.u.value32;
+            AssertStmt(cbActual <= cbToRead, cbActual = cbToRead);
+            *pcb = vboxSfOs2ReadFinalize(pSfFsi, offRead, cbActual);
+            LogFlow(("FS32_READ: returns; cbActual=%#x sfi_positionl=%RI64 [locked]\n", cbActual, pSfFsi->sfi_positionl));
+        }
+        else
+        {
+            Log(("FS32_READ: vboxSfOs2HostReqReadPgLst(off=%#RU64,cb=%#x) -> %Rrc [locked]\n", offRead, cbToRead, vrc));
+            rc = ERROR_BAD_NET_RESP;
+        }
+
+        KernVMUnlock(&Lock);
+    }
+    else
+        Log(("FS32_READ: KernVMLock(,%p,%#x,) failed -> %u\n", pvData, cbToRead, rc));
+    VbglR0PhysHeapFree(pReq);
     RT_NOREF_PV(fIoFlags);
-    return ERROR_BAD_NET_RESP;
+    return rc;
+}
+
+
+/**
+ * Helper for FS32_WRITE.
+ */
+DECLINLINE(uint32_t) vboxSfOs2WriteFinalize(PSFFSI pSfFsi, uint64_t offWrite, uint32_t cbActual)
+{
+    pSfFsi->sfi_positionl = offWrite + cbActual;
+    if ((uint64_t)pSfFsi->sfi_sizel < offWrite + cbActual)
+        pSfFsi->sfi_sizel = offWrite + cbActual;
+    pSfFsi->sfi_tstamp   |= ST_SWRITE | ST_PWRITE;
+    return cbActual;
 }
@@ -1073 +1311 @@
 {
     /*
-     * Validate input.
+     * Validate and extract input.
      */
     AssertReturn(pSfFsd->u32Magic == VBOXSFSYFI_MAGIC, ERROR_SYS_INTERNAL);
@@ -1083 +1321 @@
     RT_NOREF(pFolder);
 
-    /*
-     * If the write request is small enough, go thru a temporary buffer to
-     * avoid locking/unlocking user memory.
-     */
-    uint64_t offWrite = pSfFsi->sfi_positionl;
-    uint32_t cbWrite  = *pcb;
-    uint32_t cbActual = cbWrite;
-    if (cbWrite <= _8K - ALLOC_HDR_SIZE)
-    {
-        void *pvBuf = VbglR0PhysHeapAlloc(cbWrite);
-        if (pvBuf != NULL)
-        {
-            APIRET rc = KernCopyIn(pvBuf, pvData, cbWrite);
+    uint64_t offWrite  = pSfFsi->sfi_positionl;
+    uint32_t cbToWrite = *pcb;
+    uint32_t cbActual  = cbToWrite;
+
+    /*
+     * We'll try embedded buffers for writes smaller than ~2KB if we get
+     * a heap block that's entirely within one page so the host can lock it
+     * and avoid bouncing it off the heap on completion.
+     */
+    if (cbToWrite <= _2K)
+    {
+        size_t cbReq = RT_UOFFSETOF(VBOXSFWRITEEMBEDDEDREQ, abData[0]) + cbToWrite;
+        VBOXSFWRITEEMBEDDEDREQ *pReq = (VBOXSFWRITEEMBEDDEDREQ *)VbglR0PhysHeapAlloc(cbReq);
+        if (   pReq != NULL
+            && (   PAGE_SIZE - (PAGE_OFFSET_MASK & (uintptr_t)pReq) >= cbReq
+                || cbToWrite == 0))
+        {
+            APIRET rc = KernCopyIn(&pReq->abData[0], pvData, cbToWrite);
             if (rc == NO_ERROR)
             {
-                int vrc = VbglR0SfWrite(&g_SfClient, &pFolder->hHostFolder, pSfFsd->hHostFile,
-                                        offWrite, &cbActual, (uint8_t *)pvBuf, true /*fLocked*/);
+                int vrc = vboxSfOs2HostReqWriteEmbedded(pFolder, pReq, pSfFsd->hHostFile, offWrite, cbToWrite);
                 if (RT_SUCCESS(vrc))
                 {
-                    AssertStmt(cbActual <= cbWrite, cbActual = cbWrite);
-                    *pcb = cbActual;
-                    pSfFsi->sfi_positionl = offWrite + cbActual;
-                    if ((uint64_t)pSfFsi->sfi_sizel < offWrite + cbActual)
-                        pSfFsi->sfi_sizel = offWrite + cbActual;
-                    pSfFsi->sfi_tstamp |= ST_SWRITE | ST_PWRITE;
-                    LogFlow(("FS32_READ: returns; cbActual=%#x sfi_positionl=%RI64 [copy]\n", cbActual, pSfFsi->sfi_positionl));
+                    cbActual = pReq->Parms.cb32Write.u.value32;
+                    AssertStmt(cbActual <= cbToWrite, cbActual = cbToWrite);
+                    *pcb = vboxSfOs2WriteFinalize(pSfFsi, offWrite, cbActual);
+                    LogFlow(("FS32_WRITE: returns; cbActual=%#x sfi_positionl=%RI64 [embedded]\n", cbActual, pSfFsi->sfi_positionl));
                 }
                 else
                 {
-                    Log(("FS32_READ: VbglR0SfWrite(off=%#x,cb=%#x) -> %Rrc [copy]\n", offWrite, cbWrite, vrc));
+                    Log(("FS32_WRITE: vboxSfOs2HostReqWriteEmbedded(off=%#RU64,cb=%#x) -> %Rrc [embedded]\n", offWrite, cbToWrite, vrc));
                     rc = ERROR_BAD_NET_RESP;
                 }
             }
-            VbglR0PhysHeapFree(pvBuf);
+            VbglR0PhysHeapFree(pReq);
             return rc;
         }
-    }
-
-    /*
-     * Do the write directly on the buffer, Vbgl will do the locking for us.
-     */
-    int vrc = VbglR0SfWrite(&g_SfClient, &pFolder->hHostFolder, pSfFsd->hHostFile,
-                            offWrite, &cbActual, (uint8_t *)pvData, false /*fLocked*/);
-    if (RT_SUCCESS(vrc))
-    {
-        AssertStmt(cbActual <= cbWrite, cbActual = cbWrite);
-        *pcb = cbActual;
-        pSfFsi->sfi_positionl = offWrite + cbActual;
-        if ((uint64_t)pSfFsi->sfi_sizel < offWrite + cbActual)
-            pSfFsi->sfi_sizel = offWrite + cbActual;
-        pSfFsi->sfi_tstamp |= ST_SWRITE | ST_PWRITE;
-        LogFlow(("FS32_READ: returns; cbActual=%#x sfi_positionl=%RI64 [direct]\n", cbActual, pSfFsi->sfi_positionl));
-        return NO_ERROR;
-    }
-    Log(("FS32_READ: VbglR0SfWrite(off=%#x,cb=%#x) -> %Rrc [direct]\n", offWrite, cbWrite, vrc));
+        if (pReq)
+            VbglR0PhysHeapFree(pReq);
+    }
+
+    /*
+     * Whatever we do now we're going to use a page list request.
+     * So, allocate one with sufficient space to cover the whole buffer.
+     */
+    uint32_t cPages = ((cbToWrite + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
+    VBOXSFWRITEPGLSTREQ *pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ, PgLst.aPages[cPages]));
+    if (pReq)
+    { /* likely */ }
+    else
+    {
+        LogRel(("FS32_WRITE: Out of memory for page list request (%u pages)\n", cPages));
+        return ERROR_NOT_ENOUGH_MEMORY;
+    }
+
+    /*
+     * If the request is 16KB or smaller, we try to bounce it off the
+     * physical heap (slab size is 64KB).  For requests up to 64KB we try to use
+     * one of a handful of preallocated big buffers rather than the phys heap.
+     */
+    if (cbToWrite <= _64K)
+    {
+        RTGCPHYS GCPhys;
+        void    *pvBuf = NULL;
+        if (cbToWrite <= _16K)
+        {
+            pvBuf  = VbglR0PhysHeapAlloc(cbToWrite);
+            GCPhys = pvBuf ? VbglR0PhysHeapGetPhysAddr(pvBuf) : NIL_RTGCPHYS;
+        }
+        else
+            pvBuf = vboxSfOs2AllocBigBuffer(&GCPhys);
+        if (pvBuf)
+        {
+            APIRET rc = KernCopyIn(pvBuf, pvData, cbToWrite);
+            if (rc == NO_ERROR)
+            {
+                pReq->PgLst.offFirstPage = (uint16_t)GCPhys & (uint16_t)PAGE_OFFSET_MASK;
+                cPages = (cbToWrite + ((uint16_t)GCPhys & (uint16_t)PAGE_OFFSET_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+                GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
+                for (uint32_t i = 0; i < cPages; i++, GCPhys += PAGE_SIZE)
+                    pReq->PgLst.aPages[i] = GCPhys;
+
+                int vrc = vboxSfOs2HostReqWritePgLst(pFolder, pReq, pSfFsd->hHostFile, offWrite, cbToWrite, cPages);
+                if (RT_SUCCESS(vrc))
+                {
+                    cbActual = pReq->Parms.cb32Write.u.value32;
+                    AssertStmt(cbActual <= cbToWrite, cbActual = cbToWrite);
+                    *pcb = vboxSfOs2WriteFinalize(pSfFsi, offWrite, cbActual);
+                    LogFlow(("FS32_WRITE: returns; cbActual=%#x sfi_positionl=%RI64 [bounced]\n", cbActual, pSfFsi->sfi_positionl));
+                }
+                else
+                {
+                    Log(("FS32_WRITE: vboxSfOs2HostReqWritePgLst(off=%#RU64,cb=%#x) -> %Rrc [bounced]\n", offWrite, cbToWrite, vrc));
+                    rc = ERROR_BAD_NET_RESP;
+                }
+            }
+
+            if (cbToWrite <= _16K)
+                VbglR0PhysHeapFree(pvBuf);
+            else
+                vboxSfOs2FreeBigBuffer(pvBuf);
+            VbglR0PhysHeapFree(pReq);
+            return rc;
+        }
+    }
+
+    /*
+     * We couldn't use a bounce buffer for it, so lock the buffer pages.
+     */
+    KernVMLock_t Lock;
+    ULONG        cPagesRet;
+    AssertCompile(sizeof(KernPageList_t) == sizeof(pReq->PgLst.aPages[0]));
+    APIRET rc = KernVMLock(VMDHL_LONG, (void *)pvData, cbToWrite, &Lock, (KernPageList_t *)&pReq->PgLst.aPages[0], &cPagesRet);
+    if (rc == NO_ERROR)
+    {
+        pReq->PgLst.offFirstPage = (uint16_t)(uintptr_t)pvData & (uint16_t)PAGE_OFFSET_MASK;
+        cPages = (cbToWrite + ((uint16_t)(uintptr_t)pvData & (uint16_t)PAGE_OFFSET_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+        vboxSfOs2ConvertPageList((KernPageList_t volatile *)&pReq->PgLst.aPages[0], &pReq->PgLst.aPages[0], cPagesRet, cPages);
+
+        int vrc = vboxSfOs2HostReqWritePgLst(pFolder, pReq, pSfFsd->hHostFile, offWrite, cbToWrite, cPages);
+        if (RT_SUCCESS(vrc))
+        {
+            cbActual = pReq->Parms.cb32Write.u.value32;
+            AssertStmt(cbActual <= cbToWrite, cbActual = cbToWrite);
+            *pcb = vboxSfOs2WriteFinalize(pSfFsi, offWrite, cbActual);
+            LogFlow(("FS32_WRITE: returns; cbActual=%#x sfi_positionl=%RI64 [locked]\n", cbActual, pSfFsi->sfi_positionl));
+        }
+        else
+        {
+            Log(("FS32_WRITE: vboxSfOs2HostReqWritePgLst(off=%#RU64,cb=%#x) -> %Rrc [locked]\n", offWrite, cbToWrite, vrc));
+            rc = ERROR_BAD_NET_RESP;
+        }
+
+        KernVMUnlock(&Lock);
+    }
+    else
+        Log(("FS32_WRITE: KernVMLock(,%p,%#x,) failed -> %u\n", pvData, cbToWrite, rc));
+    VbglR0PhysHeapFree(pReq);
     RT_NOREF_PV(fIoFlags);
-    return ERROR_BAD_NET_RESP;
+    return rc;
 }
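The rewritten FS32_READ and FS32_WRITE pick their transfer strategy purely from the request size, falling back to the next tier when an allocation fails. A hedged sketch of that decision ladder, with invented enum and helper names (the driver inlines this logic rather than calling a helper):

    /* Sketch only: the size-based strategy choice in FS32_READ/FS32_WRITE; names invented. */
    typedef enum
    {
        XFER_EMBEDDED,     /* data travels embedded in the HGCM request itself */
        XFER_BOUNCE_HEAP,  /* bounce via a VbglR0PhysHeapAlloc() block */
        XFER_BOUNCE_BIG,   /* bounce via one of the four preallocated 64KB buffers */
        XFER_LOCK_PAGES    /* KernVMLock() the user buffer and send a page list */
    } XFERSTRATEGY;

    static XFERSTRATEGY vboxSfOs2PickXferStrategy(uint32_t cb)
    {
        if (cb <= _2K)   /* also requires the heap block to sit entirely within one page */
            return XFER_EMBEDDED;
        if (cb <= _16K)  /* phys heap slabs are 64KB, so small bounces are cheap */
            return XFER_BOUNCE_HEAP;
        if (cb <= _64K)
            return XFER_BOUNCE_BIG;
        return XFER_LOCK_PAGES;
    }

The page-lock path is the final fallback for any size: it is always reachable because the page-list request itself has already been allocated before the bounce tiers are tried.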
trunk/src/VBox/Additions/os2/VBoxSF/VBoxSFInit.cpp
--- r75597
+++ r76146
@@ -104 +104 @@
         RTLogBackdoorPrintf("VBoxSFR0Init: Embedded buffers feature is missing. Upgrade to latest VirtualBox!\n");
 
+    /*
+     * Allocate some big buffers for reading and writing.
+     */
+    vboxSfOs2InitFileBuffers();
+
 #ifndef DONT_LOCK_SEGMENTS
     /*
trunk/src/VBox/Additions/os2/VBoxSF/VBoxSFInternal.h
--- r76143
+++ r76146
@@ -222 +222 @@
 extern VBGLSFCLIENT g_SfClient;
 
+void        vboxSfOs2InitFileBuffers(void);
 PSHFLSTRING vboxSfOs2StrAlloc(size_t cwcLength);
 PSHFLSTRING vboxSfOs2StrDup(PCSHFLSTRING pSrc);
@@ -798 +799 @@
 
 
-/** Request structure for vboxSfOs2HostReqRead. */
+/** Request structure for vboxSfOs2HostReqReadEmbedded. */
 typedef struct VBOXSFREADEMBEDDEDREQ
 {
@@ -808 +809 @@
 
 /**
- * SHFL_FN_INFORMATION[SHFL_INFO_GET | SHFL_INFO_FILE] request.
+ * SHFL_FN_READ request using embedded data buffer.
  */
 DECLINLINE(int) vboxSfOs2HostReqReadEmbedded(PVBOXSFFOLDER pFolder, VBOXSFREADEMBEDDEDREQ *pReq, uint64_t hHostFile,
@@ -840 +841 @@
 
 
+/** Request structure for vboxSfOs2HostReqReadPgLst. */
+typedef struct VBOXSFREADPGLSTREQ
+{
+    VBGLIOCIDCHGCMFASTCALL  Hdr;
+    VMMDevHGCMCall          Call;
+    VBoxSFParmRead          Parms;
+    HGCMPageListInfo        PgLst;
+} VBOXSFREADPGLSTREQ;
+
+/**
+ * SHFL_FN_READ request using page list for data buffer (caller populated).
+ */
+DECLINLINE(int) vboxSfOs2HostReqReadPgLst(PVBOXSFFOLDER pFolder, VBOXSFREADPGLSTREQ *pReq, uint64_t hHostFile,
+                                          uint64_t offRead, uint32_t cbToRead, uint32_t cPages)
+{
+    VBGLIOCIDCHGCMFASTCALL_INIT(&pReq->Hdr, VbglR0PhysHeapGetPhysAddr(pReq), &pReq->Call, g_SfClient.idClient,
+                                SHFL_FN_READ, SHFL_CPARMS_READ,
+                                RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[cPages]));
+
+    pReq->Parms.id32Root.type          = VMMDevHGCMParmType_32bit;
+    pReq->Parms.id32Root.u.value32     = pFolder->hHostFolder.root;
+
+    pReq->Parms.u64Handle.type         = VMMDevHGCMParmType_64bit;
+    pReq->Parms.u64Handle.u.value64    = hHostFile;
+
+    pReq->Parms.off64Read.type         = VMMDevHGCMParmType_64bit;
+    pReq->Parms.off64Read.u.value64    = offRead;
+
+    pReq->Parms.cb32Read.type          = VMMDevHGCMParmType_32bit;
+    pReq->Parms.cb32Read.u.value32     = cbToRead;
+
+    pReq->Parms.pBuf.type              = VMMDevHGCMParmType_PageList;
+    pReq->Parms.pBuf.u.PageList.size   = cbToRead;
+    pReq->Parms.pBuf.u.PageList.offset = RT_UOFFSETOF(VBOXSFREADEMBEDDEDREQ, abData[0]) - sizeof(VBGLIOCIDCHGCMFASTCALL);
+    pReq->PgLst.flags                  = VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST;
+    pReq->PgLst.cPages                 = (uint16_t)cPages;
+    AssertReturn(cPages <= UINT16_MAX, VERR_OUT_OF_RANGE);
+    /* caller sets offset */
+
+    int vrc = VbglR0HGCMFastCall(g_SfClient.handle, &pReq->Hdr,
+                                 RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[cPages]));
+    if (RT_SUCCESS(vrc))
+        vrc = pReq->Call.header.result;
+    return vrc;
+}
+
+
+
+/** Request structure for vboxSfOs2HostReqWriteEmbedded. */
+typedef struct VBOXSFWRITEEMBEDDEDREQ
+{
+    VBGLIOCIDCHGCMFASTCALL  Hdr;
+    VMMDevHGCMCall          Call;
+    VBoxSFParmWrite         Parms;
+    uint8_t                 abData[RT_FLEXIBLE_ARRAY];
+} VBOXSFWRITEEMBEDDEDREQ;
+
+/**
+ * SHFL_FN_WRITE request using embedded data buffer.
+ */
+DECLINLINE(int) vboxSfOs2HostReqWriteEmbedded(PVBOXSFFOLDER pFolder, VBOXSFWRITEEMBEDDEDREQ *pReq, uint64_t hHostFile,
+                                              uint64_t offWrite, uint32_t cbToWrite)
+{
+    VBGLIOCIDCHGCMFASTCALL_INIT(&pReq->Hdr, VbglR0PhysHeapGetPhysAddr(pReq), &pReq->Call, g_SfClient.idClient,
+                                SHFL_FN_WRITE, SHFL_CPARMS_WRITE, RT_UOFFSETOF(VBOXSFWRITEEMBEDDEDREQ, abData[0]) + cbToWrite);
+
+    pReq->Parms.id32Root.type           = VMMDevHGCMParmType_32bit;
+    pReq->Parms.id32Root.u.value32      = pFolder->hHostFolder.root;
+
+    pReq->Parms.u64Handle.type          = VMMDevHGCMParmType_64bit;
+    pReq->Parms.u64Handle.u.value64     = hHostFile;
+
+    pReq->Parms.off64Write.type         = VMMDevHGCMParmType_64bit;
+    pReq->Parms.off64Write.u.value64    = offWrite;
+
+    pReq->Parms.cb32Write.type          = VMMDevHGCMParmType_32bit;
+    pReq->Parms.cb32Write.u.value32     = cbToWrite;
+
+    pReq->Parms.pBuf.type               = VMMDevHGCMParmType_Embedded;
+    pReq->Parms.pBuf.u.Embedded.cbData  = cbToWrite;
+    pReq->Parms.pBuf.u.Embedded.offData = RT_UOFFSETOF(VBOXSFWRITEEMBEDDEDREQ, abData[0]) - sizeof(VBGLIOCIDCHGCMFASTCALL);
+    pReq->Parms.pBuf.u.Embedded.fFlags  = VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;
+
+    int vrc = VbglR0HGCMFastCall(g_SfClient.handle, &pReq->Hdr, RT_UOFFSETOF(VBOXSFWRITEEMBEDDEDREQ, abData[0]) + cbToWrite);
+    if (RT_SUCCESS(vrc))
+        vrc = pReq->Call.header.result;
+    return vrc;
+}
+
+
+/** Request structure for vboxSfOs2HostReqWritePgLst. */
+typedef struct VBOXSFWRITEPGLSTREQ
+{
+    VBGLIOCIDCHGCMFASTCALL  Hdr;
+    VMMDevHGCMCall          Call;
+    VBoxSFParmWrite         Parms;
+    HGCMPageListInfo        PgLst;
+} VBOXSFWRITEPGLSTREQ;
+
+/**
+ * SHFL_FN_WRITE request using page list for data buffer (caller populated).
+ */
+DECLINLINE(int) vboxSfOs2HostReqWritePgLst(PVBOXSFFOLDER pFolder, VBOXSFWRITEPGLSTREQ *pReq, uint64_t hHostFile,
+                                           uint64_t offWrite, uint32_t cbToWrite, uint32_t cPages)
+{
+    VBGLIOCIDCHGCMFASTCALL_INIT(&pReq->Hdr, VbglR0PhysHeapGetPhysAddr(pReq), &pReq->Call, g_SfClient.idClient,
+                                SHFL_FN_WRITE, SHFL_CPARMS_WRITE,
+                                RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ, PgLst.aPages[cPages]));
+
+    pReq->Parms.id32Root.type          = VMMDevHGCMParmType_32bit;
+    pReq->Parms.id32Root.u.value32     = pFolder->hHostFolder.root;
+
+    pReq->Parms.u64Handle.type         = VMMDevHGCMParmType_64bit;
+    pReq->Parms.u64Handle.u.value64    = hHostFile;
+
+    pReq->Parms.off64Write.type        = VMMDevHGCMParmType_64bit;
+    pReq->Parms.off64Write.u.value64   = offWrite;
+
+    pReq->Parms.cb32Write.type         = VMMDevHGCMParmType_32bit;
+    pReq->Parms.cb32Write.u.value32    = cbToWrite;
+
+    pReq->Parms.pBuf.type              = VMMDevHGCMParmType_PageList;
+    pReq->Parms.pBuf.u.PageList.size   = cbToWrite;
+    pReq->Parms.pBuf.u.PageList.offset = RT_UOFFSETOF(VBOXSFWRITEEMBEDDEDREQ, abData[0]) - sizeof(VBGLIOCIDCHGCMFASTCALL);
+    pReq->PgLst.flags                  = VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;
+    pReq->PgLst.cPages                 = (uint16_t)cPages;
+    AssertReturn(cPages <= UINT16_MAX, VERR_OUT_OF_RANGE);
+    /* caller sets offset */
+
+    int vrc = VbglR0HGCMFastCall(g_SfClient.handle, &pReq->Hdr,
+                                 RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ, PgLst.aPages[cPages]));
+    if (RT_SUCCESS(vrc))
+        vrc = pReq->Call.header.result;
+    return vrc;
+}
+
+
 /** @} */
 
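Callers of the new page-list helpers populate the page list themselves before issuing the request. A hedged usage sketch for vboxSfOs2HostReqReadPgLst, assuming a physically contiguous, page-aligned bounce buffer at GCPhysBuf and the usual pFolder/hHostFile context (error handling trimmed):

    /* Sketch only: driving a page-list read; GCPhysBuf, pFolder, hHostFile,
       offRead and cbToRead are assumed to come from the surrounding code. */
    uint32_t cPages = (cbToRead + PAGE_SIZE - 1) >> PAGE_SHIFT;  /* page-aligned buffer assumed */
    VBOXSFREADPGLSTREQ *pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(
                                   RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[cPages]));
    if (pReq)
    {
        pReq->PgLst.offFirstPage = 0;  /* data starts at the top of the first page */
        for (uint32_t i = 0; i < cPages; i++)
            pReq->PgLst.aPages[i] = GCPhysBuf + i * PAGE_SIZE;

        int vrc = vboxSfOs2HostReqReadPgLst(pFolder, pReq, hHostFile, offRead, cbToRead, cPages);
        if (RT_SUCCESS(vrc))
        {
            uint32_t cbRead = pReq->Parms.cb32Read.u.value32;  /* bytes actually read */
            /* ... consume cbRead bytes from the bounce buffer ... */
        }
        VbglR0PhysHeapFree(pReq);
    }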