Changeset 80428 in vbox for trunk/src/VBox/Devices/Graphics

- Timestamp: Aug 26, 2019, 4:09:49 PM (5 years ago)
- Location: trunk/src/VBox/Devices/Graphics
- Files: 4 edited
trunk/src/VBox/Devices/Graphics/DevVGA.cpp (r80396 → r80428)

     PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThis->IBase);
     PDMIBASE_RETURN_INTERFACE(pszIID, PDMIDISPLAYPORT, &pThis->IPort);
-#if defined(VBOX_WITH_HGSMI) && (defined(VBOX_WITH_VIDEOHWACCEL))
+#if defined(VBOX_WITH_HGSMI) && (defined(VBOX_WITH_VIDEOHWACCEL) || defined(VBOX_WITH_CRHGSMI))
     PDMIBASE_RETURN_INTERFACE(pszIID, PDMIDISPLAYVBVACALLBACKS, &pThis->IVBVACallbacks);
 #endif
…
     vbvaTimerCb(pThis);
 #endif
+
+    vboxCmdVBVATimerRefresh(pThis);

 #ifdef VBOX_WITH_VMSVGA
trunk/src/VBox/Devices/Graphics/DevVGA.h (r80396 → r80428)

 # endif

-#ifdef VBOX_WITH_HGSMI
 #define PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(_pcb) ( (PVGASTATE)((uint8_t *)(_pcb) - RT_OFFSETOF(VGASTATE, IVBVACallbacks)) )
-#endif
+
+DECLCALLBACK(int) vboxVDMACrHgsmiCommandCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface,
+                                                      PVBOXVDMACMD_CHROMIUM_CMD pCmd, int rc);
+DECLCALLBACK(int) vboxVDMACrHgsmiControlCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface,
+                                                      PVBOXVDMACMD_CHROMIUM_CTL pCmd, int rc);
+DECLCALLBACK(int) vboxCmdVBVACmdHostCtl(PPDMIDISPLAYVBVACALLBACKS pInterface,
+                                        struct VBOXCRCMDCTL *pCmd, uint32_t cbCmd,
+                                        PFNCRCTLCOMPLETION pfnCompletion,
+                                        void *pvCompletion);
+DECLCALLBACK(int) vboxCmdVBVACmdHostCtlSync(PPDMIDISPLAYVBVACALLBACKS pInterface,
+                                            struct VBOXCRCMDCTL *pCmd, uint32_t cbCmd);

 int vboxVBVASaveStateExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM);
…
 int vboxVDMASaveLoadDone(struct VBOXVDMAHOST *pVdma);
 # endif /* VBOX_WITH_VDMA */
+
+int  vboxCmdVBVACmdSubmit(PVGASTATE pVGAState);
+int  vboxCmdVBVACmdFlush(PVGASTATE pVGAState);
+int  vboxCmdVBVACmdCtl(PVGASTATE pVGAState, VBOXCMDVBVA_CTL RT_UNTRUSTED_VOLATILE_GUEST *pCtl, uint32_t cbCtl);
+void vboxCmdVBVATimerRefresh(PVGASTATE pVGAState);
+bool vboxCmdVBVAIsEnabled(PVGASTATE pVGAState);
 #endif /* VBOX_WITH_HGSMI */
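The PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE macro above is the classic container-of idiom: it recovers the owning VGASTATE from a pointer to its embedded IVBVACallbacks member. A minimal standalone illustration of the same arithmetic, with hypothetical types standing in for the VirtualBox ones:

#include <cstddef>
#include <cstdint>

struct Callbacks { void *pfnDummy; };
struct State     { int id; Callbacks IVBVACallbacks; };

// Subtract the member offset from the member pointer to get the container,
// exactly what PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE does with RT_OFFSETOF.
static State *stateFromCallbacks(Callbacks *pcb)
{
    return reinterpret_cast<State *>(
        reinterpret_cast<uint8_t *>(pcb) - offsetof(State, IVBVACallbacks));
}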
trunk/src/VBox/Devices/Graphics/DevVGA_VBVA.cpp (r80396 → r80428)

     if (pView->vbva.guest.pVBVA)
     {
+        Assert(!vboxCmdVBVAIsEnabled(pVGAState));
+
         int rc = vbvaEnable(iView, pVGAState, pCtx, pView->vbva.guest.pVBVA, pView->vbva.u32VBVAOffset, true /* fRestored */);
         if (RT_SUCCESS(rc))
…
     switch (u16ChannelInfo)
     {
+        case VBVA_CMDVBVA_SUBMIT:
+            rc = vboxCmdVBVACmdSubmit(pVGAState);
+            break;
+
+        case VBVA_CMDVBVA_FLUSH:
+            rc = vboxCmdVBVACmdFlush(pVGAState);
+            break;
+
+        case VBVA_CMDVBVA_CTL:
+            if (cbBuffer >= VBoxSHGSMIBufferHeaderSize() + sizeof(VBOXCMDVBVA_CTL))
+            {
+                VBOXCMDVBVA_CTL RT_UNTRUSTED_VOLATILE_GUEST *pCtl
+                    = (VBOXCMDVBVA_CTL RT_UNTRUSTED_VOLATILE_GUEST *)VBoxSHGSMIBufferData((VBOXSHGSMIHEADER RT_UNTRUSTED_VOLATILE_GUEST *)pvBuffer);
+                rc = vboxCmdVBVACmdCtl(pVGAState, pCtl, cbBuffer - VBoxSHGSMIBufferHeaderSize());
+            }
+            else
+                rc = VERR_INVALID_PARAMETER;
+            break;
+
 #ifdef VBOX_WITH_VDMA
         case VBVA_VDMA_CMD:
…
         if (cbBuffer >= sizeof(VBVAINFOVIEW))
         {
+            AssertMsgBreak(!vboxCmdVBVAIsEnabled(pVGAState), ("VBVA_INFO_VIEW is not acceptable for CmdVbva\n"));
+
             /* Guest submits an array of VBVAINFOVIEW structures. */
             const VBVAINFOVIEW RT_UNTRUSTED_VOLATILE_GUEST *pView = (VBVAINFOVIEW RT_UNTRUSTED_VOLATILE_GUEST *)pvBuffer;
…
         case VBVA_INFO_SCREEN:
             rc = VERR_INVALID_PARAMETER;
+            AssertMsgBreak(!vboxCmdVBVAIsEnabled(pVGAState), ("VBVA_INFO_SCREEN is not acceptable for CmdVbva\n"));
+
             if (cbBuffer >= sizeof(VBVAINFOSCREEN))
                 rc = VBVAInfoScreen(pVGAState, (VBVAINFOSCREEN RT_UNTRUSTED_VOLATILE_GUEST *)pvBuffer);
…
         case VBVA_ENABLE:
             rc = VERR_INVALID_PARAMETER;
+            AssertMsgBreak(!vboxCmdVBVAIsEnabled(pVGAState), ("VBVA_ENABLE is not acceptable for CmdVbva\n"));
+
             if (cbBuffer >= sizeof(VBVAENABLE))
             {
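The new VBVA_CMDVBVA_CTL case checks cbBuffer against the SHGSMI header plus the control structure before touching the payload. The same check in isolation, written overflow-safe, with hypothetical parameters standing in for VBoxSHGSMIBufferHeaderSize() and sizeof(VBOXCMDVBVA_CTL):

#include <cstdint>

// Returns a pointer to the control payload, or nullptr if the (guest-provided)
// buffer is too small to contain the header plus the control structure.
static const uint8_t *ctlFromBuffer(const uint8_t *pbBuffer, uint32_t cbBuffer,
                                    uint32_t cbHeader, uint32_t cbCtl)
{
    if (cbBuffer < cbHeader || cbBuffer - cbHeader < cbCtl)
        return nullptr;         // reject short buffers up front (VERR_INVALID_PARAMETER)
    return pbBuffer + cbHeader; // payload follows the SHGSMI header
}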
trunk/src/VBox/Devices/Graphics/DevVGA_VDMA.cpp (r80396 → r80428)

typedef DECLCALLBACKPTR(void, PFNVBOXVDMATHREAD_CHANGED)(struct VBOXVDMATHREAD *pThread, int rc, void *pvThreadContext, void *pvChangeContext);

static DECLCALLBACK(int) vboxCmdVBVACmdCallout(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL *pCmd, VBOXCRCMDCTL_CALLOUT_LISTENTRY *pEntry, PFNVBOXCRCMDCTL_CALLOUT_CB pfnCb);

typedef struct VBOXVDMATHREAD
{
…

typedef struct VBOXVDMA_SOURCE
{
    VBVAINFOSCREEN Screen;
    VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
} VBOXVDMA_SOURCE;

typedef struct VBOXVDMAHOST
{
    PHGSMIINSTANCE    pHgsmi; /**< Same as VGASTATE::pHgsmi. */
    PVGASTATE         pVGAState;
    VBVAEXHOSTCONTEXT CmdVbva;
    VBOXVDMATHREAD    Thread;
    VBOXCRCMD_SVRINFO CrSrvInfo;
    VBVAEXHOSTCTL    *pCurRemainingHostCtl;
    RTSEMEVENTMULTI   HostCrCtlCompleteEvent;
    int32_t volatile  i32cHostCrCtlCompleted;
    RTCRITSECT        CalloutCritSect;
    //VBOXVDMA_SOURCE aSources[VBOX_VIDEO_MAX_SCREENS];
#ifdef VBOX_VDMA_WITH_WATCHDOG
    PTMTIMERR3        WatchDogTimer;
…

/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
static int vdmaVBVANotifyDisable(PVGASTATE pVGAState);
static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd);
static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc);
static int VBoxVDMAThreadEventNotify(PVBOXVDMATHREAD pThread);
static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, VBOXVDMACMD_DMA_BPB_TRANSFER RT_UNTRUSTED_VOLATILE_GUEST *pTransfer,
                                      uint32_t cbBuffer);
static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL *pCtl, VBVAEXHOSTCTL_SOURCE enmSource);
static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl,
                                                          int rc, void *pvContext);

/* VBoxVBVAExHP**, i.e. the processor functions, can NOT be called concurrently with each other,
 * but can apparently be called concurrently with the other VBoxVBVAEx** functions, except Init/Start/Term. */


/**
 * Creates a host control command.
 */
static VBVAEXHOSTCTL *VBoxVBVAExHCtlCreate(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL_TYPE enmType)
{
# ifndef VBOXVDBG_MEMCACHE_DISABLE
    VBVAEXHOSTCTL *pCtl = (VBVAEXHOSTCTL *)RTMemCacheAlloc(pCmdVbva->CtlCache);
# else
    VBVAEXHOSTCTL *pCtl = (VBVAEXHOSTCTL *)RTMemAlloc(sizeof(VBVAEXHOSTCTL));
# endif
    if (pCtl)
    {
        RT_ZERO(*pCtl);
        pCtl->enmType = enmType;
    }
    else
        WARN(("VBoxVBVAExHCtlAlloc failed\n"));
    return pCtl;
}

/**
 * Destroys a host control command.
 */
static void VBoxVBVAExHCtlFree(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl)
{
# ifndef VBOXVDBG_MEMCACHE_DISABLE
    RTMemCacheFree(pCmdVbva->CtlCache, pCtl);
# else
    RTMemFree(pCtl);
# endif
}


/**
 * Works the VBVA state.
 */
static int vboxVBVAExHSProcessorAcquire(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);

    if (ASMAtomicCmpXchgS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_PROCESSING, VBVAEXHOSTCONTEXT_STATE_LISTENING))
        return VINF_SUCCESS;
    return VERR_SEM_BUSY;
}

/**
 * Worker for vboxVBVAExHPDataGetInner() and VBoxVBVAExHPCheckHostCtlOnDisable()
 * that gets the next control command.
 *
 * @returns Pointer to the command if found, NULL if not.
 * @param pCmdVbva      The VBVA command context.
 * @param pfHostCtl     Where to indicate whether it's a host or guest control command.
 * @param fHostOnlyMode Whether to only fetch host commands, or both.
 */
static VBVAEXHOSTCTL *vboxVBVAExHPCheckCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, bool *pfHostCtl, bool fHostOnlyMode)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);

    if (!fHostOnlyMode && !ASMAtomicUoReadU32(&pCmdVbva->u32cCtls))
        return NULL;

    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
        VBVAEXHOSTCTL *pCtl = RTListGetFirst(&pCmdVbva->HostCtlList, VBVAEXHOSTCTL, Node);
        if (pCtl)
            *pfHostCtl = true;
        else if (!fHostOnlyMode)
        {
            if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
            {
                pCtl = RTListGetFirst(&pCmdVbva->GuestCtlList, VBVAEXHOSTCTL, Node);
                /* pCtl cannot be NULL here since pCmdVbva->u32cCtls is non-zero
                 * and there are no HostCtl commands. */
                Assert(pCtl);
                *pfHostCtl = false;
            }
        }

        if (pCtl)
        {
            RTListNodeRemove(&pCtl->Node);
            ASMAtomicDecU32(&pCmdVbva->u32cCtls);
        }

        RTCritSectLeave(&pCmdVbva->CltCritSect);

        return pCtl;
    }
    else
        WARN(("RTCritSectEnter failed %Rrc\n", rc));

    return NULL;
}

/**
 * Worker for vboxVDMACrHgcmHandleEnableRemainingHostCommand().
 */
static VBVAEXHOSTCTL *VBoxVBVAExHPCheckHostCtlOnDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    bool fHostCtl = false;
    VBVAEXHOSTCTL *pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostCtl, true);
    Assert(!pCtl || fHostCtl);
    return pCtl;
}

/**
 * Worker for vboxVBVAExHPCheckProcessCtlInternal() and
 * vboxVDMACrGuestCtlProcess() / VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED.
 */
static int VBoxVBVAExHPPause(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    if (pCmdVbva->i32EnableState < VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    {
        WARN(("Invalid state\n"));
        return VERR_INVALID_STATE;
    }

    ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
    return VINF_SUCCESS;
}

/**
 * Works the VBVA state in response to VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME.
 */
static int VBoxVBVAExHPResume(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    if (pCmdVbva->i32EnableState != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    {
        WARN(("Invalid state\n"));
        return VERR_INVALID_STATE;
    }

    ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
    return VINF_SUCCESS;
}
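The acquire/release pair above is a lock-free way of guaranteeing a single processor thread. A minimal standalone sketch of the same pattern, using std::atomic in place of the IPRT ASMAtomic* helpers (all names here are illustrative, not part of the VirtualBox API):

#include <atomic>

enum State { LISTENING, PROCESSING };

struct Context { std::atomic<int> state{LISTENING}; };

// Try to become the (single) processor; mirrors vboxVBVAExHSProcessorAcquire().
static bool processorAcquire(Context &ctx)
{
    int expected = LISTENING;
    return ctx.state.compare_exchange_strong(expected, PROCESSING);
}

// Give up the processor role again; mirrors vboxVBVAExHPProcessorRelease().
static void processorRelease(Context &ctx)
{
    ctx.state.store(LISTENING);
}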
/**
 * Worker for vboxVBVAExHPDataGetInner() that processes PAUSE and RESUME requests.
 *
 * Unclear why these cannot be handled the normal way.
 *
 * @returns true if handled, false if not.
 * @param pCmdVbva The VBVA context.
 * @param pCtl     The host control command.
 */
static bool vboxVBVAExHPCheckProcessCtlInternal(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl)
{
    switch (pCtl->enmType)
    {
        case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE:
            VBoxVBVAExHPPause(pCmdVbva);
            VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
            return true;

        case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME:
            VBoxVBVAExHPResume(pCmdVbva);
            VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
            return true;

        default:
            return false;
    }
}

/**
 * Works the VBVA state.
 */
static void vboxVBVAExHPProcessorRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);

    ASMAtomicWriteS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
}

/**
 * Works the VBVA state.
 */
static void vboxVBVAExHPHgEventSet(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    if (pCmdVbva->pVBVA)
        ASMAtomicOrU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, VBVA_F_STATE_PROCESSING);
}

/**
 * Works the VBVA state.
 */
static void vboxVBVAExHPHgEventClear(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    if (pCmdVbva->pVBVA)
        ASMAtomicAndU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, ~VBVA_F_STATE_PROCESSING);
}

/**
 * Worker for vboxVBVAExHPDataGetInner().
 *
 * @retval VINF_SUCCESS
 * @retval VINF_EOF
 * @retval VINF_TRY_AGAIN
 * @retval VERR_INVALID_STATE
 *
 * @thread VDMA
 */
static int vboxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t RT_UNTRUSTED_VOLATILE_GUEST **ppbCmd, uint32_t *pcbCmd)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    Assert(pCmdVbva->i32EnableState > VBVAEXHOSTCONTEXT_ESTATE_PAUSED);

    VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *pVBVA = pCmdVbva->pVBVA; /* This is shared with the guest, so careful! */

    /*
     * Inspect records.
     */
    uint32_t idxRecordFirst = ASMAtomicUoReadU32(&pVBVA->indexRecordFirst);
    uint32_t idxRecordFree  = ASMAtomicReadU32(&pVBVA->indexRecordFree);
    RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    Log(("first = %d, free = %d\n", idxRecordFirst, idxRecordFree));
    if (idxRecordFirst == idxRecordFree)
        return VINF_EOF; /* No records to process. Return without assigning output variables. */
    AssertReturn(idxRecordFirst < VBVA_MAX_RECORDS, VERR_INVALID_STATE);
    RT_UNTRUSTED_VALIDATED_FENCE();

    /*
     * Read the record size and check that it has been completely recorded.
     */
    uint32_t const cbRecordCurrent = ASMAtomicReadU32(&pVBVA->aRecords[idxRecordFirst].cbRecord);
    uint32_t const cbRecord        = cbRecordCurrent & ~VBVA_F_RECORD_PARTIAL;
    RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    if (   (cbRecordCurrent & VBVA_F_RECORD_PARTIAL)
        || !cbRecord)
        return VINF_TRY_AGAIN; /* The record is still being recorded, try again. */
    Assert(cbRecord);

    /*
     * Get and validate the data area.
     */
    uint32_t const offData   = ASMAtomicReadU32(&pVBVA->off32Data);
    uint32_t       cbMaxData = ASMAtomicReadU32(&pVBVA->cbData);
    RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    AssertLogRelMsgStmt(cbMaxData <= pCmdVbva->cbMaxData, ("%#x vs %#x\n", cbMaxData, pCmdVbva->cbMaxData),
                        cbMaxData = pCmdVbva->cbMaxData);
    AssertLogRelMsgReturn(   cbRecord <= cbMaxData
                          && offData  <= cbMaxData - cbRecord,
                          ("offData=%#x cbRecord=%#x cbMaxData=%#x\n", offData, cbRecord, cbMaxData),
                          VERR_INVALID_STATE);
    RT_UNTRUSTED_VALIDATED_FENCE();

    /*
     * Just set the return values and we're done.
     */
    *ppbCmd = (uint8_t RT_UNTRUSTED_VOLATILE_GUEST *)&pVBVA->au8Data[offData];
    *pcbCmd = cbRecord;
    return VINF_SUCCESS;
}

/**
 * Completion routine advancing our end of the ring and data buffers forward.
 *
 * @param pCmdVbva The VBVA context.
 * @param cbCmd    The size of the data.
 */
static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd)
{
    VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *pVBVA = pCmdVbva->pVBVA;
    if (pVBVA)
    {
        /* Move data head. */
        uint32_t const cbData  = pVBVA->cbData;
        uint32_t const offData = pVBVA->off32Data;
        RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
        if (cbData > 0)
            ASMAtomicWriteU32(&pVBVA->off32Data, (offData + cbCmd) % cbData);
        else
            ASMAtomicWriteU32(&pVBVA->off32Data, 0);

        /* Increment record pointer. */
        uint32_t const idxRecFirst = pVBVA->indexRecordFirst;
        RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
        ASMAtomicWriteU32(&pVBVA->indexRecordFirst, (idxRecFirst + 1) % RT_ELEMENTS(pVBVA->aRecords));
    }
}

/**
 * Control command completion routine used by many.
 */
static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc)
{
    if (pCtl->pfnComplete)
        pCtl->pfnComplete(pCmdVbva, pCtl, rc, pCtl->pvComplete);
    else
        VBoxVBVAExHCtlFree(pCmdVbva, pCtl);
}
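vboxVBVAExHPCmdGet() is built around one rule for guest-shared memory: snapshot the volatile fields once, then bounds-check only the local copies. A minimal sketch of that rule with a hypothetical ring type (not the real VBVABUFFER layout):

#include <cstdint>

struct Ring { uint32_t off32Data; uint32_t cbData; uint8_t au8Data[4096]; };

// Returns the validated record offset into au8Data, or UINT32_MAX on failure.
static uint32_t ringValidateRecord(volatile Ring *pRing, uint32_t cbRecord)
{
    uint32_t const offData = pRing->off32Data; // local snapshots: the guest may keep
    uint32_t const cbData  = pRing->cbData;    // writing, but our copies cannot change
    if (cbData > sizeof(pRing->au8Data))
        return UINT32_MAX;
    if (cbRecord > cbData || offData > cbData - cbRecord) // overflow-safe bound check
        return UINT32_MAX;
    return offData;
}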
/**
 * Worker for VBoxVBVAExHPDataGet().
 * @thread VDMA
 */
static VBVAEXHOST_DATA_TYPE vboxVBVAExHPDataGetInner(struct VBVAEXHOSTCONTEXT *pCmdVbva,
                                                     uint8_t RT_UNTRUSTED_VOLATILE_GUEST **ppbCmd, uint32_t *pcbCmd)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    VBVAEXHOSTCTL *pCtl;
    bool fHostClt;

    for (;;)
    {
        pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostClt, false);
        if (pCtl)
        {
            if (fHostClt)
            {
                if (!vboxVBVAExHPCheckProcessCtlInternal(pCmdVbva, pCtl))
                {
                    *ppbCmd = (uint8_t RT_UNTRUSTED_VOLATILE_GUEST *)pCtl; /* Note! pCtl is host data, so trusted. */
                    *pcbCmd = sizeof(*pCtl);
                    return VBVAEXHOST_DATA_TYPE_HOSTCTL;
                }
                continue; /* Processed by vboxVBVAExHPCheckProcessCtlInternal, get next. */
            }
            *ppbCmd = (uint8_t RT_UNTRUSTED_VOLATILE_GUEST *)pCtl; /* Note! pCtl is host data, so trusted. */
            *pcbCmd = sizeof(*pCtl);
            return VBVAEXHOST_DATA_TYPE_GUESTCTL;
        }

        if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) <= VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
            return VBVAEXHOST_DATA_TYPE_NO_DATA;

        int rc = vboxVBVAExHPCmdGet(pCmdVbva, ppbCmd, pcbCmd);
        switch (rc)
        {
            case VINF_SUCCESS:
                return VBVAEXHOST_DATA_TYPE_CMD;
            case VINF_EOF:
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
            case VINF_TRY_AGAIN:
                RTThreadSleep(1);
                continue;
            default:
                /* This is something really unexpected, i.e. most likely the guest
                 * has written something incorrect to the VBVA buffer. */
                WARN(("Warning: vboxVBVAExHCmdGet returned unexpected status %Rrc\n", rc));
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
        }
    }
    /* not reached */
}

/**
 * Called by vboxVDMAWorkerThread to get the next command to process.
 * @thread VDMA
 */
static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva,
                                                uint8_t RT_UNTRUSTED_VOLATILE_GUEST **ppbCmd, uint32_t *pcbCmd)
{
    VBVAEXHOST_DATA_TYPE enmType = vboxVBVAExHPDataGetInner(pCmdVbva, ppbCmd, pcbCmd);
    if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
    {
        vboxVBVAExHPHgEventClear(pCmdVbva);
        vboxVBVAExHPProcessorRelease(pCmdVbva);

        /*
         * We need to prevent racing between us clearing the flag and the command check/submission thread, i.e.:
         *  1. we check the queue -> and it is empty
         *  2. the submitter adds a command to the queue
         *  3. the submitter checks the "processing" state -> it is true, so it does not submit a notification
         *  4. we clear the "processing" state
         *  5. -> here we need to re-check the queue state to ensure we do not lose the notification for the above command
         *  6. if the queue appears to be non-empty, set the "processing" state back to "true"
         */
        int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
        if (RT_SUCCESS(rc))
        {
            /* we are the processor now */
            enmType = vboxVBVAExHPDataGetInner(pCmdVbva, ppbCmd, pcbCmd);
            if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
            {
                vboxVBVAExHPProcessorRelease(pCmdVbva);
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
            }

            vboxVBVAExHPHgEventSet(pCmdVbva);
        }
    }

    return enmType;
}

/**
 * Checks for a pending VBVA command or (internal) control command.
 */
DECLINLINE(bool) vboxVBVAExHSHasCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *pVBVA = pCmdVbva->pVBVA;
    if (pVBVA)
    {
        uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
        uint32_t indexRecordFree  = pVBVA->indexRecordFree;
        RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();

        if (indexRecordFirst != indexRecordFree)
            return true;
    }

    return ASMAtomicReadU32(&pCmdVbva->u32cCtls) > 0;
}

/**
 * Checks whether new commands are ready for processing.
 *
 * @retval VINF_SUCCESS there are commands in the queue, and the calling thread is now
 *         the processor (i.e. typically it would delegate processing to a worker thread).
 * @retval VINF_EOF no commands in the queue.
 * @retval VINF_ALREADY_INITIALIZED another thread is already processing the commands.
 * @retval VERR_INVALID_STATE the VBVA is paused or pausing.
 */
static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
    if (RT_SUCCESS(rc))
    {
        /* we are the processor now */
        if (vboxVBVAExHSHasCommands(pCmdVbva))
        {
            vboxVBVAExHPHgEventSet(pCmdVbva);
            return VINF_SUCCESS;
        }

        vboxVBVAExHPProcessorRelease(pCmdVbva);
        return VINF_EOF;
    }
    if (rc == VERR_SEM_BUSY)
        return VINF_ALREADY_INITIALIZED;
    return VERR_INVALID_STATE;
}

/**
 * Worker for vboxVDMAConstruct() that initializes the given VBVA host context.
 */
static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    RT_ZERO(*pCmdVbva);
    int rc = RTCritSectInit(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
# ifndef VBOXVDBG_MEMCACHE_DISABLE
        rc = RTMemCacheCreate(&pCmdVbva->CtlCache, sizeof(VBVAEXHOSTCTL),
                              0,          /* size_t cbAlignment */
                              UINT32_MAX, /* uint32_t cMaxObjects */
                              NULL,       /* PFNMEMCACHECTOR pfnCtor */
                              NULL,       /* PFNMEMCACHEDTOR pfnDtor */
                              NULL,       /* void *pvUser */
                              0           /* uint32_t fFlags */
                              );
        if (RT_SUCCESS(rc))
# endif
        {
            RTListInit(&pCmdVbva->GuestCtlList);
            RTListInit(&pCmdVbva->HostCtlList);
            pCmdVbva->i32State       = VBVAEXHOSTCONTEXT_STATE_PROCESSING;
            pCmdVbva->i32EnableState = VBVAEXHOSTCONTEXT_ESTATE_DISABLED;
            return VINF_SUCCESS;
        }
# ifndef VBOXVDBG_MEMCACHE_DISABLE
        WARN(("RTMemCacheCreate failed %Rrc\n", rc));
# endif
    }
    else
        WARN(("RTCritSectInit failed %Rrc\n", rc));

    return rc;
}

/**
 * Checks if the VBVA state is some form of enabled.
 */
DECLINLINE(bool) VBoxVBVAExHSIsEnabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    return ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) >= VBVAEXHOSTCONTEXT_ESTATE_PAUSED;
}

/**
 * Checks if the VBVA state is disabled.
 */
DECLINLINE(bool) VBoxVBVAExHSIsDisabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    return ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) == VBVAEXHOSTCONTEXT_ESTATE_DISABLED;
}

/**
 * Worker for vdmaVBVAEnableProcess().
 *
 * @thread VDMA
 */
static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *pVBVA,
                              uint8_t *pbVRam, uint32_t cbVRam)
{
    if (VBoxVBVAExHSIsEnabled(pCmdVbva))
    {
        WARN(("VBVAEx is enabled already\n"));
        return VERR_INVALID_STATE;
    }

    uintptr_t offVRam = (uintptr_t)pVBVA - (uintptr_t)pbVRam;
    AssertLogRelMsgReturn(offVRam < cbVRam - sizeof(*pVBVA), ("%#p cbVRam=%#x\n", offVRam, cbVRam), VERR_OUT_OF_RANGE);
    RT_UNTRUSTED_VALIDATED_FENCE();

    pCmdVbva->pVBVA     = pVBVA;
    pCmdVbva->cbMaxData = cbVRam - offVRam - RT_UOFFSETOF(VBVABUFFER, au8Data);
    pVBVA->hostFlags.u32HostEvents = 0;
    ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
    return VINF_SUCCESS;
}

/**
 * Works the enable state.
 * @thread VDMA, CR, EMT, ...
 */
static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    if (VBoxVBVAExHSIsDisabled(pCmdVbva))
        return VINF_SUCCESS;

    ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
    return VINF_SUCCESS;
}

/**
 * Worker for vboxVDMADestruct() and vboxVDMAConstruct().
 */
static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    /* ensure the processor is stopped */
    Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);

    /* ensure no one tries to submit a command */
    if (pCmdVbva->pVBVA)
        pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;

    Assert(RTListIsEmpty(&pCmdVbva->GuestCtlList));
    Assert(RTListIsEmpty(&pCmdVbva->HostCtlList));

    RTCritSectDelete(&pCmdVbva->CltCritSect);

# ifndef VBOXVDBG_MEMCACHE_DISABLE
    RTMemCacheDestroy(pCmdVbva->CtlCache);
# endif

    RT_ZERO(*pCmdVbva);
}
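The numbered comment in VBoxVBVAExHPDataGet() describes a lost-wakeup race; the fix is to re-acquire the processor role and re-check the queue after clearing the flag. A self-contained sketch of that consumer-side pattern, with made-up names and std::atomic standing in for the IPRT primitives:

#include <atomic>
#include <deque>
#include <mutex>

struct Worker
{
    std::atomic<bool> fProcessing{false};
    std::mutex        lock;
    std::deque<int>   queue;

    bool tryBecomeProcessor()
    {
        bool fExpected = false;
        return fProcessing.compare_exchange_strong(fExpected, true);
    }

    void onDrained()
    {
        fProcessing.store(false);         // step 4: clear "processing"
        if (tryBecomeProcessor())         // step 5: re-acquire and re-check the queue
        {
            std::lock_guard<std::mutex> g(lock);
            if (queue.empty())
                fProcessing.store(false); // really nothing to do
            // else: keep the processor role and continue draining (step 6)
        }
    }
};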
/**
 * Worker for vboxVBVAExHSSaveStateLocked().
 * @thread VDMA
 */
static int vboxVBVAExHSSaveGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, uint8_t *pu8VramBase, PSSMHANDLE pSSM)
{
    RT_NOREF(pCmdVbva);
    int rc = SSMR3PutU32(pSSM, pCtl->enmType);
    AssertRCReturn(rc, rc);
    rc = SSMR3PutU32(pSSM, pCtl->u.cmd.cbCmd);
    AssertRCReturn(rc, rc);
    rc = SSMR3PutU32(pSSM, (uint32_t)((uintptr_t)pCtl->u.cmd.pvCmd - (uintptr_t)pu8VramBase));
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}

/**
 * Worker for VBoxVBVAExHSSaveState().
 * @thread VDMA
 */
static int vboxVBVAExHSSaveStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t *pu8VramBase, PSSMHANDLE pSSM)
{
    if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    {
        WARN(("vbva not paused\n"));
        return VERR_INVALID_STATE;
    }

    int rc;
    VBVAEXHOSTCTL *pCtl;
    RTListForEach(&pCmdVbva->GuestCtlList, pCtl, VBVAEXHOSTCTL, Node)
    {
        rc = vboxVBVAExHSSaveGuestCtl(pCmdVbva, pCtl, pu8VramBase, pSSM);
        AssertRCReturn(rc, rc);
    }

    rc = SSMR3PutU32(pSSM, 0);
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}

/**
 * Handles VBVAEXHOSTCTL_TYPE_HH_SAVESTATE for vboxVDMACrHostCtlProcess(),
 * saving state on the VDMA thread.
 *
 * @returns Same as VBoxVBVAExHSCheckCommands, or failure on save state failure.
 * @thread VDMA
 */
static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t *pu8VramBase, PSSMHANDLE pSSM)
{
    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    AssertRCReturn(rc, rc);

    rc = vboxVBVAExHSSaveStateLocked(pCmdVbva, pu8VramBase, pSSM);
    if (RT_FAILURE(rc))
        WARN(("vboxVBVAExHSSaveStateLocked failed %Rrc\n", rc));

    RTCritSectLeave(&pCmdVbva->CltCritSect);
    return rc;
}


/**
 * Worker for vboxVBVAExHSLoadStateLocked().
 * @retval VINF_EOF when there is nothing more to load.
 * @thread VDMA
 */
static int vboxVBVAExHSLoadGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t *pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
{
    RT_NOREF(u32Version);
    uint32_t u32;
    int rc = SSMR3GetU32(pSSM, &u32);
    AssertLogRelRCReturn(rc, rc);

    if (!u32)
        return VINF_EOF;

    VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(pCmdVbva, (VBVAEXHOSTCTL_TYPE)u32);
    if (!pHCtl)
    {
        WARN(("VBoxVBVAExHCtlCreate failed\n"));
        return VERR_NO_MEMORY;
    }

    rc = SSMR3GetU32(pSSM, &u32);
    AssertLogRelRCReturn(rc, rc);
    pHCtl->u.cmd.cbCmd = u32;

    rc = SSMR3GetU32(pSSM, &u32);
    AssertLogRelRCReturn(rc, rc);
    pHCtl->u.cmd.pvCmd = pu8VramBase + u32;

    RTListAppend(&pCmdVbva->GuestCtlList, &pHCtl->Node);
    ++pCmdVbva->u32cCtls;

    return VINF_SUCCESS;
}

/**
 * Worker for VBoxVBVAExHSLoadState().
 * @thread VDMA
 */
static int vboxVBVAExHSLoadStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t *pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
{
    if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    {
        WARN(("vbva not stopped\n"));
        return VERR_INVALID_STATE;
    }

    int rc;
    do
    {
        rc = vboxVBVAExHSLoadGuestCtl(pCmdVbva, pu8VramBase, pSSM, u32Version);
        AssertLogRelRCReturn(rc, rc);
    } while (rc != VINF_EOF);

    return VINF_SUCCESS;
}

/**
 * Handles VBVAEXHOSTCTL_TYPE_HH_LOADSTATE for vboxVDMACrHostCtlProcess(),
 * loading state on the VDMA thread.
 *
 * @returns Same as VBoxVBVAExHSCheckCommands, or failure on load state failure.
 * @thread VDMA
 */
static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t *pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
{
    Assert(VGA_SAVEDSTATE_VERSION_3D <= u32Version);
    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    AssertRCReturn(rc, rc);

    rc = vboxVBVAExHSLoadStateLocked(pCmdVbva, pu8VramBase, pSSM, u32Version);
    if (RT_FAILURE(rc))
        WARN(("vboxVBVAExHSLoadStateLocked failed %Rrc\n", rc));

    RTCritSectLeave(&pCmdVbva->CltCritSect);
    return rc;
}


/**
 * Queues a control command to the VDMA worker thread.
 *
 * The @a enmSource argument decides which list (guest/host) it's queued on.
 */
static int VBoxVBVAExHCtlSubmit(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, VBVAEXHOSTCTL_SOURCE enmSource,
                                PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
{
    int rc;
    if (VBoxVBVAExHSIsEnabled(pCmdVbva))
    {
        pCtl->pfnComplete = pfnComplete;
        pCtl->pvComplete  = pvComplete;

        rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
        if (RT_SUCCESS(rc))
        {
            /* Recheck that we're enabled after we've got the lock. */
            if (VBoxVBVAExHSIsEnabled(pCmdVbva))
            {
                /* Queue it. */
                if (enmSource > VBVAEXHOSTCTL_SOURCE_GUEST)
                    RTListAppend(&pCmdVbva->HostCtlList, &pCtl->Node);
                else
                    RTListAppend(&pCmdVbva->GuestCtlList, &pCtl->Node);
                ASMAtomicIncU32(&pCmdVbva->u32cCtls);

                RTCritSectLeave(&pCmdVbva->CltCritSect);

                /* Work the state or something. */
                rc = VBoxVBVAExHSCheckCommands(pCmdVbva);
            }
            else
            {
                RTCritSectLeave(&pCmdVbva->CltCritSect);
                Log(("cmd vbva not enabled (race)\n"));
                rc = VERR_INVALID_STATE;
            }
        }
        else
            AssertRC(rc);
    }
    else
    {
        Log(("cmd vbva not enabled\n"));
        rc = VERR_INVALID_STATE;
    }
    return rc;
}

/**
 * Submits the control command and notifies the VDMA thread.
 */
static int vdmaVBVACtlSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL *pCtl, VBVAEXHOSTCTL_SOURCE enmSource,
                             PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
{
    int rc = VBoxVBVAExHCtlSubmit(&pVdma->CmdVbva, pCtl, enmSource, pfnComplete, pvComplete);
    if (RT_SUCCESS(rc))
    {
        if (rc == VINF_SUCCESS)
            return VBoxVDMAThreadEventNotify(&pVdma->Thread);
        Assert(rc == VINF_ALREADY_INITIALIZED);
    }
    else
        Log(("VBoxVBVAExHCtlSubmit failed %Rrc\n", rc));

    return rc;
}


/**
 * Call VDMA thread creation notification callback.
 */
void VBoxVDMAThreadNotifyConstructSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
{
    Assert(pThread->u32State == VBOXVDMATHREAD_STATE_CREATING);
    PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
    void *pvChanged = pThread->pvChanged;

    pThread->pfnChanged = NULL;
    pThread->pvChanged  = NULL;

    ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_CREATED);

    if (pfnChanged)
        pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
}
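The save/load pair above stores a pointer into VRAM as a byte offset from the VRAM base, since raw pointers are meaningless across save/restore. A minimal sketch of that serialization trick (hypothetical helpers, not VirtualBox API):

#include <cassert>
#include <cstdint>

static uint32_t ptrToOffset(const void *pvCmd, const uint8_t *pbVramBase)
{
    return (uint32_t)((uintptr_t)pvCmd - (uintptr_t)pbVramBase);
}

static void *offsetToPtr(uint32_t off, uint8_t *pbVramBase, uint32_t cbVram)
{
    assert(off < cbVram); // validate a loaded offset before trusting it
    return pbVramBase + off;
}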
/**
 * Call VDMA thread termination notification callback.
 */
void VBoxVDMAThreadNotifyTerminatingSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
{
    Assert(pThread->u32State == VBOXVDMATHREAD_STATE_TERMINATING);
    PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
    void *pvChanged = pThread->pvChanged;

    pThread->pfnChanged = NULL;
    pThread->pvChanged  = NULL;

    if (pfnChanged)
        pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
}

/**
 * Check if the VDMA thread is terminating.
 */
DECLINLINE(bool) VBoxVDMAThreadIsTerminating(PVBOXVDMATHREAD pThread)
{
    return ASMAtomicUoReadU32(&pThread->u32State) == VBOXVDMATHREAD_STATE_TERMINATING;
}

/**
 * Init the VDMA thread.
 */
void VBoxVDMAThreadInit(PVBOXVDMATHREAD pThread)
{
    RT_ZERO(*pThread);
    pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
}

/**
 * Clean up the VDMA thread.
 */
int VBoxVDMAThreadCleanup(PVBOXVDMATHREAD pThread)
{
    uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
    switch (u32State)
    {
        case VBOXVDMATHREAD_STATE_TERMINATED:
            return VINF_SUCCESS;

        case VBOXVDMATHREAD_STATE_TERMINATING:
        {
            int rc = RTThreadWait(pThread->hWorkerThread, RT_INDEFINITE_WAIT, NULL);
            if (RT_SUCCESS(rc))
            {
                RTSemEventDestroy(pThread->hEvent);
                pThread->hEvent        = NIL_RTSEMEVENT;
                pThread->hWorkerThread = NIL_RTTHREAD;
                ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATED);
            }
            else
                WARN(("RTThreadWait failed %Rrc\n", rc));
            return rc;
        }

        default:
            WARN(("invalid state"));
            return VERR_INVALID_STATE;
    }
}

/**
 * Start the VDMA thread.
 */
int VBoxVDMAThreadCreate(PVBOXVDMATHREAD pThread, PFNRTTHREAD pfnThread, void *pvThread,
                         PFNVBOXVDMATHREAD_CHANGED pfnCreated, void *pvCreated)
{
    int rc = VBoxVDMAThreadCleanup(pThread);
    if (RT_SUCCESS(rc))
    {
        rc = RTSemEventCreate(&pThread->hEvent);
        AssertRCReturn(rc, rc);
        pThread->u32State   = VBOXVDMATHREAD_STATE_CREATING;
        pThread->pfnChanged = pfnCreated;
        pThread->pvChanged  = pvCreated;
        rc = RTThreadCreate(&pThread->hWorkerThread, pfnThread, pvThread, 0, RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "VDMA");
        if (RT_SUCCESS(rc))
            return VINF_SUCCESS;

        WARN(("RTThreadCreate failed %Rrc\n", rc));
        RTSemEventDestroy(pThread->hEvent);
        pThread->hEvent        = NIL_RTSEMEVENT;
        pThread->hWorkerThread = NIL_RTTHREAD;
        pThread->u32State      = VBOXVDMATHREAD_STATE_TERMINATED;
    }
    else
        WARN(("VBoxVDMAThreadCleanup failed %Rrc\n", rc));
    return rc;
}

/**
 * Notifies the VDMA thread.
 * @thread !VDMA
 */
static int VBoxVDMAThreadEventNotify(PVBOXVDMATHREAD pThread)
{
    int rc = RTSemEventSignal(pThread->hEvent);
    AssertRC(rc);
    return rc;
}

/**
 * State worker for VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD &
 * VBVAEXHOSTCTL_TYPE_GHH_DISABLE in vboxVDMACrHostCtlProcess(), and
 * VBVAEXHOSTCTL_TYPE_GHH_DISABLE in vboxVDMACrGuestCtlProcess().
 *
 * @thread VDMA
 */
static int VBoxVDMAThreadTerm(PVBOXVDMATHREAD pThread, PFNVBOXVDMATHREAD_CHANGED pfnTerminated, void *pvTerminated, bool fNotify)
{
    for (;;)
    {
        uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
        switch (u32State)
        {
            case VBOXVDMATHREAD_STATE_CREATED:
                pThread->pfnChanged = pfnTerminated;
                pThread->pvChanged  = pvTerminated;
                ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATING);
                if (fNotify)
                {
                    int rc = VBoxVDMAThreadEventNotify(pThread);
                    AssertRC(rc);
                }
                return VINF_SUCCESS;

            case VBOXVDMATHREAD_STATE_TERMINATING:
            case VBOXVDMATHREAD_STATE_TERMINATED:
                WARN(("thread is marked for termination or terminated\n"));
                return VERR_INVALID_STATE;

            case VBOXVDMATHREAD_STATE_CREATING:
                /* wait till the thread creation is completed */
                WARN(("concurrent thread create/destroy\n"));
                RTThreadYield();
                continue;

            default:
                WARN(("invalid state"));
                return VERR_INVALID_STATE;
        }
    }
}


/*
 * vboxVDMACrCtlPost / vboxVDMACrCtlPostAsync
 */

/** Completion callback for vboxVDMACrCtlPostAsync(). */
typedef DECLCALLBACK(void) FNVBOXVDMACRCTL_CALLBACK(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void *pvContext);
/** Pointer to a vboxVDMACrCtlPostAsync completion callback. */
typedef FNVBOXVDMACRCTL_CALLBACK *PFNVBOXVDMACRCTL_CALLBACK;

/**
 * Private wrapper around VBOXVDMACMD_CHROMIUM_CTL.
 */
typedef struct VBOXVDMACMD_CHROMIUM_CTL_PRIVATE
{
    uint32_t                  uMagic; /**< VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC */
    uint32_t                  cRefs;
    int32_t volatile          rc;
    PFNVBOXVDMACRCTL_CALLBACK pfnCompletion;
    void                     *pvCompletion;
    RTSEMEVENT                hEvtDone;
    VBOXVDMACMD_CHROMIUM_CTL  Cmd;
} VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, *PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE;
/** Magic number for VBOXVDMACMD_CHROMIUM_CTL_PRIVATE (Michael Wolff). */
# define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC UINT32_C(0x19530827)

/** Converts from a VBOXVDMACMD_CHROMIUM_CTL::Cmd pointer to a pointer to the
 * containing structure. */
# define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(_p) RT_FROM_MEMBER((_p), VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd)

/**
 * Creates a VBOXVDMACMD_CHROMIUM_CTL_PRIVATE instance.
 */
static PVBOXVDMACMD_CHROMIUM_CTL vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE enmCmd, uint32_t cbCmd)
{
    PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr;
    pHdr = (PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)RTMemAllocZ(cbCmd + RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd));
    if (pHdr)
    {
        pHdr->uMagic      = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC;
        pHdr->cRefs       = 1;
        pHdr->rc          = VERR_NOT_IMPLEMENTED;
        pHdr->hEvtDone    = NIL_RTSEMEVENT;
        pHdr->Cmd.enmType = enmCmd;
        pHdr->Cmd.cbCmd   = cbCmd;
        return &pHdr->Cmd;
    }
    return NULL;
}
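vboxVDMACrCtlCreate() uses the private-header trick: bookkeeping (magic, refcount, completion event) is prepended to the command structure, and callers only ever see a pointer to the embedded public part. A minimal sketch of that allocation scheme with hypothetical types (note offsetof on a type containing std::atomic is conditionally supported, but works on common compilers):

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <new>

struct Cmd        { int enmType; };
struct CmdPrivate { uint32_t uMagic; std::atomic<uint32_t> cRefs; Cmd Cmd_; };

static Cmd *cmdCreate(int enmType)
{
    CmdPrivate *pHdr = new (std::nothrow) CmdPrivate{};
    if (!pHdr)
        return nullptr;
    pHdr->uMagic = UINT32_C(0x19530827);
    pHdr->cRefs  = 1;
    pHdr->Cmd_.enmType = enmType;
    return &pHdr->Cmd_;            // hand out only the public part
}

static void cmdRelease(Cmd *pCmd)
{
    // container_of: recover the private header from the public pointer.
    CmdPrivate *pHdr = reinterpret_cast<CmdPrivate *>(
        reinterpret_cast<uint8_t *>(pCmd) - offsetof(CmdPrivate, Cmd_));
    if (pHdr->cRefs.fetch_sub(1) == 1)
        delete pHdr;               // last reference gone, free the container
}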
/**
 * Releases a reference to a VBOXVDMACMD_CHROMIUM_CTL_PRIVATE instance.
 */
DECLINLINE(void) vboxVDMACrCtlRelease(PVBOXVDMACMD_CHROMIUM_CTL pCmd)
{
    PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
    Assert(pHdr->uMagic == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC);

    uint32_t cRefs = ASMAtomicDecU32(&pHdr->cRefs);
    if (!cRefs)
    {
        pHdr->uMagic = ~VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC;
        if (pHdr->hEvtDone != NIL_RTSEMEVENT)
        {
            RTSemEventDestroy(pHdr->hEvtDone);
            pHdr->hEvtDone = NIL_RTSEMEVENT;
        }
        RTMemFree(pHdr);
    }
}

/**
 * Retains a reference to a VBOXVDMACMD_CHROMIUM_CTL_PRIVATE instance.
 */
DECLINLINE(void) vboxVDMACrCtlRetain(PVBOXVDMACMD_CHROMIUM_CTL pCmd)
{
    PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
    Assert(pHdr->uMagic == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC);

    uint32_t cRefs = ASMAtomicIncU32(&pHdr->cRefs);
    Assert(cRefs > 1);
    Assert(cRefs < _1K);
    RT_NOREF_PV(cRefs);
}

/**
 * Gets the result from our private chromium control command.
 *
 * @returns status code.
 * @param pCmd The command.
 */
DECLINLINE(int) vboxVDMACrCtlGetRc(PVBOXVDMACMD_CHROMIUM_CTL pCmd)
{
    PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
    Assert(pHdr->uMagic == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC);
    return pHdr->rc;
}

/**
 * @interface_method_impl{PDMIDISPLAYVBVACALLBACKS,pfnCrHgsmiControlCompleteAsync,
 *  Some indirect completion magic, you gotta love this code! }
 */
DECLCALLBACK(int) vboxVDMACrHgsmiControlCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CTL pCmd, int rc)
{
    PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
    PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
    Assert(pHdr->uMagic == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC);

    pHdr->rc = rc;
    if (pHdr->pfnCompletion)
        pHdr->pfnCompletion(pVGAState, pCmd, pHdr->pvCompletion);
    return VINF_SUCCESS;
}

/**
 * @callback_method_impl{FNCRCTLCOMPLETION,
 *  Completion callback for vboxVDMACrCtlPost. }
 */
static DECLCALLBACK(void) vboxVDMACrCtlCbSetEvent(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void *pvContext)
{
    PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = (PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)pvContext;
    Assert(pHdr == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd));
    Assert(pHdr->uMagic == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC);
    RT_NOREF(pVGAState, pCmd);

    int rc = RTSemEventSignal(pHdr->hEvtDone);
    AssertRC(rc);

    vboxVDMACrCtlRelease(&pHdr->Cmd);
}

/**
 * Worker for vboxVDMACrCtlPost().
 */
static int vboxVDMACrCtlPostAsync(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd,
                                  PFNVBOXVDMACRCTL_CALLBACK pfnCompletion, void *pvCompletion)
{
    if (   pVGAState->pDrv
        && pVGAState->pDrv->pfnCrHgsmiControlProcess)
    {
        PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
        pHdr->pfnCompletion = pfnCompletion;
        pHdr->pvCompletion  = pvCompletion;
        pVGAState->pDrv->pfnCrHgsmiControlProcess(pVGAState->pDrv, pCmd, cbCmd);
        return VINF_SUCCESS;
    }
    return VERR_NOT_SUPPORTED;
}

/**
 * Posts a control command and waits for its completion.
 */
static int vboxVDMACrCtlPost(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd)
{
    PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);

    /* Allocate the semaphore. */
    Assert(pHdr->hEvtDone == NIL_RTSEMEVENT);
    int rc = RTSemEventCreate(&pHdr->hEvtDone);
    AssertRCReturn(rc, rc);

    /* Grab a reference for the completion routine. */
    vboxVDMACrCtlRetain(&pHdr->Cmd);

    /* Submit and wait for it. */
    rc = vboxVDMACrCtlPostAsync(pVGAState, pCmd, cbCmd, vboxVDMACrCtlCbSetEvent, pHdr);
    if (RT_SUCCESS(rc))
        rc = RTSemEventWaitNoResume(pHdr->hEvtDone, RT_INDEFINITE_WAIT);
    else
    {
        if (rc != VERR_NOT_SUPPORTED)
            AssertRC(rc);
        vboxVDMACrCtlRelease(pCmd);
    }
    return rc;
}


/**
 * Structure for passing data between vboxVDMACrHgcmSubmitSync() and the
 * completion routine vboxVDMACrHgcmSubmitSyncCompletion().
 */
typedef struct VDMA_VBVA_CTL_CYNC_COMPLETION
{
    int volatile rc;
    RTSEMEVENT   hEvent;
} VDMA_VBVA_CTL_CYNC_COMPLETION;

/**
 * @callback_method_impl{FNCRCTLCOMPLETION,
 *  Completion callback for vboxVDMACrHgcmSubmitSync() that signals the
 *  waiting thread.}
 */
static DECLCALLBACK(void) vboxVDMACrHgcmSubmitSyncCompletion(struct VBOXCRCMDCTL *pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
{
    VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION *)pvCompletion;
    pData->rc = rc;
    rc = RTSemEventSignal(pData->hEvent);
    AssertLogRelRC(rc);

    RT_NOREF(pCmd, cbCmd);
}

/**
 * Worker for vboxVDMACrHgcmHandleEnable() and vdmaVBVAEnableProcess() that
 * works pVGAState->pDrv->pfnCrHgcmCtlSubmit.
 *
 * @thread VDMA
 */
static int vboxVDMACrHgcmSubmitSync(struct VBOXVDMAHOST *pVdma, VBOXCRCMDCTL *pCtl, uint32_t cbCtl)
{
    VDMA_VBVA_CTL_CYNC_COMPLETION Data;
    Data.rc = VERR_NOT_IMPLEMENTED;
    int rc = RTSemEventCreate(&Data.hEvent);
    if (!RT_SUCCESS(rc))
    {
        WARN(("RTSemEventCreate failed %Rrc\n", rc));
        return rc;
    }

    pCtl->CalloutList.List.pNext = NULL;

    PVGASTATE pVGAState = pVdma->pVGAState;
    rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCtl, cbCtl, vboxVDMACrHgcmSubmitSyncCompletion, &Data);
    if (RT_SUCCESS(rc))
    {
        rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
        if (RT_SUCCESS(rc))
        {
            rc = Data.rc;
            if (!RT_SUCCESS(rc))
                WARN(("pfnCrHgcmCtlSubmit command failed %Rrc\n", rc));
        }
        else
            WARN(("RTSemEventWait failed %Rrc\n", rc));
    }
    else
        WARN(("pfnCrHgcmCtlSubmit failed %Rrc\n", rc));

    RTSemEventDestroy(Data.hEvent);

    return rc;
}
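vboxVDMACrHgcmSubmitSync() turns an asynchronous submit into a blocking call by signalling an event from the completion callback. A self-contained sketch of the same pattern, with std::condition_variable standing in for RTSEMEVENT (illustrative only, not the VirtualBox API):

#include <condition_variable>
#include <mutex>

struct SyncCompletion
{
    std::mutex m; std::condition_variable cv;
    bool fDone = false; int rc = -1;
};

static void completionCb(int rc, void *pvUser)   // invoked later by the async engine
{
    SyncCompletion *p = static_cast<SyncCompletion *>(pvUser);
    std::lock_guard<std::mutex> g(p->m);
    p->rc = rc; p->fDone = true;
    p->cv.notify_one();
}

template <typename SubmitFn>
static int submitSync(SubmitFn submit)           // submit(pfnCompletion, pvUser)
{
    SyncCompletion Data;
    int rc = submit(completionCb, &Data);
    if (rc != 0)
        return rc;                               // the submission itself failed
    std::unique_lock<std::mutex> g(Data.m);
    Data.cv.wait(g, [&] { return Data.fDone; }); // block until the callback fires
    return Data.rc;                              // result delivered by the callback
}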
/**
 * Worker for vboxVDMAReset().
 */
static int vdmaVBVACtlDisableSync(PVBOXVDMAHOST pVdma)
{
    VBVAEXHOSTCTL HCtl;
    RT_ZERO(HCtl);
    HCtl.enmType = VBVAEXHOSTCTL_TYPE_GHH_DISABLE;
    int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
    if (RT_SUCCESS(rc))
        vgaUpdateDisplayAll(pVdma->pVGAState, /* fFailOnResize = */ false);
    else
        Log(("vdmaVBVACtlSubmitSync failed %Rrc\n", rc));
    return rc;
}


/**
 * @interface_method_impl{VBOXCRCMDCTL_HGCMENABLE_DATA,pfnRHCmd,
 *  Used by vboxVDMACrHgcmNotifyTerminatingCb() and called by
 *  crVBoxServerCrCmdDisablePostProcess() during crServerTearDown() to drain
 *  command queues or something.}
 */
static DECLCALLBACK(uint8_t *)
vboxVDMACrHgcmHandleEnableRemainingHostCommand(HVBOXCRCMDCTL_REMAINING_HOST_COMMAND hClient, uint32_t *pcbCtl, int prevCmdRc)
{
    struct VBOXVDMAHOST *pVdma = hClient;

    if (!pVdma->pCurRemainingHostCtl)
        VBoxVBVAExHSDisable(&pVdma->CmdVbva); /* disable VBVA, all subsequent host commands will go the HGCM way */
    else
        VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pVdma->pCurRemainingHostCtl, prevCmdRc);

    pVdma->pCurRemainingHostCtl = VBoxVBVAExHPCheckHostCtlOnDisable(&pVdma->CmdVbva);
    if (pVdma->pCurRemainingHostCtl)
    {
        *pcbCtl = pVdma->pCurRemainingHostCtl->u.cmd.cbCmd;
        return (uint8_t *)pVdma->pCurRemainingHostCtl->u.cmd.pvCmd;
    }

    *pcbCtl = 0;
    return NULL;
}

/**
 * @interface_method_impl{VBOXCRCMDCTL_HGCMDISABLE_DATA,pfnNotifyTermDone,
 *  Called by crServerTearDown().}
 */
static DECLCALLBACK(void) vboxVDMACrHgcmNotifyTerminatingDoneCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient)
{
# ifdef VBOX_STRICT
    struct VBOXVDMAHOST *pVdma = hClient;
    Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    Assert(pVdma->Thread.u32State == VBOXVDMATHREAD_STATE_TERMINATING);
# else
    RT_NOREF(hClient);
# endif
}

/**
 * @interface_method_impl{VBOXCRCMDCTL_HGCMDISABLE_DATA,pfnNotifyTerm,
 *  Called by crServerTearDown().}
 */
static DECLCALLBACK(int) vboxVDMACrHgcmNotifyTerminatingCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient,
                                                           VBOXCRCMDCTL_HGCMENABLE_DATA *pHgcmEnableData)
{
    struct VBOXVDMAHOST *pVdma = hClient;

    VBVAEXHOSTCTL HCtl;
    RT_ZERO(HCtl);
    HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD;
    int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);

    pHgcmEnableData->hRHCmd   = pVdma;
    pHgcmEnableData->pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;

    if (rc == VERR_INVALID_STATE)
        rc = VINF_SUCCESS;
    else if (RT_FAILURE(rc))
        WARN(("vdmaVBVACtlSubmitSync failed %Rrc\n", rc));

    return rc;
}

/**
 * Worker for vdmaVBVAEnableProcess() and vdmaVBVADisableProcess().
 *
 * @thread VDMA
 */
static int vboxVDMACrHgcmHandleEnable(struct VBOXVDMAHOST *pVdma)
{
    VBOXCRCMDCTL_ENABLE Enable;
    RT_ZERO(Enable);
    Enable.Hdr.enmType  = VBOXCRCMDCTL_TYPE_ENABLE;
    Enable.Data.hRHCmd  = pVdma;
    Enable.Data.pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;

    int rc = vboxVDMACrHgcmSubmitSync(pVdma, &Enable.Hdr, sizeof(Enable));
    Assert(!pVdma->pCurRemainingHostCtl);
    if (RT_SUCCESS(rc))
    {
        Assert(!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
        return VINF_SUCCESS;
    }

    Assert(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
    WARN(("vboxVDMACrHgcmSubmitSync failed %Rrc\n", rc));
    return rc;
}

/**
 * Handles VBVAEXHOSTCTL_TYPE_GHH_ENABLE and VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED
 * for vboxVDMACrGuestCtlProcess().
 *
 * @thread VDMA
 */
static int vdmaVBVAEnableProcess(struct VBOXVDMAHOST *pVdma, uint32_t u32Offset)
{
    if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
    {
        WARN(("vdma VBVA is already enabled\n"));
        return VERR_INVALID_STATE;
    }

    VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *pVBVA
        = (VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *)HGSMIOffsetToPointerHost(pVdma->pHgsmi, u32Offset);
    if (!pVBVA)
    {
        WARN(("invalid offset %d (%#x)\n", u32Offset, u32Offset));
        return VERR_INVALID_PARAMETER;
    }

    int rc = VBoxVBVAExHSEnable(&pVdma->CmdVbva, pVBVA, pVdma->pVGAState->vram_ptrR3, pVdma->pVGAState->vram_size);
    if (RT_SUCCESS(rc))
    {
        if (!pVdma->CrSrvInfo.pfnEnable)
        {
            /* "HGCM-less" mode. All initialized. */
            return VINF_SUCCESS;
        }

        VBOXCRCMDCTL_DISABLE Disable;
        Disable.Hdr.enmType             = VBOXCRCMDCTL_TYPE_DISABLE;
        Disable.Data.hNotifyTerm        = pVdma;
        Disable.Data.pfnNotifyTerm      = vboxVDMACrHgcmNotifyTerminatingCb;
        Disable.Data.pfnNotifyTermDone  = vboxVDMACrHgcmNotifyTerminatingDoneCb;
        rc = vboxVDMACrHgcmSubmitSync(pVdma, &Disable.Hdr, sizeof(Disable));
        if (RT_SUCCESS(rc))
        {
            PVGASTATE pVGAState = pVdma->pVGAState;
            VBOXCRCMD_SVRENABLE_INFO Info;
            Info.hCltScr                = pVGAState->pDrv;
            Info.pfnCltScrUpdateBegin   = pVGAState->pDrv->pfnVBVAUpdateBegin;
            Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
            Info.pfnCltScrUpdateEnd     = pVGAState->pDrv->pfnVBVAUpdateEnd;
            rc = pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
            if (RT_SUCCESS(rc))
                return VINF_SUCCESS;

            WARN(("pfnEnable failed %Rrc\n", rc));
            vboxVDMACrHgcmHandleEnable(pVdma);
        }
        else
            WARN(("vboxVDMACrHgcmSubmitSync failed %Rrc\n", rc));

        VBoxVBVAExHSDisable(&pVdma->CmdVbva);
    }
    else
        WARN(("VBoxVBVAExHSEnable failed %Rrc\n", rc));

    return rc;
}
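Enabling takes a guest-supplied offset and only trusts it after turning it into a host pointer and bounds-checking it against VRAM (HGSMIOffsetToPointerHost plus the check in VBoxVBVAExHSEnable). The core of that validation in isolation, with a hypothetical buffer type standing in for VBVABUFFER:

#include <cstdint>

struct VbvaBuffer { uint32_t off32Data; uint8_t au8Data[1]; };

// Returns a pointer to the buffer inside VRAM, or nullptr if the guest offset
// does not leave room for the whole header before the end of VRAM.
static VbvaBuffer *vbvaFromOffset(uint8_t *pbVram, uint32_t cbVram, uint32_t offGuest)
{
    if (offGuest >= cbVram || cbVram - offGuest < sizeof(VbvaBuffer))
        return nullptr;
    return reinterpret_cast<VbvaBuffer *>(pbVram + offGuest);
}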
/**
 * Worker for several vboxVDMACrHostCtlProcess() commands.
 *
 * @returns IPRT status code.
 * @param pVdma          The VDMA channel.
 * @param fDoHgcmEnable  ???
 * @thread VDMA
 */
static int vdmaVBVADisableProcess(struct VBOXVDMAHOST *pVdma, bool fDoHgcmEnable)
{
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
    {
        Log(("vdma VBVA is already disabled\n"));
        return VINF_SUCCESS;
    }

    if (!pVdma->CrSrvInfo.pfnDisable)
    {
        /* "HGCM-less" mode. Just undo what vdmaVBVAEnableProcess did. */
        VBoxVBVAExHSDisable(&pVdma->CmdVbva);
        return VINF_SUCCESS;
    }

    int rc = pVdma->CrSrvInfo.pfnDisable(pVdma->CrSrvInfo.hSvr);
    if (RT_SUCCESS(rc))
    {
        if (fDoHgcmEnable)
        {
            PVGASTATE pVGAState = pVdma->pVGAState;

            /* Disable is a bit tricky: we need to ensure the host ctl commands
             * do not come out of order and do not come over the HGCM channel
             * until after it is enabled. */
            rc = vboxVDMACrHgcmHandleEnable(pVdma);
            if (RT_SUCCESS(rc))
            {
                vdmaVBVANotifyDisable(pVGAState);
                return VINF_SUCCESS;
            }

            VBOXCRCMD_SVRENABLE_INFO Info;
            Info.hCltScr                = pVGAState->pDrv;
            Info.pfnCltScrUpdateBegin   = pVGAState->pDrv->pfnVBVAUpdateBegin;
            Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
            Info.pfnCltScrUpdateEnd     = pVGAState->pDrv->pfnVBVAUpdateEnd;
            pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info); /** @todo ignoring return code */
        }
    }
    else
        WARN(("pfnDisable failed %Rrc\n", rc));

    return rc;
}

/**
 * Handles VBVAEXHOST_DATA_TYPE_HOSTCTL for vboxVDMAWorkerThread.
 *
 * @returns VBox status code.
 * @param pVdma       The VDMA channel.
 * @param pCmd        The control command to process. Should be
 *                    safe, i.e. not shared with the guest.
 * @param pfContinue  Where to return whether to continue or not.
 * @thread VDMA
 */
static int vboxVDMACrHostCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd, bool *pfContinue)
{
    *pfContinue = true;

    int rc;
    switch (pCmd->enmType)
    {
        /*
         * See vdmaVBVACtlOpaqueHostSubmit() and its callers.
         */
        case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
            if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
            {
                if (pVdma->CrSrvInfo.pfnHostCtl)
                    return pVdma->CrSrvInfo.pfnHostCtl(pVdma->CrSrvInfo.hSvr, (uint8_t *)pCmd->u.cmd.pvCmd, pCmd->u.cmd.cbCmd);
                WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for HGCM-less mode\n"));
            }
            else
                WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
            return VERR_INVALID_STATE;

        /*
         * See vdmaVBVACtlDisableSync().
         */
        case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
            rc = vdmaVBVADisableProcess(pVdma, true /* fDoHgcmEnable */);
            if (RT_SUCCESS(rc))
                rc = VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false /* fNotify */);
            else
                WARN(("vdmaVBVADisableProcess failed %Rrc\n", rc));
            return rc;

        /*
         * See vboxVDMACrHgcmNotifyTerminatingCb().
         */
        case VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD:
            rc = vdmaVBVADisableProcess(pVdma, false /* fDoHgcmEnable */);
            if (RT_SUCCESS(rc))
            {
                rc = VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, true /* fNotify */);
                if (RT_SUCCESS(rc))
                    *pfContinue = false;
                else
                    WARN(("VBoxVDMAThreadTerm failed %Rrc\n", rc));
            }
            else
                WARN(("vdmaVBVADisableProcess failed %Rrc\n", rc));
            return rc;

        /*
         * See vboxVDMASaveStateExecPerform().
         */
        case VBVAEXHOSTCTL_TYPE_HH_SAVESTATE:
            rc = VBoxVBVAExHSSaveState(&pVdma->CmdVbva, pVdma->pVGAState->vram_ptrR3, pCmd->u.state.pSSM);
            if (RT_SUCCESS(rc))
            {
                VGA_SAVED_STATE_PUT_MARKER(pCmd->u.state.pSSM, 4);
                if (pVdma->CrSrvInfo.pfnSaveState)
                    rc = pVdma->CrSrvInfo.pfnSaveState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM);
            }
            else
                WARN(("VBoxVBVAExHSSaveState failed %Rrc\n", rc));
            return rc;

        /*
         * See vboxVDMASaveLoadExecPerform().
         */
        case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE:
            rc = VBoxVBVAExHSLoadState(&pVdma->CmdVbva, pVdma->pVGAState->vram_ptrR3, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
            if (RT_SUCCESS(rc))
            {
                VGA_SAVED_STATE_GET_MARKER_RETURN_ON_MISMATCH(pCmd->u.state.pSSM, pCmd->u.state.u32Version, 4);
                if (pVdma->CrSrvInfo.pfnLoadState)
                {
                    rc = pVdma->CrSrvInfo.pfnLoadState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
                    if (RT_FAILURE(rc))
                        WARN(("pfnLoadState failed %Rrc\n", rc));
                }
            }
            else
                WARN(("VBoxVBVAExHSLoadState failed %Rrc\n", rc));
            return rc;

        /*
         * See vboxVDMASaveLoadDone().
         */
        case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE:
        {
            PVGASTATE pVGAState = pVdma->pVGAState;
            for (uint32_t i = 0; i < pVGAState->cMonitors; ++i)
            {
                VBVAINFOSCREEN CurScreen;
                VBVAINFOVIEW   CurView;
                rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
                AssertLogRelMsgRCReturn(rc, ("VBVAGetInfoViewAndScreen [screen #%u] -> %#x\n", i, rc), rc);

                rc = VBVAInfoScreen(pVGAState, &CurScreen);
                AssertLogRelMsgRCReturn(rc, ("VBVAInfoScreen [screen #%u] -> %#x\n", i, rc), rc);
            }

            return VINF_SUCCESS;
        }

        default:
            WARN(("unexpected host ctl type %d\n", pCmd->enmType));
            return VERR_INVALID_PARAMETER;
    }
}
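The screen validation that follows bounds every product and sum before using it, so a malicious screen description cannot overflow 32-bit arithmetic or point outside VRAM. The core overflow-safe check in isolation (illustrative helper with a made-up parameter set):

#include <cstdint>

static bool screenFitsVram(uint32_t cbLine, uint32_t cHeight, uint32_t offStart, uint64_t cbVram)
{
    uint64_t const cbScreen = (uint64_t)cbLine * cHeight; // 64-bit product cannot wrap
    return offStart <= cbVram
        && cbScreen <= cbVram
        && offStart <= cbVram - cbScreen;                 // end of screen inside VRAM
}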
/**
 * Worker for vboxVDMACrGuestCtlResizeEntryProcess.
 *
 * @returns VINF_SUCCESS or VERR_INVALID_PARAMETER.
 * @param pVGAState The VGA device state.
 * @param pScreen   The screen info (safe copy).
 */
static int vboxVDMASetupScreenInfo(PVGASTATE pVGAState, VBVAINFOSCREEN *pScreen)
{
    const uint32_t idxView = pScreen->u32ViewIndex;
    const uint16_t fFlags  = pScreen->u16Flags;

    if (fFlags & VBVA_SCREEN_F_DISABLED)
    {
        if (   idxView < pVGAState->cMonitors
            || idxView == UINT32_C(0xFFFFFFFF))
        {
            RT_UNTRUSTED_VALIDATED_FENCE();

            RT_ZERO(*pScreen);
            pScreen->u32ViewIndex = idxView;
            pScreen->u16Flags     = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_DISABLED;
            return VINF_SUCCESS;
        }
    }
    else
    {
        if (fFlags & VBVA_SCREEN_F_BLANK2)
        {
            if (   idxView >= pVGAState->cMonitors
                && idxView != UINT32_C(0xFFFFFFFF))
                return VERR_INVALID_PARAMETER;
            RT_UNTRUSTED_VALIDATED_FENCE();

            /* Special case for blanking using the current video mode.
             * Only the 'u16Flags' and 'u32ViewIndex' fields are relevant. */
            RT_ZERO(*pScreen);
            pScreen->u32ViewIndex = idxView;
            pScreen->u16Flags     = fFlags;
            return VINF_SUCCESS;
        }

        if (   idxView < pVGAState->cMonitors
            && pScreen->u16BitsPerPixel <= 32
            && pScreen->u32Width <= UINT16_MAX
            && pScreen->u32Height <= UINT16_MAX
            && pScreen->u32LineSize <= UINT16_MAX * 4)
        {
            const uint32_t u32BytesPerPixel = (pScreen->u16BitsPerPixel + 7) / 8;
            if (pScreen->u32Width <= pScreen->u32LineSize / (u32BytesPerPixel ? u32BytesPerPixel : 1))
            {
                const uint64_t u64ScreenSize = (uint64_t)pScreen->u32LineSize * pScreen->u32Height;
                if (   pScreen->u32StartOffset <= pVGAState->vram_size
                    && u64ScreenSize <= pVGAState->vram_size
                    && pScreen->u32StartOffset <= pVGAState->vram_size - (uint32_t)u64ScreenSize)
                    return VINF_SUCCESS;
            }
        }
    }

    LogFunc(("Failed\n"));
    return VERR_INVALID_PARAMETER;
}

/**
 * Handles one entry in a VBVAEXHOSTCTL_TYPE_GHH_RESIZE command.
 *
 * @returns IPRT status code.
 * @param pVdma  The VDMA channel.
 * @param pEntry The entry to handle. Considered volatile.
 *
 * @thread VDMA
 */
static int vboxVDMACrGuestCtlResizeEntryProcess(struct VBOXVDMAHOST *pVdma,
                                                VBOXCMDVBVA_RESIZE_ENTRY RT_UNTRUSTED_VOLATILE_GUEST *pEntry)
{
    PVGASTATE pVGAState = pVdma->pVGAState;

    VBVAINFOSCREEN Screen;
    RT_COPY_VOLATILE(Screen, pEntry->Screen);
    RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();

    /* Verify and clean up the local copy of the input data. */
    int rc = vboxVDMASetupScreenInfo(pVGAState, &Screen);
    if (RT_FAILURE(rc))
    {
        WARN(("invalid screen data\n"));
        return rc;
    }
    RT_UNTRUSTED_VALIDATED_FENCE();

    VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
    RT_BCOPY_VOLATILE(aTargetMap, pEntry->aTargetMap, sizeof(aTargetMap));
    RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();

    ASMBitClearRange(aTargetMap, pVGAState->cMonitors, VBOX_VIDEO_MAX_SCREENS);

    if (pVdma->CrSrvInfo.pfnResize)
    {
        /* Also inform the HGCM service, if it is there. */
        rc = pVdma->CrSrvInfo.pfnResize(pVdma->CrSrvInfo.hSvr, &Screen, aTargetMap);
        if (RT_FAILURE(rc))
        {
            WARN(("pfnResize failed %Rrc\n", rc));
            return rc;
        }
    }

    /* A fake view which contains the current screen for the 2D VBVAInfoView. */
    VBVAINFOVIEW View;
    View.u32ViewOffset    = 0;
    View.u32ViewSize      = Screen.u32LineSize * Screen.u32Height + Screen.u32StartOffset;
    View.u32MaxScreenSize = Screen.u32LineSize * Screen.u32Height;

    const bool fDisable = RT_BOOL(Screen.u16Flags & VBVA_SCREEN_F_DISABLED);

    for (int i = ASMBitFirstSet(aTargetMap, pVGAState->cMonitors);
         i >= 0;
         i = ASMBitNextSet(aTargetMap, pVGAState->cMonitors, i))
    {
        Screen.u32ViewIndex = i;

        VBVAINFOSCREEN CurScreen;
        VBVAINFOVIEW   CurView;

        rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
        AssertRC(rc);

        if (!memcmp(&Screen, &CurScreen, sizeof(CurScreen)))
            continue;

        /* The view does not change if _BLANK2 is set. */
        if (   (!fDisable || !CurView.u32ViewSize)
            && !RT_BOOL(Screen.u16Flags & VBVA_SCREEN_F_BLANK2))
        {
            View.u32ViewIndex = Screen.u32ViewIndex;

            rc = VBVAInfoView(pVGAState, &View);
            if (RT_FAILURE(rc))
            {
                WARN(("VBVAInfoView failed %Rrc\n", rc));
                break;
            }
        }

        rc = VBVAInfoScreen(pVGAState, &Screen);
        if (RT_FAILURE(rc))
        {
            WARN(("VBVAInfoScreen failed %Rrc\n", rc));
            break;
        }
    }

    return rc;
}


/**
 * Processes VBVAEXHOST_DATA_TYPE_GUESTCTL for vboxVDMAWorkerThread and
 * vdmaVBVACtlThreadCreatedEnable.
 *
 * @returns VBox status code.
 * @param pVdma The VDMA channel.
 * @param pCmd  The command to process. Maybe safe (not shared with the guest).
 *
 * @thread VDMA
 */
static int vboxVDMACrGuestCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd)
{
    VBVAEXHOSTCTL_TYPE enmType = pCmd->enmType;
    switch (enmType)
    {
        /*
         * See handling of VBOXCMDVBVACTL_TYPE_3DCTL in vboxCmdVBVACmdCtl().
         */
        case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
            ASSERT_GUEST_LOGREL_RETURN(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva), VERR_INVALID_STATE);
            ASSERT_GUEST_LOGREL_RETURN(pVdma->CrSrvInfo.pfnGuestCtl, VERR_INVALID_STATE);
            return pVdma->CrSrvInfo.pfnGuestCtl(pVdma->CrSrvInfo.hSvr,
                                                (uint8_t RT_UNTRUSTED_VOLATILE_GUEST *)pCmd->u.cmd.pvCmd,
                                                pCmd->u.cmd.cbCmd);

        /*
         * See handling of VBOXCMDVBVACTL_TYPE_RESIZE in vboxCmdVBVACmdCtl().
         */
        case VBVAEXHOSTCTL_TYPE_GHH_RESIZE:
        {
            ASSERT_GUEST_RETURN(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva), VERR_INVALID_STATE);
            uint32_t cbCmd = pCmd->u.cmd.cbCmd;
            ASSERT_GUEST_LOGREL_MSG_RETURN(   !(cbCmd % sizeof(VBOXCMDVBVA_RESIZE_ENTRY))
                                           && cbCmd > 0,
                                           ("cbCmd=%#x\n", cbCmd), VERR_INVALID_PARAMETER);

            uint32_t const cElements = cbCmd / sizeof(VBOXCMDVBVA_RESIZE_ENTRY);
            VBOXCMDVBVA_RESIZE RT_UNTRUSTED_VOLATILE_GUEST *pResize
                = (VBOXCMDVBVA_RESIZE RT_UNTRUSTED_VOLATILE_GUEST *)pCmd->u.cmd.pvCmd;
            for (uint32_t i = 0; i < cElements; ++i)
            {
                VBOXCMDVBVA_RESIZE_ENTRY RT_UNTRUSTED_VOLATILE_GUEST *pEntry = &pResize->aEntries[i];
                int rc = vboxVDMACrGuestCtlResizeEntryProcess(pVdma, pEntry);
                ASSERT_GUEST_LOGREL_MSG_RC_RETURN(rc, ("vboxVDMACrGuestCtlResizeEntryProcess failed for #%u: %Rrc\n", i, rc), rc);
            }
            return VINF_SUCCESS;
        }

        /*
         * See vdmaVBVACtlEnableSubmitInternal().
         */
        case VBVAEXHOSTCTL_TYPE_GHH_ENABLE:
        case VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED:
        {
            ASSERT_GUEST(pCmd->u.cmd.cbCmd == sizeof(VBVAENABLE));

            VBVAENABLE RT_UNTRUSTED_VOLATILE_GUEST *pEnable = (VBVAENABLE RT_UNTRUSTED_VOLATILE_GUEST *)pCmd->u.cmd.pvCmd;
            uint32_t const u32Offset = pEnable->u32Offset;
            RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();

            int rc = vdmaVBVAEnableProcess(pVdma, u32Offset);
            ASSERT_GUEST_MSG_RC_RETURN(rc, ("vdmaVBVAEnableProcess -> %Rrc\n", rc), rc);

            if (enmType == VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED)
            {
                rc = VBoxVBVAExHPPause(&pVdma->CmdVbva);
                ASSERT_GUEST_MSG_RC_RETURN(rc, ("VBoxVBVAExHPPause -> %Rrc\n", rc), rc);
            }
            return VINF_SUCCESS;
        }

        /*
         * See vdmaVBVACtlDisableSubmitInternal().
         */
        case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
        {
            int rc = vdmaVBVADisableProcess(pVdma, true /* fDoHgcmEnable */);
            ASSERT_GUEST_MSG_RC_RETURN(rc, ("vdmaVBVADisableProcess -> %Rrc\n", rc), rc);

            /* do vgaUpdateDisplayAll right away */
            VMR3ReqCallNoWait(PDMDevHlpGetVM(pVdma->pVGAState->pDevInsR3), VMCPUID_ANY,
                              (PFNRT)vgaUpdateDisplayAll, 2, pVdma->pVGAState, /* fFailOnResize = */ false);

            return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false /* fNotify */);
        }

        default:
            ASSERT_GUEST_LOGREL_MSG_FAILED(("unexpected ctl type %d\n", enmType));
            return VERR_INVALID_PARAMETER;
    }
}
2021 * @thread VDMA
2022 *
2023 * The direction is relative to VRAM: fIn == true means a transfer to VRAM, false a transfer from VRAM.
2024 */
2025 static int vboxVDMACrCmdVbvaProcessPagingEl(PPDMDEVINS pDevIns, VBOXCMDVBVAPAGEIDX uPageNo, uint8_t *pbVram, bool fIn)
2026 {
2027 RTGCPHYS GCPhysPage = (RTGCPHYS)uPageNo << X86_PAGE_SHIFT;
2028 PGMPAGEMAPLOCK Lock;
2029 
2030 if (fIn)
2031 {
2032 const void *pvPage;
2033 int rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, GCPhysPage, 0, &pvPage, &Lock);
2034 ASSERT_GUEST_LOGREL_MSG_RC_RETURN(rc, ("PDMDevHlpPhysGCPhys2CCPtrReadOnly %RGp -> %Rrc\n", GCPhysPage, rc), rc);
2035 
2036 memcpy(pbVram, pvPage, PAGE_SIZE);
2037 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
2038 }
2039 else
2040 {
2041 void *pvPage;
2042 int rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, GCPhysPage, 0, &pvPage, &Lock);
2043 ASSERT_GUEST_LOGREL_MSG_RC_RETURN(rc, ("PDMDevHlpPhysGCPhys2CCPtr %RGp -> %Rrc\n", GCPhysPage, rc), rc);
2044 
2045 memcpy(pvPage, pbVram, PAGE_SIZE);
2046 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
2047 }
2048 
2049 return VINF_SUCCESS;
2050 }
2051 
2052 /**
2053 * Handles a VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER command.
2054 *
2055 * @return 0 on success, -1 on failure.
2056 *
2057 * @thread VDMA
2058 */
2059 static int8_t vboxVDMACrCmdVbvaPageTransfer(PVGASTATE pVGAState, VBOXCMDVBVA_HDR const RT_UNTRUSTED_VOLATILE_GUEST *pHdr,
2060 uint32_t cbCmd, const VBOXCMDVBVA_PAGING_TRANSFER_DATA RT_UNTRUSTED_VOLATILE_GUEST *pData)
2061 {
2062 /*
2063 * Extract and validate information.
2064 */
2065 ASSERT_GUEST_MSG_RETURN(cbCmd >= sizeof(VBOXCMDVBVA_PAGING_TRANSFER), ("%#x\n", cbCmd), -1);
2066 
2067 bool const fIn = RT_BOOL(pHdr->u8Flags & VBOXCMDVBVA_OPF_PAGING_TRANSFER_IN);
2068 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2069 
2070 uint32_t cbPageNumbers = cbCmd - RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers);
2071 ASSERT_GUEST_MSG_RETURN(!(cbPageNumbers % sizeof(VBOXCMDVBVAPAGEIDX)), ("%#x\n", cbPageNumbers), -1);
2072 VBOXCMDVBVAPAGEIDX const cPages = cbPageNumbers / sizeof(VBOXCMDVBVAPAGEIDX);
2073 
2074 VBOXCMDVBVAOFFSET offVRam = pData->Alloc.u.offVRAM;
2075 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2076 ASSERT_GUEST_MSG_RETURN(!(offVRam & X86_PAGE_OFFSET_MASK), ("%#x\n", offVRam), -1);
2077 ASSERT_GUEST_MSG_RETURN(offVRam < pVGAState->vram_size, ("%#x vs %#x\n", offVRam, pVGAState->vram_size), -1);
2078 uint32_t cVRamPages = (pVGAState->vram_size - offVRam) >> X86_PAGE_SHIFT;
2079 ASSERT_GUEST_MSG_RETURN(cPages <= cVRamPages, ("cPages=%#x vs cVRamPages=%#x @ offVRam=%#x\n", cPages, cVRamPages, offVRam), -1);
2080 
2081 RT_UNTRUSTED_VALIDATED_FENCE();
2082 
2083 /*
2084 * Execute the command.
2085 */
2086 uint8_t *pbVRam = (uint8_t *)pVGAState->vram_ptrR3 + offVRam;
2087 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbVRam += X86_PAGE_SIZE)
2088 {
2089 uint32_t uPageNo = pData->aPageNumbers[iPage];
2090 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2091 int rc = vboxVDMACrCmdVbvaProcessPagingEl(pVGAState->pDevInsR3, uPageNo, pbVRam, fIn);
2092 ASSERT_GUEST_MSG_RETURN(RT_SUCCESS(rc), ("#%#x: uPageNo=%#x rc=%Rrc\n", iPage, uPageNo, rc), -1);
2093 }
2094 return 0;
2095 }
2096 
2097 
2098 /**
2099 * Handles VBOXCMDVBVA_OPTYPE_PAGING_FILL.
2100 *
2101 * @returns 0 on success, -1 on failure.
2102 * @param pVGAState The VGA state.
2103 * @param pFill The fill command (volatile).
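The page-transfer validation above is written to avoid integer overflow: the VRAM offset is checked first, then the page count is compared against the pages remaining after that offset, with no multiplication involved. A small sketch of the same arithmetic, assuming 4 KiB pages and using hypothetical names:

    #include <stdint.h>
    #include <stdbool.h>

    #define TOY_PAGE_SHIFT       12
    #define TOY_PAGE_OFFSET_MASK 0xfffu

    /* True if cPages pages starting at offVram lie fully inside VRAM. */
    static bool toyVramPageRangeOk(uint32_t offVram, uint32_t cPages, uint32_t cbVram)
    {
        if (offVram & TOY_PAGE_OFFSET_MASK)    /* must be page aligned */
            return false;
        if (offVram >= cbVram)                 /* keeps the subtraction below safe */
            return false;
        uint32_t cVramPages = (cbVram - offVram) >> TOY_PAGE_SHIFT;
        return cPages <= cVramPages;           /* no multiply, so no overflow */
    }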
2104 *
2105 * @thread VDMA
2106 */
2107 static int8_t vboxVDMACrCmdVbvaPagingFill(PVGASTATE pVGAState, VBOXCMDVBVA_PAGING_FILL RT_UNTRUSTED_VOLATILE_GUEST *pFill)
2108 {
2109 /*
2110 * Copy and validate input.
2111 */
2112 VBOXCMDVBVA_PAGING_FILL FillSafe;
2113 RT_COPY_VOLATILE(FillSafe, *pFill);
2114 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2115 
2116 VBOXCMDVBVAOFFSET offVRAM = FillSafe.offVRAM;
2117 ASSERT_GUEST_MSG_RETURN(!(offVRAM & X86_PAGE_OFFSET_MASK), ("offVRAM=%#x\n", offVRAM), -1);
2118 ASSERT_GUEST_MSG_RETURN(offVRAM <= pVGAState->vram_size, ("offVRAM=%#x\n", offVRAM), -1);
2119 
2120 uint32_t cbFill = FillSafe.u32CbFill;
2121 ASSERT_GUEST_STMT(!(cbFill & 3), cbFill &= ~(uint32_t)3);
2122 ASSERT_GUEST_MSG_RETURN( cbFill < pVGAState->vram_size
2123 && offVRAM <= pVGAState->vram_size - cbFill,
2124 ("offVRAM=%#x cbFill=%#x\n", offVRAM, cbFill), -1);
2125 
2126 RT_UNTRUSTED_VALIDATED_FENCE();
2127 
2128 /*
2129 * Execute.
2130 */
2131 uint32_t *pu32Vram = (uint32_t *)((uint8_t *)pVGAState->vram_ptrR3 + offVRAM);
2132 uint32_t const u32Color = FillSafe.u32Pattern;
2133 
2134 uint32_t cLoops = cbFill / 4;
2135 while (cLoops-- > 0)
2136 pu32Vram[cLoops] = u32Color;
2137 
2138 return 0;
2139 }
2140 
2141 /**
2142 * Process command data.
2143 *
2144 * @returns Zero or positive on success, negative on failure.
2145 * @param pVdma The VDMA channel.
2146 * @param pCmd The command data to process. Assume volatile.
2147 * @param cbCmd The amount of command data.
2148 *
2149 * @thread VDMA
2150 */
2151 static int8_t vboxVDMACrCmdVbvaProcessCmdData(struct VBOXVDMAHOST *pVdma,
2152 const VBOXCMDVBVA_HDR RT_UNTRUSTED_VOLATILE_GUEST *pCmd, uint32_t cbCmd)
2153 {
2154 uint8_t bOpCode = pCmd->u8OpCode;
2155 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2156 switch (bOpCode)
2157 {
2158 case VBOXCMDVBVA_OPTYPE_NOPCMD:
2159 return 0;
2160 
2161 case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
2162 return vboxVDMACrCmdVbvaPageTransfer(pVdma->pVGAState, pCmd, cbCmd,
2163 &((VBOXCMDVBVA_PAGING_TRANSFER RT_UNTRUSTED_VOLATILE_GUEST *)pCmd)->Data);
2164 
2165 case VBOXCMDVBVA_OPTYPE_PAGING_FILL:
2166 ASSERT_GUEST_RETURN(cbCmd == sizeof(VBOXCMDVBVA_PAGING_FILL), -1);
2167 return vboxVDMACrCmdVbvaPagingFill(pVdma->pVGAState, (VBOXCMDVBVA_PAGING_FILL RT_UNTRUSTED_VOLATILE_GUEST *)pCmd);
2168 
2169 default:
2170 ASSERT_GUEST_RETURN(pVdma->CrSrvInfo.pfnCmd != NULL, -1);
2171 return pVdma->CrSrvInfo.pfnCmd(pVdma->CrSrvInfo.hSvr, pCmd, cbCmd);
2172 }
2173 }
2174 
2175 # if 0
2176 typedef struct VBOXCMDVBVA_PAGING_TRANSFER
2177 {
2178 VBOXCMDVBVA_HDR Hdr;
2179 /* For now this can only contain offVRAM.
2180 * A paging transfer can NOT be initiated for allocations that have a host 3D object (hostID) associated. */
2181 VBOXCMDVBVA_ALLOCINFO Alloc;
2182 uint32_t u32Reserved;
2183 VBOXCMDVBVA_SYSMEMEL aSysMem[1];
2184 } VBOXCMDVBVA_PAGING_TRANSFER;
2185 # endif
2186 
2187 AssertCompile(sizeof (VBOXCMDVBVA_HDR) == 8);
2188 AssertCompile(sizeof (VBOXCMDVBVA_ALLOCINFO) == 4);
2189 AssertCompile(sizeof (VBOXCMDVBVAPAGEIDX) == 4);
2190 AssertCompile(!(X86_PAGE_SIZE % sizeof (VBOXCMDVBVAPAGEIDX)));
2191 
2192 # define VBOXCMDVBVA_NUM_SYSMEMEL_PER_PAGE (X86_PAGE_SIZE / sizeof (VBOXCMDVBVA_SYSMEMEL))
2193 
2194 /**
2195 * Worker for vboxVDMACrCmdProcess.
2196 *
2197 * @returns 8-bit result.
2198 * @param pVdma The VDMA channel.
2199 * @param pCmd The command. Consider volatile!
2200 * @param cbCmd The size of what @a pCmd points to. At least
2201 * sizeof(VBOXCMDVBVA_HDR).
2202 * @param fRecursion Set if this is a recursive call, false if not.
2203 * 2204 * @thread VDMA 2205 */ 2206 static int8_t vboxVDMACrCmdVbvaProcess(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR RT_UNTRUSTED_VOLATILE_GUEST *pCmd, 2207 uint32_t cbCmd, bool fRecursion) 2208 { 2209 int8_t i8Result = 0; 2210 uint8_t const bOpCode = pCmd->u8OpCode; 2211 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE(); 2212 LogRelFlow(("VDMA: vboxVDMACrCmdVbvaProcess: ENTER, bOpCode=%u\n", bOpCode)); 2213 switch (bOpCode) 2214 { 2215 case VBOXCMDVBVA_OPTYPE_SYSMEMCMD: 2216 { 2217 /* 2218 * Extract the command physical address and size. 2219 */ 2220 ASSERT_GUEST_MSG_RETURN(cbCmd >= sizeof(VBOXCMDVBVA_SYSMEMCMD), ("%#x\n", cbCmd), -1); 2221 RTGCPHYS GCPhysCmd = ((VBOXCMDVBVA_SYSMEMCMD RT_UNTRUSTED_VOLATILE_GUEST *)pCmd)->phCmd; 2222 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE(); 2223 uint32_t cbCmdPart = X86_PAGE_SIZE - (uint32_t)(GCPhysCmd & X86_PAGE_OFFSET_MASK); 2224 2225 uint32_t cbRealCmd = pCmd->u8Flags; 2226 cbRealCmd |= (uint32_t)pCmd->u.u8PrimaryID << 8; 2227 ASSERT_GUEST_MSG_RETURN(cbRealCmd >= sizeof(VBOXCMDVBVA_HDR), ("%#x\n", cbRealCmd), -1); 2228 ASSERT_GUEST_MSG_RETURN(cbRealCmd <= _1M, ("%#x\n", cbRealCmd), -1); 2229 2230 /* 2231 * Lock down the first page of the memory specified by the command. 2232 */ 2233 PGMPAGEMAPLOCK Lock; 2234 PVGASTATE pVGAState = pVdma->pVGAState; 2235 PPDMDEVINS pDevIns = pVGAState->pDevInsR3; 2236 VBOXCMDVBVA_HDR const *pRealCmdHdr = NULL; 2237 int rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, GCPhysCmd, 0, (const void **)&pRealCmdHdr, &Lock); 2238 ASSERT_GUEST_LOGREL_MSG_RC_RETURN(rc, ("VDMA: %RGp -> %Rrc\n", GCPhysCmd, rc), -1); 2239 Assert((GCPhysCmd & PAGE_OFFSET_MASK) == (((uintptr_t)pRealCmdHdr) & PAGE_OFFSET_MASK)); 2240 2241 /* 2242 * All fits within one page? We can handle that pretty efficiently. 2243 */ 2244 if (cbRealCmd <= cbCmdPart) 2245 { 2246 i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, pRealCmdHdr, cbRealCmd); 2247 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock); 2248 } 2249 else 2250 { 2251 /* 2252 * To keep things damn simple, just double buffer cross page or 2253 * multipage requests. 2254 */ 2255 uint8_t *pbCmdBuf = (uint8_t *)RTMemTmpAllocZ(RT_ALIGN_Z(cbRealCmd, 16)); 2256 if (pbCmdBuf) 2257 { 2258 memcpy(pbCmdBuf, pRealCmdHdr, cbCmdPart); 2259 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock); 2260 pRealCmdHdr = NULL; 2261 2262 rc = PDMDevHlpPhysRead(pDevIns, GCPhysCmd + cbCmdPart, &pbCmdBuf[cbCmdPart], cbRealCmd - cbCmdPart); 2263 if (RT_SUCCESS(rc)) 2264 i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, (VBOXCMDVBVA_HDR const *)pbCmdBuf, cbRealCmd); 2265 else 2266 LogRelMax(200, ("VDMA: Error reading %#x bytes of guest memory %#RGp!\n", cbRealCmd, GCPhysCmd)); 2267 RTMemTmpFree(pbCmdBuf); 2268 } 2269 else 2270 { 2271 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock); 2272 LogRelMax(200, ("VDMA: Out of temporary memory! %#x\n", cbRealCmd)); 2273 i8Result = -1; 2274 } 2275 } 2276 return i8Result; 2277 } 2278 2279 case VBOXCMDVBVA_OPTYPE_COMPLEXCMD: 2280 { 2281 Assert(cbCmd >= sizeof(VBOXCMDVBVA_HDR)); /* caller already checked this */ 2282 ASSERT_GUEST_RETURN(!fRecursion, -1); 2283 2284 /* Skip current command. */ 2285 cbCmd -= sizeof(*pCmd); 2286 pCmd++; 2287 2288 /* Process subcommands. 
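The SYSMEMCMD path above copes with commands whose guest-physical backing crosses a page boundary by double buffering: it copies what the mapping of the first page provides and fetches the remainder separately (the device uses PDMDevHlpPhysRead for that part). A sketch of the split computation, with illustrative names:

    #include <stdint.h>

    #define TOY_PAGE_SIZE 4096u

    /* Bytes of a request that are available from a mapping of its first page. */
    static uint32_t toyFirstPageChunk(uint64_t GCPhys, uint32_t cbTotal)
    {
        uint32_t cbInPage = TOY_PAGE_SIZE - (uint32_t)(GCPhys & (TOY_PAGE_SIZE - 1));
        return cbInPage < cbTotal ? cbInPage : cbTotal;
    }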
*/ 2289 while (cbCmd > 0) 2290 { 2291 ASSERT_GUEST_MSG_RETURN(cbCmd >= sizeof(VBOXCMDVBVA_HDR), ("%#x\n", cbCmd), -1); 2292 2293 uint16_t cbCurCmd = pCmd->u2.complexCmdEl.u16CbCmdHost; 2294 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE(); 2295 ASSERT_GUEST_MSG_RETURN(cbCurCmd <= cbCmd, ("cbCurCmd=%#x, cbCmd=%#x\n", cbCurCmd, cbCmd), -1); 2296 2297 i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCurCmd, true /*fRecursive*/); 2298 ASSERT_GUEST_MSG_RETURN(i8Result >= 0, ("vboxVDMACrCmdVbvaProcess -> %d\n", i8Result), i8Result); 2299 2300 /* Advance to the next command. */ 2301 pCmd = (VBOXCMDVBVA_HDR RT_UNTRUSTED_VOLATILE_GUEST *)((uintptr_t)pCmd + cbCurCmd); 2302 cbCmd -= cbCurCmd; 2303 } 2304 return 0; 2305 } 2306 2307 default: 2308 i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, pCmd, cbCmd); 2309 LogRelFlow(("VDMA: vboxVDMACrCmdVbvaProcess: LEAVE, opCode(%i)\n", pCmd->u8OpCode)); 2310 return i8Result; 2311 } 2312 } 2313 2314 /** 2315 * Worker for vboxVDMAWorkerThread handling VBVAEXHOST_DATA_TYPE_CMD. 2316 * 2317 * @thread VDMA 2318 */ 2319 static void vboxVDMACrCmdProcess(struct VBOXVDMAHOST *pVdma, uint8_t RT_UNTRUSTED_VOLATILE_GUEST *pbCmd, uint32_t cbCmd) 2320 { 2321 if ( cbCmd > 0 2322 && *pbCmd == VBOXCMDVBVA_OPTYPE_NOP) 2323 { /* nop */ } 2324 else 2325 { 2326 ASSERT_GUEST_RETURN_VOID(cbCmd >= sizeof(VBOXCMDVBVA_HDR)); 2327 VBOXCMDVBVA_HDR RT_UNTRUSTED_VOLATILE_GUEST *pCmd = (VBOXCMDVBVA_HDR RT_UNTRUSTED_VOLATILE_GUEST *)pbCmd; 2328 2329 /* check if the command is cancelled */ 2330 if (ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_IN_PROGRESS, VBOXCMDVBVA_STATE_SUBMITTED)) 2331 { 2332 /* Process it. */ 2333 pCmd->u.i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCmd, false /*fRecursion*/); 2334 } 2335 else 2336 Assert(pCmd->u8State == VBOXCMDVBVA_STATE_CANCELLED); 2337 } 2338 2339 } 2340 2341 /** 2342 * Worker for vboxVDMAConstruct(). 2343 */ 2344 static int vboxVDMACrCtlHgsmiSetup(struct VBOXVDMAHOST *pVdma) 2345 { 2346 PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP pCmd; 2347 pCmd = (PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP)vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRHGSMI_SETUP, sizeof(*pCmd)); 2348 int rc; 2349 if (pCmd) 2350 { 2351 PVGASTATE pVGAState = pVdma->pVGAState; 2352 pCmd->pvVRamBase = pVGAState->vram_ptrR3; 2353 pCmd->cbVRam = pVGAState->vram_size; 2354 pCmd->pLed = &pVGAState->Led3D; 2355 pCmd->CrClientInfo.hClient = pVdma; 2356 pCmd->CrClientInfo.pfnCallout = vboxCmdVBVACmdCallout; 2357 rc = vboxVDMACrCtlPost(pVGAState, &pCmd->Hdr, sizeof (*pCmd)); 2358 if (RT_SUCCESS(rc)) 2359 { 2360 rc = vboxVDMACrCtlGetRc(&pCmd->Hdr); 2361 if (RT_SUCCESS(rc)) 2362 pVdma->CrSrvInfo = pCmd->CrCmdServerInfo; 2363 else if (rc != VERR_NOT_SUPPORTED) 2364 WARN(("vboxVDMACrCtlGetRc returned %Rrc\n", rc)); 2365 } 2366 else 2367 WARN(("vboxVDMACrCtlPost failed %Rrc\n", rc)); 2368 2369 vboxVDMACrCtlRelease(&pCmd->Hdr); 2370 } 2371 else 2372 rc = VERR_NO_MEMORY; 2373 2374 if (!RT_SUCCESS(rc)) 2375 memset(&pVdma->CrSrvInfo, 0, sizeof (pVdma->CrSrvInfo)); 2376 2377 return rc; 2378 } 2379 2380 /** 2381 * @interface_method_impl{PDMIDISPLAYVBVACALLBACKS,pfnCrHgsmiControlCompleteAsync, 2382 * Some indirect completion magic, you gotta love this code! 
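The COMPLEXCMD case above is a classic variable-length record walk: verify a header fits, read the element size, verify it against the remaining bytes, process, advance. A self-contained sketch of that walk (toy header layout, not the VBOXCMDVBVA_HDR wire format; the size is read with memcpy to sidestep alignment concerns):

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    typedef struct TOYHDR { uint16_t cbCmd; uint8_t bOp; uint8_t bReserved; } TOYHDR;

    /* Walks back-to-back TOYHDR records; returns 0, or -1 on malformed input.
       Every size is validated before it is used to advance the cursor. */
    static int toyWalk(const uint8_t *pb, size_t cb)
    {
        while (cb > 0)
        {
            if (cb < sizeof(TOYHDR))
                return -1;                  /* truncated header */
            uint16_t cbCur;
            memcpy(&cbCur, pb + offsetof(TOYHDR, cbCmd), sizeof(cbCur));
            if (cbCur < sizeof(TOYHDR) || cbCur > cb)
                return -1;                  /* bogus element size */
            /* ... dispatch on the element here ... */
            pb += cbCur;
            cb -= cbCur;
        }
        return 0;
    }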
} 2383 */ 2384 DECLCALLBACK(int) vboxVDMACrHgsmiCommandCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CMD pCmd, int rc) 2385 { 2386 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface); 2387 PHGSMIINSTANCE pIns = pVGAState->pHGSMI; 2388 VBOXVDMACMD RT_UNTRUSTED_VOLATILE_GUEST *pDmaHdr = VBOXVDMACMD_FROM_BODY(pCmd); 2389 VBOXVDMACBUF_DR RT_UNTRUSTED_VOLATILE_GUEST *pDr = VBOXVDMACBUF_DR_FROM_TAIL(pDmaHdr); 2390 2391 AssertRC(rc); 2392 pDr->rc = rc; 2393 2394 Assert(pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD); 2395 rc = VBoxSHGSMICommandComplete(pIns, pDr); 2396 AssertRC(rc); 2397 2398 return rc; 2399 } 2400 2401 /** 2402 * Worker for vboxVDMACmdExecBlt(). 2403 */ 2404 static int vboxVDMACmdExecBltPerform(PVBOXVDMAHOST pVdma, const VBOXVIDEOOFFSET offDst, const VBOXVIDEOOFFSET offSrc, 2405 const PVBOXVDMA_SURF_DESC pDstDesc, const PVBOXVDMA_SURF_DESC pSrcDesc, 2406 const VBOXVDMA_RECTL *pDstRectl, const VBOXVDMA_RECTL *pSrcRectl) 2407 { 2408 /* 2409 * We do not support color conversion. 2410 */ 2411 AssertReturn(pDstDesc->format == pSrcDesc->format, VERR_INVALID_FUNCTION); 2412 2413 /* we do not support stretching (checked by caller) */ 2414 Assert(pDstRectl->height == pSrcRectl->height); 2415 Assert(pDstRectl->width == pSrcRectl->width); 2416 2417 uint8_t *pbRam = pVdma->pVGAState->vram_ptrR3; 2418 AssertCompileSize(pVdma->pVGAState->vram_size, sizeof(uint32_t)); 2419 uint32_t cbVRamSize = pVdma->pVGAState->vram_size; 2420 uint8_t *pbDstSurf = pbRam + offDst; 2421 uint8_t *pbSrcSurf = pbRam + offSrc; 2422 2423 if ( pDstDesc->width == pDstRectl->width 2424 && pSrcDesc->width == pSrcRectl->width 2425 && pSrcDesc->width == pDstDesc->width 2426 && pSrcDesc->pitch == pDstDesc->pitch) 2427 { 2428 Assert(!pDstRectl->left); 2429 Assert(!pSrcRectl->left); 2430 uint32_t offBoth = pDstDesc->pitch * pDstRectl->top; 2431 uint32_t cbToCopy = pDstDesc->pitch * pDstRectl->height; 2432 2433 if ( cbToCopy <= cbVRamSize 2434 && (uintptr_t)(pbDstSurf + offBoth) - (uintptr_t)pbRam <= cbVRamSize - cbToCopy 2435 && (uintptr_t)(pbSrcSurf + offBoth) - (uintptr_t)pbRam <= cbVRamSize - cbToCopy) 2436 { 2437 RT_UNTRUSTED_VALIDATED_FENCE(); 2438 memcpy(pbDstSurf + offBoth, pbSrcSurf + offBoth, cbToCopy); 2439 } 2440 else 2441 return VERR_INVALID_PARAMETER; 2442 } 2443 else 2444 { 2445 uint32_t offDstLineStart = pDstRectl->left * pDstDesc->bpp >> 3; 2446 uint32_t offDstLineEnd = ((pDstRectl->left * pDstDesc->bpp + 7) >> 3) + ((pDstDesc->bpp * pDstRectl->width + 7) >> 3); 2447 uint32_t cbDstLine = offDstLineEnd - offDstLineStart; 2448 uint32_t offDstStart = pDstDesc->pitch * pDstRectl->top + offDstLineStart; 2449 Assert(cbDstLine <= pDstDesc->pitch); 2450 uint32_t cbDstSkip = pDstDesc->pitch; 2451 uint8_t *pbDstStart = pbDstSurf + offDstStart; 2452 2453 uint32_t offSrcLineStart = pSrcRectl->left * pSrcDesc->bpp >> 3; 2454 # ifdef VBOX_STRICT 2455 uint32_t offSrcLineEnd = ((pSrcRectl->left * pSrcDesc->bpp + 7) >> 3) + ((pSrcDesc->bpp * pSrcRectl->width + 7) >> 3); 2456 uint32_t cbSrcLine = offSrcLineEnd - offSrcLineStart; 2457 # endif 2458 uint32_t offSrcStart = pSrcDesc->pitch * pSrcRectl->top + offSrcLineStart; 2459 Assert(cbSrcLine <= pSrcDesc->pitch); 2460 uint32_t cbSrcSkip = pSrcDesc->pitch; 2461 const uint8_t *pbSrcStart = pbSrcSurf + offSrcStart; 2462 2463 Assert(cbDstLine == cbSrcLine); 2464 2465 for (uint32_t i = 0; ; ++i) 2466 { 2467 if ( cbDstLine <= cbVRamSize 2468 && (uintptr_t)pbDstStart - (uintptr_t)pbRam <= cbVRamSize - cbDstLine 2469 && 
(uintptr_t)pbSrcStart - (uintptr_t)pbRam <= cbVRamSize - cbDstLine)
2470 {
2471 RT_UNTRUSTED_VALIDATED_FENCE(); /** @todo this could potentially be buzzkiller. */
2472 memcpy(pbDstStart, pbSrcStart, cbDstLine);
2473 }
2474 else
2475 return VERR_INVALID_PARAMETER;
2476 if (i == pDstRectl->height)
2477 break;
2478 pbDstStart += cbDstSkip;
2479 pbSrcStart += cbSrcSkip;
2480 }
2481 }
2482 return VINF_SUCCESS;
2483 }
2484 
2485 #if 0 /* unused */
2486 static void vboxVDMARectlUnite(VBOXVDMA_RECTL * pRectl1, const VBOXVDMA_RECTL * pRectl2)
2487 {
2488 if (!pRectl1->width)
2489 *pRectl1 = *pRectl2;
2490 else
2491 {
2492 int16_t x21 = pRectl1->left + pRectl1->width;
2493 int16_t x22 = pRectl2->left + pRectl2->width;
2494 if (pRectl1->left > pRectl2->left)
2495 {
2496 pRectl1->left = pRectl2->left;
2497 pRectl1->width = x21 < x22 ? x22 - pRectl1->left : x21 - pRectl1->left;
2498 }
2499 else if (x21 < x22)
2500 pRectl1->width = x22 - pRectl1->left;
2501 
2502 x21 = pRectl1->top + pRectl1->height;
2503 x22 = pRectl2->top + pRectl2->height;
2504 if (pRectl1->top > pRectl2->top)
2505 {
2506 pRectl1->top = pRectl2->top;
2507 pRectl1->height = x21 < x22 ? x22 - pRectl1->top : x21 - pRectl1->top;
2508 }
2509 else if (x21 < x22)
2510 pRectl1->height = x22 - pRectl1->top;
2511 }
2512 }
2513 #endif /* unused */
2514 
2515 /**
2516 * Handles VBOXVDMACMD_TYPE_DMA_PRESENT_BLT for vboxVDMACmdExec().
2517 *
2518 * @returns number of bytes (positive) of the full command on success,
2519 * otherwise a negative error status (VERR_XXX).
2520 *
2521 * @param pVdma The VDMA channel.
2522 * @param pBlt Blit command buffer. This is to be considered
2523 * volatile!
2524 * @param cbBuffer Number of bytes accessible at @a pBlt.
2525 */
2526 static int vboxVDMACmdExecBlt(PVBOXVDMAHOST pVdma, const VBOXVDMACMD_DMA_PRESENT_BLT RT_UNTRUSTED_VOLATILE_GUEST *pBlt,
2527 uint32_t cbBuffer)
2528 {
2529 /*
2530 * Validate and make a local copy of the blt command up to the rectangle array.
2531 */
2532 AssertReturn(cbBuffer >= RT_UOFFSETOF(VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects), VERR_INVALID_PARAMETER);
2533 VBOXVDMACMD_DMA_PRESENT_BLT BltSafe;
2534 RT_BCOPY_VOLATILE(&BltSafe, (void const *)pBlt, RT_UOFFSETOF(VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects));
2535 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
2536 
2537 AssertReturn(BltSafe.cDstSubRects < _8M, VERR_INVALID_PARAMETER);
2538 uint32_t const cbBlt = RT_UOFFSETOF_DYN(VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects[BltSafe.cDstSubRects]);
2539 AssertReturn(cbBuffer >= cbBlt, VERR_INVALID_PARAMETER);
2540 
2541 /*
2542 * We do not support stretching.
2543 */
2544 AssertReturn(BltSafe.srcRectl.width == BltSafe.dstRectl.width, VERR_INVALID_FUNCTION);
2545 AssertReturn(BltSafe.srcRectl.height == BltSafe.dstRectl.height, VERR_INVALID_FUNCTION);
2546 
2547 Assert(BltSafe.cDstSubRects);
2548 
2549 RT_UNTRUSTED_VALIDATED_FENCE();
2550 
2551 /*
2552 * Do the work. 
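Each memcpy in the blit worker above is guarded by the same check: the byte offset of the copy target from the VRAM base, computed as an unsigned pointer difference, must leave room for the copy length; the unsigned wrap-around also rejects pointers below the base. The check in isolation, with hypothetical names:

    #include <stdint.h>
    #include <stdbool.h>

    /* True if [pb, pb + cbCopy) lies entirely inside [pbBase, pbBase + cbArea). */
    static bool toyInsideArea(const uint8_t *pbBase, uint32_t cbArea,
                              const uint8_t *pb, uint32_t cbCopy)
    {
        return cbCopy <= cbArea
            && (uintptr_t)pb - (uintptr_t)pbBase <= (uintptr_t)(cbArea - cbCopy);
    }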
2553 */ 2554 //VBOXVDMA_RECTL updateRectl = {0, 0, 0, 0}; - pointless 2555 if (BltSafe.cDstSubRects) 2556 { 2557 for (uint32_t i = 0; i < BltSafe.cDstSubRects; ++i) 2558 { 2559 VBOXVDMA_RECTL dstSubRectl; 2560 dstSubRectl.left = pBlt->aDstSubRects[i].left; 2561 dstSubRectl.top = pBlt->aDstSubRects[i].top; 2562 dstSubRectl.width = pBlt->aDstSubRects[i].width; 2563 dstSubRectl.height = pBlt->aDstSubRects[i].height; 2564 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE(); 2565 2566 VBOXVDMA_RECTL srcSubRectl = dstSubRectl; 2567 2568 dstSubRectl.left += BltSafe.dstRectl.left; 2569 dstSubRectl.top += BltSafe.dstRectl.top; 2570 2571 srcSubRectl.left += BltSafe.srcRectl.left; 2572 srcSubRectl.top += BltSafe.srcRectl.top; 2573 2574 int rc = vboxVDMACmdExecBltPerform(pVdma, BltSafe.offDst, BltSafe.offSrc, &BltSafe.dstDesc, &BltSafe.srcDesc, 2575 &dstSubRectl, &srcSubRectl); 2576 AssertRCReturn(rc, rc); 2577 2578 //vboxVDMARectlUnite(&updateRectl, &dstSubRectl); - pointless 2579 } 2580 } 2581 else 2582 { 2583 int rc = vboxVDMACmdExecBltPerform(pVdma, BltSafe.offDst, BltSafe.offSrc, &BltSafe.dstDesc, &BltSafe.srcDesc, 2584 &BltSafe.dstRectl, &BltSafe.srcRectl); 2585 AssertRCReturn(rc, rc); 2586 2587 //vboxVDMARectlUnite(&updateRectl, &BltSafe.dstRectl); - pointless 2588 } 2589 2590 return cbBlt; 2591 } 2592 2593 2594 /** 2595 * Handles VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER for vboxVDMACmdCheckCrCmd() and 2596 * vboxVDMACmdExec(). 2597 * 2598 * @returns number of bytes (positive) of the full command on success, 2599 * otherwise a negative error status (VERR_XXX). 2600 * 2601 * @param pVdma The VDMA channel. 2602 * @param pTransfer Transfer command buffer. This is to be considered 2603 * volatile! 2604 * @param cbBuffer Number of bytes accessible at @a pTransfer. 2605 */ 2606 static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, VBOXVDMACMD_DMA_BPB_TRANSFER RT_UNTRUSTED_VOLATILE_GUEST *pTransfer, 2607 uint32_t cbBuffer) 2608 { 2609 /* 2610 * Make a copy of the command (it's volatile). 2611 */ 2612 AssertReturn(cbBuffer >= sizeof(*pTransfer), VERR_INVALID_PARAMETER); 2613 VBOXVDMACMD_DMA_BPB_TRANSFER TransferSafeCopy; 2614 RT_COPY_VOLATILE(TransferSafeCopy, *pTransfer); 2615 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE(); 2616 2617 PVGASTATE pVGAState = pVdma->pVGAState; 2618 PPDMDEVINS pDevIns = pVGAState->pDevInsR3; 2619 uint8_t *pbRam = pVGAState->vram_ptrR3; 2620 uint32_t cbTransfer = TransferSafeCopy.cbTransferSize; 2621 2622 /* 2623 * Validate VRAM offset. 2624 */ 2625 if (TransferSafeCopy.fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET) 2626 AssertReturn( cbTransfer <= pVGAState->vram_size 2627 && TransferSafeCopy.Src.offVramBuf <= pVGAState->vram_size - cbTransfer, 2628 VERR_INVALID_PARAMETER); 2629 2630 if (TransferSafeCopy.fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET) 2631 AssertReturn( cbTransfer <= pVGAState->vram_size 2632 && TransferSafeCopy.Dst.offVramBuf <= pVGAState->vram_size - cbTransfer, 2633 VERR_INVALID_PARAMETER); 2634 RT_UNTRUSTED_VALIDATED_FENCE(); 2635 2636 /* 2637 * Transfer loop. 
2638 */
2639 uint32_t cbTransfered = 0;
2640 int rc = VINF_SUCCESS;
2641 do
2642 {
2643 uint32_t cbSubTransfer = cbTransfer;
2644 
2645 const void *pvSrc;
2646 bool fSrcLocked = false;
2647 PGMPAGEMAPLOCK SrcLock;
2648 if (TransferSafeCopy.fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET)
2649 pvSrc = pbRam + TransferSafeCopy.Src.offVramBuf + cbTransfered;
2650 else
2651 {
2652 RTGCPHYS GCPhysSrcPage = TransferSafeCopy.Src.phBuf + cbTransfered;
2653 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, GCPhysSrcPage, 0, &pvSrc, &SrcLock);
2654 AssertRC(rc);
2655 if (RT_SUCCESS(rc))
2656 {
2657 fSrcLocked = true;
2658 cbSubTransfer = RT_MIN(cbSubTransfer, X86_PAGE_SIZE - (uint32_t)(GCPhysSrcPage & X86_PAGE_OFFSET_MASK));
2659 }
2660 else
2661 break;
2662 }
2663 
2664 void *pvDst;
2665 PGMPAGEMAPLOCK DstLock;
2666 bool fDstLocked = false;
2667 if (TransferSafeCopy.fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET)
2668 pvDst = pbRam + TransferSafeCopy.Dst.offVramBuf + cbTransfered;
2669 else
2670 {
2671 RTGCPHYS GCPhysDstPage = TransferSafeCopy.Dst.phBuf + cbTransfered;
2672 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, GCPhysDstPage, 0, &pvDst, &DstLock);
2673 AssertRC(rc);
2674 if (RT_SUCCESS(rc))
2675 {
2676 fDstLocked = true;
2677 cbSubTransfer = RT_MIN(cbSubTransfer, X86_PAGE_SIZE - (uint32_t)(GCPhysDstPage & X86_PAGE_OFFSET_MASK));
2678 }
2679 }
2680 
2681 if (RT_SUCCESS(rc))
2682 {
2683 memcpy(pvDst, pvSrc, cbSubTransfer);
2684 cbTransfered += cbSubTransfer;
2685 cbTransfer -= cbSubTransfer;
2686 }
2687 else
2688 cbTransfer = 0; /* force break below */
2689 
2690 if (fSrcLocked)
2691 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &SrcLock);
2692 if (fDstLocked)
2693 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &DstLock);
2694 } while (cbTransfer);
2695 
2696 if (RT_SUCCESS(rc))
2697 return sizeof(TransferSafeCopy);
2698 return rc;
2699 }
2700 
2701 /**
2702 * Worker for vboxVDMACommandProcess().
2703 *
2704 * @param pVdma The VDMA channel.
2705 * @param pbBuffer Command buffer, considered volatile.
2706 * @param cbBuffer The number of bytes at @a pbBuffer.
2707 * @param pCmdDr The command. For setting the async flag on chromium
2708 * requests.
2709 * @param pfAsyncCmd Flag to set if async command completion on chromium
2710 * requests. Its input state is false, so it only ever needs to
2711 * be set to true. 
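The transfer loop above acquires up to two page-mapping locks per iteration and tracks each with its own flag, so that exactly the locks taken are released on every path out of the body. A compact stand-alone sketch of that shape (toy mapping stubs standing in for the PDM page-lock API):

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    typedef struct TOYLOCK { void *pv; } TOYLOCK;
    static uint8_t g_abSrc[4096], g_abDst[4096];

    /* Stub mapper standing in for PDMDevHlpPhysGCPhys2CCPtr and friends. */
    static bool toyMapPage(uint64_t GCPhys, void **ppv, uint8_t *pbBacking, TOYLOCK *pLock)
    {
        (void)GCPhys;
        *ppv = pLock->pv = pbBacking;
        return true;
    }
    static void toyUnmapPage(TOYLOCK *pLock) { pLock->pv = NULL; }

    static int toyCopyChunk(uint64_t physSrc, uint64_t physDst, uint32_t cb)
    {
        void *pvSrc = NULL, *pvDst = NULL;
        TOYLOCK SrcLock, DstLock;
        bool fSrcLocked = toyMapPage(physSrc, &pvSrc, g_abSrc, &SrcLock);
        bool fDstLocked = fSrcLocked && toyMapPage(physDst, &pvDst, g_abDst, &DstLock);

        int rc = -1;
        if (fSrcLocked && fDstLocked && cb <= sizeof(g_abSrc))
        {
            memcpy(pvDst, pvSrc, cb);
            rc = 0;
        }
        if (fDstLocked) toyUnmapPage(&DstLock);  /* release exactly what was taken, */
        if (fSrcLocked) toyUnmapPage(&SrcLock);  /* on every path out */
        return rc;
    }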
2712 */ 2713 static int vboxVDMACmdExec(PVBOXVDMAHOST pVdma, uint8_t const RT_UNTRUSTED_VOLATILE_GUEST *pbBuffer, uint32_t cbBuffer, 2714 VBOXVDMACBUF_DR RT_UNTRUSTED_VOLATILE_GUEST *pCmdDr, bool *pfAsyncCmd) 2715 { 2716 AssertReturn(pbBuffer, VERR_INVALID_POINTER); 2717 2718 for (;;) 2719 { 2720 AssertReturn(cbBuffer >= VBOXVDMACMD_HEADER_SIZE(), VERR_INVALID_PARAMETER); 2721 2722 VBOXVDMACMD const RT_UNTRUSTED_VOLATILE_GUEST *pCmd = (VBOXVDMACMD const RT_UNTRUSTED_VOLATILE_GUEST *)pbBuffer; 2723 VBOXVDMACMD_TYPE enmCmdType = pCmd->enmType; 2724 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE(); 2725 2726 ASSERT_GUEST_MSG_RETURN( enmCmdType == VBOXVDMACMD_TYPE_CHROMIUM_CMD 2727 || enmCmdType == VBOXVDMACMD_TYPE_DMA_PRESENT_BLT 2728 || enmCmdType == VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER 2729 || enmCmdType == VBOXVDMACMD_TYPE_DMA_NOP 2730 || enmCmdType == VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ, 2731 ("enmCmdType=%d\n", enmCmdType), 2732 VERR_INVALID_FUNCTION); 2733 RT_UNTRUSTED_VALIDATED_FENCE(); 2734 2735 int cbProcessed; 2736 switch (enmCmdType) 2737 { 2738 case VBOXVDMACMD_TYPE_CHROMIUM_CMD: 2739 { 2740 VBOXVDMACMD_CHROMIUM_CMD RT_UNTRUSTED_VOLATILE_GUEST *pCrCmd = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_CHROMIUM_CMD); 2741 uint32_t const cbBody = VBOXVDMACMD_BODY_SIZE(cbBuffer); 2742 AssertReturn(cbBody >= sizeof(*pCrCmd), VERR_INVALID_PARAMETER); 2743 2744 PVGASTATE pVGAState = pVdma->pVGAState; 2745 AssertReturn(pVGAState->pDrv->pfnCrHgsmiCommandProcess, VERR_NOT_SUPPORTED); 2746 2747 VBoxSHGSMICommandMarkAsynchCompletion(pCmdDr); 2748 pVGAState->pDrv->pfnCrHgsmiCommandProcess(pVGAState->pDrv, pCrCmd, cbBody); 2749 *pfAsyncCmd = true; 2750 return VINF_SUCCESS; 2751 } 2752 2753 case VBOXVDMACMD_TYPE_DMA_PRESENT_BLT: 2754 { 2755 VBOXVDMACMD_DMA_PRESENT_BLT RT_UNTRUSTED_VOLATILE_GUEST *pBlt = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_PRESENT_BLT); 2756 cbProcessed = vboxVDMACmdExecBlt(pVdma, pBlt, cbBuffer - VBOXVDMACMD_HEADER_SIZE()); 2757 Assert(cbProcessed >= 0); 2758 break; 2759 } 2760 2761 case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER: 2762 { 2763 VBOXVDMACMD_DMA_BPB_TRANSFER RT_UNTRUSTED_VOLATILE_GUEST *pTransfer 2764 = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_BPB_TRANSFER); 2765 cbProcessed = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, cbBuffer - VBOXVDMACMD_HEADER_SIZE()); 2766 Assert(cbProcessed >= 0); 2767 break; 2768 } 2769 2770 case VBOXVDMACMD_TYPE_DMA_NOP: 2771 return VINF_SUCCESS; 2772 2773 case VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ: 2774 return VINF_SUCCESS; 2775 2776 default: 2777 AssertFailedReturn(VERR_INVALID_FUNCTION); 2778 } 2779 2780 /* Advance buffer or return. */ 2781 if (cbProcessed >= 0) 2782 { 2783 Assert(cbProcessed > 0); 2784 cbProcessed += VBOXVDMACMD_HEADER_SIZE(); 2785 if ((uint32_t)cbProcessed >= cbBuffer) 2786 { 2787 Assert((uint32_t)cbProcessed == cbBuffer); 2788 return VINF_SUCCESS; 2789 } 2790 2791 cbBuffer -= cbProcessed; 2792 pbBuffer += cbProcessed; 2793 } 2794 else 2795 { 2796 RT_UNTRUSTED_VALIDATED_FENCE(); 2797 return cbProcessed; /* error status */ 2798 } 2799 } 2800 } 2801 2802 /** 2803 * VDMA worker thread procedure, see vdmaVBVACtlEnableSubmitInternal(). 
2804 *
2805 * @thread VDMA
2806 */
2807 static DECLCALLBACK(int) vboxVDMAWorkerThread(RTTHREAD hThreadSelf, void *pvUser)
2808 {
2809 RT_NOREF(hThreadSelf);
2810 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvUser;
2811 PVGASTATE pVGAState = pVdma->pVGAState;
2812 VBVAEXHOSTCONTEXT *pCmdVbva = &pVdma->CmdVbva;
2813 int rc;
2814 
2815 VBoxVDMAThreadNotifyConstructSucceeded(&pVdma->Thread, pvUser);
2816 
2817 while (!VBoxVDMAThreadIsTerminating(&pVdma->Thread))
2818 {
2819 uint8_t RT_UNTRUSTED_VOLATILE_GUEST *pbCmd = NULL;
2820 uint32_t cbCmd = 0;
2821 VBVAEXHOST_DATA_TYPE enmType = VBoxVBVAExHPDataGet(pCmdVbva, &pbCmd, &cbCmd);
2822 switch (enmType)
2823 {
2824 case VBVAEXHOST_DATA_TYPE_CMD:
2825 vboxVDMACrCmdProcess(pVdma, pbCmd, cbCmd);
2826 VBoxVBVAExHPDataCompleteCmd(pCmdVbva, cbCmd);
2827 VBVARaiseIrq(pVGAState, 0);
2828 break;
2829 
2830 case VBVAEXHOST_DATA_TYPE_GUESTCTL:
2831 rc = vboxVDMACrGuestCtlProcess(pVdma, (VBVAEXHOSTCTL *)pbCmd);
2832 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL *)pbCmd, rc);
2833 break;
2834 
2835 case VBVAEXHOST_DATA_TYPE_HOSTCTL:
2836 {
2837 bool fContinue = true;
2838 rc = vboxVDMACrHostCtlProcess(pVdma, (VBVAEXHOSTCTL *)pbCmd, &fContinue);
2839 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL *)pbCmd, rc);
2840 if (fContinue)
2841 break;
2842 }
2843 RT_FALL_THRU();
2844 
2845 case VBVAEXHOST_DATA_TYPE_NO_DATA:
2846 rc = RTSemEventWaitNoResume(pVdma->Thread.hEvent, RT_INDEFINITE_WAIT);
2847 AssertMsg(RT_SUCCESS(rc) || rc == VERR_INTERRUPTED, ("%Rrc\n", rc));
2848 break;
2849 
2850 default:
2851 WARN(("unexpected type %d\n", enmType));
2852 break;
2853 }
2854 }
2855 
2856 VBoxVDMAThreadNotifyTerminatingSucceeded(&pVdma->Thread, pvUser);
2857 
2858 return VINF_SUCCESS;
2859 }
2860 
2861 /**
2862 * Worker for vboxVDMACommand().
2863 *
2864 * @returns VBox status code of the operation.
2865 * @param pVdma VDMA instance data.
2866 * @param pCmd The command to process. Consider content volatile.
2867 * @param cbCmd Number of valid bytes at @a pCmd. This is at least
2868 * sizeof(VBOXVDMACBUF_DR).
2869 * @param pfAsyncCmd Flag to set if async command completion on chromium
2870 * requests. Its input state is false, so it only ever needs to
2871 * be set to true.
2872 * @thread EMT
2873 */
2874 static int vboxVDMACommandProcess(PVBOXVDMAHOST pVdma, VBOXVDMACBUF_DR RT_UNTRUSTED_VOLATILE_GUEST *pCmd,
2875 uint32_t cbCmd, bool *pfAsyncCmd)
2876 {
2877 /*
2878 * Get the command buffer (volatile). 
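The worker above is a drain-then-block loop: it keeps fetching ring items until it sees "no data", and only then sleeps on the event that submissions signal. A skeleton of the same shape with stubbed-out plumbing (illustrative only, not the VBVAEXHOST API; the stub terminates after one wait so the sketch runs to completion):

    #include <stdbool.h>

    typedef enum { TOY_CMD, TOY_CTL, TOY_NO_DATA } TOYTYPE;

    static bool    g_fTerminate;                                 /* set by the owner */
    static TOYTYPE toyGetNext(void)      { return TOY_NO_DATA; } /* stub dequeue */
    static void    toyProcess(TOYTYPE t) { (void)t; }            /* stub handler */
    static void    toyWaitForWork(void)  { g_fTerminate = true; }/* stub block */

    static void toyWorkerLoop(void)
    {
        while (!g_fTerminate)
        {
            TOYTYPE enmType = toyGetNext();
            if (enmType == TOY_NO_DATA)
                toyWaitForWork();   /* block only once the ring is drained */
            else
                toyProcess(enmType);
        }
    }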
2879 */ 2880 uint16_t const cbCmdBuf = pCmd->cbBuf; 2881 uint16_t const fCmdFlags = pCmd->fFlags; 2882 uint64_t const offVramBuf_or_GCPhysBuf = pCmd->Location.offVramBuf; 2883 AssertCompile(sizeof(pCmd->Location.offVramBuf) == sizeof(pCmd->Location.phBuf)); 2884 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE(); 2885 2886 const uint8_t RT_UNTRUSTED_VOLATILE_GUEST *pbCmdBuf; 2887 PGMPAGEMAPLOCK Lock; 2888 bool fReleaseLocked = false; 2889 if (fCmdFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR) 2890 { 2891 pbCmdBuf = VBOXVDMACBUF_DR_TAIL(pCmd, const uint8_t); 2892 AssertReturn((uintptr_t)&pbCmdBuf[cbCmdBuf] <= (uintptr_t)&((uint8_t *)pCmd)[cbCmd], 2893 VERR_INVALID_PARAMETER); 2894 RT_UNTRUSTED_VALIDATED_FENCE(); 2895 } 2896 else if (fCmdFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET) 2897 { 2898 AssertReturn( offVramBuf_or_GCPhysBuf <= pVdma->pVGAState->vram_size 2899 && offVramBuf_or_GCPhysBuf + cbCmdBuf <= pVdma->pVGAState->vram_size, 2900 VERR_INVALID_PARAMETER); 2901 RT_UNTRUSTED_VALIDATED_FENCE(); 2902 2903 pbCmdBuf = (uint8_t const RT_UNTRUSTED_VOLATILE_GUEST *)pVdma->pVGAState->vram_ptrR3 + offVramBuf_or_GCPhysBuf; 2904 } 2905 else 2906 { 2907 /* Make sure it doesn't cross a page. */ 2908 AssertReturn((uint32_t)(offVramBuf_or_GCPhysBuf & X86_PAGE_OFFSET_MASK) + cbCmdBuf <= (uint32_t)X86_PAGE_SIZE, 2909 VERR_INVALID_PARAMETER); 2910 RT_UNTRUSTED_VALIDATED_FENCE(); 2911 2912 int rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pVdma->pVGAState->pDevInsR3, offVramBuf_or_GCPhysBuf, 0 /*fFlags*/, 2913 (const void **)&pbCmdBuf, &Lock); 2914 AssertRCReturn(rc, rc); /* if (rc == VERR_PGM_PHYS_PAGE_RESERVED) -> fall back on using PGMPhysRead ?? */ 2915 fReleaseLocked = true; 2916 } 2917 2918 /* 2919 * Process the command. 2920 */ 2921 int rc = vboxVDMACmdExec(pVdma, pbCmdBuf, cbCmdBuf, pCmd, pfAsyncCmd); 2922 AssertRC(rc); 2923 2924 /* Clean up comand buffer. */ 2925 if (fReleaseLocked) 2926 PDMDevHlpPhysReleasePageMappingLock(pVdma->pVGAState->pDevInsR3, &Lock); 2927 return rc; 2928 } 2929 2930 # if 0 /** @todo vboxVDMAControlProcess is unused */ 2931 static void vboxVDMAControlProcess(PVBOXVDMAHOST pVdma, PVBOXVDMA_CTL pCmd) 2932 { 2933 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi; 2934 pCmd->i32Result = VINF_SUCCESS; 2935 int rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd); 2936 AssertRC(rc); 2937 } 2938 # endif 199 2939 200 2940 #ifdef VBOX_VDMA_WITH_WATCHDOG … … 251 2991 if (RT_SUCCESS(rc)) 252 2992 { 253 pVGAState->pVdma = pVdma; 254 return VINF_SUCCESS; 2993 VBoxVDMAThreadInit(&pVdma->Thread); 2994 2995 rc = RTSemEventMultiCreate(&pVdma->HostCrCtlCompleteEvent); 2996 if (RT_SUCCESS(rc)) 2997 { 2998 rc = VBoxVBVAExHSInit(&pVdma->CmdVbva); 2999 if (RT_SUCCESS(rc)) 3000 { 3001 rc = RTCritSectInit(&pVdma->CalloutCritSect); 3002 if (RT_SUCCESS(rc)) 3003 { 3004 pVGAState->pVdma = pVdma; 3005 3006 /* No HGCM service if VMSVGA is enabled. */ 3007 if (!pVGAState->fVMSVGAEnabled) 3008 { 3009 int rcIgnored = vboxVDMACrCtlHgsmiSetup(pVdma); NOREF(rcIgnored); /** @todo is this ignoring intentional? 
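vboxVDMACommandProcess() above resolves the command buffer from one of three locations, selected by flags: inline after the descriptor, an offset into VRAM, or a guest-physical address that still needs a page mapping. A sketch of just that dispatch (toy flag names; the real code validates each case before any pointer is used):

    #include <stdint.h>
    #include <stddef.h>

    #define TOY_F_FOLLOWS_DR  0x1u  /* buffer follows the descriptor */
    #define TOY_F_VRAM_OFFSET 0x2u  /* buffer is an offset into VRAM */

    static const uint8_t *toyResolveBuf(const uint8_t *pbDesc, size_t cbDesc,
                                        uint32_t fFlags, uint64_t uLocation,
                                        const uint8_t *pbVram)
    {
        if (fFlags & TOY_F_FOLLOWS_DR)
            return pbDesc + cbDesc;        /* tail of the descriptor itself */
        if (fFlags & TOY_F_VRAM_OFFSET)
            return pbVram + uLocation;     /* offset into the VRAM mapping */
        return NULL;                       /* guest physical: needs a page mapping */
    }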
*/ 3010 } 3011 return VINF_SUCCESS; 3012 } 3013 3014 WARN(("RTCritSectInit failed %Rrc\n", rc)); 3015 VBoxVBVAExHSTerm(&pVdma->CmdVbva); 3016 } 3017 else 3018 WARN(("VBoxVBVAExHSInit failed %Rrc\n", rc)); 3019 RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent); 3020 } 3021 else 3022 WARN(("RTSemEventMultiCreate failed %Rrc\n", rc)); 3023 3024 /* the timer is cleaned up automatically */ 255 3025 } 256 3026 RTMemFree(pVdma); … … 266 3036 void vboxVDMAReset(struct VBOXVDMAHOST *pVdma) 267 3037 { 268 RT_NOREF(pVdma);3038 vdmaVBVACtlDisableSync(pVdma); 269 3039 } 270 3040 … … 276 3046 if (!pVdma) 277 3047 return; 3048 3049 if (pVdma->pVGAState->fVMSVGAEnabled) 3050 VBoxVBVAExHSDisable(&pVdma->CmdVbva); 3051 else 3052 { 3053 /** @todo Remove. It does nothing because pVdma->CmdVbva is already disabled at this point 3054 * as the result of the SharedOpenGL HGCM service unloading. 3055 */ 3056 vdmaVBVACtlDisableSync(pVdma); 3057 } 3058 VBoxVDMAThreadCleanup(&pVdma->Thread); 3059 VBoxVBVAExHSTerm(&pVdma->CmdVbva); 3060 RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent); 3061 RTCritSectDelete(&pVdma->CalloutCritSect); 278 3062 RTMemFree(pVdma); 279 3063 } … … 347 3131 */ 348 3132 bool fAsyncCmd = false; 349 RT_NOREF(cbCmd); 350 int rc = VERR_NOT_IMPLEMENTED; 3133 int rc = vboxVDMACommandProcess(pVdma, pCmd, cbCmd, &fAsyncCmd); 351 3134 352 3135 /* … … 361 3144 } 362 3145 3146 3147 /** 3148 * @callback_method_impl{FNVBVAEXHOSTCTL_COMPLETE, 3149 * Used by vdmaVBVACtlEnableDisableSubmit() and vdmaVBVACtlEnableDisableSubmit() } 3150 */ 3151 static DECLCALLBACK(void) vboxCmdVBVACmdCtlGuestCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, 3152 int rc, void *pvContext) 3153 { 3154 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvContext; 3155 VBOXCMDVBVA_CTL RT_UNTRUSTED_VOLATILE_GUEST *pGCtl 3156 = (VBOXCMDVBVA_CTL RT_UNTRUSTED_VOLATILE_GUEST *)((uintptr_t)pCtl->u.cmd.pvCmd - sizeof(VBOXCMDVBVA_CTL)); 3157 AssertRC(rc); 3158 pGCtl->i32Result = rc; 3159 3160 Assert(pVdma->pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD); 3161 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pGCtl); 3162 AssertRC(rc); 3163 3164 VBoxVBVAExHCtlFree(pVbva, pCtl); 3165 } 3166 3167 /** 3168 * Worker for vdmaVBVACtlGenericGuestSubmit() and vdmaVBVACtlOpaqueHostSubmit(). 3169 */ 3170 static int vdmaVBVACtlGenericSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_SOURCE enmSource, VBVAEXHOSTCTL_TYPE enmType, 3171 uint8_t RT_UNTRUSTED_VOLATILE_GUEST *pbCmd, uint32_t cbCmd, 3172 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete) 3173 { 3174 int rc; 3175 VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, enmType); 3176 if (pHCtl) 3177 { 3178 pHCtl->u.cmd.pvCmd = pbCmd; 3179 pHCtl->u.cmd.cbCmd = cbCmd; 3180 rc = vdmaVBVACtlSubmit(pVdma, pHCtl, enmSource, pfnComplete, pvComplete); 3181 if (RT_SUCCESS(rc)) 3182 return VINF_SUCCESS; 3183 3184 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl); 3185 Log(("vdmaVBVACtlSubmit failed %Rrc\n", rc)); 3186 } 3187 else 3188 { 3189 WARN(("VBoxVBVAExHCtlCreate failed\n")); 3190 rc = VERR_NO_MEMORY; 3191 } 3192 return rc; 3193 } 3194 3195 /** 3196 * Handler for vboxCmdVBVACmdCtl()/VBOXCMDVBVACTL_TYPE_3DCTL. 3197 */ 3198 static int vdmaVBVACtlGenericGuestSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_TYPE enmType, 3199 VBOXCMDVBVA_CTL RT_UNTRUSTED_VOLATILE_GUEST *pCtl, uint32_t cbCtl) 3200 { 3201 Assert(cbCtl >= sizeof(VBOXCMDVBVA_CTL)); /* Checked by callers caller, vbvaChannelHandler(). 
*/ 3202 3203 VBoxSHGSMICommandMarkAsynchCompletion(pCtl); 3204 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_GUEST, enmType, 3205 (uint8_t RT_UNTRUSTED_VOLATILE_GUEST *)(pCtl + 1), 3206 cbCtl - sizeof(VBOXCMDVBVA_CTL), 3207 vboxCmdVBVACmdCtlGuestCompletion, pVdma); 3208 if (RT_SUCCESS(rc)) 3209 return VINF_SUCCESS; 3210 3211 WARN(("vdmaVBVACtlGenericSubmit failed %Rrc\n", rc)); 3212 pCtl->i32Result = rc; 3213 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl); 3214 AssertRC(rc); 3215 return VINF_SUCCESS; 3216 } 3217 3218 /** 3219 * @callback_method_impl{FNVBVAEXHOSTCTL_COMPLETE, Used by vdmaVBVACtlOpaqueHostSubmit()} 3220 */ 3221 static DECLCALLBACK(void) vboxCmdVBVACmdCtlHostCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, 3222 int rc, void *pvCompletion) 3223 { 3224 VBOXCRCMDCTL *pVboxCtl = (VBOXCRCMDCTL *)pCtl->u.cmd.pvCmd; 3225 if (pVboxCtl->u.pfnInternal) 3226 ((PFNCRCTLCOMPLETION)pVboxCtl->u.pfnInternal)(pVboxCtl, pCtl->u.cmd.cbCmd, rc, pvCompletion); 3227 VBoxVBVAExHCtlFree(pVbva, pCtl); 3228 } 3229 3230 /** 3231 * Worker for vboxCmdVBVACmdHostCtl() and vboxCmdVBVACmdHostCtlSync(). 3232 */ 3233 static int vdmaVBVACtlOpaqueHostSubmit(PVBOXVDMAHOST pVdma, struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd, 3234 PFNCRCTLCOMPLETION pfnCompletion, void *pvCompletion) 3235 { 3236 pCmd->u.pfnInternal = (PFNRT)pfnCompletion; 3237 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_HOST, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, 3238 (uint8_t *)pCmd, cbCmd, vboxCmdVBVACmdCtlHostCompletion, pvCompletion); 3239 if (RT_FAILURE(rc)) 3240 { 3241 if (rc == VERR_INVALID_STATE) 3242 { 3243 pCmd->u.pfnInternal = NULL; 3244 PVGASTATE pVGAState = pVdma->pVGAState; 3245 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCmd, cbCmd, pfnCompletion, pvCompletion); 3246 if (!RT_SUCCESS(rc)) 3247 WARN(("pfnCrHgsmiControlProcess failed %Rrc\n", rc)); 3248 3249 return rc; 3250 } 3251 WARN(("vdmaVBVACtlGenericSubmit failed %Rrc\n", rc)); 3252 return rc; 3253 } 3254 3255 return VINF_SUCCESS; 3256 } 3257 3258 /** 3259 * Called from vdmaVBVACtlThreadCreatedEnable(). 3260 */ 3261 static int vdmaVBVANotifyEnable(PVGASTATE pVGAState) 3262 { 3263 for (uint32_t i = 0; i < pVGAState->cMonitors; i++) 3264 { 3265 int rc = pVGAState->pDrv->pfnVBVAEnable (pVGAState->pDrv, i, NULL, true); 3266 if (!RT_SUCCESS(rc)) 3267 { 3268 WARN(("pfnVBVAEnable failed %Rrc\n", rc)); 3269 for (uint32_t j = 0; j < i; j++) 3270 { 3271 pVGAState->pDrv->pfnVBVADisable (pVGAState->pDrv, j); 3272 } 3273 3274 return rc; 3275 } 3276 } 3277 return VINF_SUCCESS; 3278 } 3279 3280 /** 3281 * Called from vdmaVBVACtlThreadCreatedEnable() and vdmaVBVADisableProcess(). 3282 */ 3283 static int vdmaVBVANotifyDisable(PVGASTATE pVGAState) 3284 { 3285 for (uint32_t i = 0; i < pVGAState->cMonitors; i++) 3286 pVGAState->pDrv->pfnVBVADisable(pVGAState->pDrv, i); 3287 return VINF_SUCCESS; 3288 } 3289 3290 /** 3291 * Hook that is called by vboxVDMAWorkerThread when it starts. 
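vdmaVBVANotifyEnable() above is all-or-nothing: if enabling monitor i fails, monitors 0..i-1 are disabled again so no half-enabled state leaks out. The same rollback shape in isolation (stubbed per-monitor operations):

    static int  toyEnableOne(unsigned iMonitor)  { (void)iMonitor; return 0; } /* stub */
    static void toyDisableOne(unsigned iMonitor) { (void)iMonitor; }           /* stub */

    static int toyEnableAll(unsigned cMonitors)
    {
        for (unsigned i = 0; i < cMonitors; i++)
            if (toyEnableOne(i) != 0)
            {
                while (i-- > 0)        /* roll back everything that succeeded */
                    toyDisableOne(i);
                return -1;
            }
        return 0;
    }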
3292 *
3293 * @thread VDMA
3294 */
3295 static DECLCALLBACK(void) vdmaVBVACtlThreadCreatedEnable(struct VBOXVDMATHREAD *pThread, int rc,
3296 void *pvThreadContext, void *pvContext)
3297 {
3298 RT_NOREF(pThread);
3299 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvThreadContext;
3300 VBVAEXHOSTCTL* pHCtl = (VBVAEXHOSTCTL*)pvContext;
3301 
3302 if (RT_SUCCESS(rc))
3303 {
3304 rc = vboxVDMACrGuestCtlProcess(pVdma, pHCtl);
3305 /* rc == VINF_SUCCESS would mean the actual state change has occurred. */
3306 if (rc == VINF_SUCCESS)
3307 {
3308 /* We need to inform Main about the VBVA enable/disable. Main expects
3309 * notifications to be done from the main thread, so submit the
3310 * notification there. */
3311 PVGASTATE pVGAState = pVdma->pVGAState;
3312 
3313 if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
3314 vdmaVBVANotifyEnable(pVGAState);
3315 else
3316 vdmaVBVANotifyDisable(pVGAState);
3317 }
3318 else if (RT_FAILURE(rc))
3319 WARN(("vboxVDMACrGuestCtlProcess failed %Rrc\n", rc));
3320 }
3321 else
3322 WARN(("vdmaVBVACtlThreadCreatedEnable is passed %Rrc\n", rc));
3323 
3324 VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pHCtl, rc);
3325 }
3326 
3327 /**
3328 * Worker for vdmaVBVACtlEnableDisableSubmitInternal() and vdmaVBVACtlEnableSubmitSync().
3329 */
3330 static int vdmaVBVACtlEnableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE RT_UNTRUSTED_VOLATILE_GUEST *pEnable, bool fPaused,
3331 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3332 {
3333 int rc;
3334 VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva,
3335 fPaused ? VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED : VBVAEXHOSTCTL_TYPE_GHH_ENABLE);
3336 if (pHCtl)
3337 {
3338 pHCtl->u.cmd.pvCmd = pEnable;
3339 pHCtl->u.cmd.cbCmd = sizeof(*pEnable);
3340 pHCtl->pfnComplete = pfnComplete;
3341 pHCtl->pvComplete = pvComplete;
3342 
3343 rc = VBoxVDMAThreadCreate(&pVdma->Thread, vboxVDMAWorkerThread, pVdma, vdmaVBVACtlThreadCreatedEnable, pHCtl);
3344 if (RT_SUCCESS(rc))
3345 return VINF_SUCCESS;
3346 
3347 WARN(("VBoxVDMAThreadCreate failed %Rrc\n", rc));
3348 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3349 }
3350 else
3351 {
3352 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3353 rc = VERR_NO_MEMORY;
3354 }
3355 
3356 return rc;
3357 }
3358 
3359 /**
3360 * Worker for vboxVDMASaveLoadExecPerform().
3361 */
3362 static int vdmaVBVACtlEnableSubmitSync(PVBOXVDMAHOST pVdma, uint32_t offVram, bool fPaused)
3363 {
3364 VBVAENABLE Enable = {0};
3365 Enable.u32Flags = VBVA_F_ENABLE;
3366 Enable.u32Offset = offVram;
3367 
3368 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
3369 Data.rc = VERR_NOT_IMPLEMENTED;
3370 int rc = RTSemEventCreate(&Data.hEvent);
3371 if (!RT_SUCCESS(rc))
3372 {
3373 WARN(("RTSemEventCreate failed %Rrc\n", rc));
3374 return rc;
3375 }
3376 
3377 rc = vdmaVBVACtlEnableSubmitInternal(pVdma, &Enable, fPaused, vdmaVBVACtlSubmitSyncCompletion, &Data);
3378 if (RT_SUCCESS(rc))
3379 {
3380 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
3381 if (RT_SUCCESS(rc))
3382 {
3383 rc = Data.rc;
3384 if (!RT_SUCCESS(rc))
3385 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %Rrc\n", rc));
3386 }
3387 else
3388 WARN(("RTSemEventWait failed %Rrc\n", rc));
3389 }
3390 else
3391 WARN(("vdmaVBVACtlSubmit failed %Rrc\n", rc));
3392 
3393 RTSemEventDestroy(Data.hEvent);
3394 
3395 return rc;
3396 }
3397 
3398 /**
3399 * Worker for vdmaVBVACtlEnableDisableSubmitInternal(). 
3400 */
3401 static int vdmaVBVACtlDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE RT_UNTRUSTED_VOLATILE_GUEST *pEnable,
3402 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3403 {
3404 int rc;
3405 if (VBoxVBVAExHSIsDisabled(&pVdma->CmdVbva))
3406 {
3407 WARN(("VBoxVBVAExHSIsDisabled: disabled"));
3408 return VINF_SUCCESS;
3409 }
3410 
3411 VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_GHH_DISABLE);
3412 if (!pHCtl)
3413 {
3414 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3415 return VERR_NO_MEMORY;
3416 }
3417 
3418 pHCtl->u.cmd.pvCmd = pEnable;
3419 pHCtl->u.cmd.cbCmd = sizeof(*pEnable);
3420 rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_GUEST, pfnComplete, pvComplete);
3421 if (RT_SUCCESS(rc))
3422 return VINF_SUCCESS;
3423 
3424 WARN(("vdmaVBVACtlSubmit failed rc %Rrc\n", rc));
3425 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3426 return rc;
3427 }
3428 
3429 /**
3430 * Worker for vdmaVBVACtlEnableDisableSubmit().
3431 */
3432 static int vdmaVBVACtlEnableDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE RT_UNTRUSTED_VOLATILE_GUEST *pEnable,
3433 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3434 {
3435 bool fEnable = (pEnable->u32Flags & (VBVA_F_ENABLE | VBVA_F_DISABLE)) == VBVA_F_ENABLE;
3436 if (fEnable)
3437 return vdmaVBVACtlEnableSubmitInternal(pVdma, pEnable, false, pfnComplete, pvComplete);
3438 return vdmaVBVACtlDisableSubmitInternal(pVdma, pEnable, pfnComplete, pvComplete);
3439 }
3440 
3441 /**
3442 * Handler for vboxCmdVBVACmdCtl()/VBOXCMDVBVACTL_TYPE_ENABLE.
3443 */
3444 static int vdmaVBVACtlEnableDisableSubmit(PVBOXVDMAHOST pVdma, VBOXCMDVBVA_CTL_ENABLE RT_UNTRUSTED_VOLATILE_GUEST *pEnable)
3445 {
3446 VBoxSHGSMICommandMarkAsynchCompletion(&pEnable->Hdr);
3447 int rc = vdmaVBVACtlEnableDisableSubmitInternal(pVdma, &pEnable->Enable, vboxCmdVBVACmdCtlGuestCompletion, pVdma);
3448 if (RT_SUCCESS(rc))
3449 return VINF_SUCCESS;
3450 
3451 WARN(("vdmaVBVACtlEnableDisableSubmitInternal failed %Rrc\n", rc));
3452 pEnable->Hdr.i32Result = rc;
3453 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, &pEnable->Hdr);
3454 AssertRC(rc);
3455 return VINF_SUCCESS;
3456 }
3457 
3458 /**
3459 * @callback_method_impl{FNVBVAEXHOSTCTL_COMPLETE,
3460 * Used by vdmaVBVACtlSubmitSync() and vdmaVBVACtlEnableSubmitSync().}
3461 */
3462 static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl,
3463 int rc, void *pvContext)
3464 {
3465 RT_NOREF(pVbva, pCtl);
3466 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION *)pvContext;
3467 pData->rc = rc;
3468 rc = RTSemEventSignal(pData->hEvent);
3469 if (!RT_SUCCESS(rc))
3470 WARN(("RTSemEventSignal failed %Rrc\n", rc));
3471 }
3472 
3473 
3474 /**
3475 * Submits a control command to the VDMA worker thread and waits for its completion.
3476 */
3477 static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL *pCtl, VBVAEXHOSTCTL_SOURCE enmSource)
3478 {
3479 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
3480 Data.rc = VERR_NOT_IMPLEMENTED;
3481 Data.hEvent = NIL_RTSEMEVENT;
3482 int rc = RTSemEventCreate(&Data.hEvent);
3483 if (RT_SUCCESS(rc))
3484 {
3485 rc = vdmaVBVACtlSubmit(pVdma, pCtl, enmSource, vdmaVBVACtlSubmitSyncCompletion, &Data);
3486 if (RT_SUCCESS(rc))
3487 {
3488 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
3489 if (RT_SUCCESS(rc))
3490 {
3491 rc = Data.rc;
3492 if (!RT_SUCCESS(rc))
3493 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %Rrc\n", rc));
3494 }
3495 else
3496 WARN(("RTSemEventWait failed %Rrc\n", rc));
3497 }
3498 else
3499 Log(("vdmaVBVACtlSubmit failed %Rrc\n", rc));
3500 
3501 RTSemEventDestroy(Data.hEvent); 3502 } 3503 else 3504 WARN(("RTSemEventCreate failed %Rrc\n", rc)); 3505 return rc; 3506 } 3507 3508 /** 3509 * Worker for vboxVDMASaveStateExecPrep(). 3510 */ 3511 static int vdmaVBVAPause(PVBOXVDMAHOST pVdma) 3512 { 3513 VBVAEXHOSTCTL Ctl; 3514 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE; 3515 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST); 3516 } 3517 3518 /** 3519 * Worker for vboxVDMASaveLoadExecPerform() and vboxVDMASaveStateExecDone(). 3520 */ 3521 static int vdmaVBVAResume(PVBOXVDMAHOST pVdma) 3522 { 3523 VBVAEXHOSTCTL Ctl; 3524 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME; 3525 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST); 3526 } 3527 3528 /** 3529 * Worker for vboxCmdVBVACmdSubmit(), vboxCmdVBVACmdFlush() and vboxCmdVBVATimerRefresh(). 3530 */ 3531 static int vboxVDMACmdSubmitPerform(struct VBOXVDMAHOST *pVdma) 3532 { 3533 int rc = VBoxVBVAExHSCheckCommands(&pVdma->CmdVbva); 3534 switch (rc) 3535 { 3536 case VINF_SUCCESS: 3537 return VBoxVDMAThreadEventNotify(&pVdma->Thread); 3538 case VINF_ALREADY_INITIALIZED: 3539 case VINF_EOF: 3540 case VERR_INVALID_STATE: 3541 return VINF_SUCCESS; 3542 default: 3543 Assert(!RT_FAILURE(rc)); 3544 return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR; 3545 } 3546 } 3547 3548 3549 /** 3550 * @interface_method_impl{PDMIDISPLAYVBVACALLBACKS,pfnCrCtlSubmit} 3551 */ 3552 int vboxCmdVBVACmdHostCtl(PPDMIDISPLAYVBVACALLBACKS pInterface, 3553 struct VBOXCRCMDCTL *pCmd, 3554 uint32_t cbCmd, 3555 PFNCRCTLCOMPLETION pfnCompletion, 3556 void *pvCompletion) 3557 { 3558 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface); 3559 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma; 3560 if (pVdma == NULL) 3561 return VERR_INVALID_STATE; 3562 pCmd->CalloutList.List.pNext = NULL; 3563 return vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, pfnCompletion, pvCompletion); 3564 } 3565 3566 /** 3567 * Argument package from vboxCmdVBVACmdHostCtlSync to vboxCmdVBVACmdHostCtlSyncCb. 3568 */ 3569 typedef struct VBOXCMDVBVA_CMDHOSTCTL_SYNC 3570 { 3571 struct VBOXVDMAHOST *pVdma; 3572 uint32_t fProcessing; 3573 int rc; 3574 } VBOXCMDVBVA_CMDHOSTCTL_SYNC; 3575 3576 /** 3577 * @interface_method_impl{FNCRCTLCOMPLETION, Used by vboxCmdVBVACmdHostCtlSync.} 3578 */ 3579 static DECLCALLBACK(void) vboxCmdVBVACmdHostCtlSyncCb(struct VBOXCRCMDCTL *pCmd, uint32_t cbCmd, int rc, void *pvCompletion) 3580 { 3581 RT_NOREF(pCmd, cbCmd); 3582 VBOXCMDVBVA_CMDHOSTCTL_SYNC *pData = (VBOXCMDVBVA_CMDHOSTCTL_SYNC *)pvCompletion; 3583 3584 pData->rc = rc; 3585 3586 struct VBOXVDMAHOST *pVdma = pData->pVdma; 3587 3588 ASMAtomicIncS32(&pVdma->i32cHostCrCtlCompleted); 3589 3590 pData->fProcessing = 0; 3591 3592 RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent); 3593 } 3594 3595 /** 3596 * @callback_method_impl{FNVBOXCRCLIENT_CALLOUT, Worker for vboxVDMACrCtlHgsmiSetup } 3597 * 3598 * @note r=bird: not to be confused with the callout function below. sigh. 
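vdmaVBVACtlSubmitSync() above turns the asynchronous control path into a synchronous call: the completion callback records the status and signals an event the submitter is blocked on. A stand-alone sketch of the same handshake using POSIX threads in place of the IPRT semaphore API:

    #include <pthread.h>

    typedef struct TOYSYNC
    {
        pthread_mutex_t Mtx;
        pthread_cond_t  Cond;
        int             fDone;
        int             rc;
    } TOYSYNC;

    static void toyComplete(TOYSYNC *p, int rc)   /* worker side */
    {
        pthread_mutex_lock(&p->Mtx);
        p->rc = rc;
        p->fDone = 1;
        pthread_cond_signal(&p->Cond);
        pthread_mutex_unlock(&p->Mtx);
    }

    static int toyWait(TOYSYNC *p)                /* submitter side */
    {
        pthread_mutex_lock(&p->Mtx);
        while (!p->fDone)
            pthread_cond_wait(&p->Cond, &p->Mtx);
        pthread_mutex_unlock(&p->Mtx);
        return p->rc;
    }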
3599 */
3600 static DECLCALLBACK(int) vboxCmdVBVACmdCallout(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd,
3601 VBOXCRCMDCTL_CALLOUT_LISTENTRY *pEntry, PFNVBOXCRCMDCTL_CALLOUT_CB pfnCb)
3602 {
3603 pEntry->pfnCb = pfnCb;
3604 int rc = RTCritSectEnter(&pVdma->CalloutCritSect);
3605 if (RT_SUCCESS(rc))
3606 {
3607 RTListAppend(&pCmd->CalloutList.List, &pEntry->Node);
3608 RTCritSectLeave(&pVdma->CalloutCritSect);
3609 
3610 RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
3611 }
3612 else
3613 WARN(("RTCritSectEnter failed %Rrc\n", rc));
3614 
3615 return rc;
3616 }
3617 
3618 
3619 /**
3620 * Worker for vboxCmdVBVACmdHostCtlSync().
3621 */
3622 static int vboxCmdVBVACmdCalloutProcess(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd)
3623 {
3624 int rc = VINF_SUCCESS;
3625 for (;;)
3626 {
3627 rc = RTCritSectEnter(&pVdma->CalloutCritSect);
3628 if (RT_SUCCESS(rc))
3629 {
3630 VBOXCRCMDCTL_CALLOUT_LISTENTRY* pEntry = RTListGetFirst(&pCmd->CalloutList.List, VBOXCRCMDCTL_CALLOUT_LISTENTRY, Node);
3631 if (pEntry)
3632 RTListNodeRemove(&pEntry->Node);
3633 RTCritSectLeave(&pVdma->CalloutCritSect);
3634 
3635 if (!pEntry)
3636 break;
3637 
3638 pEntry->pfnCb(pEntry);
3639 }
3640 else
3641 {
3642 WARN(("RTCritSectEnter failed %Rrc\n", rc));
3643 break;
3644 }
3645 }
3646 
3647 return rc;
3648 }
3649 
3650 /**
3651 * @interface_method_impl{PDMIDISPLAYVBVACALLBACKS,pfnCrCtlSubmitSync}
3652 */
3653 DECLCALLBACK(int) vboxCmdVBVACmdHostCtlSync(PPDMIDISPLAYVBVACALLBACKS pInterface, struct VBOXCRCMDCTL *pCmd, uint32_t cbCmd)
3654 {
3655 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
3656 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3657 if (pVdma == NULL)
3658 return VERR_INVALID_STATE;
3659 
3660 VBOXCMDVBVA_CMDHOSTCTL_SYNC Data;
3661 Data.pVdma = pVdma;
3662 Data.fProcessing = 1;
3663 Data.rc = VERR_INTERNAL_ERROR;
3664 RTListInit(&pCmd->CalloutList.List);
3665 int rc = vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, vboxCmdVBVACmdHostCtlSyncCb, &Data);
3666 if (!RT_SUCCESS(rc))
3667 {
3668 WARN(("vdmaVBVACtlOpaqueHostSubmit failed %Rrc", rc));
3669 return rc;
3670 }
3671 
3672 while (Data.fProcessing)
3673 {
3674 /* Poll infrequently to make sure no completed message has been missed. */
3675 RTSemEventMultiWait(pVdma->HostCrCtlCompleteEvent, 500);
3676 
3677 vboxCmdVBVACmdCalloutProcess(pVdma, pCmd);
3678 
3679 if (Data.fProcessing)
3680 RTThreadYield();
3681 }
3682 
3683 /* One final callout check in case we raced a completion. */
3684 vboxCmdVBVACmdCalloutProcess(pVdma, pCmd);
3685 
3686 /* 'Our' message has been processed, so we should reset the semaphore.
3687 * It is still possible that another message has been processed
3688 * and the semaphore has been signalled again.
3689 * Reset only if no other messages have completed.
3690 */
3691 int32_t c = ASMAtomicDecS32(&pVdma->i32cHostCrCtlCompleted);
3692 Assert(c >= 0);
3693 if (!c)
3694 RTSemEventMultiReset(pVdma->HostCrCtlCompleteEvent);
3695 
3696 rc = Data.rc;
3697 if (!RT_SUCCESS(rc))
3698 WARN(("host call failed %Rrc", rc));
3699 
3700 return rc;
3701 }
3702 
3703 /**
3704 * Handler for VBVA_CMDVBVA_CTL, see vbvaChannelHandler().
3705 *
3706 * @returns VBox status code.
3707 * @param pVGAState The VGA state.
3708 * @param pCtl The control command.
3709 * @param cbCtl The size of it. This is at least
3710 * sizeof(VBOXCMDVBVA_CTL). 
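The reset rule at the end of vboxCmdVBVACmdHostCtlSync() above deserves a note: because the manual-reset event is shared by many completions, it may only be reset once no completion remains unacknowledged, hence the atomic counter. The idiom in isolation (C11 atomics, illustrative names):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int g_cCompleted; /* completions not yet acknowledged */

    static void toyOnCompletion(void)  { atomic_fetch_add(&g_cCompleted, 1); /* ...then signal the event */ }

    /* Returns true when the caller may reset the shared manual-reset event. */
    static bool toyAckCompletion(void) { return atomic_fetch_sub(&g_cCompleted, 1) == 1; }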
3711 * @thread EMT 3712 */ 3713 int vboxCmdVBVACmdCtl(PVGASTATE pVGAState, VBOXCMDVBVA_CTL RT_UNTRUSTED_VOLATILE_GUEST *pCtl, uint32_t cbCtl) 3714 { 3715 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma; 3716 uint32_t uType = pCtl->u32Type; 3717 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE(); 3718 3719 if ( uType == VBOXCMDVBVACTL_TYPE_3DCTL 3720 || uType == VBOXCMDVBVACTL_TYPE_RESIZE 3721 || uType == VBOXCMDVBVACTL_TYPE_ENABLE) 3722 { 3723 RT_UNTRUSTED_VALIDATED_FENCE(); 3724 3725 switch (uType) 3726 { 3727 case VBOXCMDVBVACTL_TYPE_3DCTL: 3728 return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, pCtl, cbCtl); 3729 3730 case VBOXCMDVBVACTL_TYPE_RESIZE: 3731 return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_RESIZE, pCtl, cbCtl); 3732 3733 case VBOXCMDVBVACTL_TYPE_ENABLE: 3734 ASSERT_GUEST_BREAK(cbCtl == sizeof(VBOXCMDVBVA_CTL_ENABLE)); 3735 return vdmaVBVACtlEnableDisableSubmit(pVdma, (VBOXCMDVBVA_CTL_ENABLE RT_UNTRUSTED_VOLATILE_GUEST *)pCtl); 3736 3737 default: 3738 AssertFailed(); 3739 } 3740 } 3741 3742 pCtl->i32Result = VERR_INVALID_PARAMETER; 3743 int rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl); 3744 AssertRC(rc); 3745 return VINF_SUCCESS; 3746 } 3747 3748 /** 3749 * Handler for VBVA_CMDVBVA_SUBMIT, see vbvaChannelHandler(). 3750 * 3751 * @thread EMT 3752 */ 3753 int vboxCmdVBVACmdSubmit(PVGASTATE pVGAState) 3754 { 3755 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva)) 3756 { 3757 WARN(("vdma VBVA is disabled\n")); 3758 return VERR_INVALID_STATE; 3759 } 3760 3761 return vboxVDMACmdSubmitPerform(pVGAState->pVdma); 3762 } 3763 3764 /** 3765 * Handler for VBVA_CMDVBVA_FLUSH, see vbvaChannelHandler(). 3766 * 3767 * @thread EMT 3768 */ 3769 int vboxCmdVBVACmdFlush(PVGASTATE pVGAState) 3770 { 3771 WARN(("flush\n")); 3772 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva)) 3773 { 3774 WARN(("vdma VBVA is disabled\n")); 3775 return VERR_INVALID_STATE; 3776 } 3777 return vboxVDMACmdSubmitPerform(pVGAState->pVdma); 3778 } 3779 3780 /** 3781 * Called from vgaTimerRefresh(). 
3780 /**
3781  * Called from vgaTimerRefresh().
3782  */
3783 void vboxCmdVBVATimerRefresh(PVGASTATE pVGAState)
3784 {
3785     if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3786         return;
3787     vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3788 }
3789
3790 bool vboxCmdVBVAIsEnabled(PVGASTATE pVGAState)
3791 {
3792     return VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva);
3793 }
3794
3795
3796
363 3797 /*
364 3798  *
… …
373 3807 int vboxVDMASaveStateExecPrep(struct VBOXVDMAHOST *pVdma)
374 3808 {
375          RT_NOREF(pVdma);
376          return VINF_SUCCESS;
    3809     int rc = vdmaVBVAPause(pVdma);
    3810     if (RT_SUCCESS(rc))
    3811         return VINF_SUCCESS;
    3812
    3813     if (rc != VERR_INVALID_STATE)
    3814     {
    3815         WARN(("vdmaVBVAPause failed %Rrc\n", rc));
    3816         return rc;
    3817     }
    3818
    3819 # ifdef DEBUG_misha
    3820     WARN(("debug prep"));
    3821 # endif
    3822
    3823     PVGASTATE pVGAState = pVdma->pVGAState;
    3824     PVBOXVDMACMD_CHROMIUM_CTL pCmd;
    3825     pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_BEGIN, sizeof(*pCmd));
    3826     if (pCmd)
    3827     {
    3828         rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
    3829         AssertRC(rc);
    3830         if (RT_SUCCESS(rc))
    3831             rc = vboxVDMACrCtlGetRc(pCmd);
    3832         vboxVDMACrCtlRelease(pCmd);
    3833         return rc;
    3834     }
    3835     return VERR_NO_MEMORY;
377 3836 }
378 3837
379 3838 int vboxVDMASaveStateExecDone(struct VBOXVDMAHOST *pVdma)
380 3839 {
381          RT_NOREF(pVdma);
382          return VINF_SUCCESS;
    3840     int rc = vdmaVBVAResume(pVdma);
    3841     if (RT_SUCCESS(rc))
    3842         return VINF_SUCCESS;
    3843
    3844     if (rc != VERR_INVALID_STATE)
    3845     {
    3846         WARN(("vdmaVBVAResume failed %Rrc\n", rc));
    3847         return rc;
    3848     }
    3849
    3850 # ifdef DEBUG_misha
    3851     WARN(("debug done"));
    3852 # endif
    3853
    3854     PVGASTATE pVGAState = pVdma->pVGAState;
    3855     PVBOXVDMACMD_CHROMIUM_CTL pCmd;
    3856     pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_END, sizeof(*pCmd));
    3857     Assert(pCmd);
    3858     if (pCmd)
    3859     {
    3860         rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
    3861         AssertRC(rc);
    3862         if (RT_SUCCESS(rc))
    3863             rc = vboxVDMACrCtlGetRc(pCmd);
    3864         vboxVDMACrCtlRelease(pCmd);
    3865         return rc;
    3866     }
    3867     return VERR_NO_MEMORY;
383 3868 }
384 3869
… …
386 3871 {
387 3872     int rc;
388          RT_NOREF(pVdma, pSSM);
389
390          rc = SSMR3PutU32(pSSM, UINT32_MAX);
    3873
    3874     if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
    3875     {
    3876         rc = SSMR3PutU32(pSSM, UINT32_MAX);
    3877         AssertRCReturn(rc, rc);
    3878         return VINF_SUCCESS;
    3879     }
    3880
    3881     PVGASTATE pVGAState = pVdma->pVGAState;
    3882     uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
    3883
    3884     rc = SSMR3PutU32(pSSM, (uint32_t)((uintptr_t)pVdma->CmdVbva.pVBVA - (uintptr_t)pu8VramBase));
391 3885     AssertRCReturn(rc, rc);
392          return VINF_SUCCESS;
    3886
    3887     VBVAEXHOSTCTL HCtl;
    3888     HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_SAVESTATE;
    3889     HCtl.u.state.pSSM = pSSM;
    3890     HCtl.u.state.u32Version = 0;
    3891     return vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
393 3892 }
394 3893
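The save path above now writes a single u32 sentinel first: UINT32_MAX when command VBVA is inactive, otherwise the offset of the guest VBVA area within VRAM, followed by the worker-serialized state. The load path in the next hunk branches on the same sentinel. A minimal sketch of just the sentinel convention, with hypothetical demoSave/demoLoad helpers standing in for the handlers:

    #include <VBox/vmm/ssm.h>
    #include <iprt/errcore.h>
    #include <iprt/types.h>

    /* Hypothetical helpers illustrating the UINT32_MAX sentinel convention. */
    static int demoSave(PSSMHANDLE pSSM, bool fEnabled, uint32_t offVBVA)
    {
        /* UINT32_MAX means "command VBVA disabled, nothing follows". */
        return SSMR3PutU32(pSSM, fEnabled ? offVBVA : UINT32_MAX);
    }

    static int demoLoad(PSSMHANDLE pSSM, bool *pfEnabled, uint32_t *poffVBVA)
    {
        uint32_t u32;
        int rc = SSMR3GetU32(pSSM, &u32);
        if (RT_FAILURE(rc))
            return rc;
        *pfEnabled = (u32 != UINT32_MAX); /* Anything else is an offset into VRAM. */
        *poffVBVA  = u32;
        return VINF_SUCCESS;
    }

Storing an offset rather than a pointer keeps the saved state position-independent: VRAM is mapped at a different host address after restore, so only the displacement into VRAM is meaningful.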
… …
401 3900     if (u32 != UINT32_MAX)
402 3901     {
403              RT_NOREF(pVdma, u32Version);
404              WARN(("Unsupported VBVACtl info!\n"));
405              return VERR_VERSION_MISMATCH;
    3902         rc = vdmaVBVACtlEnableSubmitSync(pVdma, u32, true);
    3903         AssertLogRelRCReturn(rc, rc);
    3904
    3905         Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
    3906
    3907         VBVAEXHOSTCTL HCtl;
    3908         HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_LOADSTATE;
    3909         HCtl.u.state.pSSM = pSSM;
    3910         HCtl.u.state.u32Version = u32Version;
    3911         rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
    3912         AssertLogRelRCReturn(rc, rc);
    3913
    3914         rc = vdmaVBVAResume(pVdma);
    3915         AssertLogRelRCReturn(rc, rc);
    3916
    3917         return VINF_SUCCESS;
406 3918     }
407 3919
… …
411 3923 int vboxVDMASaveLoadDone(struct VBOXVDMAHOST *pVdma)
412 3924 {
413          RT_NOREF(pVdma);
    3925     if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
    3926         return VINF_SUCCESS;
    3927
    3928     /** @todo r=bird: BTW. would be great if you put in a couple of comments here and there explaining what
    3929      *        the purpose of this code is. */
    3930     VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE);
    3931     if (!pHCtl)
    3932     {
    3933         WARN(("VBoxVBVAExHCtlCreate failed\n"));
    3934         return VERR_NO_MEMORY;
    3935     }
    3936
    3937     /* sanity */
    3938     pHCtl->u.cmd.pvCmd = NULL;
    3939     pHCtl->u.cmd.cbCmd = 0;
    3940
    3941     /* NULL completion will just free the ctl up */
    3942     int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_HOST, NULL, NULL);
    3943     if (RT_FAILURE(rc))
    3944     {
    3945         Log(("vdmaVBVACtlSubmit failed %Rrc\n", rc));
    3946         VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
    3947         return rc;
    3948     }
    3949
414 3950     return VINF_SUCCESS;
415 3951 }
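vboxVDMASaveLoadDone illustrates the submission ownership rule used throughout this code: once vdmaVBVACtlSubmit succeeds, the control structure belongs to the callee, and a NULL completion callback means it is simply freed after processing; only on submission failure does the caller still own it and have to free it. Distilled into a hypothetical helper, assuming the internal APIs and the VBVAEXHOSTCTL_TYPE enum behave as the hunk above suggests:

    /* Hypothetical condensation of the fire-and-forget pattern above. */
    static int demoFireAndForget(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL_TYPE enmType)
    {
        VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, enmType);
        if (!pHCtl)
            return VERR_NO_MEMORY;

        /* NULL completion callback: the ctl is freed for us after processing. */
        int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_HOST, NULL, NULL);
        if (RT_FAILURE(rc))
            VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl); /* Still ours on failure. */
        return rc;
    }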