Changeset 87730 in vbox for trunk/src/VBox/Devices/Bus
- Timestamp: Feb 12, 2021 9:56:21 AM (4 years ago)
- svn:sync-xref-src-repo-rev: 142780
- Files: 1 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/Devices/Bus/DevIommuAmd.cpp
r87715 r87730 215 215 216 216 /** 217 * I/O address range. 218 */ 219 typedef struct IOADDRRANGE 220 { 221 /** The address (virtual or physical). */ 222 uint64_t uAddr; 223 /** The size of the access in bytes. */ 224 size_t cb; 225 /** The I/O permissions for this translation, see IOMMU_IO_PERM_XXX. */ 226 uint8_t fPerm; 227 } IOADDRRANGE; 228 /** Pointer to an I/O address range. */ 229 typedef IOADDRRANGE *PIOADDRRANGE; 230 /** Pointer to a const I/O address range. */ 231 typedef IOADDRRANGE const *PCIOADDRRANGE; 232 233 /** 217 234 * IOMMU I/O Device. 218 235 * Used for caching as well as passing flags to events. … … 242 259 /** The least recently used (LRU) list node. */ 243 260 RTLISTNODE NdLru; 244 /** The I/O page lookup of the translation. */261 /** The I/O page lookup results of the translation. */ 245 262 IOPAGELOOKUP PageLookup; 246 263 /** Whether the entry needs to be evicted from the cache. */ … … 417 434 /** @name IOMMU: Stat counters. 418 435 * @{ */ 419 STAMCOUNTER StatMmioReadR3; /**< Number of MMIO reads in R3. */ 420 STAMCOUNTER StatMmioReadRZ; /**< Number of MMIO reads in RZ. */ 421 STAMCOUNTER StatMmioWriteR3; /**< Number of MMIO writes in R3. */ 422 STAMCOUNTER StatMmioWriteRZ; /**< Number of MMIO writes in RZ. */ 423 424 STAMCOUNTER StatMsiRemapR3; /**< Number of MSI remap requests in R3. */ 425 STAMCOUNTER StatMsiRemapRZ; /**< Number of MSI remap requests in RZ. */ 426 427 STAMCOUNTER StatMemReadR3; /**< Number of memory read translation requests in R3. */ 428 STAMCOUNTER StatMemReadRZ; /**< Number of memory read translation requests in RZ. */ 429 STAMCOUNTER StatMemWriteR3; /**< Number of memory write translation requests in R3. */ 430 STAMCOUNTER StatMemWriteRZ; /**< Number of memory write translation requests in RZ. */ 431 432 STAMCOUNTER StatMemBulkReadR3; /**< Number of memory read bulk translation requests in R3. */ 433 STAMCOUNTER StatMemBulkReadRZ; /**< Number of memory read bulk translation requests in RZ. 
*/ 434 STAMCOUNTER StatMemBulkWriteR3; /**< Number of memory write bulk translation requests in R3. */ 435 STAMCOUNTER StatMemBulkWriteRZ; /**< Number of memory write bulk translation requests in RZ. */ 436 437 STAMCOUNTER StatCmd; /**< Number of commands processed in total. */ 438 STAMCOUNTER StatCmdCompWait; /**< Number of Completion Wait commands processed. */ 439 STAMCOUNTER StatCmdInvDte; /**< Number of Invalidate DTE commands processed. */ 440 STAMCOUNTER StatCmdInvIommuPages; /**< Number of Invalidate IOMMU pages commands processed. */ 441 STAMCOUNTER StatCmdInvIotlbPages; /**< Number of Invalidate IOTLB pages commands processed. */ 442 STAMCOUNTER StatCmdInvIntrTable; /**< Number of Invalidate Interrupt Table commands processed. */ 443 STAMCOUNTER StatCmdPrefIommuPages; /**< Number of Prefetch IOMMU Pages commands processed. */ 444 STAMCOUNTER StatCmdCompletePprReq; /**< Number of Complete PPR Requests commands processed. */ 445 STAMCOUNTER StatCmdInvIommuAll; /**< Number of Invalidate IOMMU All commands processed. */ 446 447 STAMCOUNTER StatIotlbeLookupNonContig; /**< Number of IOTLB lookups that result in non-contiguous regions. */ 448 STAMCOUNTER StatIotlbeCached; /**< Number of IOTLB entries in the cache. */ 449 STAMCOUNTER StatIotlbeCacheHit; /**< Number of IOTLB cache hits. */ 450 STAMCOUNTER StatIotlbeCacheMiss; /**< Number of IOTLB cache misses. */ 451 STAMCOUNTER StatIotlbeLazyEvictReuse; /**< Number of IOTLB entries re-used after lazy eviction. */ 452 STAMPROFILEADV StatIotlbeLookup; /**< Profiling of IOTLB entry lookup (from cache). */ 453 454 STAMCOUNTER StatDteLookupNonContig; /**< Number of DTE lookups that result in non-contiguous regions. */ 455 STAMPROFILEADV StatIoPageWalkLookup; /**< Profiling of I/O page walk (from memory). */ 436 STAMCOUNTER StatMmioReadR3; /**< Number of MMIO reads in R3. */ 437 STAMCOUNTER StatMmioReadRZ; /**< Number of MMIO reads in RZ. */ 438 STAMCOUNTER StatMmioWriteR3; /**< Number of MMIO writes in R3. 
*/ 439 STAMCOUNTER StatMmioWriteRZ; /**< Number of MMIO writes in RZ. */ 440 441 STAMCOUNTER StatMsiRemapR3; /**< Number of MSI remap requests in R3. */ 442 STAMCOUNTER StatMsiRemapRZ; /**< Number of MSI remap requests in RZ. */ 443 444 STAMCOUNTER StatMemReadR3; /**< Number of memory read translation requests in R3. */ 445 STAMCOUNTER StatMemReadRZ; /**< Number of memory read translation requests in RZ. */ 446 STAMCOUNTER StatMemWriteR3; /**< Number of memory write translation requests in R3. */ 447 STAMCOUNTER StatMemWriteRZ; /**< Number of memory write translation requests in RZ. */ 448 449 STAMCOUNTER StatMemBulkReadR3; /**< Number of memory read bulk translation requests in R3. */ 450 STAMCOUNTER StatMemBulkReadRZ; /**< Number of memory read bulk translation requests in RZ. */ 451 STAMCOUNTER StatMemBulkWriteR3; /**< Number of memory write bulk translation requests in R3. */ 452 STAMCOUNTER StatMemBulkWriteRZ; /**< Number of memory write bulk translation requests in RZ. */ 453 454 STAMCOUNTER StatCmd; /**< Number of commands processed in total. */ 455 STAMCOUNTER StatCmdCompWait; /**< Number of Completion Wait commands processed. */ 456 STAMCOUNTER StatCmdInvDte; /**< Number of Invalidate DTE commands processed. */ 457 STAMCOUNTER StatCmdInvIommuPages; /**< Number of Invalidate IOMMU pages commands processed. */ 458 STAMCOUNTER StatCmdInvIotlbPages; /**< Number of Invalidate IOTLB pages commands processed. */ 459 STAMCOUNTER StatCmdInvIntrTable; /**< Number of Invalidate Interrupt Table commands processed. */ 460 STAMCOUNTER StatCmdPrefIommuPages; /**< Number of Prefetch IOMMU Pages commands processed. */ 461 STAMCOUNTER StatCmdCompletePprReq; /**< Number of Complete PPR Requests commands processed. */ 462 STAMCOUNTER StatCmdInvIommuAll; /**< Number of Invalidate IOMMU All commands processed. */ 463 464 STAMCOUNTER StatIotlbeCached; /**< Number of IOTLB entries in the cache. 
*/ 465 STAMCOUNTER StatIotlbeLazyEvictReuse; /**< Number of IOTLB entries re-used after lazy eviction. */ 466 467 STAMPROFILEADV StatProfIotlbeLookup; /**< Profiling of IOTLB entry lookup (from cache). */ 468 STAMPROFILEADV StatProfDteLookup; /**< Profiling of I/O page walk (from memory). */ 469 470 STAMCOUNTER StatAccessCacheHit; /**< Number of IOTLB cache hits. */ 471 STAMCOUNTER StatAccessCacheHitFull; /**< Number of accesses that were fully looked up from the cache. */ 472 STAMCOUNTER StatAccessCacheMiss; /**< Number of cache misses (resulting in DTE lookups). */ 473 STAMCOUNTER StatAccessCacheNonContig; /**< Number of cache accesses resulting in non-contiguous access. */ 474 STAMCOUNTER StatAccessCachePermDenied; /**< Number of cache accesses resulting in insufficient permissions. */ 475 STAMCOUNTER StatAccessDteNonContig; /**< Number of DTE accesses resulting in non-contiguous access. */ 476 STAMCOUNTER StatAccessDtePermDenied; /**< Number of DTE accesses resulting in insufficient permissions. */ 456 477 /** @} */ 457 478 #endif … … 570 591 #endif 571 592 593 /** 594 * IOMMU operation auxiliary info. 595 */ 596 typedef struct IOMMUOPAUX 597 { 598 /** The IOMMU operation being performed. */ 599 IOMMUOP enmOp; 600 /** The device table entry (can be NULL). */ 601 PCDTE_T pDte; 602 /** The device ID. */ 603 uint16_t uDeviceId; 604 /** The domain ID (when the DTE isn't provided). */ 605 uint16_t uDomainId; 606 } IOMMUOPAUX; 607 /** Pointer to an I/O address lookup struct. */ 608 typedef IOMMUOPAUX *PIOMMUOPAUX; 609 /** Pointer to a const I/O address lookup struct. 
*/ 610 typedef IOMMUOPAUX const *PCIOMMUOPAUX; 611 612 typedef DECLCALLBACKTYPE(int, FNIOPAGELOOKUP,(PPDMDEVINS pDevIns, uint64_t uIovaPage, uint8_t fPerm, PCIOMMUOPAUX pAux, 613 PIOPAGELOOKUP pPageLookup)); 614 typedef FNIOPAGELOOKUP *PFNIOPAGELOOKUP; 615 572 616 573 617 /********************************************************************************************************************************* … … 1114 1158 1115 1159 IOMMU_LOCK_CACHE_NORET(pDevIns, pThis); 1160 /** @todo Check level 1 cache? */ 1116 1161 do 1117 1162 { … … 3169 3214 * @param pDevIns The IOMMU device instance. 3170 3215 * @param uIova The I/O virtual address to translate. Must be 4K aligned. 3171 * @param uDevId The device ID (bus, device, function).3172 3216 * @param fPerm The I/O permissions for this access, see 3173 3217 * IOMMU_IO_PERM_XXX. 3218 * @param uDevId The device ID (bus, device, function). 3174 3219 * @param pDte The device table entry. 3175 3220 * @param enmOp The IOMMU operation being performed. … … 3179 3224 * @thread Any. 3180 3225 */ 3181 static int iommuAmdIoPageTableWalk(PPDMDEVINS pDevIns, uint 16_t uDevId, uint64_t uIova, uint8_t fPerm, PCDTE_T pDte,3226 static int iommuAmdIoPageTableWalk(PPDMDEVINS pDevIns, uint64_t uIova, uint8_t fPerm, uint16_t uDevId, PCDTE_T pDte, 3182 3227 IOMMUOP enmOp, PIOPAGELOOKUP pPageLookup) 3183 3228 { … … 3347 3392 3348 3393 /** 3394 * Page lookup callback for finding an I/O page from guest memory. 3395 * 3396 * @returns VBox status code. 3397 * @retval VINF_SUCCESS when the page is found and has the right permissions. 3398 * @retval VERR_IOMMU_ADDR_TRANSLATION_FAILED when address translation fails. 3399 * @retval VERR_IOMMU_ADDR_ACCESS_DENIED when the page is found but permissions are 3400 * insufficient to what is requested. 3401 * 3402 * @param pDevIns The IOMMU instance data. 3403 * @param uIovaPage The I/O virtual address to lookup in the cache (must be 3404 * 4K aligned). 
3405 * @param fPerm The I/O permissions for this access, see 3406 * IOMMU_IO_PERM_XXX. 3407 * @param pAux The auxiliary information required during lookup. 3408 * @param pPageLookup Where to store the looked up I/O page. 3409 */ 3410 static DECLCALLBACK(int) iommuAmdDteLookupPage(PPDMDEVINS pDevIns, uint64_t uIovaPage, uint8_t fPerm, PCIOMMUOPAUX pAux, 3411 PIOPAGELOOKUP pPageLookup) 3412 { 3413 AssertPtr(pAux); 3414 AssertPtr(pPageLookup); 3415 Assert(!(uIovaPage & X86_PAGE_4K_OFFSET_MASK)); 3416 3417 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU); 3418 STAM_PROFILE_ADV_START(&pThis->StatProfDteLookup, a); 3419 int rc = iommuAmdIoPageTableWalk(pDevIns, uIovaPage, fPerm, pAux->uDeviceId, pAux->pDte, pAux->enmOp, pPageLookup); 3420 STAM_PROFILE_ADV_STOP(&pThis->StatProfDteLookup, a); 3421 return rc; 3422 } 3423 3424 3425 /** 3426 * Looks up a range of I/O virtual addresses. 3427 * 3428 * @returns VBox status code. 3429 * @param pDevIns The IOMMU instance data. 3430 * @param pfnIoPageLookup The lookup function to use. 3431 * @param pAddrIn The I/O address range to lookup. 3432 * @param pAux The auxiliary information required by the lookup 3433 * function. 3434 * @param pAddrOut Where to store the translated I/O address range. 3435 * @param pcbPages Where to store the size of the access (round up to 3436 * the page size). Optional, can be NULL. 
3437 */ 3438 static int iommuAmdLookupIoAddrRange(PPDMDEVINS pDevIns, PFNIOPAGELOOKUP pfnIoPageLookup, PCIOADDRRANGE pAddrIn, 3439 PCIOMMUOPAUX pAux, PIOADDRRANGE pAddrOut, size_t *pcbPages) 3440 { 3441 AssertPtr(pfnIoPageLookup); 3442 AssertPtr(pAddrIn); 3443 AssertPtr(pAddrOut); 3444 3445 int rc; 3446 size_t const cbAccess = pAddrIn->cb; 3447 uint8_t const fPerm = pAddrIn->fPerm; 3448 uint64_t const uIova = pAddrIn->uAddr; 3449 RTGCPHYS GCPhysSpa = NIL_RTGCPHYS; 3450 size_t cbRemaining = cbAccess; 3451 uint64_t uIovaPage = pAddrIn->uAddr & X86_PAGE_4K_BASE_MASK; 3452 uint64_t offIova = pAddrIn->uAddr & X86_PAGE_4K_OFFSET_MASK; 3453 uint64_t cbPages = 0; 3454 3455 IOPAGELOOKUP PageLookupPrev; 3456 RT_ZERO(PageLookupPrev); 3457 for (;;) 3458 { 3459 IOPAGELOOKUP PageLookup; 3460 rc = pfnIoPageLookup(pDevIns, uIovaPage, fPerm, pAux, &PageLookup); 3461 if (RT_SUCCESS(rc)) 3462 { 3463 Assert(PageLookup.cShift >= X86_PAGE_4K_SHIFT); 3464 3465 /* Store the translated address before continuing to access more pages. */ 3466 if (cbRemaining == cbAccess) 3467 { 3468 uint64_t const offMask = IOMMU_GET_PAGE_OFF_MASK(PageLookup.cShift); 3469 uint64_t const offSpa = uIova & offMask; 3470 Assert(!(PageLookup.GCPhysSpa & offMask)); 3471 GCPhysSpa = PageLookup.GCPhysSpa | offSpa; 3472 } 3473 /* Check if addresses translated so far result in a physically contiguous region. */ 3474 else if (!iommuAmdLookupIsAccessContig(&PageLookupPrev, &PageLookup)) 3475 { 3476 rc = VERR_OUT_OF_RANGE; 3477 break; 3478 } 3479 3480 /* Store the page lookup result from the first/previous page. */ 3481 PageLookupPrev = PageLookup; 3482 3483 /* Update size of all pages read thus far. */ 3484 uint64_t const cbPage = RT_BIT_64(PageLookup.cShift); 3485 cbPages += cbPage; 3486 3487 /* Check if we need to access more pages. */ 3488 if (cbRemaining > cbPage - offIova) 3489 { 3490 cbRemaining -= (cbPage - offIova); /* Calculate how much more we need to access. 
*/ 3491 uIovaPage += cbPage; /* Update address of the next access. */ 3492 offIova = 0; /* After first page, all pages are accessed from off 0. */ 3493 } 3494 else 3495 { 3496 cbRemaining = 0; 3497 break; 3498 } 3499 } 3500 else 3501 break; 3502 } 3503 3504 pAddrOut->uAddr = GCPhysSpa; /* Update the translated address. */ 3505 pAddrOut->cb = cbAccess - cbRemaining; /* Update the size of the contiguous memory region. */ 3506 pAddrOut->fPerm = PageLookupPrev.fPerm; /* Update the allowed permissions for this access. */ 3507 if (pcbPages) 3508 *pcbPages = cbPages; /* Update the size of the pages accessed. */ 3509 return rc; 3510 } 3511 3512 3513 /** 3349 3514 * Looks up an I/O virtual address from the device table. 3350 3515 * … … 3366 3531 PRTGCPHYS pGCPhysSpa, size_t *pcbContiguous) 3367 3532 { 3368 PIOMMU pThis= PDMDEVINS_2_DATA(pDevIns, PIOMMU);3533 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU); 3369 3534 RTGCPHYS GCPhysSpa = NIL_RTGCPHYS; 3370 3535 size_t cbContiguous = 0; … … 3382 3547 if (RT_LIKELY(!fRsvd0 && !fRsvd1)) 3383 3548 { 3384 /* Note: Addresses are not subject to exclusion as we do -not- support remote IOTLBs. */ 3549 /* 3550 * Check if the DTE is configured for translating addresses. 3551 * Note: Addresses cannot be subject to exclusion as we do -not- support remote IOTLBs, 3552 * so there's no need to check the address exclusion base/limit here. 3553 */ 3385 3554 rc = iommuAmdPreTranslateChecks(pDevIns, uDevId, uIova, fPerm, &Dte, enmOp); 3386 3555 if (rc == VINF_SUCCESS) 3387 3556 { 3388 /* Walk the I/O page tables to translate the IOVA and check permissions for the 3389 remaining pages in the access. 
*/ 3390 size_t cbRemaining = cbAccess; 3391 uint64_t uIovaPage = uIova & X86_PAGE_4K_BASE_MASK; 3392 uint64_t offIova = uIova & X86_PAGE_4K_OFFSET_MASK; 3393 uint64_t cbPages = 0; 3394 IOPAGELOOKUP PageLookupPrev; 3395 RT_ZERO(PageLookupPrev); 3396 for (;;) 3557 IOADDRRANGE AddrIn; 3558 AddrIn.uAddr = uIova; 3559 AddrIn.cb = cbAccess; 3560 AddrIn.fPerm = fPerm; 3561 3562 IOMMUOPAUX Aux; 3563 Aux.enmOp = enmOp; 3564 Aux.pDte = &Dte; 3565 Aux.uDeviceId = uDevId; 3566 Aux.uDomainId = Dte.n.u16DomainId; 3567 3568 IOADDRRANGE AddrOut; 3569 3570 /* Lookup the address from the DTE and I/O page tables.*/ 3571 size_t cbPages = 0; 3572 rc = iommuAmdLookupIoAddrRange(pDevIns, iommuAmdDteLookupPage, &AddrIn, &Aux, &AddrOut, &cbPages); 3573 GCPhysSpa = AddrOut.uAddr; 3574 cbContiguous = AddrOut.cb; 3575 3576 /* If we stopped since translation resulted in non-contiguous physical addresses, 3577 what we translated so far is still valid. */ 3578 if (rc == VERR_OUT_OF_RANGE) 3397 3579 { 3398 /** @todo split this into a separate function and reuse from 3399 * iommuAmdCacheLookup(). */ 3400 IOPAGELOOKUP PageLookup; 3401 RT_ZERO(PageLookup); 3402 STAM_PROFILE_ADV_START(&pThis->StatIoPageWalkLookup, a); 3403 rc = iommuAmdIoPageTableWalk(pDevIns, uDevId, uIovaPage, fPerm, &Dte, enmOp, &PageLookup); 3404 STAM_PROFILE_ADV_STOP(&pThis->StatIoPageWalkLookup, a); 3405 if (RT_SUCCESS(rc)) 3406 { 3407 /* Store the translated address before continuing to access more pages. */ 3408 Assert(PageLookup.cShift >= X86_PAGE_4K_SHIFT); 3409 if (cbRemaining == cbAccess) 3410 { 3411 uint64_t const offMask = IOMMU_GET_PAGE_OFF_MASK(PageLookup.cShift); 3412 uint64_t const offSpa = uIova & offMask; 3413 Assert(!(PageLookup.GCPhysSpa & offMask)); 3414 GCPhysSpa = PageLookup.GCPhysSpa | offSpa; 3415 } 3416 /* Check if addresses translated so far result in a physically contiguous region. 
*/ 3417 else if (!iommuAmdLookupIsAccessContig(&PageLookupPrev, &PageLookup)) 3418 { 3419 STAM_COUNTER_INC(&pThis->StatDteLookupNonContig); 3420 break; 3421 } 3422 3423 /* Store the page lookup result from the first/previous page. */ 3424 PageLookupPrev = PageLookup; 3425 3426 /* Update size of all pages read thus far. */ 3427 uint64_t const cbPage = RT_BIT_64(PageLookup.cShift); 3428 cbPages += cbPage; 3429 3430 /* Check if we need to access more pages. */ 3431 if (cbRemaining > cbPage - offIova) 3432 { 3433 cbRemaining -= (cbPage - offIova); /* Calculate how much more we need to access. */ 3434 uIovaPage += cbPage; /* Update address of the next access. */ 3435 offIova = 0; /* After first page, all pages are accessed from off 0. */ 3436 } 3437 else 3438 { 3439 cbRemaining = 0; 3440 break; 3441 } 3442 } 3443 else 3444 { 3445 /* Translation failed. */ 3446 GCPhysSpa = NIL_RTGCPHYS; 3447 cbRemaining = cbAccess; 3448 break; 3449 } 3580 Assert(cbContiguous > 0 && cbContiguous < cbAccess); 3581 rc = VINF_SUCCESS; 3582 STAM_COUNTER_INC(&pThis->StatAccessDteNonContig); 3450 3583 } 3451 3584 3452 /* Update how much contiguous memory was accessed. */3453 cbContiguous = cbAccess - cbRemaining;3585 if (rc == VERR_IOMMU_ADDR_ACCESS_DENIED) 3586 STAM_COUNTER_INC(&pThis->StatAccessDtePermDenied); 3454 3587 3455 3588 #if defined(IN_RING3) && defined(IOMMU_WITH_IOTLBE_CACHE) … … 3458 3591 /* Update that addresses requires translation (cumulative permissions of DTE and I/O page tables). */ 3459 3592 iommuAmdDteCacheUpdate(pDevIns, uDevId, &Dte, IOMMU_DEV_F_PRESENT | IOMMU_DEV_F_ADDR_TRANSLATE); 3460 3461 3593 /* Update IOTLB for the contiguous range of I/O virtual addresses. 
*/ 3462 3594 iommuAmdIotlbAddRange(pDevIns, Dte.n.u16DomainId, uIova & X86_PAGE_4K_BASE_MASK, cbPages, 3463 GCPhysSpa & X86_PAGE_4K_BASE_MASK, PageLookupPrev.fPerm);3595 GCPhysSpa & X86_PAGE_4K_BASE_MASK, AddrOut.fPerm); 3464 3596 } 3465 3597 #endif … … 3486 3618 GCPhysSpa = NIL_RTGCPHYS; 3487 3619 cbContiguous = 0; 3620 STAM_COUNTER_INC(&pThis->StatAccessDtePermDenied); 3488 3621 } 3489 3622 } … … 3522 3655 *pcbContiguous = cbContiguous; 3523 3656 AssertMsg(rc != VINF_SUCCESS || cbContiguous > 0, ("cbContiguous=%zu\n", cbContiguous)); 3524 3525 3657 return rc; 3526 3658 } 3527 3659 3528 3660 3529 #ifdef IOMMU_WITH_IOTLBE_CACHE 3661 #if defined(IN_RING3) && defined(IOMMU_WITH_IOTLBE_CACHE) 3662 /** 3663 * I/O page lookup callback for finding an I/O page from the IOTLB. 3664 * 3665 * @returns VBox status code. 3666 * @retval VINF_SUCCESS when the page is found and has the right permissions. 3667 * @retval VERR_NOT_FOUND when the page is not found. 3668 * @retval VERR_IOMMU_ADDR_ACCESS_DENIED when the page is found but permissions are 3669 * insufficient to what is requested. 3670 * 3671 * @param pDevIns The IOMMU instance data. 3672 * @param uIovaPage The I/O virtual address to lookup in the cache (must be 3673 * 4K aligned). 3674 * @param fPerm The I/O permissions for this access, see 3675 * IOMMU_IO_PERM_XXX. 3676 * @param pAux The auxiliary information required during lookup. 3677 * @param pPageLookup Where to store the looked up I/O page. 
3678 */ 3679 static DECLCALLBACK(int) iommuAmdCacheLookupPage(PPDMDEVINS pDevIns, uint64_t uIovaPage, uint8_t fPerm, PCIOMMUOPAUX pAux, 3680 PIOPAGELOOKUP pPageLookup) 3681 { 3682 Assert(pAux); 3683 Assert(pPageLookup); 3684 Assert(!(uIovaPage & X86_PAGE_4K_OFFSET_MASK)); 3685 3686 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU); 3687 3688 STAM_PROFILE_ADV_START(&pThis->StatProfIotlbeLookup, a); 3689 PCIOTLBE pIotlbe = iommuAmdIotlbLookup(pThis, pAux->uDomainId, uIovaPage); 3690 STAM_PROFILE_ADV_STOP(&pThis->StatProfIotlbeLookup, a); 3691 if (pIotlbe) 3692 { 3693 *pPageLookup = pIotlbe->PageLookup; 3694 if ((pPageLookup->fPerm & fPerm) == fPerm) 3695 { 3696 STAM_COUNTER_INC(&pThis->StatAccessCacheHit); 3697 return VINF_SUCCESS; 3698 } 3699 return VERR_IOMMU_ADDR_ACCESS_DENIED; 3700 } 3701 return VERR_NOT_FOUND; 3702 } 3703 3704 3530 3705 /** 3531 3706 * Lookups a memory access from the IOMMU cache. … … 3553 3728 PRTGCPHYS pGCPhysSpa, size_t *pcbContiguous) 3554 3729 { 3730 int rc; 3555 3731 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU); 3556 int rc = VERR_NOT_FOUND;3557 3732 3558 3733 /* … … 3568 3743 { 3569 3744 /* Lookup the IOTLB entries from the level 2 cache. 
*/ 3570 RTGCPHYS GCPhysSpa = NIL_RTGCPHYS; 3571 size_t cbContiguous = 0; 3572 size_t cbRemaining = cbAccess; 3573 uint64_t uIovaPage = uIova & X86_PAGE_4K_BASE_MASK; 3574 uint64_t offIova = uIova & X86_PAGE_4K_OFFSET_MASK; 3575 IOPAGELOOKUP PageLookupPrev; 3576 RT_ZERO(PageLookupPrev); 3577 for (;;) 3578 { 3579 STAM_PROFILE_ADV_START(&pThis->StatIotlbeLookup, b); 3580 PCIOTLBE pIotlbe = iommuAmdIotlbLookup(pThis, pDevice->uDomainId, uIovaPage); 3581 STAM_PROFILE_ADV_STOP(&pThis->StatIotlbeLookup, b); 3582 if (pIotlbe) 3583 { 3584 PCIOPAGELOOKUP pPageLookup = &pIotlbe->PageLookup; 3585 if ((pPageLookup->fPerm & fPerm) == fPerm) 3586 { /* likely */ } 3587 else 3588 { 3589 EVT_IO_PAGE_FAULT_T EvtIoPageFault; 3590 iommuAmdIoPageFaultEventInit(uDevId, pDevice->uDomainId, uIova, true /* fPresent */, 3591 false /* fRsvdNotZero */, true /* fPermDenied */, enmOp, &EvtIoPageFault); 3592 iommuAmdIoPageFaultEventRaise(pDevIns, pDevice->fFlags, NULL /* pIrte */, enmOp, &EvtIoPageFault, 3593 kIoPageFaultType_PermDenied); 3594 rc = VERR_IOMMU_ADDR_ACCESS_DENIED; 3595 break; 3596 } 3597 3598 /* Store the translated address before continuing to translate more pages. */ 3599 Assert(pPageLookup->cShift >= X86_PAGE_4K_SHIFT); 3600 if (cbRemaining == cbAccess) 3601 { 3602 uint64_t const offMask = IOMMU_GET_PAGE_OFF_MASK(pPageLookup->cShift); 3603 uint64_t const offSpa = uIova & offMask; 3604 Assert(!(pPageLookup->GCPhysSpa & offMask)); 3605 GCPhysSpa = pPageLookup->GCPhysSpa | offSpa; 3606 } 3607 /* Check if addresses translated so far result in a physically contiguous region. */ 3608 else if (!iommuAmdLookupIsAccessContig(&PageLookupPrev, pPageLookup)) 3609 { 3610 STAM_COUNTER_INC(&pThis->StatIotlbeLookupNonContig); 3611 rc = VERR_OUT_OF_RANGE; 3612 break; 3613 } 3614 3615 /* Store the page lookup result from the first/previous page. */ 3616 PageLookupPrev = *pPageLookup; 3617 3618 /* Check if we need to access more pages. 
*/ 3619 uint64_t const cbPage = RT_BIT_64(pPageLookup->cShift); 3620 if (cbRemaining > cbPage - offIova) 3621 { 3622 cbRemaining -= (cbPage - offIova); /* Calculate how much more we need to access. */ 3623 uIovaPage += cbPage; /* Update address of the next access. */ 3624 offIova = 0; /* After first page, all pages are accessed from off 0. */ 3625 } 3626 else 3627 { 3628 cbRemaining = 0; 3629 rc = VINF_SUCCESS; 3630 break; 3631 } 3632 } 3633 else 3634 { 3635 /* 3636 * No IOTLB entry was found for this I/O virtual address. 3637 * Fallback to walking the I/O page tables from the beginning of the access. 3638 * We currently don't support partial lookups. 3639 */ 3640 Assert(rc == VERR_NOT_FOUND); 3641 break; 3642 } 3643 } 3644 3645 /* Update how much contiguous memory was accessed. */ 3646 cbContiguous = cbAccess - cbRemaining; 3647 3648 *pGCPhysSpa = GCPhysSpa; 3649 *pcbContiguous = cbContiguous; 3745 IOADDRRANGE AddrIn; 3746 AddrIn.uAddr = uIova; 3747 AddrIn.cb = cbAccess; 3748 AddrIn.fPerm = fPerm; 3749 3750 IOMMUOPAUX Aux; 3751 Aux.enmOp = enmOp; 3752 Aux.pDte = NULL; 3753 Aux.uDeviceId = uDevId; 3754 Aux.uDomainId = pDevice->uDomainId; 3755 3756 IOADDRRANGE AddrOut; 3757 rc = iommuAmdLookupIoAddrRange(pDevIns, iommuAmdCacheLookupPage, &AddrIn, &Aux, &AddrOut, NULL /* pcbPages */); 3758 Assert(AddrOut.cb <= cbAccess); 3759 *pGCPhysSpa = AddrOut.uAddr; 3760 *pcbContiguous = AddrOut.cb; 3650 3761 } 3651 3762 else if ((pDevice->fFlags & (IOMMU_DEV_F_PRESENT | IOMMU_DEV_F_VALID | IOMMU_DEV_F_IO_PERM)) … … 3675 3786 rc = VINF_SUCCESS; 3676 3787 } 3788 else 3789 { 3790 rc = VERR_NOT_FOUND; 3791 *pGCPhysSpa = NIL_RTGCPHYS; 3792 *pcbContiguous = 0; 3793 } 3677 3794 3678 3795 IOMMU_UNLOCK_CACHE(pDevIns, pThis); 3796 3797 /* Raise event if address translation resulted in a permission failure. 
*/ 3798 if (rc == VERR_IOMMU_ADDR_ACCESS_DENIED) 3799 { 3800 EVT_IO_PAGE_FAULT_T EvtIoPageFault; 3801 iommuAmdIoPageFaultEventInit(uDevId, pDevice->uDomainId, uIova, true /* fPresent */, 3802 false /* fRsvdNotZero */, true /* fPermDenied */, enmOp, &EvtIoPageFault); 3803 iommuAmdIoPageFaultEventRaise(pDevIns, pDevice->fFlags, NULL /* pIrte */, enmOp, &EvtIoPageFault, 3804 kIoPageFaultType_PermDenied); 3805 } 3806 3679 3807 return rc; 3680 3808 } … … 3749 3877 if (rc == VINF_SUCCESS) 3750 3878 { 3751 STAM_COUNTER_INC(&pThis->StatIotlbeCacheHit);3879 /* Entire access was cached and permissions were valid. */ 3752 3880 Assert(*pcbContiguous == cbAccess); 3753 3881 Assert(*pGCPhysSpa != NIL_RTGCPHYS); 3882 STAM_COUNTER_INC(&pThis->StatAccessCacheHitFull); 3754 3883 return rc; 3755 3884 } 3756 3885 if (rc == VERR_OUT_OF_RANGE) 3757 3886 { 3887 /* Access stopped when translations resulted in non-contiguous memory, let caller resume access. */ 3758 3888 Assert(*pcbContiguous > 0 && *pcbContiguous < cbAccess); 3889 STAM_COUNTER_INC(&pThis->StatAccessCacheNonContig); 3759 3890 return VINF_SUCCESS; 3760 3891 } 3761 3892 if (rc == VERR_IOMMU_ADDR_ACCESS_DENIED) 3762 return VERR_IOMMU_ADDR_ACCESS_DENIED; 3763 AssertMsg(rc == VERR_NOT_FOUND, ("Cache lokoup failed: %Rrc\n", rc)); 3764 STAM_COUNTER_INC(&pThis->StatIotlbeCacheMiss); 3765 /** @todo r=ramshankar: WARNING! when implementing continuing of lookups because 3766 * some entries weren't in the IOTLB, make sure to keep the lock held or to 3767 * re-lookup the level 1 cache again because the DTE might be invalidated 3768 * in-between! */ 3893 { 3894 /* Access denied due to insufficient permissions. */ 3895 STAM_COUNTER_INC(&pThis->StatAccessCachePermDenied); 3896 return rc; 3897 } 3898 3899 /* Access incomplete as not all pages were in the cache. Lookup the rest from the device table. 
*/ 3900 AssertMsg(rc == VERR_NOT_FOUND, ("Invalid cache lookup result: %Rrc\n", rc)); 3901 AssertMsg(*pcbContiguous < cbAccess, ("Invalid size: cbContiguous=%zu cbAccess=%zu\n", *pcbContiguous, cbAccess)); 3902 uIova += *pcbContiguous; 3903 cbAccess -= *pcbContiguous; 3904 STAM_COUNTER_INC(&pThis->StatAccessCacheMiss); 3769 3905 #endif 3770 3906 … … 3774 3910 { /* likely */ } 3775 3911 else 3912 { 3913 Assert(rc != VERR_OUT_OF_RANGE); 3776 3914 LogFunc(("DTE lookup failed! uDevId=%#x uIova=%#RX64 fPerm=%u cbAccess=%zu rc=%#Rrc\n", uDevId, uIova, fPerm, 3777 3915 cbAccess, rc)); 3916 } 3778 3917 3779 3918 return rc; … … 5872 6011 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatCmdInvIommuAll, STAMTYPE_COUNTER, "R3/Commands/InvIommuAll", STAMUNIT_OCCURENCES, "Number of Invalidate IOMMU All commands processed."); 5873 6012 5874 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIotlbeLookupNonContig, STAMTYPE_COUNTER, "IOTLB/LookupNonContig", STAMUNIT_OCCURENCES, "IOTLB lookups that resulted in non-contiguous translated regions."); 6013 5875 6014 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIotlbeCached, STAMTYPE_COUNTER, "IOTLB/Cached", STAMUNIT_OCCURENCES, "Number of IOTLB entries in the cache."); 5876 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIotlbeCacheHit, STAMTYPE_COUNTER, "IOTLB/CacheHit", STAMUNIT_OCCURENCES, "Number of IOTLB cache hits.");5877 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIotlbeCacheMiss, STAMTYPE_COUNTER, "IOTLB/CacheMiss", STAMUNIT_OCCURENCES, "Number of IOTLB cache misses.");5878 6015 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIotlbeLazyEvictReuse, STAMTYPE_COUNTER, "IOTLB/LazyEvictReuse", STAMUNIT_OCCURENCES, "Number of IOTLB entries reused after lazy eviction."); 5879 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIotlbeLookup, STAMTYPE_PROFILE, "Profile/IotlbeLookup", STAMUNIT_TICKS_PER_CALL, "Profiling IOTLB entry lookup (from cache)."); 5880 5881 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatDteLookupNonContig, STAMTYPE_COUNTER, "DTE/LookupNonContig", 
STAMUNIT_OCCURENCES, "DTE lookups that resulted in non-contiguous translated regions."); 5882 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIoPageWalkLookup, STAMTYPE_PROFILE, "Profile/IoPageWalk", STAMUNIT_TICKS_PER_CALL, "Profiling I/O page walk (from memory)."); 6016 6017 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatProfDteLookup, STAMTYPE_PROFILE, "Profile/DteLookup", STAMUNIT_TICKS_PER_CALL, "Profiling DTE lookup."); 6018 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatProfIotlbeLookup, STAMTYPE_PROFILE, "Profile/IotlbeLookup", STAMUNIT_TICKS_PER_CALL, "Profiling IOTLBE lookup."); 6019 6020 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatAccessCacheHit, STAMTYPE_COUNTER, "Access/CacheHit", STAMUNIT_OCCURENCES, "Number of cache hits."); 6021 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatAccessCacheMiss, STAMTYPE_COUNTER, "Access/CacheMiss", STAMUNIT_OCCURENCES, "Number of cache misses."); 6022 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatAccessCacheHitFull, STAMTYPE_COUNTER, "Access/CacheHitFull", STAMUNIT_OCCURENCES, "Number of accesses that was entirely in the cache."); 6023 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatAccessCacheNonContig, STAMTYPE_COUNTER, "Access/CacheNonContig", STAMUNIT_OCCURENCES, "Number of cache accesses that resulted in non-contiguous translated regions."); 6024 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatAccessCachePermDenied, STAMTYPE_COUNTER, "Access/CacheAddrDenied", STAMUNIT_OCCURENCES, "Number of cache accesses that resulted in denied permissions."); 6025 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatAccessDteNonContig, STAMTYPE_COUNTER, "Access/DteNonContig", STAMUNIT_OCCURENCES, "Number of DTE accesses that resulted in non-contiguous translated regions."); 6026 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatAccessDtePermDenied, STAMTYPE_COUNTER, "Access/DtePermDenied", STAMUNIT_OCCURENCES, "Number of DTE accesses that resulted in denied permissions."); 5883 6027 # endif 5884 6028
Note: See TracChangeset for help on using the changeset viewer.