VirtualBox

Changeset 98016 in vbox for trunk/src/VBox/Devices


Timestamp: Jan 6, 2023 7:09:21 PM (2 years ago)
Author: vboxsync
Message:

Devices/DevIommuIntel.cpp: Solaris GCC warned about using int8_t for indexing, cast it to uintptr_t. Tweaked the I/O page table loop, removing an unnecessary idxLevel >= 0 check and changing idxLevel from signed to unsigned 8-bit int. Also, no else after continue, break, return.
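
Illustrative only (this sketch is not part of the changeset): the pattern the message describes is a count-down loop over paging levels whose index is an unsigned 8-bit integer, so a signed "idxLevel >= 0" condition is unnecessary; the loop instead relies on an outer "level count > 0" guard plus an explicit exit at level 0, and the index is cast to uintptr_t when used as an array subscript so a char-sized index type does not trigger the Solaris GCC warning. The mask values and level count below are placeholders, not values from DevIommuIntel.cpp.

    /* Minimal standalone sketch of the loop shape used in the changeset. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Placeholder "reserved bits" masks, one per paging level (hypothetical values). */
        static uint64_t const s_auMasks[] = { 0x1, 0x2, 0x4, 0x8, 0x10 };
        uint8_t const cLevels = 4;   /* hypothetical paging-level count */

        if (cLevels > 0)   /* guard so "cLevels - 1" cannot wrap the unsigned index */
        {
            for (uint8_t idxLevel = cLevels - 1; /* no "idxLevel >= 0" check needed */; idxLevel--)
            {
                /* Cast the 8-bit index to uintptr_t for array indexing. */
                printf("level %u: mask %#llx\n", (unsigned)idxLevel,
                       (unsigned long long)s_auMasks[(uintptr_t)idxLevel]);

                if (idxLevel == 0)   /* final level: stop before the unsigned index wraps */
                    break;
            }
        }
        return 0;
    }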

File: 1 edited

  • trunk/src/VBox/Devices/Bus/DevIommuIntel.cpp

--- trunk/src/VBox/Devices/Bus/DevIommuIntel.cpp (revision 96824)
+++ trunk/src/VBox/Devices/Bus/DevIommuIntel.cpp (revision 98016)
@@ -2193,9 +2193,12 @@
 
     /* Mask of reserved paging entry bits. */
-    static uint64_t const s_auPtEntityInvMasks[] = { ~VTD_SL_PTE_VALID_MASK,
-                                                     ~VTD_SL_PDE_VALID_MASK,
-                                                     ~VTD_SL_PDPE_VALID_MASK,
-                                                     ~VTD_SL_PML4E_VALID_MASK,
-                                                     ~VTD_SL_PML5E_VALID_MASK };
+    static uint64_t const s_auPtEntityInvMasks[] =
+    {
+        ~VTD_SL_PTE_VALID_MASK,
+        ~VTD_SL_PDE_VALID_MASK,
+        ~VTD_SL_PDPE_VALID_MASK,
+        ~VTD_SL_PML4E_VALID_MASK,
+        ~VTD_SL_PML5E_VALID_MASK
+    };
 
     /* Paranoia. */
@@ -2217,94 +2220,97 @@
      * Unlike AMD IOMMU paging, here there is no feature for "skipping" levels.
      */
-    uint64_t uPtEntity   = pMemReqAux->GCPhysSlPt;
-    for (int8_t idxLevel = pMemReqAux->cPagingLevel - 1; idxLevel >= 0; idxLevel--)
-    {
-        /*
-         * Read the paging entry for the current level.
-         */
-        uint8_t const cLevelShift = X86_PAGE_4K_SHIFT + (idxLevel * 9);
+    if (pMemReqAux->cPagingLevel > 0)
+    {
+        uint64_t     uPtEntity = pMemReqAux->GCPhysSlPt;
+        for (uint8_t idxLevel  = pMemReqAux->cPagingLevel - 1; /* not needed: idxLevel >= 0 */; idxLevel--)
         {
-            uint16_t const idxPte         = (uAddrIn >> cLevelShift) & UINT64_C(0x1ff);
-            uint16_t const offPte         = idxPte << 3;
-            RTGCPHYS const GCPhysPtEntity = (uPtEntity & X86_PAGE_4K_BASE_MASK) | offPte;
-            int const rc = PDMDevHlpPhysReadMeta(pDevIns, GCPhysPtEntity, &uPtEntity, sizeof(uPtEntity));
-            if (RT_SUCCESS(rc))
+            /*
+             * Read the paging entry for the current level.
+             */
+            uint8_t const cLevelShift = X86_PAGE_4K_SHIFT + (idxLevel * 9);
+            {
+                uint16_t const idxPte         = (uAddrIn >> cLevelShift) & UINT64_C(0x1ff);
+                uint16_t const offPte         = idxPte << 3;
+                RTGCPHYS const GCPhysPtEntity = (uPtEntity & X86_PAGE_4K_BASE_MASK) | offPte;
+                int const rc = PDMDevHlpPhysReadMeta(pDevIns, GCPhysPtEntity, &uPtEntity, sizeof(uPtEntity));
+                if (RT_SUCCESS(rc))
+                { /* likely */ }
+                else
+                {
+                    if ((GCPhysPtEntity & X86_PAGE_BASE_MASK) == pMemReqAux->GCPhysSlPt)
+                        dmarAtFaultRecord(pDevIns, kDmarDiag_At_Xm_Slpptr_Read_Failed, pMemReqIn, pMemReqAux);
+                    else
+                        dmarAtFaultRecord(pDevIns, kDmarDiag_At_Xm_Read_Pte_Failed, pMemReqIn, pMemReqAux);
+                    break;
+                }
+            }
+
+            /*
+             * Check I/O permissions.
+             * This must be done prior to check reserved bits for properly reporting errors SSL.2 and SSL.3.
+             * See Intel spec. 7.1.3 "Fault conditions and Remapping hardware behavior for various request".
+             */
+            uint8_t const fReqPerm = pMemReqIn->AddrRange.fPerm & pThis->fPermValidMask;
+            uint8_t const fPtPerm  = uPtEntity & pThis->fPermValidMask;
+            Assert(!(fReqPerm & DMAR_PERM_EXE));                        /* No Execute-requests support yet. */
+            Assert(!(pThis->fExtCapReg & VTD_BF_ECAP_REG_SLADS_MASK));  /* No Second-level access/dirty support. */
+            if ((fPtPerm & fReqPerm) == fReqPerm)
             { /* likely */ }
             else
             {
-                if ((GCPhysPtEntity & X86_PAGE_BASE_MASK) == pMemReqAux->GCPhysSlPt)
-                    dmarAtFaultRecord(pDevIns, kDmarDiag_At_Xm_Slpptr_Read_Failed, pMemReqIn, pMemReqAux);
+                if ((fPtPerm & (VTD_BF_SL_PTE_R_MASK | VTD_BF_SL_PTE_W_MASK)) == 0)
+                    dmarAtFaultRecord(pDevIns, kDmarDiag_At_Xm_Pte_Not_Present, pMemReqIn, pMemReqAux);
+                else if ((pMemReqIn->AddrRange.fPerm & DMAR_PERM_READ) != (fPtPerm & VTD_BF_SL_PTE_R_MASK))
+                    dmarAtFaultRecord(pDevIns, kDmarDiag_At_Xm_Perm_Read_Denied, pMemReqIn, pMemReqAux);
                 else
-                    dmarAtFaultRecord(pDevIns, kDmarDiag_At_Xm_Read_Pte_Failed, pMemReqIn, pMemReqAux);
+                    dmarAtFaultRecord(pDevIns, kDmarDiag_At_Xm_Perm_Write_Denied, pMemReqIn, pMemReqAux);
                 break;
             }
-        }
-
-        /*
-         * Check I/O permissions.
-         * This must be done prior to check reserved bits for properly reporting errors SSL.2 and SSL.3.
-         * See Intel spec. 7.1.3 "Fault conditions and Remapping hardware behavior for various request".
-         */
-        uint8_t const fReqPerm = pMemReqIn->AddrRange.fPerm & pThis->fPermValidMask;
-        uint8_t const fPtPerm  = uPtEntity & pThis->fPermValidMask;
-        Assert(!(fReqPerm & DMAR_PERM_EXE));                        /* No Execute-requests support yet. */
-        Assert(!(pThis->fExtCapReg & VTD_BF_ECAP_REG_SLADS_MASK));  /* No Second-level access/dirty support. */
-        if ((fPtPerm & fReqPerm) == fReqPerm)
-        { /* likely */ }
-        else
-        {
-            if ((fPtPerm & (VTD_BF_SL_PTE_R_MASK | VTD_BF_SL_PTE_W_MASK)) == 0)
-                dmarAtFaultRecord(pDevIns, kDmarDiag_At_Xm_Pte_Not_Present, pMemReqIn, pMemReqAux);
-            else if ((pMemReqIn->AddrRange.fPerm & DMAR_PERM_READ) != (fPtPerm & VTD_BF_SL_PTE_R_MASK))
-                dmarAtFaultRecord(pDevIns, kDmarDiag_At_Xm_Perm_Read_Denied, pMemReqIn, pMemReqAux);
+
+            /*
+             * Validate reserved bits of the current paging entry.
+             */
+            if (!(uPtEntity & s_auPtEntityInvMasks[(uintptr_t)idxLevel]))
+            { /* likely */ }
            else
-                dmarAtFaultRecord(pDevIns, kDmarDiag_At_Xm_Perm_Write_Denied, pMemReqIn, pMemReqAux);
-            break;
-        }
-
-        /*
-         * Validate reserved bits of the current paging entry.
-         */
-        if (!(uPtEntity & s_auPtEntityInvMasks[idxLevel]))
-        { /* likely */ }
-        else
-        {
-            dmarAtFaultRecord(pDevIns, kDmarDiag_At_Xm_Pte_Rsvd, pMemReqIn, pMemReqAux);
-            break;
-        }
-
-        /*
-         * Check if this is a 1GB page or a 2MB page.
-         */
-        AssertCompile(VTD_BF_SL_PDE_PS_MASK == VTD_BF_SL_PDPE_PS_MASK);
-        uint8_t const fLargePage = RT_BF_GET(uPtEntity, VTD_BF_SL_PDE_PS);
-        if (fLargePage && idxLevel > 0)
-        {
-            Assert(idxLevel == 1 || idxLevel == 2);   /* Is guaranteed by the reserved bits check above. */
-            uint8_t const fSllpsMask = RT_BF_GET(pThis->fCapReg, VTD_BF_CAP_REG_SLLPS);
-            if (fSllpsMask & RT_BIT(idxLevel - 1))
             {
-                /*
-                 * We don't support MTS (asserted below), hence IPAT and EMT fields of the paging entity are ignored.
-                 * All other reserved bits are identical to the regular page-size paging entity which we've already
-                 * checked above.
-                 */
-                Assert(!(pThis->fExtCapReg & VTD_BF_ECAP_REG_MTS_MASK));
-
+                dmarAtFaultRecord(pDevIns, kDmarDiag_At_Xm_Pte_Rsvd, pMemReqIn, pMemReqAux);
+                break;
+            }
+
+            /*
+             * Check if this is a 1GB page or a 2MB page.
+             */
+            AssertCompile(VTD_BF_SL_PDE_PS_MASK == VTD_BF_SL_PDPE_PS_MASK);
+            uint8_t const fLargePage = RT_BF_GET(uPtEntity, VTD_BF_SL_PDE_PS);
+            if (fLargePage && idxLevel > 0)
+            {
+                Assert(idxLevel == 1 || idxLevel == 2);   /* Is guaranteed by the reserved bits check above. */
+                uint8_t const fSllpsMask = RT_BF_GET(pThis->fCapReg, VTD_BF_CAP_REG_SLLPS);
+                if (fSllpsMask & RT_BIT(idxLevel - 1))
+                {
+                    /*
+                     * We don't support MTS (asserted below), hence IPAT and EMT fields of the paging entity are ignored.
+                     * All other reserved bits are identical to the regular page-size paging entity which we've already
+                     * checked above.
+                     */
+                    Assert(!(pThis->fExtCapReg & VTD_BF_ECAP_REG_MTS_MASK));
+
+                    RTGCPHYS const GCPhysBase = uPtEntity & X86_GET_PAGE_BASE_MASK(cLevelShift);
+                    return dmarDrUpdateIoPageOut(pDevIns, GCPhysBase, cLevelShift, fPtPerm, pMemReqIn, pMemReqAux, pIoPageOut);
+                }
+
+                dmarAtFaultRecord(pDevIns, kDmarDiag_At_Xm_Pte_Sllps_Invalid, pMemReqIn, pMemReqAux);
+                break;
+            }
+
+            /*
+             * If this is the final PTE, compute the translation address and we're done.
+             */
+            if (idxLevel == 0)
+            {
                 RTGCPHYS const GCPhysBase = uPtEntity & X86_GET_PAGE_BASE_MASK(cLevelShift);
                 return dmarDrUpdateIoPageOut(pDevIns, GCPhysBase, cLevelShift, fPtPerm, pMemReqIn, pMemReqAux, pIoPageOut);
             }
-
-            dmarAtFaultRecord(pDevIns, kDmarDiag_At_Xm_Pte_Sllps_Invalid, pMemReqIn, pMemReqAux);
-            break;
-        }
-
-        /*
-         * If this is the final PTE, compute the translation address and we're done.
-         */
-        if (idxLevel == 0)
-        {
-            RTGCPHYS const GCPhysBase = uPtEntity & X86_GET_PAGE_BASE_MASK(cLevelShift);
-            return dmarDrUpdateIoPageOut(pDevIns, GCPhysBase, cLevelShift, fPtPerm, pMemReqIn, pMemReqAux, pIoPageOut);
        }
     }
@@ -3449,6 +3455,5 @@
                         continue;
                     }
-                    else
-                        dmarIqeFaultRecord(pDevIns, kDmarDiag_IqaReg_Dsc_Fetch_Error, VTDIQEI_FETCH_DESCRIPTOR_ERR);
+                    dmarIqeFaultRecord(pDevIns, kDmarDiag_IqaReg_Dsc_Fetch_Error, VTDIQEI_FETCH_DESCRIPTOR_ERR);
                 }
                 else