VirtualBox

Timestamp: Sep 15, 2007 11:33:05 PM
Author:    vboxsync
Message:   Removed the old MM code.
Location:  trunk/src/VBox/HostDrivers/Support
Files:     3 edited
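The removed code is the pre-IPRT memory-manager backend: the per-OS helpers (supdrvOSLockMemOne, supdrvOSContAllocOne, supdrvOSLowAllocOne, supdrvOSMemAllocOne and their unlock/free counterparts) that were only compiled when USE_NEW_OS_INTERFACE_FOR_MM was not defined. The surviving paths go through IPRT ring-0 memory objects instead. As a minimal sketch of that lifecycle, using only calls that appear elsewhere in this changeset (error handling trimmed; cb is assumed to be a page-aligned size):

    RTR0MEMOBJ MemObj   = NIL_RTR0MEMOBJ;
    RTR0MEMOBJ MapObjR3 = NIL_RTR0MEMOBJ;
    int rc = RTR0MemObjAllocLow(&MemObj, cb, true /* executable ring-0 mapping */);
    if (RT_SUCCESS(rc))
    {
        rc = RTR0MemObjMapUser(&MapObjR3, MemObj, (RTR3PTR)-1, 0,
                               RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
        if (RT_SUCCESS(rc))
        {
            void    *pvR0 = RTR0MemObjAddress(MemObj);      /* ring-0 mapping */
            RTR3PTR  pvR3 = RTR0MemObjAddressR3(MapObjR3);  /* ring-3 mapping */
            RTHCPHYS Phys = RTR0MemObjGetPagePhysAddr(MemObj, 0 /* iPage */);
            /* ... use the memory ... */
            int rc2 = RTR0MemObjFree(MapObjR3, false /* fFreeMappings */);
            AssertRC(rc2);
        }
        RTR0MemObjFree(MemObj, false);
    }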

  • trunk/src/VBox/HostDrivers/Support/SUPDRVShared.c

    r4829 r4831  
    9191    { "SUPR0MemGetPhys",                        (void *)SUPR0MemGetPhys },
    9292    { "SUPR0MemFree",                           (void *)SUPR0MemFree },
    93 #ifdef USE_NEW_OS_INTERFACE_FOR_MM
    9493    { "SUPR0PageAlloc",                         (void *)SUPR0PageAlloc },
    9594    { "SUPR0PageFree",                          (void *)SUPR0PageFree },
    96 #endif
    9795    { "SUPR0Printf",                            (void *)SUPR0Printf },
    9896    { "RTMemAlloc",                             (void *)RTMemAlloc },
     
    464462        for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
    465463        {
    466 #ifdef USE_NEW_OS_INTERFACE_FOR_MM
    467464            if (pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ)
    468465            {
     
    481478                pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
    482479            }
    483 
    484 #else /* !USE_NEW_OS_INTERFACE_FOR_MM */
    485             if (    pBundle->aMem[i].pvR0
    486                 ||  pBundle->aMem[i].pvR3)
    487             {
    488                 dprintf2(("eType=%d pvR0=%p pvR3=%p cb=%d\n", pBundle->aMem[i].eType,
    489                           pBundle->aMem[i].pvR0, pBundle->aMem[i].pvR3, pBundle->aMem[i].cb));
    490                 switch (pBundle->aMem[i].eType)
    491                 {
    492                     case MEMREF_TYPE_LOCKED:
    493                         supdrvOSUnlockMemOne(&pBundle->aMem[i]);
    494                         break;
    495                     case MEMREF_TYPE_CONT:
    496                         supdrvOSContFreeOne(&pBundle->aMem[i]);
    497                         break;
    498                     case MEMREF_TYPE_LOW:
    499                         supdrvOSLowFreeOne(&pBundle->aMem[i]);
    500                         break;
    501                     case MEMREF_TYPE_MEM:
    502                         supdrvOSMemFreeOne(&pBundle->aMem[i]);
    503                         break;
    504                     default:
    505                         break;
    506                 }
    507                 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
    508             }
    509 #endif /* !USE_NEW_OS_INTERFACE_FOR_MM */
    510480        }
    511481
     
    10561026        }
    10571027
    1058 #ifdef USE_NEW_OS_INTERFACE_FOR_MM
    10591028        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_ALLOC):
    10601029        {
     
    10811050            return 0;
    10821051        }
    1083 #endif /* USE_NEW_OS_INTERFACE_FOR_MM */
    10841052
    10851053        default:
     
    14341402    }
    14351403
    1436 #ifdef USE_NEW_OS_INTERFACE_FOR_MM
    1437 # ifdef RT_OS_WINDOWS /* A temporary hack for windows, will be removed once all ring-3 code has been cleaned up. */
     1404#ifdef RT_OS_WINDOWS /* A temporary hack for windows, will be removed once all ring-3 code has been cleaned up. */
    14381405    /* First check if we allocated it using SUPPageAlloc; if so then we don't need to lock it again */
    14391406    rc = supdrvPageGetPhys(pSession, pvR3, cPages, paPages);
    14401407    if (RT_SUCCESS(rc))
    14411408        return rc;
    1442 # endif
     1409#endif
    14431410
    14441411    /*
     
    14721439    }
    14731440
    1474 #else /* !USE_NEW_OS_INTERFACE_FOR_MM */
    1475 
    1476     /*
    1477      * Let the OS specific code have a go.
    1478      */
    1479     Mem.pvR0    = NULL;
    1480     Mem.pvR3    = pvR3;
    1481     Mem.eType   = MEMREF_TYPE_LOCKED;
    1482     Mem.cb      = cb;
    1483     rc = supdrvOSLockMemOne(&Mem, paPages);
    1484     if (rc)
    1485         return rc;
    1486 
    1487     /*
    1488      * Everything went fine; add the memory reference to the session.
    1489      */
    1490     rc = supdrvMemAdd(&Mem, pSession);
    1491     if (rc)
    1492         supdrvOSUnlockMemOne(&Mem);
    1493 #endif /* !USE_NEW_OS_INTERFACE_FOR_MM */
    14941441    return rc;
    14951442}
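The surviving branch above locks the user pages through an IPRT memory object and queries the physical address of each page. A hedged sketch of that pattern, reusing pvR3, cPages and paPages from this function (the RTR0MemObjLockUser parameter list is an assumption; the other calls appear in this changeset):

    RTR0MEMOBJ MemObj = NIL_RTR0MEMOBJ;
    /* Assumed signature: lock cPages of user memory at pvR3 in the current process. */
    rc = RTR0MemObjLockUser(&MemObj, pvR3, (size_t)cPages << PAGE_SHIFT, RTR0ProcHandleSelf());
    if (RT_SUCCESS(rc))
    {
        uint32_t iPage;
        for (iPage = 0; iPage < cPages; iPage++)
        {
            paPages[iPage].Phys      = RTR0MemObjGetPagePhysAddr(MemObj, iPage);
            paPages[iPage].uReserved = 0;
        }
        /* ... later: RTR0MemObjFree(MemObj, false); */
    }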
     
    15561503    }
    15571504
    1558 #ifdef USE_NEW_OS_INTERFACE_FOR_MM
    15591505    /*
    15601506     * Let IPRT do the job.
     
    15851531    }
    15861532
    1587 #else /* !USE_NEW_OS_INTERFACE_FOR_MM */
    1588 
    1589     /*
    1590      * Let the OS specific code have a go.
    1591      */
    1592     Mem.pvR0    = NULL;
    1593     Mem.pvR3    = NIL_RTR3PTR;
    1594     Mem.eType   = MEMREF_TYPE_CONT;
    1595     Mem.cb      = cPages << PAGE_SHIFT;
    1596     rc = supdrvOSContAllocOne(&Mem, ppvR0, ppvR3, pHCPhys);
    1597     if (rc)
    1598         return rc;
    1599     AssertMsg(!((uintptr_t)*ppvR3 & (PAGE_SIZE - 1)) && !(*pHCPhys & (PAGE_SIZE - 1)),
    1600               ("Memory is not page aligned! *ppvR0=%p *ppvR3=%p phys=%VHp\n", ppvR0 ? *ppvR0 : NULL, *ppvR3, *pHCPhys));
    1601 
    1602     /*
    1603      * Everything went fine; add the memory reference to the session.
    1604      */
    1605     rc = supdrvMemAdd(&Mem, pSession);
    1606     if (rc)
    1607         supdrvOSContFreeOne(&Mem);
    1608 #endif /* !USE_NEW_OS_INTERFACE_FOR_MM */
    1609 
    16101533    return rc;
    16111534}
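Here the replacement is a physically contiguous allocation; because the backing is contiguous, the base physical address follows from page zero. A hedged sketch (RTR0MemObjAllocCont is assumed to take the same (pMemObj, cb, fExecutable) shape as RTR0MemObjAllocLow elsewhere in this changeset):

    RTR0MEMOBJ MemObj = NIL_RTR0MEMOBJ;
    rc = RTR0MemObjAllocCont(&MemObj, cPages << PAGE_SHIFT, true /* fExecutable (assumed) */);
    if (RT_SUCCESS(rc))
    {
        void    *pvR0   = RTR0MemObjAddress(MemObj);
        RTHCPHYS HCPhys = RTR0MemObjGetPagePhysAddr(MemObj, 0); /* contiguous, so page 0 is the base */
        /* ... map to ring-3 with RTR0MemObjMapUser() as in the other paths ... */
    }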
     
    16611584    }
    16621585
    1663 #ifdef USE_NEW_OS_INTERFACE_FOR_MM
    16641586    /*
    16651587     * Let IPRT do the work.
     
    16951617    }
    16961618
    1697 #else /* !USE_NEW_OS_INTERFACE_FOR_MM */
    1698 
    1699     /*
    1700      * Let the OS specific code have a go.
    1701      */
    1702     Mem.pvR0    = NULL;
    1703     Mem.pvR3    = NIL_RTR3PTR;
    1704     Mem.eType   = MEMREF_TYPE_LOW;
    1705     Mem.cb      = cPages << PAGE_SHIFT;
    1706     rc = supdrvOSLowAllocOne(&Mem, ppvR0, ppvR3, paPages);
    1707     if (rc)
    1708         return rc;
    1709     AssertMsg(!((uintptr_t)*ppvR3 & (PAGE_SIZE - 1)), ("Memory is not page aligned! virt=%p\n", *ppvR3));
    1710     AssertMsg(!((uintptr_t)*ppvR0 & (PAGE_SIZE - 1)), ("Memory is not page aligned! virt=%p\n", *ppvR0));
    1711     for (iPage = 0; iPage < cPages; iPage++)
    1712         AssertMsg(!(paPages[iPage].Phys & (PAGE_SIZE - 1)), ("iPage=%d Phys=%VHp\n", paPages[iPage].Phys));
    1713 
    1714     /*
    1715      * Everything went fine; add the memory reference to the session.
    1716      */
    1717     rc = supdrvMemAdd(&Mem, pSession);
    1718     if (rc)
    1719         supdrvOSLowFreeOne(&Mem);
    1720 #endif /* !USE_NEW_OS_INTERFACE_FOR_MM */
    17211619    return rc;
    17221620}
     
    17671665    }
    17681666
    1769 #ifdef USE_NEW_OS_INTERFACE_FOR_MM
    17701667    /*
    17711668     * Let IPRT do the work.
     
    17951692    }
    17961693
    1797 #else /* !USE_NEW_OS_INTERFACE_FOR_MM */
    1798 
    1799     /*
    1800      * Let the OS specific code have a go.
    1801      */
    1802     Mem.pvR0    = NULL;
    1803     Mem.pvR3    = NIL_RTR3PTR;
    1804     Mem.eType   = MEMREF_TYPE_MEM;
    1805     Mem.cb      = cb;
    1806     rc = supdrvOSMemAllocOne(&Mem, ppvR0, ppvR3);
    1807     if (rc)
    1808         return rc;
    1809     AssertMsg(!((uintptr_t)*ppvR0 & (PAGE_SIZE - 1)), ("Memory is not page aligned! pvR0=%p\n", *ppvR0));
    1810     AssertMsg(!((uintptr_t)*ppvR3 & (PAGE_SIZE - 1)), ("Memory is not page aligned! pvR3=%p\n", *ppvR3));
    1811 
    1812     /*
    1813      * Everything went fine; add the memory reference to the session.
    1814      */
    1815     rc = supdrvMemAdd(&Mem, pSession);
    1816     if (rc)
    1817         supdrvOSMemFreeOne(&Mem);
    1818 #endif /* !USE_NEW_OS_INTERFACE_FOR_MM */
    18191694    return rc;
    18201695}
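For SUPR0MemAlloc the pages need not be contiguous, so each page's physical address has to be queried individually after mapping. A hedged sketch (the RTR0MemObjAllocPage parameter list is an assumption; the map and query calls appear in this changeset; error cleanup trimmed):

    RTR0MEMOBJ MemObj = NIL_RTR0MEMOBJ, MapObjR3 = NIL_RTR0MEMOBJ;
    rc = RTR0MemObjAllocPage(&MemObj, cb, true /* fExecutable (assumed) */);
    if (RT_SUCCESS(rc))
        rc = RTR0MemObjMapUser(&MapObjR3, MemObj, (RTR3PTR)-1, 0,
                               RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
    if (RT_SUCCESS(rc))
    {
        size_t iPage;
        for (iPage = 0; iPage < (cb >> PAGE_SHIFT); iPage++)
            paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(MemObj, iPage); /* scattered pages */
    }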
     
    18531728            for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
    18541729            {
    1855 #ifdef USE_NEW_OS_INTERFACE_FOR_MM
    18561730                if (    pBundle->aMem[i].eType == MEMREF_TYPE_MEM
    18571731                    &&  pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
     
    18721746                    return VINF_SUCCESS;
    18731747                }
    1874 #else /* !USE_NEW_OS_INTERFACE_FOR_MM */
    1875                 if (    pBundle->aMem[i].eType == MEMREF_TYPE_MEM
    1876                     &&  (   (RTHCUINTPTR)pBundle->aMem[i].pvR0 == uPtr
    1877                          || (RTHCUINTPTR)pBundle->aMem[i].pvR3 == uPtr))
    1878                 {
    1879                     supdrvOSMemGetPages(&pBundle->aMem[i], paPages);
    1880                     RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
    1881                     return 0;
    1882                 }
    1883 #endif
    18841748            }
    18851749        }
     
    19061770
    19071771
    1908 #ifdef USE_NEW_OS_INTERFACE_FOR_MM
    19091772/**
    19101773 * Allocates a chunk of memory with only an R3 mapping.
     
    20651928#endif /* RT_OS_WINDOWS */
    20661929
     1930
    20671931/**
    20681932 * Free memory allocated by SUPR0PageAlloc().
     
    20781942    return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_LOCKED_SUP);
    20791943}
    2080 #endif /* USE_NEW_OS_INTERFACE_FOR_MM */
    20811944
    20821945
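From the exported names and the release call above, ring-0 client usage of the new pair is roughly as follows (the SUPR0PageAlloc parameter list is an assumption; only the names and the fact that SUPR0PageFree releases by the ring-3 address are taken from this changeset):

    RTR3PTR pvR3 = NIL_RTR3PTR;
    rc = SUPR0PageAlloc(pSession, cPages, &pvR3, paPages);  /* assumed parameters */
    if (RT_SUCCESS(rc))
    {
        /* ... hand pvR3 to ring-3 code ... */
        rc = SUPR0PageFree(pSession, pvR3);                 /* frees by the ring-3 address */
    }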
     
    22812144            for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
    22822145            {
    2283 #ifdef USE_NEW_OS_INTERFACE_FOR_MM
    22842146                if (pBundle->aMem[i].MemObj == NIL_RTR0MEMOBJ)
    2285 #else  /* !USE_NEW_OS_INTERFACE_FOR_MM */
    2286                 if (    !pBundle->aMem[i].pvR0
    2287                     &&  !pBundle->aMem[i].pvR3)
    2288 #endif /* !USE_NEW_OS_INTERFACE_FOR_MM */
    22892147                {
    22902148                    pBundle->cUsed++;
     
    23542212            for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
    23552213            {
    2356 #ifdef USE_NEW_OS_INTERFACE_FOR_MM
    23572214                if (    pBundle->aMem[i].eType == eType
    23582215                    &&  pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
     
    23812238                    return VINF_SUCCESS;
    23822239                }
    2383 #else /* !USE_NEW_OS_INTERFACE_FOR_MM */
    2384                 if (    pBundle->aMem[i].eType == eType
    2385                     &&  (   (RTHCUINTPTR)pBundle->aMem[i].pvR0 == uPtr
    2386                          || (RTHCUINTPTR)pBundle->aMem[i].pvR3 == uPtr))
    2387                 {
    2388                     /* Make a copy of it and release it outside the spinlock. */
    2389                     SUPDRVMEMREF Mem = pBundle->aMem[i];
    2390                     pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
    2391                     pBundle->aMem[i].pvR0  = NULL;
    2392                     pBundle->aMem[i].pvR3  = NIL_RTR3PTR;
    2393                     pBundle->aMem[i].cb    = 0;
    2394                     RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
    2395 
    2396                     /* Type specific free operation. */
    2397                     switch (Mem.eType)
    2398                     {
    2399                         case MEMREF_TYPE_LOCKED:
    2400                             supdrvOSUnlockMemOne(&Mem);
    2401                             break;
    2402                         case MEMREF_TYPE_CONT:
    2403                             supdrvOSContFreeOne(&Mem);
    2404                             break;
    2405                         case MEMREF_TYPE_LOW:
    2406                             supdrvOSLowFreeOne(&Mem);
    2407                             break;
    2408                         case MEMREF_TYPE_MEM:
    2409                             supdrvOSMemFreeOne(&Mem);
    2410                             break;
    2411                         default:
    2412                         case MEMREF_TYPE_UNUSED:
    2413                             break;
    2414                     }
    2415                     return VINF_SUCCESS;
    2416                }
    2417 #endif /* !USE_NEW_OS_INTERFACE_FOR_MM */
    24182240            }
    24192241        }
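Worth noting: the removed branch and its replacement share the same discipline of copying the memory record and marking the slot MEMREF_TYPE_UNUSED while the spinlock is held, then performing the type-specific free only after RTSpinlockRelease, since the OS free routines may block and must not run under a spinlock.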
     
    37023524
    37033525
    3704 #if !defined(SUPDRV_OS_HAVE_LOW) && !defined(USE_NEW_OS_INTERFACE_FOR_MM)  /* Use same backend as the contiguous stuff */
    3705 /**
    3706  * OS Specific code for allocating page aligned memory with fixed
    3707  * physical backing below 4GB.
    3708  *
    3709  * @returns 0 on success.
    3710  * @returns SUPDRV_ERR_* on failure.
    3711  * @param   pMem        Memory reference record of the memory to be allocated.
    3712  *                      (This is not linked in anywhere.)
    3713  * @param   ppvR0       Where to store the Ring-0 mapping of the allocated memory.
    3714  * @param   ppvR3       Where to store the Ring-3 mapping of the allocated memory.
    3715  * @param   paPagesOut  Where to store the physical addresses.
    3716  */
    3717 int VBOXCALL supdrvOSLowAllocOne(PSUPDRVMEMREF pMem, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PSUPPAGE paPagesOut)
    3718 {
    3719 #if defined(USE_NEW_OS_INTERFACE_FOR_LOW)  /* a temp hack */
    3720     int rc = RTR0MemObjAllocLow(&pMem->u.iprt.MemObj, pMem->cb, true /* executable ring-0 mapping */);
    3721     if (RT_SUCCESS(rc))
    3722     {
    3723         int rc2;
    3724         rc = RTR0MemObjMapUser(&pMem->u.iprt.MapObjR3, pMem->u.iprt.MemObj, (RTR3PTR)-1, 0,
    3725                                RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
    3726         if (RT_SUCCESS(rc))
    3727         {
    3728             pMem->eType = MEMREF_TYPE_LOW;
    3729             pMem->pvR0 = RTR0MemObjAddress(pMem->u.iprt.MemObj);
    3730             pMem->pvR3 = RTR0MemObjAddressR3(pMem->u.iprt.MapObjR3);
    3731             /*if (RT_SUCCESS(rc))*/
    3732             {
    3733                 size_t  cPages = pMem->cb >> PAGE_SHIFT;
    3734                 size_t  iPage;
    3735                 for (iPage = 0; iPage < cPages; iPage++)
    3736                 {
    3737                     paPagesOut[iPage].Phys = RTR0MemObjGetPagePhysAddr(pMem->u.iprt.MemObj, iPage);
    3738                     paPagesOut[iPage].uReserved = 0;
    3739                     AssertMsg(!(paPagesOut[iPage].Phys & (PAGE_SIZE - 1)), ("iPage=%d Phys=%VHp\n", paPagesOut[iPage].Phys));
    3740                 }
    3741                 *ppvR0 = RTR0MemObjAddress(pMem->u.iprt.MemObj);
    3742                 *ppvR3 = RTR0MemObjAddressR3(pMem->u.iprt.MapObjR3);
    3743                 return VINF_SUCCESS;
    3744             }
    3745 
    3746             rc2 = RTR0MemObjFree(pMem->u.iprt.MapObjR3, false);
    3747             AssertRC(rc2);
    3748         }
    3749 
    3750         rc2 = RTR0MemObjFree(pMem->u.iprt.MemObj, false);
    3751         AssertRC(rc2);
    3752     }
    3753     return rc;
    3754 #else
    3755     RTHCPHYS HCPhys;
    3756     int rc = supdrvOSContAllocOne(pMem, ppvR0, ppvR3, &HCPhys);
    3757     if (!rc)
    3758     {
    3759         unsigned iPage = pMem->cb >> PAGE_SHIFT;
    3760         while (iPage-- > 0)
    3761         {
    3762             paPagesOut[iPage].Phys = HCPhys + (iPage << PAGE_SHIFT);
    3763             paPagesOut[iPage].uReserved = 0;
    3764         }
    3765     }
    3766     return rc;
    3767 #endif
    3768 }
    3769 
    3770 
    3771 /**
    3772  * Frees low memory.
    3773  *
    3774  * @param   pMem    Memory reference record of the memory to be freed.
    3775  */
    3776 void VBOXCALL supdrvOSLowFreeOne(PSUPDRVMEMREF pMem)
    3777 {
    3778 # if defined(USE_NEW_OS_INTERFACE_FOR_LOW)
    3779     if (pMem->u.iprt.MapObjR3)
    3780     {
    3781         int rc = RTR0MemObjFree(pMem->u.iprt.MapObjR3, false);
    3782         AssertRC(rc); /** @todo figure out how to handle this. */
    3783     }
    3784     if (pMem->u.iprt.MemObj)
    3785     {
    3786         int rc = RTR0MemObjFree(pMem->u.iprt.MemObj, false);
    3787         AssertRC(rc); /** @todo figure out how to handle this. */
    3788     }
    3789 # else
    3790     supdrvOSContFreeOne(pMem);
    3791 # endif
    3792 }
    3793 #endif /* !SUPDRV_OS_HAVE_LOW && !USE_NEW_OS_INTERFACE_FOR_MM */
    3794 
    3795 
    37963526#ifdef USE_NEW_OS_INTERFACE_FOR_GIP
    37973527/**
  • trunk/src/VBox/HostDrivers/Support/linux/SUPDrv-linux.c

    r4827 r4831  
    290290static int      VBoxDrvLinuxIOCtl(struct inode *pInode, struct file *pFilp, unsigned int uCmd, unsigned long ulArg);
    291291static int      VBoxDrvLinuxIOCtlSlow(struct inode *pInode, struct file *pFilp, unsigned int uCmd, unsigned long ulArg);
    292 #ifndef USE_NEW_OS_INTERFACE_FOR_MM
    293 static RTR3PTR  VBoxDrvLinuxMapUser(struct page **papPages, unsigned cPages, unsigned fProt, pgprot_t pgFlags);
    294 #endif /* !USE_NEW_OS_INTERFACE_FOR_MM */
    295292static int      VBoxDrvLinuxInitGip(PSUPDRVDEVEXT pDevExt);
    296293static int      VBoxDrvLinuxTermGip(PSUPDRVDEVEXT pDevExt);
     
    887884
    888885
    889 #ifndef USE_NEW_OS_INTERFACE_FOR_MM
    890 
    891 /**
    892  * Compute order. Some functions allocate 2^order pages.
    893  *
    894  * @returns order.
    895  * @param   cPages      Number of pages.
    896  */
    897 static int VBoxDrvOrder(unsigned long cPages)
    898 {
    899     int             iOrder;
    900     unsigned long   cTmp;
    901 
    902     for (iOrder = 0, cTmp = cPages; cTmp >>= 1; ++iOrder)
    903         ;
    904     if (cPages & ~(1 << iOrder))
    905         ++iOrder;
    906 
    907     return iOrder;
    908 }
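A few sample values follow directly from the loop above (hypothetical self-checks, not part of the driver):

    Assert(VBoxDrvOrder(1) == 0);   /* 2^0 = 1 page  */
    Assert(VBoxDrvOrder(2) == 1);   /* 2^1 = 2 pages */
    Assert(VBoxDrvOrder(3) == 2);   /* rounds up to 2^2 = 4 pages */
    Assert(VBoxDrvOrder(4) == 2);
    Assert(VBoxDrvOrder(5) == 3);   /* rounds up to 2^3 = 8 pages */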
    909 
    910 
    911 /**
    912  * OS Specific code for locking down memory.
    913  *
    914  * @returns 0 on success.
    915  * @returns SUPDRV_ERR_* on failure.
    916  * @param   pMem        Pointer to memory.
    917  *                      This is not linked in anywhere.
    918  * @param   paPages     Array which should be filled with the address of the physical pages.
    919  *
    920  * @remark  See sgl_map_user_pages() for an example of a similar function.
    921  */
    922 int  VBOXCALL   supdrvOSLockMemOne(PSUPDRVMEMREF pMem, PSUPPAGE paPages)
    923 {
    924     int         rc;
    925     struct page **papPages;
    926     unsigned    iPage;
    927     unsigned    cPages = pMem->cb >> PAGE_SHIFT;
    928     unsigned long pv = (unsigned long)pMem->pvR3;
    929     struct vm_area_struct **papVMAs;
    930 
    931     /*
    932      * Allocate page pointer array.
    933      */
    934     papPages = vmalloc(cPages * sizeof(*papPages));
    935     if (!papPages)
    936         return SUPDRV_ERR_NO_MEMORY;
    937 
    938     /*
    939      * Allocate the VMA pointer array.
    940      */
    941     papVMAs = vmalloc(cPages * sizeof(*papVMAs));
    942     if (!papVMAs)
    943         return SUPDRV_ERR_NO_MEMORY;
    944 
    945     /*
    946      * Get user pages.
    947      */
    948     down_read(&current->mm->mmap_sem);
    949     rc = get_user_pages(current,                /* Task for fault accounting. */
    950                         current->mm,            /* Whose pages. */
    951                         (unsigned long)pv,      /* Where from. */
    952                         cPages,                 /* How many pages. */
    953                         1,                      /* Write to memory. */
    954                         0,                      /* force. */
    955                         papPages,               /* Page array. */
    956                         papVMAs);               /* vmas */
    957     if (rc != cPages)
    958     {
    959         up_read(&current->mm->mmap_sem);
    960         dprintf(("supdrvOSLockMemOne: get_user_pages failed. rc=%d\n", rc));
    961         return SUPDRV_ERR_LOCK_FAILED;
    962     }
    963 
    964     for (iPage = 0; iPage < cPages; iPage++)
    965         flush_dcache_page(papPages[iPage]);
    966     up_read(&current->mm->mmap_sem);
    967 
    968     pMem->u.locked.papPages = papPages;
    969     pMem->u.locked.cPages = cPages;
    970 
    971     /*
    972      * Get addresses, protect against fork()
    973      */
    974     for (iPage = 0; iPage < cPages; iPage++)
    975     {
    976         paPages[iPage].Phys = page_to_phys(papPages[iPage]);
    977         paPages[iPage].uReserved = 0;
    978         papVMAs[iPage]->vm_flags |= VM_DONTCOPY;
    979     }
    980 
    981     vfree(papVMAs);
    982 
    983     dprintf2(("supdrvOSLockMemOne: pvR3=%p cb=%d papPages=%p\n",
    984               pMem->pvR3, pMem->cb, pMem->u.locked.papPages));
    985     return 0;
    986 }
    987 
    988 
    989 /**
    990  * Unlocks the memory pointed to by pv.
    991  *
    992  * @param   pMem  Pointer to memory to unlock.
    993  *
    994  * @remark  See sgl_unmap_user_pages() for an example of a similar function.
    995  */
    996 void VBOXCALL supdrvOSUnlockMemOne(PSUPDRVMEMREF pMem)
    997 {
    998     unsigned    iPage;
    999     dprintf2(("supdrvOSUnlockMemOne: pvR3=%p cb=%d papPages=%p\n",
    1000               pMem->pvR3, pMem->cb, pMem->u.locked.papPages));
    1001 
    1002     /*
    1003      * Loop through the pages and release them.
    1004      */
    1005     for (iPage = 0; iPage < pMem->u.locked.cPages; iPage++)
    1006     {
    1007         if (!PageReserved(pMem->u.locked.papPages[iPage]))
    1008             SetPageDirty(pMem->u.locked.papPages[iPage]);
    1009         page_cache_release(pMem->u.locked.papPages[iPage]);
    1010     }
    1011 
    1012     /* free the page array */
    1013     vfree(pMem->u.locked.papPages);
    1014     pMem->u.locked.cPages = 0;
    1015 }
    1016 
    1017 
    1018 /**
    1019  * OS Specific code for allocating page aligned memory with contiguous
    1020  * fixed physical backing.
    1021  *
    1022  * @returns 0 on success.
    1023  * @returns SUPDRV_ERR_* on failure.
    1024  * @param   pMem        Memory reference record of the memory to be allocated.
    1025  *                      (This is not linked in anywhere.)
    1026  * @param   ppvR0       Where to store the virtual address of the ring-0 mapping. (optional)
    1027  * @param   ppvR3       Where to store the virtual address of the ring-3 mapping.
    1028  * @param   pHCPhys     Where to store the physical address.
    1029  */
    1030 int VBOXCALL supdrvOSContAllocOne(PSUPDRVMEMREF pMem, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys)
    1031 {
    1032     struct page *paPages;
    1033     unsigned    iPage;
    1034     unsigned    cbAligned = RT_ALIGN(pMem->cb, PAGE_SIZE);
    1035     unsigned    cPages = cbAligned >> PAGE_SHIFT;
    1036     unsigned    cOrder = VBoxDrvOrder(cPages);
    1037     unsigned long ulAddr;
    1038     dma_addr_t  HCPhys;
    1039     int         rc = 0;
    1040     pgprot_t    pgFlags;
    1041     pgprot_val(pgFlags) = _PAGE_PRESENT | _PAGE_RW | _PAGE_USER;
    1042 
    1043     Assert(ppvR3);
    1044     Assert(pHCPhys);
    1045 
    1046     /*
    1047      * Allocate page pointer array.
    1048      */
    1049 #ifdef RT_ARCH_AMD64 /** @todo check out if there is a correct way of getting memory below 4GB (physically). */
    1050     paPages = alloc_pages(GFP_DMA, cOrder);
    1051 #else
    1052     paPages = alloc_pages(GFP_USER, cOrder);
    1053 #endif
    1054     if (!paPages)
    1055         return SUPDRV_ERR_NO_MEMORY;
    1056 
    1057     /*
    1058      * Lock the pages.
    1059      */
    1060     for (iPage = 0; iPage < cPages; iPage++)
    1061     {
    1062         SetPageReserved(&paPages[iPage]);
    1063         if (!PageHighMem(&paPages[iPage]) && pgprot_val(MY_PAGE_KERNEL_EXEC) != pgprot_val(PAGE_KERNEL))
    1064             MY_CHANGE_PAGE_ATTR(&paPages[iPage], 1, MY_PAGE_KERNEL_EXEC);
    1065 #ifdef DEBUG
    1066         if (iPage + 1 < cPages && (page_to_phys((&paPages[iPage])) + 0x1000) != page_to_phys((&paPages[iPage + 1])))
    1067         {
    1068             dprintf(("supdrvOSContAllocOne: Pages are not contiguous! iPage=%d phys=%llx physnext=%llx\n",
    1069                      iPage, (long long)page_to_phys((&paPages[iPage])), (long long)page_to_phys((&paPages[iPage + 1]))));
    1070             BUG();
    1071         }
    1072 #endif
    1073     }
    1074     HCPhys = page_to_phys(paPages);
    1075 
    1076     /*
    1077      * Allocate user space mapping and put the physical pages into it.
    1078      */
    1079     down_write(&current->mm->mmap_sem);
    1080     ulAddr = do_mmap(NULL, 0, cbAligned, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_SHARED | MAP_ANONYMOUS, 0);
    1081     if (!(ulAddr & ~PAGE_MASK))
    1082     {
    1083 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) && !defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
    1084         int rc2 = remap_page_range(ulAddr, HCPhys, cbAligned, pgFlags);
    1085 #else
    1086         int rc2 = 0;
    1087         struct vm_area_struct *vma = find_vma(current->mm, ulAddr);
    1088         if (vma)
    1089 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11)
    1090             rc2 = remap_page_range(vma, ulAddr, HCPhys, cbAligned, pgFlags);
    1091 #else
    1092             rc2 = remap_pfn_range(vma, ulAddr, HCPhys >> PAGE_SHIFT, cbAligned, pgFlags);
    1093 #endif
    1094         else
    1095         {
    1096             rc = SUPDRV_ERR_NO_MEMORY;
    1097             dprintf(("supdrvOSContAllocOne: no vma found for ulAddr=%#lx!\n", ulAddr));
    1098         }
    1099 #endif
    1100         if (rc2)
    1101         {
    1102             rc = SUPDRV_ERR_NO_MEMORY;
    1103             dprintf(("supdrvOSContAllocOne: remap_page_range failed rc2=%d\n", rc2));
    1104         }
    1105     }
    1106     else
    1107     {
    1108         dprintf(("supdrvOSContAllocOne: do_mmap failed ulAddr=%#lx\n", ulAddr));
    1109         rc = SUPDRV_ERR_NO_MEMORY;
    1110     }
    1111     up_write(&current->mm->mmap_sem);   /* not quite sure when to give this up. */
    1112 
    1113     /*
    1114      * Success?
    1115      */
    1116     if (!rc)
    1117     {
    1118         *pHCPhys = HCPhys;
    1119         *ppvR3 = ulAddr;
    1120         if (ppvR0)
    1121             *ppvR0 = (void *)ulAddr;
    1122         pMem->pvR3              = ulAddr;
    1123         pMem->pvR0              = NULL;
    1124         pMem->u.cont.paPages    = paPages;
    1125         pMem->u.cont.cPages     = cPages;
    1126         pMem->cb                = cbAligned;
    1127 
    1128         dprintf2(("supdrvOSContAllocOne: pvR0=%p pvR3=%p cb=%d paPages=%p *pHCPhys=%lx *ppvR0=*ppvR3=%p\n",
    1129                   pMem->pvR0, pMem->pvR3, pMem->cb, paPages, (unsigned long)*pHCPhys, *ppvR3));
    1130         global_flush_tlb();
    1131         return 0;
    1132     }
    1133 
    1134     /*
    1135      * Failure, cleanup and be gone.
    1136      */
    1137     down_write(&current->mm->mmap_sem);
    1138     if (ulAddr & ~PAGE_MASK)
    1139         MY_DO_MUNMAP(current->mm, ulAddr, pMem->cb);
    1140     for (iPage = 0; iPage < cPages; iPage++)
    1141     {
    1142         ClearPageReserved(&paPages[iPage]);
    1143         if (!PageHighMem(&paPages[iPage]) && pgprot_val(MY_PAGE_KERNEL_EXEC) != pgprot_val(PAGE_KERNEL))
    1144             MY_CHANGE_PAGE_ATTR(&paPages[iPage], 1, PAGE_KERNEL);
    1145     }
    1146     up_write(&current->mm->mmap_sem);   /* check when we can leave this. */
    1147     __free_pages(paPages, cOrder);
    1148 
    1149     global_flush_tlb();
    1150     return rc;
    1151 }
    1152 
    1153 
    1154 /**
    1155  * Frees contiguous memory.
    1156  *
    1157  * @param   pMem    Memory reference record of the memory to be freed.
    1158  */
    1159 void VBOXCALL supdrvOSContFreeOne(PSUPDRVMEMREF pMem)
    1160 {
    1161     unsigned    iPage;
    1162 
    1163     dprintf2(("supdrvOSContFreeOne: pvR0=%p pvR3=%p cb=%d paPages=%p\n",
    1164               pMem->pvR0, pMem->pvR3, pMem->cb, pMem->u.cont.paPages));
    1165 
    1166     /*
    1167      * do_exit() destroys the mm before closing files.
    1168      * I really hope it cleans up our stuff properly...
    1169      */
    1170     if (current->mm)
    1171     {
    1172         down_write(&current->mm->mmap_sem);
    1173         MY_DO_MUNMAP(current->mm, (unsigned long)pMem->pvR3, pMem->cb);
    1174         up_write(&current->mm->mmap_sem);   /* check when we can leave this. */
    1175     }
    1176 
    1177     /*
    1178      * Change page attributes freeing the pages.
    1179      */
    1180     for (iPage = 0; iPage < pMem->u.cont.cPages; iPage++)
    1181     {
    1182         ClearPageReserved(&pMem->u.cont.paPages[iPage]);
    1183         if (!PageHighMem(&pMem->u.cont.paPages[iPage]) && pgprot_val(MY_PAGE_KERNEL_EXEC) != pgprot_val(PAGE_KERNEL))
    1184             MY_CHANGE_PAGE_ATTR(&pMem->u.cont.paPages[iPage], 1, PAGE_KERNEL);
    1185     }
    1186     __free_pages(pMem->u.cont.paPages, VBoxDrvOrder(pMem->u.cont.cPages));
    1187 
    1188     pMem->u.cont.cPages = 0;
    1189 }
    1190 
    1191 
    1192 /**
    1193  * Allocates memory which is mapped into both kernel and user space.
    1194  * The returned memory is page aligned and so is the allocation.
    1195  *
    1196  * @returns 0 on success.
    1197  * @returns SUPDRV_ERR_* on failure.
    1198  * @param   pMem        Memory reference record of the memory to be allocated.
    1199  *                      (This is not linked in anywhere.)
    1200  * @param   ppvR0       Where to store the address of the Ring-0 mapping.
    1201  * @param   ppvR3       Where to store the address of the Ring-3 mapping.
    1202  */
    1203 int  VBOXCALL   supdrvOSMemAllocOne(PSUPDRVMEMREF pMem, PRTR0PTR ppvR0, PRTR3PTR ppvR3)
    1204 {
    1205     const unsigned  cbAligned = RT_ALIGN(pMem->cb, PAGE_SIZE);
    1206     const unsigned  cPages = cbAligned >> PAGE_SHIFT;
    1207 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 22)
    1208     unsigned        cOrder = VBoxDrvOrder(cPages);
    1209     struct page    *paPages;
    1210 #endif
    1211     struct page   **papPages;
    1212     unsigned        iPage;
    1213     pgprot_t        pgFlags;
    1214     pgprot_val(pgFlags) = _PAGE_PRESENT | _PAGE_RW | _PAGE_USER;
    1215 
    1216     /*
    1217      * Allocate array with page pointers.
    1218      */
    1219     pMem->u.mem.cPages = 0;
    1220     pMem->u.mem.papPages = papPages = kmalloc(sizeof(papPages[0]) * cPages, GFP_KERNEL);
    1221     if (!papPages)
    1222         return SUPDRV_ERR_NO_MEMORY;
    1223 
    1224     /*
    1225      * Allocate the pages.
    1226      */
    1227 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
    1228     for (iPage = 0; iPage < cPages; iPage++)
    1229     {
    1230         papPages[iPage] = alloc_page(GFP_HIGHUSER);
    1231         if (!papPages[iPage])
    1232         {
    1233             pMem->u.mem.cPages = iPage;
    1234             supdrvOSMemFreeOne(pMem);
    1235             return SUPDRV_ERR_NO_MEMORY;
    1236         }
    1237     }
    1238 
    1239 #else /* < 2.4.22 */
    1240     paPages = alloc_pages(GFP_USER, cOrder);
    1241     if (!paPages)
    1242     {
    1243         supdrvOSMemFreeOne(pMem);
    1244         return SUPDRV_ERR_NO_MEMORY;
    1245     }
    1246     for (iPage = 0; iPage < cPages; iPage++)
    1247     {
    1248         papPages[iPage] = &paPages[iPage];
    1249         if (pgprot_val(MY_PAGE_KERNEL_EXEC) != pgprot_val(PAGE_KERNEL))
    1250             MY_CHANGE_PAGE_ATTR(papPages[iPage], 1, MY_PAGE_KERNEL_EXEC);
    1251         if (PageHighMem(papPages[iPage]))
    1252             BUG();
    1253     }
    1254 #endif
    1255     pMem->u.mem.cPages = cPages;
    1256 
    1257     /*
    1258      * Reserve the pages.
    1259      */
    1260     for (iPage = 0; iPage < cPages; iPage++)
    1261         SetPageReserved(papPages[iPage]);
    1262 
    1263     /*
    1264      * Create the Ring-0 mapping.
    1265      */
    1266     if (ppvR0)
    1267     {
    1268 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
    1269 # ifdef VM_MAP
    1270         *ppvR0 = pMem->pvR0 = vmap(papPages, cPages, VM_MAP, pgFlags);
    1271 # else
    1272         *ppvR0 = pMem->pvR0 = vmap(papPages, cPages, VM_ALLOC, pgFlags);
    1273 # endif
    1274 #else
    1275         *ppvR0 = pMem->pvR0 = phys_to_virt(page_to_phys(papPages[0]));
    1276 #endif
    1277     }
    1278     if (pMem->pvR0 || !ppvR0)
    1279     {
    1280         /*
    1281          * Create the ring3 mapping.
    1282          */
    1283         if (ppvR3)
    1284             *ppvR3 = pMem->pvR3 = VBoxDrvLinuxMapUser(papPages, cPages, PROT_READ | PROT_WRITE | PROT_EXEC, pgFlags);
    1285         if (pMem->pvR3 || !ppvR3)
    1286             return 0;
    1287         dprintf(("supdrvOSMemAllocOne: failed to map into r3! cPages=%u\n", cPages));
    1288     }
    1289     else
    1290         dprintf(("supdrvOSMemAllocOne: failed to map into r0! cPages=%u\n", cPages));
    1291 
    1292     supdrvOSMemFreeOne(pMem);
    1293     return SUPDRV_ERR_NO_MEMORY;
    1294 }
    1295 
    1296 
    1297 /**
    1298  * Get the physical addresses of the pages in the allocation.
    1299  * This is called while holding the bundle spinlock.
    1300  *
    1301  * @param   pMem        Memory reference record of the memory.
    1302  * @param   paPages     Where to store the page addresses.
    1303  */
    1304 void VBOXCALL   supdrvOSMemGetPages(PSUPDRVMEMREF pMem, PSUPPAGE paPages)
    1305 {
    1306     unsigned iPage;
    1307     for (iPage = 0; iPage < pMem->u.mem.cPages; iPage++)
    1308     {
    1309         paPages[iPage].Phys = page_to_phys(pMem->u.mem.papPages[iPage]);
    1310         paPages[iPage].uReserved = 0;
    1311     }
    1312 }
    1313 
    1314 
    1315 /**
    1316  * Frees memory allocated by supdrvOSMemAllocOne().
    1317  *
    1318  * @param   pMem        Memory reference record of the memory to be freed.
    1319  */
    1320 void VBOXCALL   supdrvOSMemFreeOne(PSUPDRVMEMREF pMem)
    1321 {
    1322     dprintf2(("supdrvOSMemFreeOne: pvR0=%p pvR3=%p cb=%d cPages=%d papPages=%p\n",
    1323               pMem->pvR0, pMem->pvR3, pMem->cb, pMem->u.mem.cPages, pMem->u.mem.papPages));
    1324 
    1325     /*
    1326      * Unmap the user mapping (if any).
    1327      * do_exit() destroys the mm before closing files.
    1328      */
    1329     if (pMem->pvR3 && current->mm)
    1330     {
    1331         down_write(&current->mm->mmap_sem);
    1332         MY_DO_MUNMAP(current->mm, (unsigned long)pMem->pvR3, RT_ALIGN(pMem->cb, PAGE_SIZE));
    1333         up_write(&current->mm->mmap_sem);   /* check when we can leave this. */
    1334     }
    1335     pMem->pvR3 = NIL_RTR3PTR;
    1336 
    1337     /*
    1338      * Unmap the kernel mapping (if any).
    1339      */
    1340     if (pMem->pvR0)
    1341     {
    1342 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
    1343         vunmap(pMem->pvR0);
    1344 #endif
    1345         pMem->pvR0 = NULL;
    1346     }
    1347 
    1348     /*
    1349      * Free the physical pages.
    1350      */
    1351     if (pMem->u.mem.papPages)
    1352     {
    1353         struct page   **papPages = pMem->u.mem.papPages;
    1354         const unsigned  cPages   = pMem->u.mem.cPages;
    1355         unsigned        iPage;
    1356 
    1357         /* Restore the page flags. */
    1358         for (iPage = 0; iPage < cPages; iPage++)
    1359         {
    1360             ClearPageReserved(papPages[iPage]);
    1361 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 22)
    1362             if (pgprot_val(MY_PAGE_KERNEL_EXEC) != pgprot_val(PAGE_KERNEL))
    1363                 MY_CHANGE_PAGE_ATTR(papPages[iPage], 1, PAGE_KERNEL);
    1364 #endif
    1365         }
    1366 
    1367         /* Free the pages. */
    1368 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
    1369         for (iPage = 0; iPage < cPages; iPage++)
    1370             __free_page(papPages[iPage]);
    1371 #else
    1372         if (cPages > 0)
    1373             __free_pages(papPages[0], VBoxDrvOrder(cPages));
    1374 #endif
    1375         /* Free the page pointer array. */
    1376         kfree(papPages);
    1377         pMem->u.mem.papPages = NULL;
    1378     }
    1379     pMem->u.mem.cPages = 0;
    1380 }
    1381 
    1382 
    1383 /**
    1384  * Maps a range of pages into user space.
    1385  *
    1386  * @returns Pointer to the user space mapping on success.
    1387  * @returns NULL on failure.
    1388  * @param   papPages    Array of the pages to map.
    1389  * @param   cPages      Number of pages to map.
    1390  * @param   fProt       The mapping protection.
    1391  * @param   pgFlags     The page level protection.
    1392  */
    1393 static RTR3PTR VBoxDrvLinuxMapUser(struct page **papPages, unsigned cPages, unsigned fProt, pgprot_t pgFlags)
    1394 {
    1395     int             rc = SUPDRV_ERR_NO_MEMORY;
    1396     unsigned long   ulAddr;
    1397 
    1398     /*
    1399      * Allocate user space mapping.
    1400      */
    1401     down_write(&current->mm->mmap_sem);
    1402     ulAddr = do_mmap(NULL, 0, cPages * PAGE_SIZE, fProt, MAP_SHARED | MAP_ANONYMOUS, 0);
    1403     if (!(ulAddr & ~PAGE_MASK))
    1404     {
    1405         /*
    1406          * Map page by page into the mmap area.
    1407          * This is generic, paranoid and not very efficient.
    1408          */
    1409         rc = 0; /* reuse the outer rc; a second declaration here would shadow it */
    1410         unsigned long   ulAddrCur = ulAddr;
    1411         unsigned        iPage;
    1412         for (iPage = 0; iPage < cPages; iPage++, ulAddrCur += PAGE_SIZE)
    1413         {
    1414 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
    1415             struct vm_area_struct *vma = find_vma(current->mm, ulAddrCur);
    1416             if (!vma)
    1417                 break;
    1418 #endif
    1419 
    1420 #if   LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
    1421             rc = remap_pfn_range(vma, ulAddrCur, page_to_pfn(papPages[iPage]), PAGE_SIZE, pgFlags);
    1422 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
    1423             rc = remap_page_range(vma, ulAddrCur, page_to_phys(papPages[iPage]), PAGE_SIZE, pgFlags);
    1424 #else /* 2.4 */
    1425             rc = remap_page_range(ulAddrCur, page_to_phys(papPages[iPage]), PAGE_SIZE, pgFlags);
    1426 #endif
    1427             if (rc)
    1428                 break;
    1429         }
    1430 
    1431         /*
    1432          * Successful?
    1433          */
    1434         if (iPage >= cPages)
    1435         {
    1436             up_write(&current->mm->mmap_sem);
    1437             return ulAddr;
    1438         }
    1439 
    1440         /* no, cleanup! */
    1441         if (rc)
    1442             dprintf(("VBoxDrvLinuxMapUser: remap_[page|pfn]_range failed! rc=%d\n", rc));
    1443         else
    1444             dprintf(("VBoxDrvLinuxMapUser: find_vma failed!\n"));
    1445 
    1446         MY_DO_MUNMAP(current->mm, ulAddr, cPages << PAGE_SHIFT);
    1447     }
    1448     else
    1449     {
    1450         dprintf(("VBoxDrvLinuxMapUser: do_mmap failed ulAddr=%#lx\n", ulAddr));
    1451         rc = SUPDRV_ERR_NO_MEMORY;
    1452     }
    1453     up_write(&current->mm->mmap_sem);
    1454 
    1455     return NIL_RTR3PTR;
    1456 }
    1457 
    1458 #endif /* !USE_NEW_OS_INTERFACE_FOR_MM */
    1459 
    1460 
    1461886/**
    1462887 * Initializes the GIP.
  • trunk/src/VBox/HostDrivers/Support/win/SUPDrv-win.cpp

    r4811 r4831  
    440440
    441441
    442 #ifndef USE_NEW_OS_INTERFACE_FOR_MM
    443 
    444 /**
    445  * OS Specific code for locking down memory.
    446  *
    447  * @returns 0 on success.
    448  * @returns SUPDRV_ERR_* on failure.
    449  * @param   pMem        Pointer to memory.
    450  *                      This is not linked in anywhere.
    451  * @param   paPages     Array which should be filled with the address of the physical pages.
    452  */
    453 int  VBOXCALL   supdrvOSLockMemOne(PSUPDRVMEMREF pMem, PSUPPAGE paPages)
    454 {
    455     /* paranoia */
    456     if (!pMem->cb)
    457     {
    458         AssertMsgFailed(("Fool! No memory to lock!\n"));
    459         return SUPDRV_ERR_INVALID_PARAM;
    460     }
    461     Assert(RT_ALIGN(pMem->cb, PAGE_SIZE) == pMem->cb);
    462 
    463     /*
    464      * Calc the number of MDLs we need to allocate.
    465      */
    466     unsigned cMdls = pMem->cb / MAX_LOCK_MEM_SIZE;
    467     if ((pMem->cb % MAX_LOCK_MEM_SIZE) > 0)
    468         cMdls++;
    469 
    470     /*
    471      * Allocate memory for the MDL pointer array.
    472      */
    473     pMem->u.locked.papMdl = (PMDL *)ExAllocatePoolWithTag(NonPagedPool, sizeof(*pMem->u.locked.papMdl) * cMdls, SUPDRV_NT_POOL_TAG);
    474     if (!pMem->u.locked.papMdl)
    475     {
    476         AssertMsgFailed(("Couldn't allocate %d bytes for the MDL pointer array!\n", sizeof(*pMem->u.locked.papMdl) * cMdls));
    477         return SUPDRV_ERR_NO_MEMORY;
    478     }
    479 
    480     /*
    481      * Loop locking down the sub parts of the memory.
    482      */
    483     PSUPPAGE    pPage   = paPages;
    484     unsigned    cbTotal = 0;
    485     uint8_t    *pu8     = (uint8_t *)pMem->pvR3;
    486     for (unsigned i = 0; i < cMdls; i++)
    487     {
    488         /*
    489          * Calc the number of bytes to lock this time.
    490          */
    491         unsigned cbCur = pMem->cb - cbTotal;
    492         if (cbCur > MAX_LOCK_MEM_SIZE)
    493             cbCur = MAX_LOCK_MEM_SIZE;
    494 
    495         if (cbCur == 0)
    496             AssertMsgFailed(("cbCur: 0!\n"));
    497 
    498         /*
    499          * Allocate pMdl.
    500          */
    501         PMDL pMdl = IoAllocateMdl(pu8, cbCur, FALSE, FALSE, NULL);
    502         if (!pMdl)
    503         {
    504             AssertMsgFailed(("Oops! IoAllocateMdl failed for pu8=%p and cb=%d\n", pu8, cbCur));
    505             return SUPDRV_ERR_NO_MEMORY;
    506         }
    507 
    508         /*
    509          * Lock the pages.
    510          */
    511         NTSTATUS rc = STATUS_SUCCESS;
    512         __try
    513         {
    514             MmProbeAndLockPages(pMdl, UserMode, IoModifyAccess);
    515         }
    516         __except(EXCEPTION_EXECUTE_HANDLER)
    517         {
    518             rc = GetExceptionCode();
    519             dprintf(("supdrvOSLockMemOne: Exception Code %#x\n", rc));
    520         }
    521 
    522         if (!NT_SUCCESS(rc))
    523         {
    524             /*
    525              * Cleanup and fail.
    526              */
    527             IoFreeMdl(pMdl);
    528             while (i-- > 0)
    529             {
    530                 MmUnlockPages(pMem->u.locked.papMdl[i]);
    531                 IoFreeMdl(pMem->u.locked.papMdl[i]);
    532             }
    533             ExFreePool(pMem->u.locked.papMdl);
    534             pMem->u.locked.papMdl = NULL;
    535             return SUPDRV_ERR_LOCK_FAILED;
    536         }
    537 
    538         /*
    539          * Add MDL to array and update the pages.
    540          */
    541         pMem->u.locked.papMdl[i] = pMdl;
    542 
    543         const uintptr_t *pauPFNs = (uintptr_t *)(pMdl + 1);  /* ASSUMES ULONG_PTR == uintptr_t, NTDDK4 doesn't have ULONG_PTR. */
    544         for (unsigned iPage = 0, cPages = cbCur >> PAGE_SHIFT; iPage < cPages; iPage++)
    545         {
    546             pPage->Phys = (RTHCPHYS)pauPFNs[iPage] << PAGE_SHIFT;
    547             pPage->uReserved = 0;
    548             pPage++;
    549         }
    550 
    551         /* next */
    552         cbTotal += cbCur;
    553         pu8     += cbCur;
    554     }
    555 
    556     /*
    557      * Finish the structure and return successfully.
    558      */
    559     pMem->u.locked.cMdls = cMdls;
    560 
    561     dprintf2(("supdrvOSLockMemOne: pvR3=%p cb=%d cMdls=%d\n",
    562               pMem->pvR3, pMem->cb, cMdls));
    563     return 0;
    564 }
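As a worked example, assuming a hypothetical MAX_LOCK_MEM_SIZE of 32 MB: locking 100 MB gives 100/32 = 3 full chunks plus a 4 MB remainder, so cMdls = 4, and the loop locks 32, 32, 32 and 4 MB in turn, advancing pPage by one SUPPAGE entry per locked page.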
    565 
    566 
    567 /**
    568  * Unlocks memory previously locked by supdrvOSLockMemOne().
    569  *
    570  * @param   pMem    Memory reference record of the memory to unlock.
    571  *                  (This is not linked in anywhere.)
    572  */
    573 void VBOXCALL supdrvOSUnlockMemOne(PSUPDRVMEMREF pMem)
    574 {
    575     dprintf2(("supdrvOSUnlockMemOne: pvR3=%p cb=%d cMdl=%p papMdl=%p\n",
    576               pMem->pvR3, pMem->cb, pMem->u.locked.cMdls, pMem->u.locked.papMdl));
    577 
    578     for (unsigned i = 0; i < pMem->u.locked.cMdls; i++)
    579     {
    580         MmUnlockPages(pMem->u.locked.papMdl[i]);
    581         IoFreeMdl(pMem->u.locked.papMdl[i]);
    582     }
    583 
    584     ExFreePool(pMem->u.locked.papMdl);
    585     pMem->u.locked.papMdl = NULL;
    586 }
    587 
    588 
    589 /**
    590  * OS Specific code for allocating page aligned memory with contiguous
    591  * fixed physical backing.
    592  *
    593  * @returns 0 on success.
    594  * @returns SUPDRV_ERR_* on failure.
    595  * @param   pMem    Memory reference record of the memory to be allocated.
    596  *                  (This is not linked in anywhere.)
    597  * @param   ppvR0       Where to store the virtual address of the ring-0 mapping. (optional)
    598  * @param   ppvR3       Where to store the virtual address of the ring-3 mapping.
    599  * @param   pHCPhys     Where to store the physical address.
    600  */
    601 int VBOXCALL supdrvOSContAllocOne(PSUPDRVMEMREF pMem, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys)
    602 {
    603     Assert(ppvR3);
    604     Assert(pHCPhys);
    605 
    606     /*
    607      * Try allocate the memory.
    608      */
    609     PHYSICAL_ADDRESS Phys;
    610     Phys.HighPart = 0;
    611     Phys.LowPart = ~0;
    612     unsigned cbAligned = RT_ALIGN(pMem->cb, PAGE_SIZE);
    613     pMem->pvR0 = MmAllocateContiguousMemory(cbAligned, Phys);
    614     if (!pMem->pvR0)
    615         return SUPDRV_ERR_NO_MEMORY;
    616 
    617     /*
    618      * Map into user space.
    619      */
    620     int rc = SUPDRV_ERR_NO_MEMORY;
    621     pMem->u.cont.pMdl = IoAllocateMdl(pMem->pvR0, cbAligned, FALSE, FALSE, NULL);
    622     if (pMem->u.cont.pMdl)
    623     {
    624         MmBuildMdlForNonPagedPool(pMem->u.cont.pMdl);
    625         __try
    626         {
    627             pMem->pvR3 = (RTR3PTR)MmMapLockedPagesSpecifyCache(pMem->u.cont.pMdl, UserMode, MmCached, NULL, FALSE, NormalPagePriority);
    628             if (pMem->pvR3)
    629             {
    630                 /*
    631                  * Done, setup pMem and return values.
    632                  */
    633 #ifdef RT_ARCH_AMD64
    634                  MmProtectMdlSystemAddress(pMem->u.cont.pMdl, PAGE_EXECUTE_READWRITE);
    635 #endif
    636                 *ppvR3 = pMem->pvR3;
    637                 if (ppvR0)
    638                     *ppvR0 = pMem->pvR0;
    639                 const uintptr_t *pauPFNs = (const uintptr_t *)(pMem->u.cont.pMdl + 1); /* ASSUMES ULONG_PTR == uintptr_t, NTDDK4 doesn't have ULONG_PTR. */
    640                 *pHCPhys = (RTHCPHYS)pauPFNs[0] << PAGE_SHIFT;
    641                 dprintf2(("supdrvOSContAllocOne: pvR0=%p pvR3=%p cb=%d pMdl=%p *pHCPhys=%VHp\n",
    642                           pMem->pvR0, pMem->pvR3, pMem->cb, pMem->u.cont.pMdl, *pHCPhys));
    643                 return 0;
    644             }
    645         }
    646         __except(EXCEPTION_EXECUTE_HANDLER)
    647         {
    648             NTSTATUS rc = GetExceptionCode();
    649             dprintf(("supdrvOSContAllocOne: Exception Code %#x\n", rc));
    650         }
    651         IoFreeMdl(pMem->u.cont.pMdl);
    652         rc = SUPDRV_ERR_LOCK_FAILED;
    653     }
    654     MmFreeContiguousMemory(pMem->pvR0);
    655     pMem->pvR0 = NULL;
    656     return rc;
    657 }
    658 
    659 
    660 /**
    661  * Frees contiguous memory.
    662  *
    663  * @param   pMem    Memory reference record of the memory to be freed.
    664  */
    665 void VBOXCALL supdrvOSContFreeOne(PSUPDRVMEMREF pMem)
    666 {
    667     __try
    668     {
    669         dprintf2(("supdrvOSContFreeOne: pvR0=%p pvR3=%p cb=%d pMdl=%p\n",
    670                  pMem->pvR0, pMem->pvR3, pMem->cb, pMem->u.cont.pMdl));
    671         if (pMem->pvR3)
    672         {
    673             MmUnmapLockedPages((void *)pMem->pvR3, pMem->u.cont.pMdl);
    674             dprintf2(("MmUnmapLockedPages ok!\n"));
    675             pMem->pvR3 = NULL;
    676         }
    677 
    678         IoFreeMdl(pMem->u.cont.pMdl);
    679         dprintf2(("IoFreeMdl ok!\n"));
    680         pMem->u.cont.pMdl = NULL;
    681 
    682         MmFreeContiguousMemory(pMem->pvR0);
    683         dprintf2(("MmFreeContiguousMemory ok!\n"));
    684         pMem->pvR0 = NULL;
    685     }
    686     __except(EXCEPTION_EXECUTE_HANDLER)
    687     {
    688         NTSTATUS rc = GetExceptionCode();
    689         dprintf(("supdrvOSContFreeOne: Exception Code %#x\n", rc));
    690     }
    691 }
    692 
    693 
    694 /**
    695  * Allocates memory which is mapped into both kernel and user space.
    696  * The returned memory is page aligned and so is the allocation.
    697  *
    698  * @returns 0 on success.
    699  * @returns SUPDRV_ERR_* on failure.
    700  * @param   pMem        Memory reference record of the memory to be allocated.
    701  *                      (This is not linked in anywhere.)
    702  * @param   ppvR0       Where to store the address of the Ring-0 mapping.
    703  * @param   ppvR3       Where to store the address of the Ring-3 mapping.
    704  */
    705 int  VBOXCALL   supdrvOSMemAllocOne(PSUPDRVMEMREF pMem, PRTR0PTR ppvR0, PRTR3PTR ppvR3)
    706 {
    707     Assert(ppvR0);
    708     Assert(ppvR3);
    709 
    710     /*
    711      * Try allocate the memory.
    712      */
    713     unsigned cbAligned = RT_ALIGN(RT_MAX(pMem->cb, PAGE_SIZE * 2), PAGE_SIZE);
    714     pMem->pvR0 = ExAllocatePoolWithTag(NonPagedPool, cbAligned, SUPDRV_NT_POOL_TAG);
    715     if (!pMem->pvR0)
    716         return SUPDRV_ERR_NO_MEMORY;
    717 
    718     /*
    719      * Map into user space.
    720      */
    721     int rc = SUPDRV_ERR_NO_MEMORY;
    722     pMem->u.mem.pMdl = IoAllocateMdl(pMem->pvR0, cbAligned, FALSE, FALSE, NULL);
    723     if (pMem->u.mem.pMdl)
    724     {
    725         MmBuildMdlForNonPagedPool(pMem->u.mem.pMdl);
    726         __try
    727         {
    728             pMem->pvR3 = (RTR3PTR)MmMapLockedPagesSpecifyCache(pMem->u.mem.pMdl, UserMode, MmCached, NULL, FALSE, NormalPagePriority);
    729             if (pMem->pvR3)
    730             {
    731                 /*
    732                  * Done, setup pMem and return values.
    733                  */
    734                 *ppvR3 = pMem->pvR3;
    735                 *ppvR0 = pMem->pvR0;
    736                 dprintf2(("supdrvOSMemAllocOne: pvR0=%p pvR3=%p cb=%d pMdl=%p\n",
    737                           pMem->pvR0, pMem->pvR3, pMem->cb, pMem->u.mem.pMdl));
    738                 return 0;
    739             }
    740         }
    741         __except(EXCEPTION_EXECUTE_HANDLER)
    742         {
    743             NTSTATUS rc = GetExceptionCode();
    744             dprintf(("supdrvOSMemAllocOne: Exception Code %#x\n", rc));
    745         }
    746         rc = SUPDRV_ERR_LOCK_FAILED;
    747 
    748         IoFreeMdl(pMem->u.mem.pMdl);
    749         pMem->u.mem.pMdl = NULL;
    750         pMem->pvR3 = NULL;
    751     }
    752 
    753     ExFreePool(pMem->pvR0); /* allocated with ExAllocatePoolWithTag above, not MmAllocateContiguousMemory */
    754     pMem->pvR0 = NULL;
    755     return rc;
    756 }
    757 
    758 
    759 /**
    760  * Get the physical addresses of the pages in the allocation.
    761  * This is called while holding the bundle spinlock.
    762  *
    763  * @param   pMem        Memory reference record of the memory.
    764  * @param   paPages     Where to store the page addresses.
    765  */
    766 void VBOXCALL   supdrvOSMemGetPages(PSUPDRVMEMREF pMem, PSUPPAGE paPages)
    767 {
    768     const unsigned      cPages = RT_ALIGN(pMem->cb, PAGE_SIZE) >> PAGE_SHIFT;
    769     const uintptr_t    *pauPFNs = (const uintptr_t *)(pMem->u.mem.pMdl + 1); /* ASSUMES ULONG_PTR == uintptr_t, NTDDK doesn't have ULONG_PTR. */
    770     for (unsigned iPage = 0; iPage < cPages; iPage++)
    771     {
    772         paPages[iPage].Phys = (RTHCPHYS)pauPFNs[iPage] << PAGE_SHIFT;
    773         paPages[iPage].uReserved = 0;
    774     }
    775 }
    776 
    777 
    778 /**
    779  * Frees memory allocated by supdrvOSMemAllocOne().
    780  *
    781  * @param   pMem        Memory reference record of the memory to be freed.
    782  */
    783 void VBOXCALL   supdrvOSMemFreeOne(PSUPDRVMEMREF pMem)
    784 {
    785     __try
    786     {
    787         dprintf2(("supdrvOSMemFreeOne: pvR0=%p pvR3=%p cb=%d pMdl=%p\n",
    788                  pMem->pvR0, pMem->pvR3, pMem->cb, pMem->u.mem.pMdl));
    789         if (pMem->pvR3)
    790         {
    791             MmUnmapLockedPages((void *)pMem->pvR3, pMem->u.mem.pMdl);
    792             pMem->pvR3 = NULL;
    793             dprintf2(("MmUnmapLockedPages ok!\n"));
    794         }
    795 
    796         IoFreeMdl(pMem->u.mem.pMdl);
    797         pMem->u.mem.pMdl = NULL;
    798         dprintf2(("IoFreeMdl ok!\n"));
    799 
    800         ExFreePool(pMem->pvR0);
    801         pMem->pvR0 = NULL;
    802         dprintf2(("ExFreePool ok!\n"));
    803     }
    804     __except(EXCEPTION_EXECUTE_HANDLER)
    805     {
    806         NTSTATUS rc = GetExceptionCode();
    807         dprintf(("supdrvOSMemFreeOne: Exception Code %#x\n", rc));
    808     }
    809 }
    810 
    811 #endif /* !USE_NEW_OS_INTERFACE_FOR_MM */
    812 
    813 
    814442/**
    815443 * Gets the monotone timestamp (nanoseconds).
     
    1093721#endif
    1094722}
    1095 
    1096 
    1097 #ifndef USE_NEW_OS_INTERFACE_FOR_MM
    1098 
    1099 /**
    1100  * Allocate small amounts of memory which does not have the NX bit set.
    1101  *
    1102  * @returns Pointer to the allocated memory
    1103  * @returns NULL if out of memory.
    1104  * @param   cb   Size of the memory block.
    1105  */
    1106 void *VBOXCALL  supdrvOSExecAlloc(size_t cb)
    1107 {
    1108 #if 0 //def RT_ARCH_AMD64
    1109     cb = RT_ALIGN_Z(cb, PAGE_SIZE);
    1110     void *pv = ExAllocatePoolWithTag(NonPagedPool, cb, SUPDRV_NT_POOL_TAG);
    1111     if (pv)
    1112     {
    1113         /*
    1114          * Create a kernel mapping which we make PAGE_EXECUTE_READWRITE using
    1115          * the MmProtectMdlSystemAddress API.
    1116          */
    1117         int rc = SUPDRV_ERR_NO_MEMORY;
    1118         PMDL pMdl = IoAllocateMdl(pv, cb, FALSE, FALSE, NULL);
    1119         if (pMdl)
    1120         {
    1121             MmBuildMdlForNonPagedPool(pMdl);
    1122             __try
    1123             {
    1124                 void *pvMapping = MmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL, FALSE, NormalPagePriority);
    1125                 if (pvMapping)
    1126                 {
    1127                     NTSTATUS rc = MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
    1128                     if (NT_SUCCESS(rc))
    1129                     {
    1130                         /*
    1131                          * Create tracking structure and insert it into the list.
    1132                          */
    1133 
    1134 
    1135                         return pvMapping;
    1136                     }
    1137 
    1138                     MmUnmapLockedPages(pvMapping, pMdl);
    1139                 }
    1140             }
    1141             __except(EXCEPTION_EXECUTE_HANDLER)
    1142             {
    1143                 NTSTATUS rc = GetExceptionCode();
    1144                 dprintf(("supdrvOSExecAlloc: Exception Code %#x\n", rc));
    1145             }
    1146             IoFreeMdl(pMdl);
    1147         }
    1148         ExFreePool(pv);
    1149     }
    1150     dprintf2(("supdrvOSExecAlloc(%d): returns NULL\n", cb));
    1151     return NULL;
    1152 #else
    1153     void *pv = ExAllocatePoolWithTag(NonPagedPool, cb, SUPDRV_NT_POOL_TAG);
    1154     dprintf2(("supdrvOSExecAlloc(%d): returns %p\n", cb, pv));
    1155     return pv;
    1156 #endif
    1157 }
    1158 
    1159 #endif /* !USE_NEW_OS_INTERFACE_FOR_MM */
    1160723
    1161724