Changeset 4219 in vbox for trunk/src/VBox/Runtime
Timestamp: Aug 18, 2007 11:42:23 PM (17 years ago)
Location:  trunk/src/VBox/Runtime
Files:     3 edited
trunk/src/VBox/Runtime/Makefile.kmk (r4178 → r4219)

@@ -768 +768 @@
         generic/RTAssertDoBreakpoint-generic.cpp \
         nt/RTErrConvertFromNtStatus.cpp \
+        r0drv/memobj-r0drv.cpp \
         r0drv/nt/alloc-r0drv-nt.cpp \
         r0drv/nt/initterm-r0drv-nt.cpp \
+        r0drv/nt/memobj-r0drv-nt.cpp \
         r0drv/nt/process-r0drv-nt.cpp \
         r0drv/nt/RTLogWriteDebugger-r0drv-nt.cpp \
@@ -776 +778 @@
         r0drv/nt/thread-r0drv-nt.cpp \
         string/strncmp.cpp
-
-#RuntimeR0Drv_SOURCES.win += \
-#        r0drv/memobj-r0drv.cpp \
-#        r0drv/nt/memobj-r0drv-nt.cpp
 
 RuntimeR0Drv_SOURCES.win.amd64 = \
trunk/src/VBox/Runtime/r0drv/nt/memobj-r0drv-nt.cpp (r4154 → r4219)

@@ -62 +62 @@
 #endif
     /** The number of PMDLs (memory descriptor lists) in the array. */
-    unsigned        cMdls;
+    uint32_t        cMdls;
     /** Array of MDL pointers. (variable size) */
     PMDL            apMdls[1];
@@ -78 +78 @@
     {
         case RTR0MEMOBJTYPE_LOW:
-#ifdef IPRT_TARGET_NT4
+#ifndef IPRT_TARGET_NT4
             if (pMemNt->fAllocatedPagesForMdl)
             {
@@ -91 +91 @@
             }
 #endif
-            /* fall thru */
+            AssertFailed();
+            break;
+
         case RTR0MEMOBJTYPE_PAGE:
             Assert(pMemNt->Core.pv);
@@ -116 +118 @@
         case RTR0MEMOBJTYPE_PHYS:
         case RTR0MEMOBJTYPE_PHYS_NC:
-#ifdef IPRT_TARGET_NT4
+#ifndef IPRT_TARGET_NT4
             if (pMemNt->fAllocatedPagesForMdl)
             {
@@ -122 +124 @@
                 pMemNt->apMdls[0] = NULL;
                 pMemNt->cMdls = 0;
+                break;
             }
 #endif
+            AssertFailed();
             break;
 
         case RTR0MEMOBJTYPE_LOCK:
-            for (unsigned i = 0; i < pMemNt->cMdls; i++)
-            {
-                MmUnlockPages(pMemNt->apMdl[i]);
-                IoFreeMdl(pMemNt->apMdl[i]);
-                pMemNt->apMdl[i] = NULL;
+            for (uint32_t i = 0; i < pMemNt->cMdls; i++)
+            {
+                MmUnlockPages(pMemNt->apMdls[i]);
+                IoFreeMdl(pMemNt->apMdls[i]);
+                pMemNt->apMdls[i] = NULL;
             }
             break;
 
         case RTR0MEMOBJTYPE_RES_VIRT:
-            if (pMemNt->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
-            {
-                MmMapIoSpace
+            /*if (pMemNt->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
+            {
             }
             else
             {
-            }
+            }*/
             AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
             return VERR_INTERNAL_ERROR;
@@ -149 +152 @@
         case RTR0MEMOBJTYPE_MAPPING:
         {
-            Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
+            Assert(pMemNt->Core.pv);
             PRTR0MEMOBJNT pMemNtParent = (PRTR0MEMOBJNT)pMemNt->Core.uRel.Child.pParent;
             Assert(pMemNtParent);
-            Assert(pMemNtParent->cMdls == 1 && pMemNtParent->apMdls[0]);
-            MmUnmapLockedPages(pMemNt->Core.pv, pMemNtParent->apMdls);
+            if (pMemNtParent->cMdls)
+            {
+                Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
+                Assert(pMemNtParent->cMdls == 1 && pMemNtParent->apMdls[0]);
+                Assert(   pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS
+                       || pMemNt->Core.u.Mapping.R0Process == RTR0ProcHandleSelf());
+                MmUnmapLockedPages(pMemNt->Core.pv, pMemNtParent->apMdls[0]);
+            }
+            else
+            {
+                Assert(   pMemNtParent->Core.enmType == RTR0MEMOBJTYPE_PHYS
+                       && !pMemNtParent->Core.u.Phys.fAllocated);
+                MmUnmapIoSpace(pMemNt->Core.pv, pMemNt->Core.cb);
+            }
             pMemNt->Core.pv = NULL;
             break;
@@ -169 +184 @@
 int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
 {
+    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
+
     /*
      * Try allocate the memory and create an MDL for them so
@@ -178 +195 @@
     if (pv)
     {
-        PMDL pMdl = IoAllocateMdl(pv, cb, FALSE, FALSE, NULL);
+        PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
         if (pMdl)
         {
             MmBuildMdlForNonPagedPool(pMdl);
-            /** @todo if (fExecutable) */
+#ifdef RT_ARCH_AMD64
+            MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
+#endif
 
             /*
@@ -207 +226 @@
 int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
 {
+    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
+
     /*
      * Try see if we get lucky first...
@@ -234 +255 @@
      */
     PHYSICAL_ADDRESS Zero;
+    Zero.QuadPart = 0;
     PHYSICAL_ADDRESS HighAddr;
-    Zero.QuadPart = 0;
-    High.QuadPart = _4G - 1;
+    HighAddr.QuadPart = _4G - 1;
     PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
     if (pMdl)
@@ -289 +310 @@
 static int rtR0MemObjNativeAllocContEx(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, RTHCPHYS PhysHighest)
 {
+    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
+
     /*
      * Allocate the memory and create an MDL for it.
@@ -298 +321 @@
         return VERR_NO_MEMORY;
 
-    PMDL pMdl = IoAllocateMdl(pv, cb, FALSE, FALSE, NULL);
+    PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
     if (pMdl)
     {
         MmBuildMdlForNonPagedPool(pMdl);
-        /** @todo fExecutable */
+#ifdef RT_ARCH_AMD64
+        MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
+#endif
 
         PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_CONT, pv, cb);
@@ -333 +358 @@
      * Try and see if we're lucky and get a contiguous chunk from MmAllocatePagesForMdl.
      *
-     * If the allocation is big, the chances are *probably* not very good. The current
-     * max limit is kind of random.
+     * This is preferable to using MmAllocateContiguousMemory because there are
+     * a few situations where the memory shouldn't be mapped, like for instance
+     * VT-x control memory. Since these are rather small allocations (one or
+     * two pages) MmAllocatePagesForMdl will probably be able to satisfy the
+     * request.
+     *
+     * If the allocation is big, the chances are *probably* not very good. The
+     * current limit is kind of random...
      */
     if (cb < _128K)
     {
         PHYSICAL_ADDRESS Zero;
+        Zero.QuadPart = 0;
         PHYSICAL_ADDRESS HighAddr;
-        Zero.QuadPart = 0;
-        High.QuadPart = _4G - 1;
+        HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
         PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
         if (pMdl)
@@ -349 +380 @@
             PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMdl);
             PFN_NUMBER Pfn = paPfns[0] + 1;
-            const size_t cPages = cb >> PAGE_SIZE;
+            const size_t cPages = cb >> PAGE_SHIFT;
             size_t iPage;
             for (iPage = 1; iPage < cPages; iPage++, Pfn++)
@@ -372 +403 @@
         }
     }
-#endif
+#endif /* !IPRT_TARGET_NT4 */
 
     return rtR0MemObjNativeAllocContEx(ppMem, cb, false, PhysHighest);
@@ -382 +413 @@
 #ifndef IPRT_TARGET_NT4
     PHYSICAL_ADDRESS Zero;
+    Zero.QuadPart = 0;
     PHYSICAL_ADDRESS HighAddr;
-    Zero.QuadPart = 0;
-    High.QuadPart = _4G - 1;
+    HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
     PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
     if (pMdl)
@@ -390 +421 @@
         if (MmGetMdlByteCount(pMdl) >= cb)
         {
-            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_CONT, pv, cb);
+            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
             if (pMemNt)
@@ -424 +455 @@
     if (pMemNt)
     {
-        pMemNt->Core.u.Phys.PhysBase = PhysAddr;
+        pMemNt->Core.u.Phys.PhysBase = Phys;
         pMemNt->Core.u.Phys.fAllocated = false;
-        pMemNt->pMemDesc = pMemDesc;
         *ppMem = &pMemNt->Core;
         return VINF_SUCCESS;
@@ -449 +479 @@
      * Calc the number of MDLs we need and allocate the memory object structure.
      */
-    unsigned cMdls = pMem->cb / MAX_LOCK_MEM_SIZE;
-    if ((pMem->cb % MAX_LOCK_MEM_SIZE) > 0)
+    size_t cMdls = cb / MAX_LOCK_MEM_SIZE;
+    if (cb % MAX_LOCK_MEM_SIZE)
         cMdls++;
+    if (cMdls >= UINT32_MAX)
+        return VERR_OUT_OF_RANGE;
     PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJNT, apMdls[cMdls]),
-                                                        RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
+                                                        RTR0MEMOBJTYPE_LOCK, pv, cb);
     if (!pMemNt)
         return VERR_NO_MEMORY;
@@ -462 +494 @@
     int rc = VINF_SUCCESS;
     size_t cbTotal = 0;
-    uint8_t *pb = pv;
-    unsigned iMdl;
+    uint8_t *pb = (uint8_t *)pv;
+    uint32_t iMdl;
     for (iMdl = 0; iMdl < cMdls; iMdl++)
     {
@@ -473 +505 @@
             cbCur = MAX_LOCK_MEM_SIZE;
         AssertMsg(cbCur, ("cbCur: 0!\n"));
-        PMDL pMdl = IoAllocateMdl(pb, cbCur, FALSE, FALSE, NULL);
+        PMDL pMdl = IoAllocateMdl(pb, (ULONG)cbCur, FALSE, FALSE, NULL);
         if (!pMdl)
         {
@@ -513 +545 @@
     while (iMdl-- > 0)
     {
-        MmUnlockPages(pMemNt->apMdl[iMdl]);
-        IoFreeMdl(pMemNt->apMdl[iMdl]);
-        pMemNt->apMdl[iMdl] = NULL;
+        MmUnlockPages(pMemNt->apMdls[iMdl]);
+        IoFreeMdl(pMemNt->apMdls[iMdl]);
+        pMemNt->apMdls[iMdl] = NULL;
     }
-    rtR0MemObjDelete(pMemNt);
-    return SUPDRV_ERR_LOCK_FAILED;
+    rtR0MemObjDelete(&pMemNt->Core);
+    return VERR_LOCK_FAILED;
@@ -525 +557 @@
 {
     AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
-    /* (
+    /* (Can use MmProbeAndLockProcessPages if we need to mess with other processes later.) */
     return rtR0MemObjNtLock(ppMem, (void *)R3Ptr, cb, R0Process);
 }
@@ -532 +564 @@
 int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
 {
-    return rtR0MemObjNtLock(ppMem, (void *)R3Ptr, cb, NIL_RTR0PROCESS);
+    return rtR0MemObjNtLock(ppMem, pv, cb, NIL_RTR0PROCESS);
 }
@@ -547 +579 @@
 }
 
-
-int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment, unsigned fProt)
-{
-    /*
-     * Must have a memory descriptor.
-     */
-    int rc = VERR_INVALID_PARAMETER;
-    PRTR0MEMOBJNT pMemToMapDarwin = (PRTR0MEMOBJNT)pMemToMap;
-    if (pMemToMapDarwin->pMemDesc)
-    {
-        IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map(kernel_task, kIOMapAnywhere,
-                                                              kIOMapAnywhere | kIOMapDefaultCache);
-        if (pMemMap)
-        {
-            IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
-            void *pv = (void *)(uintptr_t)VirtAddr;
-            if ((uintptr_t)pv == VirtAddr)
-            {
-                /*
-                 * Create the IPRT memory object.
-                 */
-                PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING,
-                                                                    pv, pMemToMapDarwin->Core.cb);
+/**
+ * Internal worker for rtR0MemObjNativeMapKernel and rtR0MemObjNativeMapUser.
+ *
+ * @returns IPRT status code.
+ * @param   ppMem       Where to store the memory object for the mapping.
+ * @param   pMemToMap   The memory object to map.
+ * @param   pvFixed     Where to map it. (void *)-1 if anywhere is fine.
+ * @param   uAlignment  The alignment requirement for the mapping.
+ * @param   fProt       The desired page protection for the mapping.
+ * @param   R0Process   If NIL_RTR0PROCESS map into system (kernel) memory.
+ *                      If not nil, it's the current process.
+ */
+static int rtR0MemObjNtMap(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
+                           unsigned fProt, RTR0PROCESS R0Process)
+{
+    int rc = VERR_MAP_FAILED;
+
+    /*
+     * There are two basic cases here, either we've got an MDL and can
+     * map it using MmMapLockedPages, or we've got a contiguous physical
+     * range (MMIO most likely) and can use MmMapIoSpace.
+     */
+    PRTR0MEMOBJNT pMemNtToMap = (PRTR0MEMOBJNT)pMemToMap;
+    if (pMemNtToMap->cMdls)
+    {
+        /* don't attempt map locked regions with more than one mdl. */
+        if (pMemNtToMap->cMdls != 1)
+            return VERR_NOT_SUPPORTED;
+
+        /* we can't map anything to the first page, sorry. */
+        if (pvFixed == 0)
+            return VERR_NOT_SUPPORTED;
+
+        /* only one system mapping for now - no time to figure out MDL restrictions right now. */
+        if (    pMemNtToMap->Core.uRel.Parent.cMappings
+            &&  R0Process == NIL_RTR0PROCESS)
+            return VERR_NOT_SUPPORTED;
+
+        __try
+        {
+            /** @todo uAlignment */
+            /** @todo How to set the protection on the pages? */
+            void *pv = MmMapLockedPagesSpecifyCache(pMemNtToMap->apMdls[0],
+                                                    R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
+                                                    MmCached,
+                                                    pvFixed != (void *)-1 ? pvFixed : NULL,
+                                                    FALSE /* no bug check on failure */,
+                                                    NormalPagePriority);
+            if (pv)
+            {
+                NOREF(fProt);
+
+                PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
+                                                                    pMemNtToMap->Core.cb);
                 if (pMemNt)
                 {
-                    pMemNt->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
-                    pMemNt->pMemMap = pMemMap;
+                    pMemNt->Core.u.Mapping.R0Process = R0Process;
                     *ppMem = &pMemNt->Core;
                     return VINF_SUCCESS;
@@ -579 +641 @@
 
                 rc = VERR_NO_MEMORY;
-            }
-            else
-                rc = VERR_ADDRESS_TOO_BIG;
-            pMemMap->release();
-        }
-        else
+                MmUnmapLockedPages(pv, pMemNtToMap->apMdls[0]);
+            }
+        }
+        __except(EXCEPTION_EXECUTE_HANDLER)
+        {
+            /* nothing */
             rc = VERR_MAP_FAILED;
-    }
+        }
+    }
+    else
+    {
+        AssertReturn(   pMemNtToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS
+                     && !pMemNtToMap->Core.u.Phys.fAllocated, VERR_INTERNAL_ERROR);
+
+        /* cannot map phys mem to user space (yet). */
+        if (R0Process != NIL_RTR0PROCESS)
+            return VERR_NOT_SUPPORTED;
+
+        /** @todo uAlignment */
+        /** @todo How to set the protection on the pages? */
+        PHYSICAL_ADDRESS Phys;
+        Phys.QuadPart = pMemNtToMap->Core.u.Phys.PhysBase;
+        void *pv = MmMapIoSpace(Phys, pMemNtToMap->Core.cb, MmCached); /** @todo add cache type to fProt. */
+        if (pv)
+        {
+            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
+                                                                pMemNtToMap->Core.cb);
+            if (pMemNt)
+            {
+                pMemNt->Core.u.Mapping.R0Process = R0Process;
+                *ppMem = &pMemNt->Core;
+                return VINF_SUCCESS;
+            }
+
+            rc = VERR_NO_MEMORY;
+            MmUnmapIoSpace(pv, pMemNtToMap->Core.cb);
+        }
+    }
+
+    NOREF(uAlignment); NOREF(fProt);
     return rc;
 }
 
 
+int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment, unsigned fProt)
+{
+    return rtR0MemObjNtMap(ppMem, pMemToMap, pvFixed, uAlignment, fProt, NIL_RTR0PROCESS);
+}
+
+
 int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
 {
-    /*
-     * Must have a memory descriptor.
-     */
-    int rc = VERR_INVALID_PARAMETER;
-    PRTR0MEMOBJNT pMemToMapDarwin = (PRTR0MEMOBJNT)pMemToMap;
-    if (pMemToMapDarwin->pMemDesc)
-    {
-        IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map((task_t)R0Process, kIOMapAnywhere,
-                                                              kIOMapAnywhere | kIOMapDefaultCache);
-        if (pMemMap)
-        {
-            IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
-            void *pv = (void *)(uintptr_t)VirtAddr;
-            if ((uintptr_t)pv == VirtAddr)
-            {
-                /*
-                 * Create the IPRT memory object.
-                 */
-                PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING,
-                                                                    pv, pMemToMapDarwin->Core.cb);
-                if (pMemNt)
-                {
-                    pMemNt->Core.u.Mapping.R0Process = R0Process;
-                    pMemNt->pMemMap = pMemMap;
-                    *ppMem = &pMemNt->Core;
-                    return VINF_SUCCESS;
-                }
-
-                rc = VERR_NO_MEMORY;
-            }
-            else
-                rc = VERR_ADDRESS_TOO_BIG;
-            pMemMap->release();
-        }
-        else
-            rc = VERR_MAP_FAILED;
-    }
-    return rc;
-}
-
-
-RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, unsigned iPage)
-{
-    RTHCPHYS PhysAddr;
-    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
-
-#ifdef USE_VM_MAP_WIRE
-    /*
-     * Locked memory doesn't have a memory descriptor and
-     * needs to be handled differently.
-     */
-    if (pMemNt->Core.enmType == RTR0MEMOBJTYPE_LOCK)
-    {
-        ppnum_t PgNo;
-        if (pMemNt->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
-            PgNo = pmap_find_phys(kernel_pmap, (uintptr_t)pMemNt->Core.pv + iPage * PAGE_SIZE);
-        else
-        {
-            /*
-             * From what I can tell, Apple seems to have locked up the all the
-             * available interfaces that could help us obtain the pmap_t of a task
-             * or vm_map_t.
-
-             * So, we'll have to figure out where in the vm_map_t structure it is
-             * and read it our selves. ASSUMING that kernel_pmap is pointed to by
-             * kernel_map->pmap, we scan kernel_map to locate the structure offset.
-             * Not nice, but it will hopefully do the job in a reliable manner...
-             *
-             * (get_task_pmap, get_map_pmap or vm_map_pmap is what we really need btw.)
-             */
-            static int s_offPmap = -1;
-            if (RT_UNLIKELY(s_offPmap == -1))
-            {
-                pmap_t const *p = (pmap_t *)kernel_map;
-                pmap_t const * const pEnd = p + 64;
-                for (; p < pEnd; p++)
-                    if (*p == kernel_pmap)
-                    {
-                        s_offPmap = (uintptr_t)p - (uintptr_t)kernel_map;
-                        break;
-                    }
-                AssertReturn(s_offPmap >= 0, NIL_RTHCPHYS);
-            }
-            pmap_t Pmap = *(pmap_t *)((uintptr_t)get_task_map((task_t)pMemNt->Core.u.Lock.R0Process) + s_offPmap);
-            PgNo = pmap_find_phys(Pmap, (uintptr_t)pMemNt->Core.pv + iPage * PAGE_SIZE);
-        }
-
-        AssertReturn(PgNo, NIL_RTHCPHYS);
-        PhysAddr = (RTHCPHYS)PgNo << PAGE_SHIFT;
-        Assert((PhysAddr >> PAGE_SHIFT) == PgNo);
-    }
-    else
-#endif /* USE_VM_MAP_WIRE */
-    {
-        /*
-         * Get the memory descriptor.
-         */
-        IOMemoryDescriptor *pMemDesc = pMemNt->pMemDesc;
-        if (!pMemDesc)
-            pMemDesc = pMemNt->pMemMap->getMemoryDescriptor();
-        AssertReturn(pMemDesc, NIL_RTHCPHYS);
-
-        /*
-         * If we've got a memory descriptor, use getPhysicalSegment64().
-         */
-        addr64_t Addr = pMemDesc->getPhysicalSegment64(iPage * PAGE_SIZE, NULL);
-        AssertMsgReturn(Addr, ("iPage=%u\n", iPage), NIL_RTHCPHYS);
-        PhysAddr = Addr;
-        AssertMsgReturn(PhysAddr == Addr, ("PhysAddr=%VHp Addr=%RX64\n", PhysAddr, (uint64_t)Addr), NIL_RTHCPHYS);
-    }
-
-    return PhysAddr;
-}
-
+    AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_NOT_SUPPORTED);
+    return rtR0MemObjNtMap(ppMem, pMemToMap, (void *)R3PtrFixed, uAlignment, fProt, R0Process);
+}
+
+
+RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
+{
+    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
+
+    if (pMemNt->cMdls)
+    {
+        if (pMemNt->cMdls == 1)
+        {
+            PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[0]);
+            return (RTHCPHYS)paPfns[iPage] << PAGE_SHIFT;
+        }
+
+        size_t iMdl = iPage / (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
+        size_t iMdlPfn = iPage % (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
+        PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[iMdl]);
+        return (RTHCPHYS)paPfns[iMdlPfn] << PAGE_SHIFT;
+    }
+
+    switch (pMemNt->Core.enmType)
+    {
+        case RTR0MEMOBJTYPE_MAPPING:
+            return rtR0MemObjNativeGetPagePhysAddr(pMemNt->Core.uRel.Child.pParent, iPage);
+
+        case RTR0MEMOBJTYPE_PHYS:
+            return pMemNt->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);
+
+        case RTR0MEMOBJTYPE_PAGE:
+        case RTR0MEMOBJTYPE_PHYS_NC:
+        case RTR0MEMOBJTYPE_LOW:
+        case RTR0MEMOBJTYPE_CONT:
+        case RTR0MEMOBJTYPE_LOCK:
+        default:
+            AssertMsgFailed(("%d\n", pMemNt->Core.enmType));
+        case RTR0MEMOBJTYPE_RES_VIRT:
+            return NIL_RTHCPHYS;
+    }
+}
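A note on the contiguity probe in rtR0MemObjNativeAllocPhys above: MmAllocatePagesForMdl makes no contiguity promise, so the code walks the MDL's PFN array and only keeps the allocation when every frame number is the previous one plus one; the `cb >> PAGE_SIZE` to `cb >> PAGE_SHIFT` correction is what makes that loop visit the right number of pages, and on failure it falls through to rtR0MemObjNativeAllocContEx. A minimal user-mode sketch of the same check, with a hypothetical IsRunContiguous, a stand-in PFN array and page shift in place of MmGetMdlPfnArray and NT's PAGE_SHIFT:

    /* Stand-alone sketch; only the loop mirrors the changeset, the rest is scaffolding. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SKETCH_PAGE_SHIFT 12 /* assumption: 4K pages, as on x86/amd64 NT */

    /* True if the cb bytes described by paPfns form one physically contiguous run. */
    static bool IsRunContiguous(const uint32_t *paPfns, size_t cb)
    {
        const size_t cPages = cb >> SKETCH_PAGE_SHIFT; /* >> PAGE_SHIFT, not >> PAGE_SIZE */
        for (size_t iPage = 1; iPage < cPages; iPage++)
            if (paPfns[iPage] != paPfns[0] + iPage)
                return false;
        return true;
    }

    int main(void)
    {
        const uint32_t aContig[]    = { 100, 101, 102, 103 }; /* frames 100..103 */
        const uint32_t aScattered[] = { 100, 101, 250, 251 }; /* gap after frame 101 */
        printf("contiguous: %d\n", IsRunContiguous(aContig,    (size_t)4 << SKETCH_PAGE_SHIFT));
        printf("scattered:  %d\n", IsRunContiguous(aScattered, (size_t)4 << SKETCH_PAGE_SHIFT));
        return 0;
    }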
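The locking path is similarly mechanical: rtR0MemObjNtLock splits a request into MDLs of at most MAX_LOCK_MEM_SIZE bytes each (hence the cMdls calculation), and the new rtR0MemObjNativeGetPagePhysAddr undoes the split with a div/mod on the page index. A sketch of just that arithmetic, assuming 4K pages and a 32MB chunk size standing in for the file's real MAX_LOCK_MEM_SIZE:

    #include <stddef.h>
    #include <stdio.h>

    #define SKETCH_PAGE_SHIFT   12                 /* assumption: 4K pages */
    #define SKETCH_MAX_LOCK_MEM ((size_t)32 << 20) /* assumption: 32MB per MDL */

    int main(void)
    {
        size_t cb = (size_t)100 << 20; /* a 100MB lock request */

        /* MDL count, as in rtR0MemObjNtLock: full chunks plus a possible tail. */
        size_t cMdls = cb / SKETCH_MAX_LOCK_MEM;
        if (cb % SKETCH_MAX_LOCK_MEM)
            cMdls++;
        printf("cMdls=%zu\n", cMdls); /* 4: three 32MB MDLs and a 4MB tail */

        /* Page index -> (MDL index, PFN index), as in rtR0MemObjNativeGetPagePhysAddr. */
        size_t iPage   = ((size_t)70 << 20) >> SKETCH_PAGE_SHIFT; /* the page at offset 70MB */
        size_t iMdl    = iPage / (SKETCH_MAX_LOCK_MEM >> SKETCH_PAGE_SHIFT);
        size_t iMdlPfn = iPage % (SKETCH_MAX_LOCK_MEM >> SKETCH_PAGE_SHIFT);
        printf("iMdl=%zu iMdlPfn=%zu\n", iMdl, iMdlPfn); /* third MDL (index 2), page 1536 within it */
        return 0;
    }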
trunk/src/VBox/Runtime/r0drv/nt/the-nt-kernel.h (r4071 → r4219)

@@ -45 +45 @@
 #endif
 
+#include <iprt/param.h>
 #ifndef PAGE_OFFSET_MASK
 # define PAGE_OFFSET_MASK (PAGE_SIZE - 1)
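The new include pulls in iprt/param.h so PAGE_SIZE is defined before the PAGE_OFFSET_MASK fallback below it is evaluated. A small user-mode sketch of what the mask is used for, with stand-in constants for the IPRT ones:

    #include <stdint.h>
    #include <stdio.h>

    #define SKETCH_PAGE_SIZE        4096u
    #define SKETCH_PAGE_OFFSET_MASK (SKETCH_PAGE_SIZE - 1) /* same construction as the header's fallback */

    int main(void)
    {
        uintptr_t uAddr   = 0x12345678;
        uintptr_t uPage   = uAddr & ~(uintptr_t)SKETCH_PAGE_OFFSET_MASK; /* page-aligned base */
        uintptr_t offPage = uAddr & SKETCH_PAGE_OFFSET_MASK;             /* byte offset within the page */
        printf("base=%#lx off=%#lx\n", (unsigned long)uPage, (unsigned long)offPage); /* 0x12345000 / 0x678 */
        return 0;
    }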