Changeset 20507 in vbox for trunk/src/VBox/Runtime/r0drv
- Timestamp:
- Jun 12, 2009 12:26:15 PM (16 years ago)
- File:
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/Runtime/r0drv/darwin/memobj-r0drv-darwin.cpp
r19956 r20507 44 44 #include <iprt/process.h> 45 45 #include <iprt/string.h> 46 #include <iprt/thread.h> 46 47 47 48 #include "internal/memobj.h" … … 67 68 68 69 70 /** 71 * HACK ALERT! 72 * 73 * Touch the pages to force the kernel to create the page 74 * table entries. This is necessary since the kernel gets 75 * upset if we take a page fault when preemption is disabled 76 * and/or we own a simple lock. It has no problems with us 77 * disabling interrupts when taking the traps, weird stuff. 78 * 79 * @param pv Pointer to the first page. 80 * @param cb The number of bytes. 81 */ 82 static void rtR0MemObjDarwinTouchPages(void *pv, size_t cb) 83 { 84 uint32_t volatile *pu32 = (uint32_t volatile *)pv; 85 for (;;) 86 { 87 ASMAtomicCmpXchgU32(pu32, 0xdeadbeef, 0xdeadbeef); 88 if (cb <= PAGE_SIZE) 89 break; 90 cb -= PAGE_SIZE; 91 pu32 += PAGE_SIZE / sizeof(uint32_t); 92 } 93 } 94 95 #ifdef RT_STRICT 96 97 /** 98 * Read from a physical page. 99 * 100 * @param HCPhys The address to start reading at. 101 * @param cb How many bytes to read. 102 * @param pvDst Where to put the bytes. This is zero'ed on failure. 
103 */ 104 static void rtR0MemObjDarwinReadPhys(RTHCPHYS HCPhys, size_t cb, void *pvDst) 105 { 106 memset(pvDst, '\0', cb); 107 108 IOAddressRange aRanges[1] = { { (mach_vm_address_t)HCPhys, RT_ALIGN(cb, PAGE_SIZE) } }; 109 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRanges(&aRanges[0], RT_ELEMENTS(aRanges), 110 kIODirectionIn, NULL /*task*/); 111 if (pMemDesc) 112 { 113 #if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050 114 IOMemoryMap *pMemMap = pMemDesc->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapDefaultCache); 115 #else 116 IOMemoryMap *pMemMap = pMemDesc->map(kernel_task, 0, kIOMapAnywhere | kIOMapDefaultCache); 117 #endif 118 if (pMemMap) 119 { 120 void const *pvSrc = (void const *)(uintptr_t)pMemMap->getVirtualAddress(); 121 memcpy(pvDst, pvSrc, cb); 122 pMemMap->release(); 123 } 124 else 125 printf("rtR0MemObjDarwinReadPhys: createMappingInTask failed; HCPhys=%llx\n", HCPhys); 126 127 pMemDesc->release(); 128 } 129 else 130 printf("rtR0MemObjDarwinReadPhys: withAddressRanges failed; HCPhys=%llx\n", HCPhys); 131 } 132 133 134 /** 135 * Gets the PTE for a page. 136 * 137 * @returns the PTE. 138 * @param pvPage The virtual address to get the PTE for. 
139 */ 140 uint64_t rtR0MemObjDarwinGetPTE(void *pvPage) 141 { 142 RTUINT64U u64; 143 RTCCUINTREG cr3 = ASMGetCR3(); 144 RTCCUINTREG cr4 = ASMGetCR4(); 145 bool fPAE = false; 146 bool fLMA = false; 147 if (cr4 & RT_BIT(5) /*X86_CR4_PAE*/) 148 { 149 fPAE = true; 150 uint32_t fAmdFeatures = ASMCpuId_EDX(0x80000001); 151 if (fAmdFeatures & RT_BIT(29) /*X86_CPUID_AMD_FEATURE_EDX_LONG_MODE*/) 152 { 153 uint64_t efer = ASMRdMsr(0xc0000080 /*MSR_K6_EFER*/); 154 if (efer & RT_BIT(10) /*MSR_K6_EFER_LMA*/) 155 fLMA = true; 156 } 157 } 158 159 if (fLMA) 160 { 161 /* PML4 */ 162 rtR0MemObjDarwinReadPhys((cr3 & ~(RTCCUINTREG)PAGE_OFFSET_MASK) | (((uint64_t)(uintptr_t)pvPage >> 39) & 0x1ff) * 8, 8, &u64); 163 if (!(u64.u & RT_BIT(0) /* present */)) 164 { 165 printf("rtR0MemObjDarwinGetPTE: %p -> PML4E !p\n", pvPage); 166 return 0; 167 } 168 169 /* PDPTR */ 170 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 30) & 0x1ff) * 8, 8, &u64); 171 if (!(u64.u & RT_BIT(0) /* present */)) 172 { 173 printf("rtR0MemObjDarwinGetPTE: %p -> PDPTE !p\n", pvPage); 174 return 0; 175 } 176 if (u64.u & RT_BIT(7) /* big */) 177 return (u64.u & ~(uint64_t)(_1G -1)) | ((uintptr_t)pvPage & (_1G -1)); 178 179 /* PD */ 180 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 21) & 0x1ff) * 8, 8, &u64); 181 if (!(u64.u & RT_BIT(0) /* present */)) 182 { 183 printf("rtR0MemObjDarwinGetPTE: %p -> PDE !p\n", pvPage); 184 return 0; 185 } 186 if (u64.u & RT_BIT(7) /* big */) 187 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1)); 188 189 /* PD */ 190 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 12) & 0x1ff) * 8, 8, &u64); 191 if (!(u64.u & RT_BIT(0) /* present */)) 192 { 193 printf("rtR0MemObjDarwinGetPTE: %p -> PTE !p\n", pvPage); 194 return 0; 195 } 196 return u64.u; 197 } 198 199 if (fPAE) 200 { 201 /* PDPTR */ 202 rtR0MemObjDarwinReadPhys((u64.u & 0xffffffe0 
/*X86_CR3_PAE_PAGE_MASK*/) | (((uintptr_t)pvPage >> 30) & 0x3) * 8, 8, &u64); 203 if (!(u64.u & RT_BIT(0) /* present */)) 204 return 0; 205 206 /* PD */ 207 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 21) & 0x1ff) * 8, 8, &u64); 208 if (!(u64.u & RT_BIT(0) /* present */)) 209 return 0; 210 if (u64.u & RT_BIT(7) /* big */) 211 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1)); 212 213 /* PD */ 214 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 12) & 0x1ff) * 8, 8, &u64); 215 if (!(u64.u & RT_BIT(0) /* present */)) 216 return 0; 217 return u64.u; 218 } 219 220 /* PD */ 221 rtR0MemObjDarwinReadPhys((u64.au32[0] & ~(uint32_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 22) & 0x3ff) * 4, 4, &u64); 222 if (!(u64.au32[0] & RT_BIT(0) /* present */)) 223 return 0; 224 if (u64.au32[0] & RT_BIT(7) /* big */) 225 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1)); 226 227 /* PD */ 228 rtR0MemObjDarwinReadPhys((u64.au32[0] & ~(uint32_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 12) & 0x3ff) * 4, 4, &u64); 229 if (!(u64.au32[0] & RT_BIT(0) /* present */)) 230 return 0; 231 return u64.au32[0]; 232 233 return 0; 234 } 235 236 #endif /* RT_STRICT */ 237 69 238 int rtR0MemObjNativeFree(RTR0MEMOBJ pMem) 70 239 { … … 72 241 73 242 /* 74 * Release the IOMemoryDescriptor /IOMemoryMap associated with the object.243 * Release the IOMemoryDescriptor or/and IOMemoryMap associated with the object. 75 244 */ 76 245 if (pMemDarwin->pMemDesc) … … 80 249 pMemDarwin->pMemDesc->release(); 81 250 pMemDarwin->pMemDesc = NULL; 82 Assert(!pMemDarwin->pMemMap);83 } 84 elseif (pMemDarwin->pMemMap)251 } 252 253 if (pMemDarwin->pMemMap) 85 254 { 86 255 pMemDarwin->pMemMap->release(); … … 167 336 * we'll use rtR0MemObjNativeAllocCont as a fallback for dealing with that. 
168 337 * 169 * The kIOMemory SharingTypeMaskflag just forces the result to be page aligned.338 * The kIOMemoryKernelUserShared flag just forces the result to be page aligned. 170 339 */ 171 340 int rc; 172 341 IOBufferMemoryDescriptor *pMemDesc = 173 342 IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task, 174 kIOMemory SharingTypeMask343 kIOMemoryKernelUserShared 175 344 | kIODirectionInOut 176 345 | (fContiguous ? kIOMemoryPhysicallyContiguous : 0), … … 207 376 pMemDesc->release(); 208 377 if (PhysMask) 209 LogAlways(("rtR0MemObjNativeAlloc Low: off=%x Addr=%llx AddrPrev=%llx MaxPhysAddr=%llx PhysMas=%llx - buggy API!\n",378 LogAlways(("rtR0MemObjNativeAllocWorker: off=%x Addr=%llx AddrPrev=%llx MaxPhysAddr=%llx PhysMas=%llx - buggy API!\n", 210 379 off, Addr, AddrPrev, MaxPhysAddr, PhysMask)); 211 380 return VERR_ADDRESS_TOO_BIG; … … 213 382 AddrPrev = Addr; 214 383 } 384 385 #ifdef RT_STRICT 386 /* check that the memory is actually mapped. */ 387 //addr64_t Addr = pMemDesc->getPhysicalSegment64(0, NULL); 388 //printf("rtR0MemObjNativeAllocWorker: pv=%p %8llx %8llx\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr); 389 RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER; 390 RTThreadPreemptDisable(&State); 391 rtR0MemObjDarwinTouchPages(pv, cb); 392 RTThreadPreemptRestore(&State); 393 #endif 215 394 216 395 /* … … 487 666 488 667 /* 489 * Must have a memory descriptor .668 * Must have a memory descriptor that we can map. 490 669 */ 491 670 int rc = VERR_INVALID_PARAMETER; … … 500 679 cbSub); 501 680 #else 502 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map(kernel_task, 0, 681 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map(kernel_task, 682 0, 503 683 kIOMapAnywhere | kIOMapDefaultCache, 504 offSub, cbSub); 684 offSub, 685 cbSub); 505 686 #endif 506 687 if (pMemMap) … … 510 691 if ((uintptr_t)pv == VirtAddr) 511 692 { 512 /* 513 * HACK ALERT! 514 * 515 * Touch the pages to force the kernel to create the page 516 * table entries. 
This is necessary since the kernel gets 517 * upset if we take a page fault when preemption is disabled 518 * and/or we own a simple lock. It has no problems with us 519 * disabling interrupts when taking the traps, weird stuff. 520 */ 521 uint32_t volatile *pu32 = (uint32_t volatile *)pv; 522 size_t cbLeft = cbSub; 523 for (;;) 524 { 525 ASMAtomicCmpXchgU32(pu32, 0xdeadbeef, 0xdeadbeef); 526 if (cbLeft <= PAGE_SIZE) 527 break; 528 cbLeft -= PAGE_SIZE; 529 pu32 += PAGE_SIZE / sizeof(uint32_t); 530 } 531 532 /* 533 * Create the IPRT memory object. 534 */ 535 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING, 536 pv, pMemToMapDarwin->Core.cb); 537 if (pMemDarwin) 538 { 539 pMemDarwin->Core.u.Mapping.R0Process = NIL_RTR0PROCESS; 540 pMemDarwin->pMemMap = pMemMap; 541 *ppMem = &pMemDarwin->Core; 542 return VINF_SUCCESS; 543 } 544 545 rc = VERR_NO_MEMORY; 693 //addr64_t Addr = pMemToMapDarwin->pMemDesc->getPhysicalSegment64(offSub, NULL); 694 //printf("pv=%p: %8llx %8llx\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr); 695 696 // /* 697 // * Explicitly lock it so that we're sure it is present and that 698 // * its PTEs cannot be recycled. 699 // * Note! withAddressRange() doesn't work as it adds kIOMemoryTypeVirtual64 700 // * to the options which causes prepare() to not wire the pages. 701 // * This is probably a bug. 702 // */ 703 // IOAddressRange Range = { (mach_vm_address_t)pv, cbSub }; 704 // IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withOptions(&Range, 705 // 1 /* count */, 706 // 0 /* offset */, 707 // kernel_task, 708 // kIODirectionInOut | kIOMemoryTypeVirtual, 709 // kIOMapperSystem); 710 // if (pMemDesc) 711 // { 712 // IOReturn IORet = pMemDesc->prepare(kIODirectionInOut); 713 // if (IORet == kIOReturnSuccess) 714 // { 715 /* HACK ALERT! 
*/ 716 rtR0MemObjDarwinTouchPages(pv, cbSub); 717 /** @todo First, the memory should've been mapped by now, and second, it 718 * shouild have the wired attribute in the PTE (bit 9). Neither is 719 * seems to be the case. The disabled locking code doesn't make any 720 * difference, which is extremely odd, and breaks 721 * rtR0MemObjNativeGetPagePhysAddr (getPhysicalSegment64 -> 64 for the 722 * lock descriptor. */ 723 //addr64_t Addr = pMemDesc->getPhysicalSegment64(0, NULL); 724 //printf("pv=%p: %8llx %8llx (%d)\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr, 2); 725 726 /* 727 * Create the IPRT memory object. 728 */ 729 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING, 730 pv, cbSub); 731 if (pMemDarwin) 732 { 733 pMemDarwin->Core.u.Mapping.R0Process = NIL_RTR0PROCESS; 734 pMemDarwin->pMemMap = pMemMap; 735 // pMemDarwin->pMemDesc = pMemDesc; 736 *ppMem = &pMemDarwin->Core; 737 return VINF_SUCCESS; 738 } 739 740 // pMemDesc->complete(); 741 // rc = VERR_NO_MEMORY; 742 // } 743 // else 744 // rc = RTErrConvertFromDarwinIO(IORet); 745 // pMemDesc->release(); 746 // } 747 // else 748 // rc = VERR_MEMOBJ_INIT_FAILED; 546 749 } 547 750 else … … 574 777 0 /* length */); 575 778 #else 576 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map((task_t)R0Process, 0, 779 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map((task_t)R0Process, 780 0, 577 781 kIOMapAnywhere | kIOMapDefaultCache); 578 782 #endif
Note:
See TracChangeset
for help on using the changeset viewer.