Changeset 14492 in vbox

- Timestamp: Nov 23, 2008 4:18:30 PM
- File: trunk/src/VBox/VMM/VMMR0/PGMR0DynMap.cpp (1 edited)
--- trunk/src/VBox/VMM/VMMR0/PGMR0DynMap.cpp    (r14484)
+++ trunk/src/VBox/VMM/VMMR0/PGMR0DynMap.cpp    (r14492)
@@ -34 +34 @@
 #include <iprt/memobj.h>
 #include <iprt/mp.h>
+#include <iprt/semaphore.h>
 #include <iprt/spinlock.h>
-#include <iprt/semaphore.h>
+#include <iprt/string.h>
 
 
@@ -79 +80 @@
     uint16_t                    cPTs;
     /** The memory objects for the page tables. */
-    RTR0MEMOBJ                  ahMemObjPT[1];
+    RTR0MEMOBJ                  ahMemObjPTs[1];
 } PGMR0DYNMAPSEG;
 /** Pointer to a ring-0 dynamic mapping cache segment. */
@@ -107 +108 @@
         /** PTE pointer, PAE version. */
         PX86PTEPAE              pPae;
+        /** PTE pointer, the void version. */
+        void                   *pv;
     } uPte;
     /** CPUs that haven't invalidated this entry after it's last update. */
@@ -159 +162 @@
 
 
+/**
+ * Paging level data.
+ */
+typedef struct PGMR0DYNMAPPGLVL
+{
+    uint32_t        cLevels;        /**< The number of levels. */
+    struct
+    {
+        RTHCPHYS    HCPhys;         /**< The address of the page for the current level,
+                                     *   i.e. what hMemObj/hMapObj is currently mapping. */
+        RTHCPHYS    fPhysMask;      /**< Mask for extracting HCPhys from uEntry. */
+        RTR0MEMOBJ  hMemObj;        /**< Memory object for HCPhys, PAGE_SIZE. */
+        RTR0MEMOBJ  hMapObj;        /**< Mapping object for hMemObj. */
+        uint32_t    fPtrShift;      /**< The pointer shift count. */
+        uint64_t    fPtrMask;       /**< The mask to apply to the shifted pointer to get the table index. */
+        uint64_t    fAndMask;       /**< And mask to check entry flags. */
+        uint64_t    fResMask;       /**< The result from applying fAndMask. */
+        union
+        {
+            void            *pv;        /**< hMapObj address. */
+            PX86PGUINT       paLegacy;  /**< Legacy table view. */
+            PX86PGPAEUINT    paPae;     /**< PAE/AMD64 table view. */
+        } u;
+    } a[4];
+} PGMR0DYNMAPPGLVL;
+/** Pointer to paging level data. */
+typedef PGMR0DYNMAPPGLVL *PPGMR0DYNMAPPGLVL;
+
+
 /*******************************************************************************
 *   Global Variables                                                           *
@@ -439 +471 @@
 
 /**
+ * Initializes the paging level data.
+ *
+ * @param   pThis       The dynamic mapping cache instance.
+ * @param   pPgLvl      The paging level data.
+ */
+void pgmR0DynMapPagingArrayInit(PPGMR0DYNMAP pThis, PPGMR0DYNMAPPGLVL pPgLvl)
+{
+    RTCCUINTREG     cr4 = ASMGetCR4();
+    switch (pThis->enmPgMode)
+    {
+        case SUPPAGINGMODE_32_BIT:
+        case SUPPAGINGMODE_32_BIT_GLOBAL:
+            pPgLvl->cLevels = 2;
+            pPgLvl->a[0].fPhysMask = X86_CR3_PAGE_MASK;
+            pPgLvl->a[0].fAndMask  = X86_PDE_P | X86_PDE_RW | (cr4 & X86_CR4_PSE ? X86_PDE_PS : 0);
+            pPgLvl->a[0].fResMask  = X86_PDE_P | X86_PDE_RW;
+            pPgLvl->a[0].fPtrMask  = X86_PD_MASK;
+            pPgLvl->a[0].fPtrShift = X86_PD_SHIFT;
+
+            pPgLvl->a[1].fPhysMask = X86_PDE_PG_MASK;
+            pPgLvl->a[1].fAndMask  = X86_PTE_P | X86_PTE_RW;
+            pPgLvl->a[1].fResMask  = X86_PTE_P | X86_PTE_RW;
+            pPgLvl->a[1].fPtrMask  = X86_PT_MASK;
+            pPgLvl->a[1].fPtrShift = X86_PT_SHIFT;
+            break;
+
+        case SUPPAGINGMODE_PAE:
+        case SUPPAGINGMODE_PAE_GLOBAL:
+        case SUPPAGINGMODE_PAE_NX:
+        case SUPPAGINGMODE_PAE_GLOBAL_NX:
+            pPgLvl->cLevels = 3;
+            pPgLvl->a[0].fPhysMask = X86_CR3_PAE_PAGE_MASK;
+            pPgLvl->a[0].fPtrMask  = X86_PDPT_MASK_PAE;
+            pPgLvl->a[0].fPtrShift = X86_PDPT_SHIFT;
+            pPgLvl->a[0].fAndMask  = X86_PDPE_P;
+            pPgLvl->a[0].fResMask  = X86_PDPE_P;
+
+            pPgLvl->a[1].fPhysMask = X86_PDPE_PG_MASK;
+            pPgLvl->a[1].fPtrMask  = X86_PD_MASK;
+            pPgLvl->a[1].fPtrShift = X86_PD_SHIFT;
+            pPgLvl->a[1].fAndMask  = X86_PDE_P | X86_PDE_RW | (cr4 & X86_CR4_PSE ? X86_PDE_PS : 0);
+            pPgLvl->a[1].fResMask  = X86_PDE_P | X86_PDE_RW;
+
+            pPgLvl->a[2].fPhysMask = X86_PDE_PAE_PG_MASK;
+            pPgLvl->a[2].fPtrMask  = X86_PT_MASK;
+            pPgLvl->a[2].fPtrShift = X86_PT_SHIFT;
+            pPgLvl->a[2].fAndMask  = X86_PTE_P | X86_PTE_RW;
+            pPgLvl->a[2].fResMask  = X86_PTE_P | X86_PTE_RW;
+            break;
+
+        case SUPPAGINGMODE_AMD64:
+        case SUPPAGINGMODE_AMD64_GLOBAL:
+        case SUPPAGINGMODE_AMD64_NX:
+        case SUPPAGINGMODE_AMD64_GLOBAL_NX:
+            pPgLvl->cLevels = 3;
+            pPgLvl->a[0].fPhysMask = X86_CR3_AMD64_PAGE_MASK;
+            pPgLvl->a[0].fPtrMask  = X86_PML4_MASK;
+            pPgLvl->a[0].fPtrShift = X86_PML4_SHIFT;
+            pPgLvl->a[0].fAndMask  = X86_PML4E_P | X86_PML4E_RW;
+            pPgLvl->a[0].fResMask  = X86_PML4E_P | X86_PML4E_RW;
+
+            pPgLvl->a[1].fPhysMask = X86_PML4E_PG_MASK;
+            pPgLvl->a[1].fPtrMask  = X86_PDPT_MASK_AMD64;
+            pPgLvl->a[1].fPtrShift = X86_PDPT_SHIFT;
+            pPgLvl->a[1].fAndMask  = X86_PDPE_P | X86_PDPE_RW /** @todo check for X86_PDPT_PS support. */;
+            pPgLvl->a[1].fResMask  = X86_PDPE_P | X86_PDPE_RW;
+
+            pPgLvl->a[2].fPhysMask = X86_PDPE_PG_MASK;
+            pPgLvl->a[2].fPtrMask  = X86_PD_MASK;
+            pPgLvl->a[2].fPtrShift = X86_PD_SHIFT;
+            pPgLvl->a[2].fAndMask  = X86_PDE_P | X86_PDE_RW | (cr4 & X86_CR4_PSE ? X86_PDE_PS : 0);
+            pPgLvl->a[2].fResMask  = X86_PDE_P | X86_PDE_RW;
+
+            pPgLvl->a[3].fPhysMask = X86_PDE_PAE_PG_MASK;
+            pPgLvl->a[3].fPtrMask  = X86_PT_MASK;
+            pPgLvl->a[3].fPtrShift = X86_PT_SHIFT;
+            pPgLvl->a[3].fAndMask  = X86_PTE_P | X86_PTE_RW;
+            pPgLvl->a[3].fResMask  = X86_PTE_P | X86_PTE_RW;
+            break;
+
+        default:
+            AssertFailed();
+            pPgLvl->cLevels = 0;
+            break;
+    }
+
+    for (uint32_t i = 0; i < 4; i++) /* ASSUMING array size. */
+    {
+        pPgLvl->a[i].HCPhys  = NIL_RTHCPHYS;
+        pPgLvl->a[i].hMapObj = NIL_RTR0MEMOBJ;
+        pPgLvl->a[i].hMemObj = NIL_RTR0MEMOBJ;
+        pPgLvl->a[i].u.pv    = NULL;
+    }
+}
+
+
+static int pgmR0DynMapPagingArrayMapPte(PPGMR0DYNMAP pThis, PPGMR0DYNMAPPGLVL pPgLvl, void *pvPage,
+                                        PPGMR0DYNMAPSEG pSeg, uint32_t cMaxPTs, void **ppvPTE)
+{
+    void           *pvEntry = NULL;
+    X86PGPAEUINT    uEntry  = ASMGetCR3();
+    for (uint32_t i = 0; i < pPgLvl->cLevels; i++)
+    {
+        RTHCPHYS HCPhys = uEntry & pPgLvl->a[i].fPhysMask;
+        if (pPgLvl->a[i].HCPhys != HCPhys)
+        {
+            /*
+             * Need to remap this level.
+             * The final level, the PT, will not be freed since that is what it's all about.
+             */
+            ASMIntEnable();
+            if (i + 1 == pPgLvl->cLevels)
+                AssertReturn(pSeg->cPTs < cMaxPTs, VERR_INTERNAL_ERROR);
+            else
+            {
+                int rc2 = RTR0MemObjFree(pPgLvl->a[i].hMemObj, true /* fFreeMappings */); AssertRC(rc2);
+                pPgLvl->a[i].hMemObj = pPgLvl->a[i].hMapObj = NIL_RTR0MEMOBJ;
+            }
+
+            int rc = RTR0MemObjEnterPhys(&pPgLvl->a[i].hMemObj, HCPhys, PAGE_SIZE);
+            if (RT_SUCCESS(rc))
+            {
+                rc = RTR0MemObjMapKernel(&pPgLvl->a[i].hMapObj, pPgLvl->a[i].hMemObj, &pPgLvl->a[i].u.pv, 0, RTMEM_PROT_WRITE | RTMEM_PROT_READ);
+                if (RT_SUCCESS(rc))
+                {
+                    pPgLvl->a[i].HCPhys = HCPhys;
+                    if (i + 1 == pPgLvl->cLevels)
+                        pSeg->ahMemObjPTs[pSeg->cPTs++] = pPgLvl->a[i].hMemObj;
+                    ASMIntDisable();
+                    return VERR_TRY_AGAIN;
+                }
+
+                pPgLvl->a[i].hMapObj = NIL_RTR0MEMOBJ;
+            }
+            else
+                pPgLvl->a[i].hMemObj = NIL_RTR0MEMOBJ;
+            pPgLvl->a[i].HCPhys = NIL_RTHCPHYS;
+            AssertReturn(rc != VERR_TRY_AGAIN, VERR_INTERNAL_ERROR);
+            return rc;
+        }
+
+        /*
+         * The next level.
+         */
+        uint32_t iEntry = ((uintptr_t)pvPage >> pPgLvl->a[i].fPtrShift) & pPgLvl->a[i].fPtrMask;
+        if (pThis->fLegacyMode)
+        {
+            pvEntry = &pPgLvl->a[i].u.paLegacy[iEntry];
+            uEntry  = pPgLvl->a[i].u.paLegacy[iEntry];
+        }
+        else
+        {
+            pvEntry = &pPgLvl->a[i].u.paPae[iEntry];
+            uEntry  = pPgLvl->a[i].u.paPae[iEntry];
+        }
+
+        if ((uEntry & pPgLvl->a[i].fAndMask) != pPgLvl->a[i].fResMask)
+        {
+            LogRel(("PGMR0DynMap: internal error - iPgLvl=%u cLevels=%u uEntry=%#llx fAnd=%#llx fRes=%#llx got=%#llx\n",
+                    i, pPgLvl->cLevels, uEntry, pPgLvl->a[i].fAndMask, pPgLvl->a[i].fResMask, uEntry & pPgLvl->a[i].fAndMask));
+            return VERR_INTERNAL_ERROR;
+        }
+    }
+
+    /* made it thru without needing to remap anything. */
+    *ppvPTE = pvEntry;
+    return VINF_SUCCESS;
+}
+
+
+/**
  * Adds a new segment of the specified size.
  *
@@ -447 +650 @@
 static int pgmR0DynMapAddSeg(PPGMR0DYNMAP pThis, uint32_t cPages)
 {
-#if 0
     int rc2;
+    AssertReturn(ASMGetFlags() & X86_EFL_IF, VERR_PREEMPT_DISABLED);
 
     /*
      * Do the array rellocation first.
-     * (Too lazy to clean these up on failure.)
-     */
-    void *pv = RTMemRealloc(pThis->paPages, sizeof(pThis->paPages[0]) * (pThis->cPages + cPages));
-    if (!pv)
+     * (The pages array has to be replaced behind the spinlock of course.)
+     */
+    void *pvSavedPTEs = RTMemRealloc(pThis->pvSavedPTEs, (pThis->fLegacyMode ? sizeof(X86PGUINT) : sizeof(X86PGPAEUINT)) * (pThis->cPages + cPages));
+    if (!pvSavedPTEs)
         return VERR_NO_MEMORY;
-    pThis->paPages = (PPGMR0DYNMAPENTRY)pv;
-
-    pv = RTMemRealloc(pThis->pvSavedPTEs, (pThis->fLegacyMode ? sizeof(X86PGUINT) : sizeof(X86PGPAEUINT)) * (pThis->cPages + cPages));
-    if (!pv)
+    pThis->pvSavedPTEs = pvSavedPTEs;
+
+    void *pvPages = RTMemAllocZ(sizeof(pThis->paPages[0]) * (pThis->cPages + cPages));
+    if (!pvPages)
+    {
+        pvSavedPTEs = RTMemRealloc(pThis->pvSavedPTEs, (pThis->fLegacyMode ? sizeof(X86PGUINT) : sizeof(X86PGPAEUINT)) * pThis->cPages);
+        if (pvSavedPTEs)
+            pThis->pvSavedPTEs = pvSavedPTEs;
         return VERR_NO_MEMORY;
-    pThis->pvSavedPTEs = pv;
+    }
+
+    RTSPINLOCKTMP   Tmp = RTSPINLOCKTMP_INITIALIZER;
+    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
+
+    memcpy(pvPages, pThis->paPages, sizeof(pThis->paPages[0]) * pThis->cPages);
+    void           *pvToFree = pThis->paPages;
+    pThis->paPages = (PPGMR0DYNMAPENTRY)pvPages;
+
+    RTSpinlockRelease(pThis->hSpinlock, &Tmp);
+    RTMemFree(pvToFree);
 
     /*
      * Allocate the segment structure and pages memory.
      */
-    uint32_t cPTs = cPages / (pThis->fLegacyMode ? X86_PG_ENTRIES : X86_PG_PAE_ENTRIES) + 2;
-    PPGMR0DYNMAPSEG pSeg = RTMemAllocZ(RT_UOFFSETOF(PGMR0DYNMAPSEG, ahMemObjPTs[cPTs]));
+    uint32_t cMaxPTs = cPages / (pThis->fLegacyMode ? X86_PG_ENTRIES : X86_PG_PAE_ENTRIES) + 2;
+    PPGMR0DYNMAPSEG pSeg = (PPGMR0DYNMAPSEG)RTMemAllocZ(RT_UOFFSETOF(PGMR0DYNMAPSEG, ahMemObjPTs[cMaxPTs]));
     if (!pSeg)
         return VERR_NO_MEMORY;
@@ -478 +695 @@
     if (RT_SUCCESS(rc))
     {
+        uint8_t *pbPage = (uint8_t *)RTR0MemObjAddress(pSeg->hMemObj);
+        AssertMsg(VALID_PTR(pbPage) && !((uintptr_t)pbPage & PAGE_OFFSET_MASK), ("%p\n", pbPage));
+
         /*
-         * Walk the paging hierarchy and map the relevant page tables.
+         * Walk thru the pages and set them up with a mapping of their PTE and everything.
          */
-        uint8_t *pbPage = RTR0MemObjAddress(pSeg->hMemObj);
-        AssertMsg(VALID_PTR(pbPage) && !((uintptr_t)pbPage & PAGE_OFFSET_MASK), ("%p\n", pbPage));
-        uint32_t iPage = pThis->cPages;
-        uint32_t iEndPage = iPage + cPages;
-        struct
-        {
-            RTHCPHYS HCPhys;        /**< The entry that's currently mapped */
-            RTHCPHYS fPhysMask;     /**< Mask for extracting HCPhys from uEntry. */
-            RTR0MEMOBJ hMemObj;
-            RTR0MEMOBJ hMapObj;
-            uint64_t fPtrMask;
-            uint32_t fPtrShift;
-            uint64_t fAndMask;
-            uint64_t fResMask;
-            union
-            {
-                void *pv;
-            } u;
-        } a[4];
-        RTCCUINTREG cr4 = ASMGetCR4();
-        uint32_t cLevels;
-        switch (pThis->enmPgMode)
-        {
-            case SUPPAGINGMODE_32_BIT:
-            case SUPPAGINGMODE_32_BIT_GLOBAL:
-                cLevels = 2;
-                a[0].fAndMask = X86_PDE_P | X86_PDE_RW | (cr4 & X86_CR4_PSE ? X86_PDE_PS : 0);
-                a[0].fResMask = X86_PDE_P | X86_PDE_RW;
-                a[0].fPtrMask = X86_PD_MASK;
-                a[0].fPtrShift = X86_PD_SHIFT;
-                a[1].fAndMask = X86_PTE_P | X86_PTE_RW;
-                a[1].fResMask = X86_PTE_P | X86_PTE_RW;
-                a[1].fPtrMask = X86_PT_MASK;
-                a[1].fPtrShift = X86_PT_SHIFT;
-                break;
-
-            case SUPPAGINGMODE_PAE:
-            case SUPPAGINGMODE_PAE_GLOBAL:
-            case SUPPAGINGMODE_PAE_NX:
-            case SUPPAGINGMODE_PAE_GLOBAL_NX:
-                cLevels = 3;
-                a[0].fAndMask = X86_PDPE_P;
-                a[0].fResMask = X86_PDPE_P;
-                a[0].fPtrMask = X86_PDPT_MASK_PAE;
-                a[0].fPtrShift = X86_PDPT_SHIFT;
-                a[1].fAndMask = X86_PDE_P | X86_PDE_RW | (cr4 & X86_CR4_PSE ? X86_PDE_PS : 0);
-                a[1].fResMask = X86_PDE_P | X86_PDE_RW;
-                a[1].fPtrMask = X86_PD_MASK;
-                a[1].fPtrShift = X86_PD_SHIFT;
-                a[2].fAndMask = X86_PTE_P | X86_PTE_RW;
-                a[2].fResMask = X86_PTE_P | X86_PTE_RW;
-                a[2].fPtrMask = X86_PT_MASK;
-                a[2].fPtrShift = X86_PT_SHIFT;
-                break;
-
-            case SUPPAGINGMODE_AMD64:
-            case SUPPAGINGMODE_AMD64_GLOBAL:
-            case SUPPAGINGMODE_AMD64_NX:
-            case SUPPAGINGMODE_AMD64_GLOBAL_NX:
-                cLevels = 3;
-                a[0].fAndMask = X86_PML4E_P | X86_PML4E_RW;
-                a[0].fResMask = X86_PML4E_P | X86_PML4E_RW;
-                a[0].fPtrMask = X86_PML4_MASK;
-                a[0].fPtrShift = X86_PML4_SHIFT;
-                a[1].fAndMask = X86_PDPE_P | X86_PDPE_RW /** @todo check for X86_PDPT_PS support. */;
-                a[1].fResMask = X86_PDPE_P | X86_PDPE_RW;
-                a[1].fPtrMask = X86_PDPT_MASK_AMD64;
-                a[1].fPtrShift = X86_PDPT_SHIFT;
-                a[2].fAndMask = X86_PDE_P | X86_PDE_RW | (cr4 & X86_CR4_PSE ? X86_PDE_PS : 0);
-                a[2].fResMask = X86_PDE_P | X86_PDE_RW;
-                a[2].fPtrMask = X86_PD_MASK;
-                a[2].fPtrShift = X86_PD_SHIFT;
-                a[3].fAndMask = X86_PTE_P | X86_PTE_RW;
-                a[3].fResMask = X86_PTE_P | X86_PTE_RW;
-                a[3].fPtrMask = X86_PT_MASK;
-                a[3].fPtrShift = X86_PT_SHIFT;
-                break;
-            default:
-                cLevels = 0;
-                break;
-        }
-        for (uint32_t i = 0; i < RT_ELEMENTS(a); i++)
-        {
-            a[i].HCPhys = NIL_RTHCPHYS;
-            a[i].hMapObj = a[i].hMemObj = NIL_RTR0MEMOBJ;
-            a[i].u.pv = NULL;
-        }
-
-        for (; iPage < iEndPage && RT_SUCCESS(rc); iPage++, pbPage += PAGE_SIZE)
-        {
-            /* Initialize it */
+        ASMIntDisable();
+        PGMR0DYNMAPPGLVL PgLvl;
+        pgmR0DynMapPagingArrayInit(pThis, &PgLvl);
+        uint32_t iEndPage = pThis->cPages + cPages;
+        for (uint32_t iPage = pThis->cPages;
+             iPage < iEndPage;
+             iPage++, pbPage += PAGE_SIZE)
+        {
+            /* Initialize the page data. */
             pThis->paPages[iPage].HCPhys = NIL_RTHCPHYS;
             pThis->paPages[iPage].pvPage = pbPage;
             pThis->paPages[iPage].cRefs  = 0;
-            pThis->paPages[iPage].uPte.pPae = NULL;
+            pThis->paPages[iPage].uPte.pPae = 0;
             RTCpuSetFill(&pThis->paPages[iPage].PendingSet);
 
+            /* Map its page table, retry until we've got a clean run (paranoia). */
+            do
+                rc = pgmR0DynMapPagingArrayMapPte(pThis, &PgLvl, pbPage, pSeg, cMaxPTs,
+                                                  &pThis->paPages[iPage].uPte.pv);
+            while (rc == VERR_TRY_AGAIN);
+            if (RT_FAILURE(rc))
+                break;
+            rc = VINF_SUCCESS;
+
+            /* Save the PTE. */
+            if (pThis->fLegacyMode)
+                ((PX86PGUINT)pThis->pvSavedPTEs)[iPage]    = pThis->paPages[iPage].uPte.pLegacy->u;
+            else
+                ((PX86PGPAEUINT)pThis->pvSavedPTEs)[iPage] = pThis->paPages[iPage].uPte.pPae->u;
+        } /* for each page */
+        ASMIntEnable();
+
+        /* cleanup non-PT mappings */
+        for (uint32_t i = 0; i < PgLvl.cLevels - 1; i++)
+            RTR0MemObjFree(PgLvl.a[i].hMemObj, true /* fFreeMappings */);
+
+        if (RT_SUCCESS(rc))
+        {
+            /** @todo setup guard pages here later (strict builds should leave every
+             *        second page and the start/end pages not present). */
+
             /*
-             * Map its page table.
-             *
-             * This is a bit ASSUMPTIVE, it should really do a clean run thru
-             * the tables everything something was mapped and disable preemption
-             * or/and interrupts.
+             * Commit it by adding the segment to the list and updating the page count.
              */
-            X86PGPAEUINT uEntry = ASMGetCR3();
-            for (unsigned i = 0; i < cLevels && RT_SUCCESS(rc); i++)
-            {
-                RTHCPHYS HCPhys = uEntry & a[i].fPhysMask;
-                if (a[i].HCPhys != HCPhys)
-                {
-                    if (i + 1 != cLevels)
-                    {
-                        RTR0MemObjFree(a[i].hMemObj, true /* fFreeMappings */);
-                        a[i].hMemObj = a[i].hMapObj = NIL_RTR0MEMOBJ;
-                    }
-                    rc = RTR0MemObjEnterPhys(&a[i].hMemObj, HCPhys, PAGE_SIZE);
-                    if (RT_SUCCESS(rc))
-                        rc = RTR0MemObjMapKernel(&a[i].hMapObj, a[i].hMemObj, &a[i].u.pv, 0, RTMEM_PROT_WRITE | RTMEM_PROT_READ);
-                    if (RT_FAILURE(rc))
-                        break;
-                }
-
-            }
-
-
-        } /* for each page */
-
-        for (iPage = 0; i < cLevels; )
-
-        rc2 = RTR0MemObjFree(hMemObjCR3, true /* fFreeMappings */); AssertRC(rc2);
-
-        rc2 = RTR0MemObjFree(pSeg->hMemObj, true /* fFreeMappings */); AssertRC(rc2);
+            pSeg->pNext     = pThis->pSegHead;
+            pThis->pSegHead = pSeg;
+            pThis->cPages  += cPages;
+            return VINF_SUCCESS;
+        }
+
+        /*
+         * Bail out.
+         */
+        while (pSeg->cPTs-- > 0)
+        {
+            rc2 = RTR0MemObjFree(pSeg->ahMemObjPTs[pSeg->cPTs], true /* fFreeMappings */);
+            AssertRC(rc2);
+            pSeg->ahMemObjPTs[pSeg->cPTs] = NIL_RTR0MEMOBJ;
+        }
+
+        rc2 = RTR0MemObjFree(pSeg->hMemObj, true /* fFreeMappings */);
+        AssertRC(rc2);
+        pSeg->hMemObj = NIL_RTR0MEMOBJ;
     }
     RTMemFree(pSeg);
+
+    /* Don't bother resizing the arrays, too layz. */
     return rc;
-#else
-    return VERR_NOT_IMPLEMENTED;
-#endif
 }
 
@@ -740 +890 @@
         while (iPT-- > 0)
        {
-            rc = RTR0MemObjFree(pSeg->ahMemObjPT[iPT], true /* fFreeMappings */); AssertRC(rc);
-            pSeg->ahMemObjPT[iPT] = NIL_RTR0MEMOBJ;
+            rc = RTR0MemObjFree(pSeg->ahMemObjPTs[iPT], true /* fFreeMappings */); AssertRC(rc);
+            pSeg->ahMemObjPTs[iPT] = NIL_RTR0MEMOBJ;
        }
         rc = RTR0MemObjFree(pSeg->hMemObj, true /* fFreeMappings */); AssertRC(rc);
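
A note on the new mapping walk: pgmR0DynMapPagingArrayMapPte returns VERR_TRY_AGAIN whenever it had to (re)map a paging level (briefly re-enabling interrupts to do so), and pgmR0DynMapAddSeg simply repeats the walk for the page until it completes without any remapping. The sketch below shows that retry pattern in isolation; the status-code values and the mapPteStandIn helper are simplified stand-ins for illustration, not the VirtualBox/IPRT API.

#include <stdio.h>

/* Simplified stand-ins for the IPRT status codes used above; values are
 * illustrative only. */
#define VINF_SUCCESS    0
#define VERR_TRY_AGAIN  (-18)

/* Hypothetical stand-in for pgmR0DynMapPagingArrayMapPte(): pretend two upper
 * paging levels still need (re)mapping, so the first two walks ask the caller
 * to start over. */
static int mapPteStandIn(int *pcRemapsLeft)
{
    if (*pcRemapsLeft > 0)
    {
        (*pcRemapsLeft)--;      /* a level was remapped with interrupts enabled */
        return VERR_TRY_AGAIN;  /* the walk must be redone from CR3 */
    }
    return VINF_SUCCESS;        /* clean run: every level was already mapped */
}

int main(void)
{
    int cRemapsLeft = 2;
    int rc;

    /* The retry pattern pgmR0DynMapAddSeg applies to each cache page. */
    do
        rc = mapPteStandIn(&cRemapsLeft);
    while (rc == VERR_TRY_AGAIN);

    printf("final rc=%d\n", rc);
    return rc == VINF_SUCCESS ? 0 : 1;
}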