Changeset 86704 in vbox for trunk/src/VBox/VMM
- Timestamp: Oct 26, 2020 12:04:05 PM
- svn:sync-xref-src-repo-rev: 141088
- Location: trunk/src/VBox/VMM
- Files: 5 edited
trunk/src/VBox/VMM/VMMR0/DBGFR0Bp.cpp
r86700 → r86704

     }
 
+    for (uint32_t i = 0; i < RT_ELEMENTS(pGVM->dbgfr0.s.aBpL2TblChunks); i++)
+    {
+        PDBGFBPL2TBLCHUNKR0 pL2Chunk = &pGVM->dbgfr0.s.aBpL2TblChunks[i];
+
+        pL2Chunk->hMemObj = NIL_RTR0MEMOBJ;
+        pL2Chunk->hMapObj = NIL_RTR0MEMOBJ;
+        //pL2Chunk->paBpL2TblBaseSharedR0 = NULL;
+    }
+
     pGVM->dbgfr0.s.hMemObjBpLocL1 = NIL_RTR0MEMOBJ;
     //pGVM->dbgfr0.s.paBpLocL1R0 = NULL;
…
     }
 
+    for (uint32_t i = 0; i < RT_ELEMENTS(pGVM->dbgfr0.s.aBpL2TblChunks); i++)
+    {
+        PDBGFBPL2TBLCHUNKR0 pL2Chunk = &pGVM->dbgfr0.s.aBpL2TblChunks[i];
+
+        if (pL2Chunk->hMemObj != NIL_RTR0MEMOBJ)
+        {
+            Assert(pL2Chunk->hMapObj != NIL_RTR0MEMOBJ);
+
+            pL2Chunk->paBpL2TblBaseSharedR0 = NULL;
+
+            hMemObj = pL2Chunk->hMapObj;
+            pL2Chunk->hMapObj = NIL_RTR0MEMOBJ;
+            RTR0MemObjFree(hMemObj, true);
+
+            hMemObj = pL2Chunk->hMemObj;
+            pL2Chunk->hMemObj = NIL_RTR0MEMOBJ;
+            RTR0MemObjFree(hMemObj, true);
+        }
+    }
+
     pGVM->dbgfr0.s.fInit = false;
 }
…
         Assert(!pBpChunk->paBpBaseSharedR0);
         Assert(!pBpChunk->paBpBaseR0Only);
+    }
+
+    for (uint32_t i = 0; i < RT_ELEMENTS(pGVM->dbgfr0.s.aBpL2TblChunks); i++)
+    {
+        PDBGFBPL2TBLCHUNKR0 pL2Chunk = &pGVM->dbgfr0.s.aBpL2TblChunks[i];
+
+        Assert(pL2Chunk->hMemObj == NIL_RTR0MEMOBJ);
+        Assert(pL2Chunk->hMapObj == NIL_RTR0MEMOBJ);
+        Assert(!pL2Chunk->paBpL2TblBaseSharedR0);
     }
 }
…
 /**
+ * Worker for DBGFR0BpL2TblChunkAllocReqHandler() that does the actual chunk allocation.
+ *
+ * @returns VBox status code.
+ * @param   pGVM            The global (ring-0) VM structure.
+ * @param   idChunk         The chunk ID to allocate.
+ * @param   ppL2ChunkBaseR3 Where to return the ring-3 chunk base address on success.
+ * @thread  EMT(0)
+ */
+static int dbgfR0BpL2TblChunkAllocWorker(PGVM pGVM, uint32_t idChunk, R3PTRTYPE(void *) *ppL2ChunkBaseR3)
+{
+    /*
+     * Figure out how much memory we need for the chunk and allocate it.
+     */
+    uint32_t const cbTotal = RT_ALIGN_32(DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK * sizeof(DBGFBPL2ENTRY), PAGE_SIZE);
+
+    RTR0MEMOBJ hMemObj;
+    int rc = RTR0MemObjAllocPage(&hMemObj, cbTotal, false /*fExecutable*/);
+    if (RT_FAILURE(rc))
+        return rc;
+    RT_BZERO(RTR0MemObjAddress(hMemObj), cbTotal);
+
+    /* Map it. */
+    RTR0MEMOBJ hMapObj;
+    rc = RTR0MemObjMapUserEx(&hMapObj, hMemObj, (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, RTR0ProcHandleSelf(),
+                             0 /*offSub*/, cbTotal);
+    if (RT_SUCCESS(rc))
+    {
+        PDBGFBPL2TBLCHUNKR0 pL2ChunkR0 = &pGVM->dbgfr0.s.aBpL2TblChunks[idChunk];
+
+        pL2ChunkR0->hMemObj = hMemObj;
+        pL2ChunkR0->hMapObj = hMapObj;
+        pL2ChunkR0->paBpL2TblBaseSharedR0 = (PDBGFBPL2ENTRY)RTR0MemObjAddress(hMemObj);
+
+        /*
+         * We're done.
+         */
+        *ppL2ChunkBaseR3 = RTR0MemObjAddressR3(hMapObj);
+        return rc;
+    }
+
+    RTR0MemObjFree(hMemObj, true);
+    return rc;
+}
+
+
+/**
  * Used by ring-3 DBGF to fully initialize the breakpoint manager for operation.
  *
…
 }
+
+
+/**
+ * Used by ring-3 DBGF to allocate a given chunk in the global L2 lookup table.
+ *
+ * @returns VBox status code.
+ * @param   pGVM    The global (ring-0) VM structure.
+ * @param   pReq    Pointer to the request buffer.
+ * @thread  EMT(0)
+ */
+VMMR0_INT_DECL(int) DBGFR0BpL2TblChunkAllocReqHandler(PGVM pGVM, PDBGFBPL2TBLCHUNKALLOCREQ pReq)
+{
+    LogFlow(("DBGFR0BpL2TblChunkAllocReqHandler:\n"));
+
+    /*
+     * Validate the request.
+     */
+    AssertReturn(pReq->Hdr.cbReq == sizeof(*pReq), VERR_INVALID_PARAMETER);
+
+    uint32_t const idChunk = pReq->idChunk;
+    AssertReturn(idChunk < DBGF_BP_L2_TBL_CHUNK_COUNT, VERR_INVALID_PARAMETER);
+
+    int rc = GVMMR0ValidateGVMandEMT(pGVM, 0);
+    AssertRCReturn(rc, rc);
+
+    AssertReturn(pGVM->dbgfr0.s.fInit, VERR_WRONG_ORDER);
+    AssertReturn(pGVM->dbgfr0.s.aBpL2TblChunks[idChunk].hMemObj == NIL_RTR0MEMOBJ, VERR_INVALID_PARAMETER);
+
+    return dbgfR0BpL2TblChunkAllocWorker(pGVM, idChunk, &pReq->pChunkBaseR3);
+}
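The sizing the worker computes is easy to sanity-check: DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK is _64K and AssertCompileSize pins DBGFBPL2ENTRY at 16 bytes (see the DBGFInternal.h hunk below), so each chunk is exactly 1 MiB (already page aligned) and the full table tops out at 8 chunks, i.e. 8 MiB. A tiny standalone C check, with the constants copied from the header; this program is illustrative only and not part of the changeset:

    #include <stdio.h>

    int main(void)
    {
        unsigned const cEntriesPerChunk = 64 * 1024;  /* DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK (_64K) */
        unsigned const cbEntry          = 16;         /* sizeof(DBGFBPL2ENTRY): two uint64_t members */
        unsigned const cChunks          = 512 * 1024 / cEntriesPerChunk; /* DBGF_BP_L2_TBL_CHUNK_COUNT = 8 */

        printf("chunk size: %u KiB\n", cEntriesPerChunk * cbEntry / 1024);                  /* 1024 KiB = 1 MiB */
        printf("table max:  %u MiB\n", cEntriesPerChunk * cbEntry * cChunks / (1024 * 1024)); /* 8 MiB */
        return 0;
    }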
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
r86699 → r86704

             break;
         }
+
+        case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
+        {
+            if (!pReqHdr || u64Arg || idCpu != 0)
+                return VERR_INVALID_PARAMETER;
+            rc = DBGFR0BpL2TblChunkAllocReqHandler(pGVM, (PDBGFBPL2TBLCHUNKALLOCREQ)pReqHdr);
+            VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
+            break;
+        }
 #endif
…
         case VMMR0_DO_DBGF_BP_INIT:
         case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
+        case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
 #endif
         {
trunk/src/VBox/VMM/VMMR3/DBGFR3Bp.cpp
r86701 → r86704

     }
 
+    for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpL2TblChunks); i++)
+    {
+        PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[i];
+
+        //pL2Chunk->pL2BaseR3 = NULL;
+        //pL2Chunk->pbmAlloc  = NULL;
+        //pL2Chunk->cFree     = 0;
+        pL2Chunk->idChunk = DBGF_BP_CHUNK_ID_INVALID; /* Not allocated. */
+    }
+
     //pUVM->dbgf.s.paBpLocL1R3 = NULL;
-    return VINF_SUCCESS;
+    pUVM->dbgf.s.hMtxBpL2Wr = NIL_RTSEMFASTMUTEX;
+    return RTSemFastMutexCreate(&pUVM->dbgf.s.hMtxBpL2Wr);
 }
…
             pBpChunk->idChunk = DBGF_BP_CHUNK_ID_INVALID;
         }
+    }
+
+    for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpL2TblChunks); i++)
+    {
+        PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[i];
+
+        if (pL2Chunk->idChunk != DBGF_BP_CHUNK_ID_INVALID)
+        {
+            AssertPtr(pL2Chunk->pbmAlloc);
+            RTMemFree((void *)pL2Chunk->pbmAlloc);
+            pL2Chunk->pbmAlloc = NULL;
+            pL2Chunk->idChunk = DBGF_BP_CHUNK_ID_INVALID;
+        }
+    }
+
+    if (pUVM->dbgf.s.hMtxBpL2Wr != NIL_RTSEMFASTMUTEX)
+    {
+        RTSemFastMutexDestroy(pUVM->dbgf.s.hMtxBpL2Wr);
+        pUVM->dbgf.s.hMtxBpL2Wr = NIL_RTSEMFASTMUTEX;
     }
 
…
 /**
+ * @callback_method_impl{FNVMMEMTRENDEZVOUS}
+ */
+static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpL2TblChunkAllocEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+    uint32_t idChunk = (uint32_t)(uintptr_t)pvUser;
+
+    VMCPU_ASSERT_EMT(pVCpu);
+    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
+
+    AssertReturn(idChunk < DBGF_BP_L2_TBL_CHUNK_COUNT, VERR_DBGF_BP_IPE_1);
+
+    PUVM pUVM = pVM->pUVM;
+    PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[idChunk];
+
+    AssertReturn(   pL2Chunk->idChunk == DBGF_BP_L2_IDX_CHUNK_ID_INVALID
+                 || pL2Chunk->idChunk == idChunk,
+                 VERR_DBGF_BP_IPE_2);
+
+    /*
+     * The initialization will be done on EMT(0). It is possible that multiple
+     * allocation attempts are done when multiple racing non-EMT threads try to
+     * allocate a breakpoint and a new chunk needs to be allocated.
+     * Ignore the request and succeed if the chunk is allocated, meaning that a
+     * previous rendezvous successfully allocated the chunk.
+     */
+    int rc = VINF_SUCCESS;
+    if (   pVCpu->idCpu == 0
+        && pL2Chunk->idChunk == DBGF_BP_L2_IDX_CHUNK_ID_INVALID)
+    {
+        /* Allocate the bitmap first so we can skip calling into VMMR0 if it fails. */
+        AssertCompile(!(DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK % 8));
+        volatile void *pbmAlloc = RTMemAllocZ(DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK / 8);
+        if (RT_LIKELY(pbmAlloc))
+        {
+            DBGFBPL2TBLCHUNKALLOCREQ Req;
+            Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+            Req.Hdr.cbReq    = sizeof(Req);
+            Req.idChunk      = idChunk;
+            Req.pChunkBaseR3 = NULL;
+            rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC, 0 /*u64Arg*/, &Req.Hdr);
+            AssertLogRelMsgRC(rc, ("VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC failed: %Rrc\n", rc));
+            if (RT_SUCCESS(rc))
+            {
+                pL2Chunk->pL2BaseR3 = (PDBGFBPL2ENTRY)Req.pChunkBaseR3;
+                pL2Chunk->pbmAlloc  = pbmAlloc;
+                pL2Chunk->cFree     = DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK;
+                pL2Chunk->idChunk   = idChunk;
+                return VINF_SUCCESS;
+            }
+
+            RTMemFree((void *)pbmAlloc);
+        }
+        else
+            rc = VERR_NO_MEMORY;
+    }
+
+    return rc;
+}
+
+
+/**
+ * Tries to allocate the given L2 table chunk, which requires an EMT rendezvous.
+ *
+ * @returns VBox status code.
+ * @param   pUVM    The user mode VM handle.
+ * @param   idChunk The chunk to allocate.
+ *
+ * @thread Any thread.
+ */
+DECLINLINE(int) dbgfR3BpL2TblChunkAlloc(PUVM pUVM, uint32_t idChunk)
+{
+    return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpL2TblChunkAllocEmtWorker,
+                              (void *)(uintptr_t)idChunk);
+}
+
+
+/**
+ * Tries to allocate an entry from the L2 lookup table.
+ *
+ * @returns VBox status code.
+ * @param   pUVM            The user mode VM handle.
+ * @param   pidxL2Tbl       Where to return the L2 table entry index on success.
+ * @param   ppL2TblEntry    Where to return the pointer to the L2 table entry on success.
+ *
+ * @thread Any thread.
+ */
+static int dbgfR3BpL2TblEntryAlloc(PUVM pUVM, uint32_t *pidxL2Tbl, PDBGFBPL2ENTRY *ppL2TblEntry)
+{
+    /*
+     * Search for a chunk having a free entry, allocating new chunks
+     * if the encountered ones are full.
+     *
+     * This can be called from multiple threads at the same time, so special care
+     * has to be taken to not require any locking here.
+     */
+    for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpL2TblChunks); i++)
+    {
+        PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[i];
+
+        uint32_t idChunk = ASMAtomicReadU32(&pL2Chunk->idChunk);
+        if (idChunk == DBGF_BP_L2_IDX_CHUNK_ID_INVALID)
+        {
+            int rc = dbgfR3BpL2TblChunkAlloc(pUVM, i);
+            if (RT_FAILURE(rc))
+            {
+                LogRel(("DBGF/Bp: Allocating new breakpoint L2 lookup table chunk failed with %Rrc\n", rc));
+                break;
+            }
+
+            idChunk = ASMAtomicReadU32(&pL2Chunk->idChunk);
+            Assert(idChunk == i);
+        }
+
+        /** @todo Optimize with some hinting if this turns out to be too slow. */
+        for (;;)
+        {
+            uint32_t cFree = ASMAtomicReadU32(&pL2Chunk->cFree);
+            if (cFree)
+            {
+                /*
+                 * Scan the associated bitmap for a free entry; if none can be found,
+                 * another thread raced us and we go to the next chunk.
+                 */
+                int32_t iClr = ASMBitFirstClear(pL2Chunk->pbmAlloc, DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK);
+                if (iClr != -1)
+                {
+                    /*
+                     * Try to allocate; we could get raced here as well. In that case
+                     * we try again.
+                     */
+                    if (!ASMAtomicBitTestAndSet(pL2Chunk->pbmAlloc, iClr))
+                    {
+                        /* Success: mark the entry as allocated and return it. */
+                        ASMAtomicDecU32(&pL2Chunk->cFree);
+
+                        PDBGFBPL2ENTRY pL2Entry = &pL2Chunk->pL2BaseR3[iClr];
+
+                        *pidxL2Tbl    = DBGF_BP_L2_IDX_CREATE(idChunk, iClr);
+                        *ppL2TblEntry = pL2Entry;
+                        return VINF_SUCCESS;
+                    }
+                    /* else: Retry with another spot. */
+                }
+                else /* No free entry in the bitmap, go to the next chunk. */
+                    break;
+            }
+            else /* !cFree, go to the next chunk. */
+                break;
+        }
+    }
+
+    return VERR_DBGF_NO_MORE_BP_SLOTS;
+}
+
+
+/**
+ * Frees the given L2 table entry.
+ *
+ * @returns nothing.
+ * @param   pUVM        The user mode VM handle.
+ * @param   idxL2Tbl    The L2 table index to free.
+ * @param   pL2TblEntry The L2 table entry pointer to free.
+ */
+static void dbgfR3BpL2TblEntryFree(PUVM pUVM, uint32_t idxL2Tbl, PDBGFBPL2ENTRY pL2TblEntry)
+{
+    uint32_t idChunk  = DBGF_BP_L2_IDX_GET_CHUNK_ID(idxL2Tbl);
+    uint32_t idxEntry = DBGF_BP_L2_IDX_GET_ENTRY(idxL2Tbl);
+
+    AssertReturnVoid(idChunk < DBGF_BP_L2_TBL_CHUNK_COUNT);
+    AssertReturnVoid(idxEntry < DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK);
+
+    PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[idChunk];
+    AssertPtrReturnVoid(pL2Chunk->pbmAlloc);
+    AssertReturnVoid(ASMBitTest(pL2Chunk->pbmAlloc, idxEntry));
+
+    memset(pL2TblEntry, 0, sizeof(*pL2TblEntry));
+
+    ASMAtomicBitClear(pL2Chunk->pbmAlloc, idxEntry);
+    ASMAtomicIncU32(&pL2Chunk->cFree);
+}
+
+
+/**
  * Sets the enabled flag of the given breakpoint to the given value.
  *
…
         case DBGFBPTYPE_INT3:
         {
-            dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
-
             /*
              * Check that the current byte is the int3 instruction, and restore the original one.
…
             {
                 ASMAtomicDecU32(&pVM->dbgf.s.cEnabledInt3Breakpoints);
+                dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
                 Log(("DBGF: Removed breakpoint at %RGv (Phys %RGp)\n", pBp->Pub.u.Int3.GCPtr, pBp->Pub.u.Int3.PhysAddr));
             }
         }
-
-        if (RT_FAILURE(rc))
-            dbgfR3BpSetEnabled(pBp, true /*fEnabled*/);
-
         break;
     }
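The entry allocator above deliberately avoids taking hMtxBpL2Wr on this path: it pairs a plain bitmap scan (ASMBitFirstClear) with an atomic claim (ASMAtomicBitTestAndSet), retrying when another thread wins the race, and keeps cFree only as an advisory counter. A minimal standalone model of that pattern using C11 atomics in place of the IPRT ASMAtomic and ASMBit helpers; all names here are illustrative and not from the changeset:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ENTRIES 64                              /* one 64-bit word's worth of entries */

    static _Atomic uint64_t g_bmAlloc;              /* allocation bitmap, 1 = taken */
    static atomic_uint      g_cFree = ENTRIES;      /* advisory free counter, like cFree */

    /* Returns the claimed entry index, or -1 when the chunk is exhausted. */
    static int entryAlloc(void)
    {
        for (;;)
        {
            if (atomic_load(&g_cFree) == 0)
                return -1;                          /* chunk looks full, caller moves on */

            uint64_t bm = atomic_load(&g_bmAlloc);
            if (bm == UINT64_MAX)
                return -1;                          /* counter was stale, bitmap really is full */

            int iClr = __builtin_ctzll(~bm);        /* first clear bit (GCC/Clang builtin), like ASMBitFirstClear */
            uint64_t fBit = UINT64_C(1) << iClr;

            /* Atomic test-and-set, like ASMAtomicBitTestAndSet; retry if another thread won. */
            if (!(atomic_fetch_or(&g_bmAlloc, fBit) & fBit))
            {
                atomic_fetch_sub(&g_cFree, 1);
                return iClr;
            }
        }
    }

    /* Mirrors dbgfR3BpL2TblEntryFree(): clear the bit, then bump the counter. */
    static void entryFree(int iEntry)
    {
        atomic_fetch_and(&g_bmAlloc, ~(UINT64_C(1) << iEntry));
        atomic_fetch_add(&g_cFree, 1);
    }

    int main(void)
    {
        int iEntry = entryAlloc();
        printf("claimed entry %d\n", iEntry);
        entryFree(iEntry);
        return 0;
    }

Note the ordering on the free side: the bit is cleared before cFree is incremented, so a concurrent allocator may see a stale counter but never a free count without a claimable bit.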
trunk/src/VBox/VMM/VMMRZ/DBGFRZ.cpp
r86701 → r86704

     /** @todo Owner handling. */
+    RT_NOREF(pVM, pRegFrame, pBpR0);
 
     LogFlow(("dbgfRZBpHit: hit breakpoint %u at %04x:%RGv cHits=0x%RX64\n",
trunk/src/VBox/VMM/include/DBGFInternal.h
r86701 → r86704

 #ifdef VBOX_WITH_LOTS_OF_DBGF_BPS
-/** @name Breakpoint handling defines.
+/** @name Global breakpoint table handling defines.
  * @{ */
 /** Maximum number of breakpoints supported (power of two). */
…
 /** Number of chunks required to support all breakpoints. */
 #define DBGF_BP_CHUNK_COUNT                 (DBGF_BP_COUNT_MAX / DBGF_BP_COUNT_PER_CHUNK)
 /** @} */
+
+/** @name L2 lookup table limit defines.
+ * @{ */
+/** Maximum number of entries in the L2 lookup table. */
+#define DBGF_BP_L2_TBL_ENTRY_COUNT_MAX      _512K
+/** Number of L2 entries handled in one chunk. */
+#define DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK    _64K
+/** Number of chunks required to support all L2 lookup table entries. */
+#define DBGF_BP_L2_TBL_CHUNK_COUNT          (DBGF_BP_L2_TBL_ENTRY_COUNT_MAX / DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK)
+/** @} */
 #endif
…
 /** Pointer to a breakpoint table chunk - Ring-0 Ptr. */
 typedef R0PTRTYPE(DBGFBPCHUNKR0 *) PDBGFBPCHUNKR0;
+
+
+/**
+ * L2 lookup table entry.
+ *
+ * @remark The order of the members matters to be able to atomically update
+ *         the AVL left/right pointers and depth with a single 64-bit atomic write.
+ * @verbatim
+ *         7        6        5        4        3        2        1        0
+ *     +--------+--------+--------+--------+--------+--------+--------+--------+
+ *     |    hBp[15:0]    |                  GCPtrKey[63:16]                    |
+ *     +--------+--------+--------+--------+--------+--------+--------+--------+
+ *     | hBp[27:16] | iDepth |     idxRight[21:0]     |      idxLeft[21:0]     |
+ *     +--------+--------+--------+--------+--------+--------+--------+--------+
+ *     \_8 bits_/
+ * @endverbatim
+ */
+typedef struct DBGFBPL2ENTRY
+{
+    /** The upper 6 bytes of the breakpoint address and the low 16 bits of the breakpoint handle. */
+    volatile uint64_t           u64GCPtrKeyAndBpHnd1;
+    /** Left/right lower index, tree depth and the remaining 12 bits of the breakpoint handle. */
+    volatile uint64_t           u64LeftRightIdxDepthBpHnd2;
+} DBGFBPL2ENTRY;
+AssertCompileSize(DBGFBPL2ENTRY, 16);
+/** Pointer to an L2 lookup table entry. */
+typedef DBGFBPL2ENTRY *PDBGFBPL2ENTRY;
+/** Pointer to a const L2 lookup table entry. */
+typedef const DBGFBPL2ENTRY *PCDBGFBPL2ENTRY;
+
+/** An invalid L2 table chunk ID. */
+#define DBGF_BP_L2_IDX_CHUNK_ID_INVALID     UINT32_MAX
+/** Generates an L2 table index from the given chunk ID and entry index inside the chunk. */
+#define DBGF_BP_L2_IDX_CREATE(a_idChunk, a_idEntry)     RT_MAKE_U32(a_idEntry, a_idChunk)
+/** Returns the chunk ID from the given L2 table index. */
+#define DBGF_BP_L2_IDX_GET_CHUNK_ID(a_idxL2)            ((uint32_t)RT_HI_U16(a_idxL2))
+/** Returns the entry index inside a chunk from the given L2 table index. */
+#define DBGF_BP_L2_IDX_GET_ENTRY(a_idxL2)               ((uint32_t)RT_LO_U16(a_idxL2))
+
+/** Number of bits for the left/right index pointers. */
+#define DBGF_BP_L2_ENTRY_LEFT_RIGHT_IDX_BITS            22
+/** Index mask. */
+#define DBGF_BP_L2_ENTRY_LEFT_RIGHT_IDX_MASK            (RT_BIT_32(DBGF_BP_L2_ENTRY_LEFT_RIGHT_IDX_BITS) - 1)
+/** Returns the upper 6 bytes of the GC pointer from the given L2 entry. */
+#define DBGF_BP_L2_ENTRY_GET_GCPTR(a_u64GCPtrKeyAndBpHnd1) ((a_u64GCPtrKeyAndBpHnd1) & UINT64_C(0x0000ffffffffffff))
+/** Returns the breakpoint handle from both L2 entry members. */
+#define DBGF_BP_L2_ENTRY_GET_BP_HND(a_u64GCPtrKeyAndBpHnd1, a_u64LeftRightIdxDepthBpHnd2) \
+    ((DBGFBP)(((a_u64GCPtrKeyAndBpHnd1) >> 48) | (((a_u64LeftRightIdxDepthBpHnd2) >> 52) << 16)))
+/** Extracts the depth from the second 64-bit L2 entry value. */
+#define DBGF_BP_L2_ENTRY_GET_DEPTH(a_u64LeftRightIdxDepthBpHnd2) ((uint8_t)(((a_u64LeftRightIdxDepthBpHnd2) >> 44) & UINT8_MAX))
+/** Extracts the right index value from the second 64-bit L2 entry value. */
+#define DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(a_u64LeftRightIdxDepthBpHnd2) \
+    ((uint32_t)(((a_u64LeftRightIdxDepthBpHnd2) >> 22) & DBGF_BP_L2_ENTRY_LEFT_RIGHT_IDX_MASK))
+/** Extracts the left index value from the second 64-bit L2 entry value. */
+#define DBGF_BP_L2_ENTRY_GET_IDX_LEFT(a_u64LeftRightIdxDepthBpHnd2) \
+    ((uint32_t)((a_u64LeftRightIdxDepthBpHnd2) & DBGF_BP_L2_ENTRY_LEFT_RIGHT_IDX_MASK))
+
+
+/**
+ * A breakpoint L2 lookup table chunk, ring-3 state.
+ */
+typedef struct DBGFBPL2TBLCHUNKR3
+{
+    /** Pointer to the R3 base of the chunk. */
+    R3PTRTYPE(PDBGFBPL2ENTRY)   pL2BaseR3;
+    /** Bitmap of free/occupied breakpoint entries. */
+    R3PTRTYPE(volatile void *)  pbmAlloc;
+    /** Number of free entries in the chunk. */
+    volatile uint32_t           cFree;
+    /** The chunk index this tracking structure refers to. */
+    uint32_t                    idChunk;
+} DBGFBPL2TBLCHUNKR3;
+/** Pointer to a breakpoint L2 lookup table chunk - Ring-3 Ptr. */
+typedef DBGFBPL2TBLCHUNKR3 *PDBGFBPL2TBLCHUNKR3;
+/** Pointer to a const breakpoint L2 lookup table chunk - Ring-3 Ptr. */
+typedef const DBGFBPL2TBLCHUNKR3 *PCDBGFBPL2TBLCHUNKR3;
+
+
+/**
+ * Breakpoint L2 lookup table chunk, ring-0 state.
+ */
+typedef struct DBGFBPL2TBLCHUNKR0
+{
+    /** The chunk's memory. */
+    RTR0MEMOBJ                  hMemObj;
+    /** The ring-3 mapping object. */
+    RTR0MEMOBJ                  hMapObj;
+    /** Pointer to the breakpoint entries base. */
+    R0PTRTYPE(PDBGFBPL2ENTRY)   paBpL2TblBaseSharedR0;
+} DBGFBPL2TBLCHUNKR0;
+/** Pointer to a breakpoint L2 lookup table chunk - Ring-0 Ptr. */
+typedef R0PTRTYPE(DBGFBPL2TBLCHUNKR0 *) PDBGFBPL2TBLCHUNKR0;
 #endif
…
     /** Global breakpoint table chunk array. */
     DBGFBPCHUNKR0                   aBpChunks[DBGF_BP_CHUNK_COUNT];
+    /** Breakpoint L2 lookup table chunk array. */
+    DBGFBPL2TBLCHUNKR0              aBpL2TblChunks[DBGF_BP_L2_TBL_CHUNK_COUNT];
     /** The L1 lookup tables memory object. */
     RTR0MEMOBJ                      hMemObjBpLocL1;
…
     /** Global breakpoint table chunk array. */
     DBGFBPCHUNKR3                   aBpChunks[DBGF_BP_CHUNK_COUNT];
+    /** Breakpoint L2 lookup table chunk array. */
+    DBGFBPL2TBLCHUNKR3              aBpL2TblChunks[DBGF_BP_L2_TBL_CHUNK_COUNT];
     /** Base pointer to the L1 locator table. */
     R3PTRTYPE(volatile uint32_t *)  paBpLocL1R3;
+    /** Fast mutex protecting the L2 table from concurrent write accesses (EMTs
+     * can still do read accesses without holding it while traversing the trees). */
+    RTSEMFASTMUTEX                  hMtxBpL2Wr;
     /** @} */
 #endif
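The DBGF_BP_L2_ENTRY_GET_* macros above only decode an entry; this changeset adds no writer for the layout yet. As a cross-check of the @verbatim diagram, here is a standalone model that packs the fields with the inverse shifts and decodes them again (plain C; the L2EntryModel type and l2EntryPack helper are hypothetical names, not VirtualBox code):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define IDX_BITS 22                                 /* DBGF_BP_L2_ENTRY_LEFT_RIGHT_IDX_BITS */
    #define IDX_MASK ((UINT32_C(1) << IDX_BITS) - 1)

    typedef struct L2EntryModel                         /* models DBGFBPL2ENTRY */
    {
        uint64_t u64GCPtrKeyAndBpHnd1;                  /* hBp[15:0] << 48 | GCPtrKey[63:16] */
        uint64_t u64LeftRightIdxDepthBpHnd2;            /* hBp[27:16] << 52 | iDepth << 44 | idxRight << 22 | idxLeft */
    } L2EntryModel;

    /* Hypothetical packer: the inverse of the DBGF_BP_L2_ENTRY_GET_* macros. */
    static L2EntryModel l2EntryPack(uint64_t GCPtrKey, uint32_t hBp,
                                    uint32_t idxLeft, uint32_t idxRight, uint8_t iDepth)
    {
        L2EntryModel Entry;
        Entry.u64GCPtrKeyAndBpHnd1       = (GCPtrKey >> 16)                  /* upper 6 bytes of the address */
                                         | ((uint64_t)(hBp & 0xffff) << 48); /* low 16 bits of the handle */
        Entry.u64LeftRightIdxDepthBpHnd2 = (uint64_t)(idxLeft  & IDX_MASK)
                                         | ((uint64_t)(idxRight & IDX_MASK) << 22)
                                         | ((uint64_t)iDepth << 44)
                                         | ((uint64_t)(hBp >> 16) << 52);    /* high 12 bits of the handle */
        return Entry;
    }

    int main(void)
    {
        L2EntryModel Entry = l2EntryPack(UINT64_C(0xffffe000deadbeef), UINT32_C(0x123456), 17, 42, 3);

        /* Decode with the same shifts the DBGF_BP_L2_ENTRY_GET_* macros use. */
        uint32_t hBp = (uint32_t)( (Entry.u64GCPtrKeyAndBpHnd1 >> 48)
                                 | ((Entry.u64LeftRightIdxDepthBpHnd2 >> 52) << 16));
        assert(hBp == UINT32_C(0x123456));
        assert((uint32_t)( Entry.u64LeftRightIdxDepthBpHnd2        & IDX_MASK) == 17);  /* idxLeft  */
        assert((uint32_t)((Entry.u64LeftRightIdxDepthBpHnd2 >> 22) & IDX_MASK) == 42);  /* idxRight */
        assert((uint8_t) ((Entry.u64LeftRightIdxDepthBpHnd2 >> 44) & 0xff)     == 3);   /* iDepth   */

        printf("GCPtrKey[63:16] = %#llx\n",
               (unsigned long long)(Entry.u64GCPtrKeyAndBpHnd1 & UINT64_C(0x0000ffffffffffff)));
        return 0;
    }

Keeping idxLeft, idxRight, and iDepth together in the second word is what lets the tree update the remark mentions happen as one 64-bit store.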