Changeset 87594 in vbox for trunk/src/VBox/VMM
- Timestamp: Feb 3, 2021 8:23:46 PM
- svn:sync-xref-src-repo-rev: 142626
- Location: trunk/src/VBox/VMM
- Files: 1 deleted, 8 edited
Legend:
- Unchanged lines are shown as plain context, removed lines are prefixed with "-", added lines with "+", and "..." marks elided unchanged lines.
trunk/src/VBox/VMM/Makefile.kmk
r87477 → r87594

 ifdef VBOX_WITH_DBGF_TRACING
  VBoxVMM_DEFS += VBOX_WITH_DBGF_TRACING
 endif
-ifdef VBOX_WITH_LOTS_OF_DBGF_BPS
- VBoxVMM_DEFS += VBOX_WITH_LOTS_OF_DBGF_BPS
-endif
 ifdef VBOX_WITH_DBGF_FLOW_TRACING
 ...
        VMMR3/DBGFAddr.cpp \
        VMMR3/DBGFAddrSpace.cpp \
-       $(if-expr defined(VBOX_WITH_LOTS_OF_DBGF_BPS), VMMR3/DBGFR3Bp.cpp, VMMR3/DBGFBp.cpp) \
+       VMMR3/DBGFR3Bp.cpp \
        VMMR3/DBGFR3BugCheck.cpp \
        VMMR3/DBGFCoreWrite.cpp \
 ...
        VMMAll/CPUMAllMsrs.cpp \
        VMMAll/DBGFAll.cpp \
-       $(if-expr defined(VBOX_WITH_LOTS_OF_DBGF_BPS), VMMAll/DBGFAllBp.cpp,) \
+       VMMAll/DBGFAllBp.cpp \
        $(if-expr defined(VBOX_WITH_DBGF_TRACING), VMMAll/DBGFAllTracer.cpp,) \
        VMMAll/HMAll.cpp \
 ...
  VMMR0_DEFS += VBOX_WITH_DBGF_TRACING
 endif
-ifdef VBOX_WITH_LOTS_OF_DBGF_BPS
- VMMR0_DEFS += VBOX_WITH_LOTS_OF_DBGF_BPS
-endif
 ifdef VBOX_WITH_VMM_R0_SWITCH_STACK
  VMMR0_DEFS += VMM_R0_SWITCH_STACK
 ...
        VMMR0/CPUMR0A.asm \
        VMMR0/DBGFR0.cpp \
-       $(if-expr defined(VBOX_WITH_LOTS_OF_DBGF_BPS), VMMR0/DBGFR0Bp.cpp,) \
+       VMMR0/DBGFR0Bp.cpp \
        $(if-expr defined(VBOX_WITH_DBGF_TRACING), VMMR0/DBGFR0Tracer.cpp,) \
        VMMR0/GIMR0.cpp \
trunk/src/VBox/VMM/VMMAll/DBGFAll.cpp
r86667 → r87594

 {
     RTGCUINTREG uDr7 = X86_DR7_GD | X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
-#ifndef VBOX_WITH_LOTS_OF_DBGF_BPS
-    PDBGFBP    pBp = &pVM->dbgf.s.aHwBreakpoints[0];
-    unsigned   cLeft = RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints);
-    while (cLeft-- > 0)
-    {
-        if (    pBp->enmType == DBGFBPTYPE_REG
-            &&  pBp->fEnabled)
-        {
-            static const uint8_t s_au8Sizes[8] =
-            {
-                X86_DR7_LEN_BYTE, X86_DR7_LEN_BYTE, X86_DR7_LEN_WORD, X86_DR7_LEN_BYTE,
-                X86_DR7_LEN_DWORD,X86_DR7_LEN_BYTE, X86_DR7_LEN_BYTE, X86_DR7_LEN_QWORD
-            };
-            uDr7 |= X86_DR7_G(pBp->u.Reg.iReg)
-                 |  X86_DR7_RW(pBp->u.Reg.iReg, pBp->u.Reg.fType)
-                 |  X86_DR7_LEN(pBp->u.Reg.iReg, s_au8Sizes[pBp->u.Reg.cb]);
-        }
-        pBp++;
-    }
-#else
     for (uint32_t i = 0; i < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); i++)
     {
-        PCDBGFBPHW pBp = &pVM->dbgf.s.aHwBreakpoints[i];
-
-        if (   pBp->hBp != NIL_DBGFBP
-            && pBp->fEnabled)
+        if (   pVM->dbgf.s.aHwBreakpoints[i].fEnabled
+            && pVM->dbgf.s.aHwBreakpoints[i].hBp != NIL_DBGFBP)
         {
             static const uint8_t s_au8Sizes[8] =
 ...
             };
             uDr7 |= X86_DR7_G(i)
-                 |  X86_DR7_RW(i, pBp->fType)
-                 |  X86_DR7_LEN(i, s_au8Sizes[pBp->cb]);
-        }
-        pBp++;
+                 |  X86_DR7_RW(i, pVM->dbgf.s.aHwBreakpoints[i].fType)
+                 |  X86_DR7_LEN(i, s_au8Sizes[pVM->dbgf.s.aHwBreakpoints[i].cb]);
+        }
     }
-#endif
     return uDr7;
 }
 ...
 VMM_INT_DECL(RTGCUINTREG) DBGFBpGetDR0(PVM pVM)
 {
-#ifndef VBOX_WITH_LOTS_OF_DBGF_BPS
-    PCDBGFBP    pBp = &pVM->dbgf.s.aHwBreakpoints[0];
-    Assert(pBp->u.Reg.iReg == 0);
-    return pBp->u.Reg.GCPtr;
-#else
-    PCDBGFBPHW pBp = &pVM->dbgf.s.aHwBreakpoints[0];
-    return pBp->GCPtr;
-#endif
+    return pVM->dbgf.s.aHwBreakpoints[0].GCPtr;
 }
 ...
 VMM_INT_DECL(RTGCUINTREG) DBGFBpGetDR1(PVM pVM)
 {
-#ifndef VBOX_WITH_LOTS_OF_DBGF_BPS
-    PCDBGFBP    pBp = &pVM->dbgf.s.aHwBreakpoints[1];
-    Assert(pBp->u.Reg.iReg == 1);
-    return pBp->u.Reg.GCPtr;
-#else
-    PCDBGFBPHW pBp = &pVM->dbgf.s.aHwBreakpoints[1];
-    return pBp->GCPtr;
-#endif
+    return pVM->dbgf.s.aHwBreakpoints[1].GCPtr;
 }
 ...
 VMM_INT_DECL(RTGCUINTREG) DBGFBpGetDR2(PVM pVM)
 {
-#ifndef VBOX_WITH_LOTS_OF_DBGF_BPS
-    PCDBGFBP    pBp = &pVM->dbgf.s.aHwBreakpoints[2];
-    Assert(pBp->u.Reg.iReg == 2);
-    return pBp->u.Reg.GCPtr;
-#else
-    PCDBGFBPHW pBp = &pVM->dbgf.s.aHwBreakpoints[2];
-    return pBp->GCPtr;
-#endif
+    return pVM->dbgf.s.aHwBreakpoints[2].GCPtr;
 }
 ...
 VMM_INT_DECL(RTGCUINTREG) DBGFBpGetDR3(PVM pVM)
 {
-#ifndef VBOX_WITH_LOTS_OF_DBGF_BPS
-    PCDBGFBP    pBp = &pVM->dbgf.s.aHwBreakpoints[3];
-    Assert(pBp->u.Reg.iReg == 3);
-    return pBp->u.Reg.GCPtr;
-#else
-    PCDBGFBPHW pBp = &pVM->dbgf.s.aHwBreakpoints[3];
-    return pBp->GCPtr;
-#endif
+    return pVM->dbgf.s.aHwBreakpoints[3].GCPtr;
 }
 ...
 VMM_INT_DECL(bool) DBGFBpIsInt3Armed(PVM pVM)
 {
-#ifndef VBOX_WITH_LOTS_OF_DBGF_BPS
+    /** @todo There was a todo here and returning false when I (bird) removed
+     *        VBOX_WITH_LOTS_OF_DBGF_BPS, so this might not be correct. */
     return pVM->dbgf.s.cEnabledInt3Breakpoints > 0;
-#else
-    RT_NOREF(pVM);
-    return false; /** @todo */
-#endif
 }
 ...
  * been updated appropriately.
  *
- * @param   pVM        The cross context VM structure.
+ * @param   pVM         The cross context VM structure.
  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
  * @param   pCtx        The CPU context for the calling EMT.
 ...
     for (unsigned iBp = 0; iBp < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); iBp++)
     {
-#ifndef VBOX_WITH_LOTS_OF_DBGF_BPS
-        if (   pVM->dbgf.s.aHwBreakpoints[iBp].u.Reg.fType == X86_DR7_RW_IO
+        if (   pVM->dbgf.s.aHwBreakpoints[iBp].fType == X86_DR7_RW_IO
             && pVM->dbgf.s.aHwBreakpoints[iBp].fEnabled
-            && pVM->dbgf.s.aHwBreakpoints[iBp].enmType == DBGFBPTYPE_REG)
+            && pVM->dbgf.s.aHwBreakpoints[iBp].hBp == NIL_DBGFBP) /** @todo r=bird: this cannot be right... */
         {
-            uint8_t  cbReg      = pVM->dbgf.s.aHwBreakpoints[iBp].u.Reg.cb; Assert(RT_IS_POWER_OF_TWO(cbReg));
-            uint64_t uDrXFirst  = pVM->dbgf.s.aHwBreakpoints[iBp].u.Reg.GCPtr & ~(uint64_t)(cbReg - 1);
+            uint8_t  cbReg      = pVM->dbgf.s.aHwBreakpoints[iBp].cb; Assert(RT_IS_POWER_OF_TWO(cbReg));
+            uint64_t uDrXFirst  = pVM->dbgf.s.aHwBreakpoints[iBp].GCPtr & ~(uint64_t)(cbReg - 1);
             uint64_t uDrXLast   = uDrXFirst + cbReg - 1;
             if (uDrXFirst <= uIoPortLast && uDrXLast >= uIoPortFirst)
             {
                 /* (See also DBGFRZTrap01Handler.) */
-                pVCpu->dbgf.s.iActiveBp = pVM->dbgf.s.aHwBreakpoints[iBp].iBp;
-                pVCpu->dbgf.s.fSingleSteppingRaw = false;
-
-                LogFlow(("DBGFBpCheckIo: hit hw breakpoint %d at %04x:%RGv (iop %#x)\n",
-                         pVM->dbgf.s.aHwBreakpoints[iBp].iBp, pCtx->cs.Sel, pCtx->rip, uIoPort));
-                return VINF_EM_DBG_BREAKPOINT;
-            }
-        }
-#else
-        PCDBGFBPHW pBp = &pVM->dbgf.s.aHwBreakpoints[iBp];
-
-        if (   pBp->fType == X86_DR7_RW_IO
-            && pBp->hBp == NIL_DBGFBP
-            && pBp->fEnabled)
-        {
-            uint8_t  cbReg      = pBp->cb; Assert(RT_IS_POWER_OF_TWO(cbReg));
-            uint64_t uDrXFirst  = pBp->GCPtr & ~(uint64_t)(cbReg - 1);
-            uint64_t uDrXLast   = uDrXFirst + cbReg - 1;
-            if (uDrXFirst <= uIoPortLast && uDrXLast >= uIoPortFirst)
-            {
-                /* (See also DBGFRZTrap01Handler.) */
-                pVCpu->dbgf.s.hBpActive = pBp->hBp;
+                pVCpu->dbgf.s.hBpActive = pVM->dbgf.s.aHwBreakpoints[iBp].hBp;
                 pVCpu->dbgf.s.fSingleSteppingRaw = false;
 ...
             }
         }
-#endif
     }
 }
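The DBGFBpGetDR7 hunk above folds each armed hardware slot into a DR7 value with the X86_DR7_G/RW/LEN macros. For context, here is a minimal, self-contained sketch of that bit layout in plain C. The HWBP struct and ComposeDr7 function are invented stand-ins for illustration (they are not the VBox DBGF types or macros), and the sketch only sets the always-one bit 10 rather than the full GD/GE/LE set the real function uses.

    #include <stdint.h>

    /* Invented stand-in for one hardware breakpoint slot (not the DBGF type). */
    typedef struct HWBP
    {
        int      fEnabled;  /* slot is armed */
        uint8_t  fType;     /* DR7 R/W encoding: 0=exec, 1=write, 2=I/O, 3=read/write */
        uint8_t  cbLen;     /* DR7 LEN encoding: 0=1 byte, 1=2 bytes, 2=8 bytes, 3=4 bytes */
        uint64_t GCPtr;     /* linear address; would be loaded into DR0..DR3 separately */
    } HWBP;

    /* Fold the four slots into a DR7 value: G(i) lives at bit 2*i+1, the R/W field
       at bits 16+4*i..17+4*i and the LEN field at bits 18+4*i..19+4*i. */
    static uint64_t ComposeDr7(const HWBP aSlots[4])
    {
        uint64_t uDr7 = UINT64_C(1) << 10;   /* bit 10 reads as one on x86; the real code also sets GD/GE/LE */
        for (unsigned i = 0; i < 4; i++)
            if (aSlots[i].fEnabled)
                uDr7 |= UINT64_C(1) << (i * 2 + 1)                        /* Gi   */
                     |  (uint64_t)(aSlots[i].fType & 3) << (16 + i * 4)   /* R/Wi */
                     |  (uint64_t)(aSlots[i].cbLen & 3) << (18 + i * 4);  /* LENi */
        return uDr7;
    }

With an HWBP aSlots[4] array filled in, ComposeDr7(aSlots) yields the value that would be loaded into DR7 alongside the per-slot addresses in DR0..DR3, which is essentially what the loop in the hunk computes from pVM->dbgf.s.aHwBreakpoints.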
trunk/src/VBox/VMM/VMMAll/DBGFAllBp.cpp
r87132 → r87594

 #include <iprt/assert.h>

-#ifdef VBOX_WITH_LOTS_OF_DBGF_BPS
-# include "DBGFInline.h"
-#endif
+#include "DBGFInline.h"


 ...


-/*********************************************************************************************************************************
-*   Internal Functions                                                                                                           *
-*********************************************************************************************************************************/
-
-#ifdef VBOX_WITH_LOTS_OF_DBGF_BPS
 /**
  * Returns the internal breakpoint state for the given handle.
 ...
  *          on success, optional.
  */
-# ifdef IN_RING0
+#ifdef IN_RING0
 DECLINLINE(PDBGFBPINT) dbgfBpGetByHnd(PVMCC pVM, DBGFBP hBp, PDBGFBPINTR0 *ppBpR0)
-# else
+#else
 DECLINLINE(PDBGFBPINT) dbgfBpGetByHnd(PVMCC pVM, DBGFBP hBp)
-# endif
+#endif
 {
     uint32_t idChunk  = DBGF_BP_HND_GET_CHUNK_ID(hBp);
 ...
     AssertReturn(idxEntry < DBGF_BP_COUNT_PER_CHUNK, NULL);

-# ifdef IN_RING0
+#ifdef IN_RING0
     PDBGFBPCHUNKR0 pBpChunk = &pVM->dbgfr0.s.aBpChunks[idChunk];
     AssertPtrReturn(pBpChunk->CTX_SUFF(paBpBaseShared), NULL);
 ...
         *ppBpR0 = &pBpChunk->paBpBaseR0Only[idxEntry];
     return &pBpChunk->CTX_SUFF(paBpBaseShared)[idxEntry];
-# elif defined(IN_RING3)
+
+#elif defined(IN_RING3)
     PUVM pUVM = pVM->pUVM;
     PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[idChunk];
 ...
     return &pBpChunk->CTX_SUFF(pBpBase)[idxEntry];
-# else
-#  error "Unsupported host context"
-# endif
+
+#else
+# error "Unsupported context"
+#endif
 }

 ...
     AssertReturn(idxEntry < DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK, NULL);

-# ifdef IN_RING0
+#ifdef IN_RING0
     PDBGFBPL2TBLCHUNKR0 pL2Chunk = &pVM->dbgfr0.s.aBpL2TblChunks[idChunk];
     AssertPtrReturn(pL2Chunk->CTX_SUFF(paBpL2TblBaseShared), NULL);

     return &pL2Chunk->CTX_SUFF(paBpL2TblBaseShared)[idxEntry];
-# elif defined(IN_RING3)
+#elif defined(IN_RING3)
     PUVM pUVM = pVM->pUVM;
     PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[idChunk];
 ...
     return &pL2Chunk->CTX_SUFF(pL2Base)[idxEntry];
-# endif
-}
-
-
-# ifdef IN_RING0
+#endif
+}
+
+
+#ifdef IN_RING0
 /**
  * Returns the internal breakpoint owner state for the given handle.
 ...
     return pBpOwnerR0;
 }
-# endif
+#endif


 ...
  * @param   pBpR0       The ring-0 only breakpoint state.
  */
-# ifdef IN_RING0
-DECLINLINE(int) dbgfBpHit(PVMCC pVM, PVMCPUCC pVCpu, PCPUMCTXCORE pRegFrame,
-                          DBGFBP hBp, PDBGFBPINT pBp, PDBGFBPINTR0 pBpR0)
-# else
-DECLINLINE(int) dbgfBpHit(PVMCC pVM, PVMCPUCC pVCpu, PCPUMCTXCORE pRegFrame,
-                          DBGFBP hBp, PDBGFBPINT pBp)
-# endif
+#ifdef IN_RING0
+DECLINLINE(int) dbgfBpHit(PVMCC pVM, PVMCPUCC pVCpu, PCPUMCTXCORE pRegFrame, DBGFBP hBp, PDBGFBPINT pBp, PDBGFBPINTR0 pBpR0)
+#else
+DECLINLINE(int) dbgfBpHit(PVMCC pVM, PVMCPUCC pVCpu, PCPUMCTXCORE pRegFrame, DBGFBP hBp, PDBGFBPINT pBp)
+#endif
 {
     uint64_t cHits = ASMAtomicIncU64(&pBp->Pub.cHits); RT_NOREF(cHits);
 ...

     int rc = VINF_EM_DBG_BREAKPOINT;
-# ifdef IN_RING0
+#ifdef IN_RING0
     PCDBGFBPOWNERINTR0 pBpOwnerR0 = dbgfR0BpOwnerGetByHnd(pVM,
                                                           pBpR0->fInUse
 ...
         pVCpu->dbgf.s.hBpActive = hBp;
     }
-# else
+#else
     RT_NOREF(pVM);
     pVCpu->dbgf.s.fBpInvokeOwnerCallback = true;
     pVCpu->dbgf.s.hBpActive = hBp;
-# endif
+#endif

     return rc;
 ...

     /* Query the internal breakpoint state from the handle. */
-# ifdef IN_RING0
+#ifdef IN_RING3
+    PDBGFBPINT pBp = dbgfBpGetByHnd(pVM, hBp);
+#else
     PDBGFBPINTR0 pBpR0 = NULL;
-    PDBGFBPINT   pBp   = dbgfBpGetByHnd(pVM, hBp, &pBpR0);
-# else
-    PDBGFBPINT   pBp   = dbgfBpGetByHnd(pVM, hBp);
-# endif
+    PDBGFBPINT pBp = dbgfBpGetByHnd(pVM, hBp, &pBpR0);
+#endif
     if (   pBp
         && DBGF_BP_PUB_GET_TYPE(pBp->Pub.fFlagsAndType) == DBGFBPTYPE_INT3)
-        return dbgfBpHit(pVM, pVCpu, pRegFrame, hBp, pBp
-# ifdef IN_RING0
-                         , pBpR0
-# endif
-                         );
+#ifdef IN_RING3
+        return dbgfBpHit(pVM, pVCpu, pRegFrame, hBp, pBp);
+#else
+        return dbgfBpHit(pVM, pVCpu, pRegFrame, hBp, pBp, pBpR0);
+#endif

     /* The entry got corrupted, just abort. */
 ...
     return VERR_DBGF_BP_L2_LOOKUP_FAILED;
 }
-#endif /* !VBOX_WITH_LOTS_OF_DBGF_BPS */


 ...
 {
     for (unsigned iBp = 0; iBp < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); iBp++)
-    {
-#ifndef VBOX_WITH_LOTS_OF_DBGF_BPS
-        if (   ((uint32_t)uDr6 & RT_BIT_32(iBp))
-            && pVM->dbgf.s.aHwBreakpoints[iBp].enmType == DBGFBPTYPE_REG)
-        {
-            pVCpu->dbgf.s.iActiveBp = pVM->dbgf.s.aHwBreakpoints[iBp].iBp;
-            pVCpu->dbgf.s.fSingleSteppingRaw = false;
-            LogFlow(("DBGFRZTrap03Handler: hit hw breakpoint %d at %04x:%RGv\n",
-                     pVM->dbgf.s.aHwBreakpoints[iBp].iBp, pRegFrame->cs.Sel, pRegFrame->rip));
-
-            return VINF_EM_DBG_BREAKPOINT;
-        }
-#else
         if (   ((uint32_t)uDr6 & RT_BIT_32(iBp))
             && pVM->dbgf.s.aHwBreakpoints[iBp].hBp != NIL_DBGFBP)
 ...
             return VINF_EM_DBG_BREAKPOINT;
         }
-#endif
-    }
 }
 ...
     /*
      * Are we single stepping or is it the guest?
      */
-    if (    (uDr6 & X86_DR6_BS)
-        &&  (pVCpu->dbgf.s.fSingleSteppingRaw || fAltStepping))
+    if (   (uDr6 & X86_DR6_BS)
+        && (pVCpu->dbgf.s.fSingleSteppingRaw || fAltStepping))
     {
         pVCpu->dbgf.s.fSingleSteppingRaw = false;
 ...
 VMM_INT_DECL(int) DBGFTrap03Handler(PVMCC pVM, PVMCPUCC pVCpu, PCPUMCTXCORE pRegFrame)
 {
-#ifndef VBOX_WITH_LOTS_OF_DBGF_BPS
-    /*
-     * Get the trap address and look it up in the breakpoint table.
-     * Don't bother if we don't have any breakpoints.
-     */
-    unsigned cToSearch = pVM->dbgf.s.Int3.cToSearch;
-    if (cToSearch > 0)
-    {
-        RTGCPTR pPc;
-        int rc = SELMValidateAndConvertCSAddr(pVCpu, pRegFrame->eflags, pRegFrame->ss.Sel, pRegFrame->cs.Sel, &pRegFrame->cs,
-                                              pRegFrame->rip /* no -1 in R0 */,
-                                              &pPc);
-        AssertRCReturn(rc, rc);
-
-        unsigned iBp = pVM->dbgf.s.Int3.iStartSearch;
-        while (cToSearch-- > 0)
-        {
-            if (   pVM->dbgf.s.aBreakpoints[iBp].u.GCPtr == (RTGCUINTPTR)pPc
-                && pVM->dbgf.s.aBreakpoints[iBp].enmType == DBGFBPTYPE_INT3)
-            {
-                pVM->dbgf.s.aBreakpoints[iBp].cHits++;
-                pVCpu->dbgf.s.iActiveBp = pVM->dbgf.s.aBreakpoints[iBp].iBp;
-
-                LogFlow(("DBGFRZTrap03Handler: hit breakpoint %d at %RGv (%04x:%RGv) cHits=0x%RX64\n",
-                         pVM->dbgf.s.aBreakpoints[iBp].iBp, pPc, pRegFrame->cs.Sel, pRegFrame->rip,
-                         pVM->dbgf.s.aBreakpoints[iBp].cHits));
-                return VINF_EM_DBG_BREAKPOINT;
-            }
-            iBp++;
-        }
-    }
-#else
-# if defined(IN_RING0)
+#if defined(IN_RING0)
     uint32_t volatile *paBpLocL1 = pVM->dbgfr0.s.CTX_SUFF(paBpLocL1);
-# elif defined(IN_RING3)
+#elif defined(IN_RING3)
     PUVM pUVM = pVM->pUVM;
     uint32_t volatile *paBpLocL1 = pUVM->dbgf.s.CTX_SUFF(paBpLocL1);
-# else
-#  error "Unsupported host context"
-# endif
+#else
+# error "Unsupported host context"
+#endif
     if (paBpLocL1)
     {
 ...

         /* Query the internal breakpoint state from the handle. */
-#ifdef IN_RING0
+#ifdef IN_RING3
+        PDBGFBPINT pBp = dbgfBpGetByHnd(pVM, hBp);
+#else
         PDBGFBPINTR0 pBpR0 = NULL;
-#endif
-        PDBGFBPINT pBp = dbgfBpGetByHnd(pVM, hBp
-#ifdef IN_RING0
-                                        , &pBpR0
-#endif
-                                        );
+        PDBGFBPINT pBp = dbgfBpGetByHnd(pVM, hBp, &pBpR0);
+#endif
         if (   pBp
             && DBGF_BP_PUB_GET_TYPE(pBp->Pub.fFlagsAndType) == DBGFBPTYPE_INT3)
         {
             if (pBp->Pub.u.Int3.GCPtr == (RTGCUINTPTR)GCPtrBp)
-                rc = dbgfBpHit(pVM, pVCpu, pRegFrame, hBp, pBp
-#ifdef IN_RING0
-                               , pBpR0
-#endif
-                               );
+#ifdef IN_RING3
+                rc = dbgfBpHit(pVM, pVCpu, pRegFrame, hBp, pBp);
+#else
+                rc = dbgfBpHit(pVM, pVCpu, pRegFrame, hBp, pBp, pBpR0);
+#endif
             /* else: Genuine guest trap. */
         }
 ...
         return rc;
     }
-#endif /* !VBOX_WITH_LOTS_OF_DBGF_BPS */

     return VINF_EM_RAW_GUEST_TRAP;
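The DBGFTrap03Handler hunk above replaces the old linear scan with a lookup that hashes the trap address into an L1 table (DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR), falls back to an L2 structure on collisions, and finally verifies the full address on the record behind the handle. The sketch below shows that two-level idea in a deliberately simplified form; BPTABLE, LookupInt3 and the linked overflow chain are invented for illustration and do not match the real chunked DBGF layout or its binary-searched L2 tables.

    #include <stdint.h>

    #define L1_BITS   16                    /* hash on the low bits of the guest address */
    #define L1_SIZE   (1u << L1_BITS)
    #define L2_FLAG   0x80000000u           /* top bit set: slot refers to an overflow chain */

    typedef struct BPREC
    {
        uint64_t GCPtr;   /* full guest address the breakpoint is armed at */
        uint32_t hNext;   /* next handle hashing to the same slot, 0 = end (stand-in for the L2 table) */
    } BPREC;

    typedef struct BPTABLE
    {
        uint32_t aL1[L1_SIZE];  /* 0 = empty, otherwise a 1-based handle, possibly tagged with L2_FLAG */
        BPREC    aBp[256];      /* one flat chunk of records, indexed by handle - 1 */
    } BPTABLE;

    /* Resolve a trapping address to a breakpoint handle, or 0 if the INT3 belongs to the guest. */
    static uint32_t LookupInt3(const BPTABLE *pTbl, uint64_t GCPtrTrap)
    {
        uint32_t uSlot = pTbl->aL1[GCPtrTrap & (L1_SIZE - 1)];
        if (!uSlot)
            return 0;                                   /* nothing armed near this address */

        /* Walk the overflow chain; the real code binary-searches a sorted L2 table instead. */
        for (uint32_t hBp = uSlot & ~L2_FLAG; hBp != 0; hBp = pTbl->aBp[hBp - 1].hNext)
            if (pTbl->aBp[hBp - 1].GCPtr == GCPtrTrap)
                return hBp;                             /* genuine DBGF breakpoint */
        return 0;                                       /* collision only: guest-owned INT3 */
    }

The point of the design, visible in the hunk, is that an INT3 belonging to the guest costs at most one L1 probe plus a short verification, while a real DBGF breakpoint resolves to a handle that the shared dbgfBpGetByHnd/dbgfBpHit path then processes.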
trunk/src/VBox/VMM/VMMR0/DBGFR0.cpp
r86699 → r87594


-/*********************************************************************************************************************************
-*   Internal Functions                                                                                                           *
-*********************************************************************************************************************************/

 /**
 ...
     pGVM->dbgfr0.s.pTracerR0 = NULL;

-#ifdef VBOX_WITH_LOTS_OF_DBGF_BPS
     dbgfR0BpInit(pGVM);
-#endif
 }

 ...
 #endif

-#ifdef VBOX_WITH_LOTS_OF_DBGF_BPS
     dbgfR0BpDestroy(pGVM);
-#endif
 }
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
r87591 → r87594

             if (!pReqHdr || u64Arg)
                 return VERR_INVALID_PARAMETER;
-#if 0 /** @todo */
+# if 0 /** @todo */
             rc = DBGFR0TracerGenCallReqHandler(pGVM, (PDBGFTRACERGENCALLREQ)pReqHdr, idCpu);
-#else
+# else
             rc = VERR_NOT_IMPLEMENTED;
-#endif
+# endif
             VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
             break;
         }
 #endif

-#ifdef VBOX_WITH_LOTS_OF_DBGF_BPS
         case VMMR0_DO_DBGF_BP_INIT:
         {
 ...
             break;
         }
-#endif

         /*
 ...
         case VMMR0_DO_IOM_GROW_IO_PORTS:
         case VMMR0_DO_IOM_GROW_IO_PORT_STATS:
-
-#ifdef VBOX_WITH_LOTS_OF_DBGF_BPS
         case VMMR0_DO_DBGF_BP_INIT:
         case VMMR0_DO_DBGF_BP_CHUNK_ALLOC:
         case VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC:
-#endif
         {
             PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
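Both VMMR0.cpp hunks are about routing: the VMMR0_DO_DBGF_BP_* operations keep their handler cases unconditionally and stay in the group of requests that resolve the calling EMT (pGVM->aCpus[idCpu]) before dispatching. A rough sketch of that validate-then-dispatch pattern, using invented names (MYREQHDR, MYOP and the handler callbacks are illustrative, not the real VMM types):

    #include <stdint.h>

    typedef struct MYREQHDR { uint32_t cbReq; uint32_t uMagic; } MYREQHDR;

    typedef enum MYOP
    {
        MYOP_DBGF_BP_INIT = 1,
        MYOP_DBGF_BP_CHUNK_ALLOC,
        MYOP_DBGF_BP_L2_TBL_CHUNK_ALLOC
    } MYOP;

    /* Ring-0 entry point sketch: validate the request and the calling EMT, then dispatch. */
    static int myR0EntryEx(MYOP enmOp, uint32_t idCpu, uint32_t cCpus, MYREQHDR *pReqHdr, uint64_t u64Arg,
                           int (*pfnBpInit)(MYREQHDR *), int (*pfnBpChunkAlloc)(MYREQHDR *),
                           int (*pfnBpL2Alloc)(MYREQHDR *))
    {
        if (idCpu >= cCpus)
            return -1;          /* request did not come from a known EMT */
        if (!pReqHdr || u64Arg)
            return -2;          /* these operations carry a request packet and no extra argument */

        switch (enmOp)
        {
            case MYOP_DBGF_BP_INIT:               return pfnBpInit(pReqHdr);
            case MYOP_DBGF_BP_CHUNK_ALLOC:        return pfnBpChunkAlloc(pReqHdr);
            case MYOP_DBGF_BP_L2_TBL_CHUNK_ALLOC: return pfnBpL2Alloc(pReqHdr);
            default:                              return -3;
        }
    }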
trunk/src/VBox/VMM/VMMR3/DBGF.cpp
r86755 → r87594

     if (RT_SUCCESS(rc))
     {
-#ifndef VBOX_WITH_LOTS_OF_DBGF_BPS
-        rc = dbgfR3BpInit(pVM);
-#else
         rc = dbgfR3BpInit(pUVM);
-#endif
         if (RT_SUCCESS(rc))
         {
 ...
                 dbgfR3OSTermPart2(pUVM);
             }
-#ifdef VBOX_WITH_LOTS_OF_DBGF_BPS
             dbgfR3BpTerm(pUVM);
-#endif
         }
         dbgfR3AsTerm(pUVM);
 ...
     dbgfR3PlugInTerm(pUVM);
     dbgfR3OSTermPart2(pUVM);
-#ifdef VBOX_WITH_LOTS_OF_DBGF_BPS
     dbgfR3BpTerm(pUVM);
-#endif
     dbgfR3AsTerm(pUVM);
     dbgfR3RegTerm(pUVM);
 ...
      */
     DBGFEVENT DbgEvent;
-#ifndef VBOX_WITH_LOTS_OF_DBGF_BPS
-    RTUINT iBp = DbgEvent.u.Bp.hBp = pVCpu->dbgf.s.iActiveBp;
-    pVCpu->dbgf.s.iActiveBp = ~0U;
-    if (iBp != ~0U)
-    {
-        DbgEvent.enmCtx = DBGFEVENTCTX_RAW;
-        return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_RAW, &DbgEvent.u, sizeof(DbgEvent.u.Bp));
-    }
-#else
     DbgEvent.u.Bp.hBp = pVCpu->dbgf.s.hBpActive;
     pVCpu->dbgf.s.hBpActive = NIL_DBGFBP;
 ...
         return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_RAW, &DbgEvent.u, sizeof(DbgEvent.u.Bp));
     }
-#endif
-
-#ifndef VBOX_WITH_LOTS_OF_DBGF_BPS
-    AssertFailed(); /** @todo this should be obsolete now... */
-
-    /* REM breakpoints has be been searched for. */
-#if 0 /** @todo get flat PC api! */
-    uint32_t eip = CPUMGetGuestEIP(pVM);
-#else
-    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
-    RTGCPTR  eip = pCtx->rip + pCtx->cs.u64Base;
-#endif
-    for (size_t i = 0; i < RT_ELEMENTS(pVM->dbgf.s.aBreakpoints); i++)
-        if (   pVM->dbgf.s.aBreakpoints[i].enmType == DBGFBPTYPE_REM
-            && pVM->dbgf.s.aBreakpoints[i].u.Rem.GCPtr == eip)
-        {
-            DbgEvent.u.Bp.hBp = pVM->dbgf.s.aBreakpoints[i].iBp;
-            break;
-        }
-    AssertMsg(DbgEvent.u.Bp.hBp != ~0U, ("eip=%08x\n", eip));
-    return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_REM, &DbgEvent.u, sizeof(DbgEvent.u.Bp));
-#else
+
     return VERR_DBGF_IPE_1;
-#endif
 }
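The dbgfR3EventBreakpoint hunk above now simply consumes the per-VCPU hBpActive handle, resets it to NIL_DBGFBP, and forwards it to the debugger in the event payload, falling back to VERR_DBGF_IPE_1 if nothing was recorded. A minimal sketch of that consume-and-clear pattern with simplified stand-in types (not the real DBGF structures or send API):

    #include <stdint.h>

    #define NIL_HANDLE  UINT32_MAX

    typedef struct VCPUDBG
    {
        uint32_t hBpActive;   /* handle of the breakpoint that caused VINF_EM_DBG_BREAKPOINT */
    } VCPUDBG;

    typedef struct BPEVENT
    {
        uint32_t hBp;         /* which breakpoint fired */
        int      enmCtx;      /* context the event was raised in */
    } BPEVENT;

    /* Invoked on the EMT after the execution loop returned a breakpoint status. */
    static int RaiseBreakpointEvent(VCPUDBG *pVCpuDbg, int (*pfnSend)(const BPEVENT *))
    {
        BPEVENT Evt;
        Evt.hBp = pVCpuDbg->hBpActive;    /* consume ... */
        pVCpuDbg->hBpActive = NIL_HANDLE; /* ... and clear so a stale handle is never re-reported */
        if (Evt.hBp == NIL_HANDLE)
            return -1;                    /* nothing recorded: internal error (VERR_DBGF_IPE_1 in the hunk) */
        Evt.enmCtx = 0;                   /* raw-mode context in the hunk (DBGFEVENTCTX_RAW) */
        return pfnSend(&Evt);             /* hand the event to the debugger and wait */
    }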
trunk/src/VBox/VMM/VMMR3/EM.cpp
r87130 → r87594

             case VINF_EM_DBG_BREAKPOINT:
-#ifdef VBOX_WITH_LOTS_OF_DBGF_BPS
                 rc = DBGFR3BpHit(pVM, pVCpu);
-#else
-                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
-#endif
                 break;
trunk/src/VBox/VMM/include/DBGFInternal.h
r87130 → r87594


-#ifdef VBOX_WITH_LOTS_OF_DBGF_BPS
 /** @name Global breakpoint table handling defines.
  * @{ */
 ...
 #define DBGF_BP_L2_TBL_CHUNK_COUNT (DBGF_BP_L2_TBL_ENTRY_COUNT_MAX / DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK)
 /** @} */
-#endif


 ...

-#ifndef VBOX_WITH_LOTS_OF_DBGF_BPS
-/**
- * Breakpoint search optimization.
- */
-typedef struct DBGFBPSEARCHOPT
-{
-    /** Where to start searching for hits.
-     * (First enabled is #DBGF::aBreakpoints[iStartSearch]). */
-    uint32_t volatile       iStartSearch;
-    /** The number of aBreakpoints entries to search.
-     * (Last enabled is #DBGF::aBreakpoints[iStartSearch + cToSearch - 1]) */
-    uint32_t volatile       cToSearch;
-} DBGFBPSEARCHOPT;
-/** Pointer to a breakpoint search optimziation structure. */
-typedef DBGFBPSEARCHOPT *PDBGFBPSEARCHOPT;
-#else
-
 /** An invalid breakpoint chunk ID. */
 #define DBGF_BP_CHUNK_ID_INVALID    UINT32_MAX
 ...
 /** Pointer to a breakpoint L2 lookup table chunk - Ring-0 Ptr. */
 typedef R0PTRTYPE(DBGFBPL2TBLCHUNKR0 *) PDBGFBPL2TBLCHUNKR0;
-#endif


 ...
     /** The number of enabled hardware I/O breakpoints. */
     uint8_t                     cEnabledHwIoBreakpoints;
-#ifndef VBOX_WITH_LOTS_OF_DBGF_BPS
+    uint8_t                     au8Alignment1[2]; /**< Alignment padding. */
     /** The number of enabled INT3 breakpoints. */
-    uint8_t                     cEnabledInt3Breakpoints;
-    uint8_t                     abPadding; /**< Unused padding space up for grabs. */
-    uint32_t                    uPadding;
-#else
-    uint16_t                    u16Pad; /**< Unused padding space up for grabs. */
-    /** The number of enabled INT3 breakpoints. */
-    volatile uint32_t           cEnabledInt3Breakpoints;
-#endif
+    uint32_t volatile           cEnabledInt3Breakpoints;

     /** Debugger Attached flag.
 ...
     } SteppingFilter;

-    uint32_t                    u32Padding[2]; /**< Alignment padding. */
-
-#ifndef VBOX_WITH_LOTS_OF_DBGF_BPS
-    /** Array of hardware breakpoints. (0..3)
-     * This is shared among all the CPUs because life is much simpler that way. */
-    DBGFBP                      aHwBreakpoints[4];
-    /** Array of int 3 and REM breakpoints. (4..)
-     * @remark This is currently a fixed size array for reasons of simplicity. */
-    DBGFBP                      aBreakpoints[32];
-
-    /** MMIO breakpoint search optimizations. */
-    DBGFBPSEARCHOPT             Mmio;
-    /** I/O port breakpoint search optimizations. */
-    DBGFBPSEARCHOPT             PortIo;
-    /** INT3 breakpoint search optimizations. */
-    DBGFBPSEARCHOPT             Int3;
-#else
+    uint32_t                    au32Alignment2[2]; /**< Alignment padding. */
+
     /** @name Breakpoint handling related state.
      * @{ */
     /** Array of hardware breakpoints (0..3).
      * This is shared among all the CPUs because life is much simpler that way. */
     DBGFBPHW                    aHwBreakpoints[4];
     /** @} */
-#endif

     /**
 ...
     uint32_t                    offVM;

-#ifndef VBOX_WITH_LOTS_OF_DBGF_BPS
-    /** Current active breakpoint (id).
-     * This is ~0U if not active. It is set when a execution engine
-     * encounters a breakpoint and returns VINF_EM_DBG_BREAKPOINT. This is
-     * currently not used for REM breakpoints because of the lazy coupling
-     * between VBox and REM.
-     *
-     * @todo drop this in favor of aEvents! */
-    uint32_t                    iActiveBp;
-#else
+    /** Flag whether the to invoke any owner handlers in ring-3 before dropping into the debugger. */
+    bool                        fBpInvokeOwnerCallback;
+    /** Set if we're singlestepping in raw mode.
+     * This is checked and cleared in the \#DB handler. */
+    bool                        fSingleSteppingRaw;
+    /** Alignment padding. */
+    bool                        afPadding[2];
     /** Current active breakpoint handle.
      * This is NIL_DBGFBP if not active. It is set when a execution engine
 ...
      * @todo drop this in favor of aEvents! */
     DBGFBP                      hBpActive;
-    /** Flag whether the to invoke any owner handlers in ring-3 before dropping into the debugger. */
-    bool                        fBpInvokeOwnerCallback;
-#endif
-    /** Set if we're singlestepping in raw mode.
-     * This is checked and cleared in the \#DB handler. */
-    bool                        fSingleSteppingRaw;
-
-    /** Alignment padding. */
-    bool                        afPadding[3];

     /** The number of events on the stack (aEvents).
 ...
     R0PTRTYPE(struct DBGFTRACERINSR0 *) pTracerR0;

-#ifdef VBOX_WITH_LOTS_OF_DBGF_BPS
     /** @name Breakpoint handling related state, Ring-0 only part.
      * @{ */
 ...
     bool                        fInit;
     /** @} */
-#endif
 } DBGFR0PERVM;

 ...
     /** @} */

-#ifdef VBOX_WITH_LOTS_OF_DBGF_BPS
     /** @name Breakpoint handling related state.
      * @{ */
 ...
     RTSEMFASTMUTEX              hMtxBpL2Wr;
     /** @} */
-#endif

     /** The type database lock. */
 ...
 void dbgfR3AsTerm(PUVM pUVM);
 void dbgfR3AsRelocate(PUVM pUVM, RTGCUINTPTR offDelta);
-#ifdef VBOX_WITH_LOTS_OF_DBGF_BPS
 DECLHIDDEN(int) dbgfR3BpInit(PUVM pUVM);
 DECLHIDDEN(int) dbgfR3BpTerm(PUVM pUVM);
-#else
-int  dbgfR3BpInit(PVM pVM);
-#endif
 int  dbgfR3InfoInit(PUVM pUVM);
 int  dbgfR3InfoTerm(PUVM pUVM);
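Taking the header diff as a whole, the per-VM DBGF state now carries only the four hardware slots (DBGFBPHW aHwBreakpoints[4]) plus counters, while software breakpoints live in the chunked, handle-addressed tables. The struct below is a rough approximation of what one such hardware slot has to hold, inferred only from the fields the DBGFAll.cpp hunks read (hBp, GCPtr, fType, cb, fEnabled); the real DBGFBPHW field types, order and padding are not shown in this changeset and will differ.

    #include <stdint.h>

    typedef uint32_t MYBPHANDLE;          /* stand-in for DBGFBP */
    #define MY_NIL_BP UINT32_MAX          /* stand-in for NIL_DBGFBP */

    /* Illustrative per-DRx slot, shared by all vCPUs (as the header comment in the hunk notes). */
    typedef struct MYBPHW
    {
        uint64_t    GCPtr;     /* guest linear address, returned by DBGFBpGetDR0..3 */
        MYBPHANDLE  hBp;       /* owning breakpoint handle, MY_NIL_BP while the slot is free */
        uint8_t     fType;     /* DR7 R/W encoding (exec / write / I/O / read-write) */
        uint8_t     cb;        /* access size in bytes, a power of two */
        uint8_t     fEnabled;  /* armed flag checked when DR7 is assembled */
    } MYBPHW;

Everything DBGFBpGetDR7 and DBGFBpCheckIo do in the hunks above only needs these five pieces of per-slot information.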