VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/DBGFR3Bp.cpp@86728

Last change on this file was r86728, checked in by vboxsync on 2020-10-28:

VMM/DBGF: Implement L2 binary search tree node insertion and walking the tree in R0, bugref:9837 [doxygen fix]

1/* $Id: DBGFR3Bp.cpp 86728 2020-10-28 10:18:28Z vboxsync $ */
2/** @file
3 * DBGF - Debugger Facility, Breakpoint Management.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_dbgf_bp DBGF - The Debugger Facility, Breakpoint Management
20 *
21 * The debugger facility's breakpoint manager exists to efficiently manage large
22 * numbers of breakpoints for use cases such as dtrace-like operations or execution
23 * flow tracing. Execution flow tracing in particular can require thousands of
24 * breakpoints, which need to be managed efficiently so guest operation is not slowed
25 * down too much. Before the rewrite started at the end of 2020, DBGF could only
26 * handle 32 breakpoints (+ 4 hardware assisted breakpoints). The new manager is
27 * supposed to be able to handle up to one million breakpoints.
28 *
29 * @see grp_dbgf
30 *
31 *
32 * @section sec_dbgf_bp_owner Breakpoint owners
33 *
34 * A single breakpoint owner has a mandatory ring-3 callback and an optional ring-0
35 * callback assigned, which are called whenever a breakpoint assigned to that owner is hit.
36 * The common part of the owner is managed by a single table mapped into both ring-0
37 * and ring-3, with the handle being the index into the table. This allows resolving
38 * the handle to the internal structure efficiently. Searching for a free entry is
39 * done using a bitmap indicating free and occupied entries. For the optional
40 * ring-0 owner part there is a separate ring-0 only table, for security reasons.
41 *
42 * The callback of the owner can be used to gather and log guest state information
43 * and to decide whether to continue guest execution or stop and drop into the debugger.
44 * Breakpoints which don't have an owner assigned will always drop the VM right into
45 * the debugger.
46 *
47 *
48 * @section sec_dbgf_bp_bps Breakpoints
49 *
50 * Breakpoints are referenced by an opaque handle which acts as an index into a global table
51 * mapped into ring-3 and ring-0. Each entry contains the necessary state to manage the breakpoint,
52 * such as trigger conditions, type, owner, etc. If an owner is given, an optional opaque user argument
53 * can be supplied which is passed to the respective owner callback. For owners with ring-0 callbacks
54 * a dedicated ring-0 table holds the possible ring-0 user arguments.
55 *
56 * To keep memory consumption under control and still support large numbers of
57 * breakpoints, the table is split into fixed-size chunks; the chunk index and the index
58 * into the chunk can be derived from the handle with only a few logical operations (see the sketch below).
59 *
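 * A minimal sketch of how a handle resolves to its entry (illustrative only: the real code
 * uses the DBGF_BP_HND_GET_CHUNK_ID and DBGF_BP_HND_GET_ENTRY macros, and the 16/16 bit
 * split shown here is an assumption based on the 65536 breakpoints per chunk mentioned in
 * the notes below):
 *
 * @code
 *     uint32_t const idChunk  = hBp >> 16;    // chunk index into the chunk table (assumed layout)
 *     uint32_t const idxEntry = hBp & 0xffff; // entry index within that chunk (assumed layout)
 *     PDBGFBPINT pBp = &pUVM->dbgf.s.aBpChunks[idChunk].pBpBaseR3[idxEntry];
 * @endcode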
60 *
61 * @section sec_dbgf_bp_resolv Resolving breakpoint addresses
62 *
63 * Whenever a \#BP(0) event is triggered DBGF needs to decide whether the event originated
64 * from within the guest or whether a DBGF breakpoint caused it. This has to happen as fast
65 * as possible. The following scheme is employed to achieve this:
66 *
67 * @verbatim
68 *                          7   6   5   4   3   2   1   0
69 *                        +---+---+---+---+---+---+---+---+
70 *                        |   |   |   |   |   |   |   |   |  BP address
71 *                        +---+---+---+---+---+---+---+---+
72 *                         \_____________________/ \_____/
73 *                                    |               |
74 *                                    |               +----------------+
75 *                                    |                                |
76 *      BP table                      |                                v
77 *   +------------+                   |                          +-----------+
78 *   |   hBp 0    |                   |                     X <- | 0 | xxxxx |
79 *   |   hBp 1    | <-----------------+------------------------- | 1 | hBp 1 |
80 *   |            |                   |                    +---- | 2 | idxL2 |
81 *   |  hBp <m>   | <---+             v                    |     |...|  ...  |
82 *   |            |     |        +-----------+             |     |...|  ...  |
83 *   |            |     |        |           |             |     |...|  ...  |
84 *   |  hBp <n>   | <-+ +------- | +-> leaf  |             |     | . |   .   |
85 *   |            |   |          | |         |             |     | . |   .   |
86 *   |            |   |          | + root +  | <-----------+     | . |   .   |
87 *   |            |   |          |        |  |                   +-----------+
88 *   |            |   +----------|  leaf <+  |                     L1: 65536
89 *   |     .      |              |     .     |
90 *   |     .      |              |     .     |
91 *   |     .      |              |     .     |
92 *   +------------+              +-----------+
93 *                                L2 idx AVL
94 * @endverbatim
95 *
96 * -# Take the lowest 16 bits of the breakpoint address and use them as a direct index
97 *    into the L1 table. The L1 table is contiguous and consists of 4 byte entries,
98 *    resulting in 256KiB of memory used. Each entry's topmost 4 bits indicate how to
99 *    proceed, and the meaning of the remaining 28 bits depends on them:
100 *     - A 0 type entry means no breakpoint is registered with the matching lowest 16 bits,
101 *       so forward the event to the guest.
102 *     - A 1 in the topmost 4 bits means that the remaining 28 bits directly denote a breakpoint
103 *       handle which can be resolved by extracting the chunk index and the index into the chunk
104 *       of the global breakpoint table. If the address matches, the breakpoint is processed
105 *       according to its configuration. Otherwise the event is forwarded to the guest.
106 *     - A 2 in the topmost 4 bits means that there are multiple breakpoints registered
107 *       matching the lowest 16 bits and the search must continue in the L2 table, with the
108 *       remaining 28 bits acting as an index into the L2 table indicating the search root.
109 * -# The L2 table consists of multiple index-based AVL trees, one for each reference
110 *    from the L1 table. The key used for searching is the upper 6 bytes of the breakpoint
111 *    address. The tree is traversed until either a matching address is found and the
112 *    breakpoint is processed, or the search fails and the event is forwarded to the guest.
113 *    Each entry in the L2 table is 16 bytes big and densely packed to avoid excessive memory usage (see the lookup sketch below).
114 *
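 * As a rough illustration of the fast path (the macro names are the ones used throughout
 * this file; GCPtrBp, paBpLocL1, hBp and idxL2 are placeholders and error/miss handling
 * is omitted):
 *
 * @code
 *     uint16_t const idxL1    = DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(GCPtrBp); // lowest 16 bits
 *     uint32_t const u32Entry = ASMAtomicReadU32(&paBpLocL1[idxL1]);
 *     switch (DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32Entry))
 *     {
 *         case DBGF_BP_INT3_L1_ENTRY_TYPE_NULL:   // nothing registered -> forward the event to the guest
 *             break;
 *         case DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND: // exactly one breakpoint -> resolve the handle directly
 *             hBp = DBGF_BP_INT3_L1_ENTRY_GET_BP_HND(u32Entry);
 *             break;
 *         case DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX: // several breakpoints -> search the L2 AVL tree
 *             idxL2 = DBGF_BP_INT3_L1_ENTRY_GET_L2_IDX(u32Entry);
 *             break;
 *     }
 * @endcode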
115 *
116 * @section sec_dbgf_bp_note Random thoughts and notes for the implementation
117 *
118 * - The assumption for this approach is that the lowest 16 bits of the breakpoint address
119 *   are the ones that hopefully vary the most across breakpoints, so the traversal can skip
120 *   the L2 table in most cases. Even if the L2 table must be consulted, the individual
121 *   trees should be quite shallow, resulting in low overhead when walking them
122 *   (though only real world testing can assert this assumption).
123 * - Index-based tables and trees are used instead of pointers because the tables
124 *   are always mapped into ring-0 and ring-3 with different base addresses.
125 * - Efficient breakpoint allocation is done by having a global bitmap indicating free
126 *   and occupied breakpoint entries. The same applies to the L2 AVL table.
127 * - Special care must be taken when modifying the L1 and L2 tables as other EMTs
128 *   might still access them (want to try a lockless approach first using
129 *   atomic updates, have to resort to locking if that turns out to be too difficult;
130 *   see the sketch at the end of this section).
131 * - Each BP entry is supposed to be 64 bytes in size and each chunk should contain 65536
132 *   breakpoints, which results in 4MiB (65536 * 64 bytes) for each chunk plus the allocation bitmap.
133 * - ring-0 has to take special care when traversing the L2 AVL tree to not run into cycles
134 *   and must do strict bounds checking before accessing anything. The L1 and L2 tables
135 *   are written to from ring-3 only. The same goes for the breakpoint table, with the
136 *   exception being the opaque user argument for ring-0 which is stored in ring-0 only memory.
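 *
 * As a sketch of the lockless L1 update mentioned above, mirroring what dbgfR3BpInt3Add()
 * does further down when the L1 slot is still empty (idxL1 is a placeholder here):
 *
 * @code
 *     uint32_t const u32Entry = DBGF_BP_INT3_L1_ENTRY_CREATE_BP_HND(hBp);
 *     if (!ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], u32Entry, DBGF_BP_INT3_L1_ENTRY_TYPE_NULL))
 *     {
 *         // Another thread raced us; retry or insert into the L2 tree under the fast mutex.
 *     }
 * @endcode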
137 */
138
139
140/*********************************************************************************************************************************
141* Header Files *
142*********************************************************************************************************************************/
143#define LOG_GROUP LOG_GROUP_DBGF
144#include <VBox/vmm/dbgf.h>
145#include <VBox/vmm/selm.h>
146#include <VBox/vmm/iem.h>
147#include <VBox/vmm/mm.h>
148#include <VBox/vmm/iom.h>
149#include <VBox/vmm/hm.h>
150#include "DBGFInternal.h"
151#include <VBox/vmm/vm.h>
152#include <VBox/vmm/uvm.h>
153
154#include <VBox/err.h>
155#include <VBox/log.h>
156#include <iprt/assert.h>
157#include <iprt/mem.h>
158
159#include "DBGFInline.h"
160
161
162/*********************************************************************************************************************************
163* Structures and Typedefs *
164*********************************************************************************************************************************/
165
166
167/*********************************************************************************************************************************
168* Internal Functions *
169*********************************************************************************************************************************/
170RT_C_DECLS_BEGIN
171RT_C_DECLS_END
172
173
174/**
175 * Initializes the breakpoint management.
176 *
177 * @returns VBox status code.
178 * @param pUVM The user mode VM handle.
179 */
180DECLHIDDEN(int) dbgfR3BpInit(PUVM pUVM)
181{
182 PVM pVM = pUVM->pVM;
183
184 /* Init hardware breakpoint states. */
185 for (uint32_t i = 0; i < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); i++)
186 {
187 PDBGFBPHW pHwBp = &pVM->dbgf.s.aHwBreakpoints[i];
188
189 AssertCompileSize(DBGFBP, sizeof(uint32_t));
190 pHwBp->hBp = NIL_DBGFBP;
191 //pHwBp->fEnabled = false;
192 }
193
194 /* Now the global breakpoint table chunks. */
195 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpChunks); i++)
196 {
197 PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[i];
198
199 //pBpChunk->pBpBaseR3 = NULL;
200 //pBpChunk->pbmAlloc = NULL;
201 //pBpChunk->cBpsFree = 0;
202 pBpChunk->idChunk = DBGF_BP_CHUNK_ID_INVALID; /* Not allocated. */
203 }
204
205 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpL2TblChunks); i++)
206 {
207 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[i];
208
209 //pL2Chunk->pL2BaseR3 = NULL;
210 //pL2Chunk->pbmAlloc = NULL;
211 //pL2Chunk->cFree = 0;
212 pL2Chunk->idChunk = DBGF_BP_CHUNK_ID_INVALID; /* Not allocated. */
213 }
214
215 //pUVM->dbgf.s.paBpLocL1R3 = NULL;
216 pUVM->dbgf.s.hMtxBpL2Wr = NIL_RTSEMFASTMUTEX;
217 return RTSemFastMutexCreate(&pUVM->dbgf.s.hMtxBpL2Wr);
218}
219
220
221/**
222 * Terminates the breakpoint management.
223 *
224 * @returns VBox status code.
225 * @param pUVM The user mode VM handle.
226 */
227DECLHIDDEN(int) dbgfR3BpTerm(PUVM pUVM)
228{
229 /* Free all allocated chunk bitmaps (the chunks themselves are destroyed during ring-0 VM destruction). */
230 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpChunks); i++)
231 {
232 PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[i];
233
234 if (pBpChunk->idChunk != DBGF_BP_CHUNK_ID_INVALID)
235 {
236 AssertPtr(pBpChunk->pbmAlloc);
237 RTMemFree((void *)pBpChunk->pbmAlloc);
238 pBpChunk->pbmAlloc = NULL;
239 pBpChunk->idChunk = DBGF_BP_CHUNK_ID_INVALID;
240 }
241 }
242
243 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpL2TblChunks); i++)
244 {
245 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[i];
246
247 if (pL2Chunk->idChunk != DBGF_BP_CHUNK_ID_INVALID)
248 {
249 AssertPtr(pL2Chunk->pbmAlloc);
250 RTMemFree((void *)pL2Chunk->pbmAlloc);
251 pL2Chunk->pbmAlloc = NULL;
252 pL2Chunk->idChunk = DBGF_BP_CHUNK_ID_INVALID;
253 }
254 }
255
256 if (pUVM->dbgf.s.hMtxBpL2Wr != NIL_RTSEMFASTMUTEX)
257 {
258 RTSemFastMutexDestroy(pUVM->dbgf.s.hMtxBpL2Wr);
259 pUVM->dbgf.s.hMtxBpL2Wr = NIL_RTSEMFASTMUTEX;
260 }
261
262 return VINF_SUCCESS;
263}
264
265
266/**
267 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
268 */
269static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpInitEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
270{
271 RT_NOREF(pvUser);
272
273 VMCPU_ASSERT_EMT(pVCpu);
274 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
275
276 /*
277 * The initialization will be done on EMT(0). It is possible that multiple
278 * initialization attempts are done because dbgfR3BpEnsureInit() can be called
279 * from racing non EMT threads when trying to set a breakpoint for the first time.
280 * Just fake success if the L1 is already present which means that a previous rendezvous
281 * successfully initialized the breakpoint manager.
282 */
283 PUVM pUVM = pVM->pUVM;
284 if ( pVCpu->idCpu == 0
285 && !pUVM->dbgf.s.paBpLocL1R3)
286 {
287 DBGFBPINITREQ Req;
288 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
289 Req.Hdr.cbReq = sizeof(Req);
290 Req.paBpLocL1R3 = NULL;
291 int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_INIT, 0 /*u64Arg*/, &Req.Hdr);
292 AssertLogRelMsgRCReturn(rc, ("VMMR0_DO_DBGF_BP_INIT failed: %Rrc\n", rc), rc);
293 pUVM->dbgf.s.paBpLocL1R3 = Req.paBpLocL1R3;
294 }
295
296 return VINF_SUCCESS;
297}
298
299
300/**
301 * Ensures that the breakpoint manager is fully initialized.
302 *
303 * @returns VBox status code.
304 * @param pUVM The user mode VM handle.
305 *
306 * @thread Any thread.
307 */
308static int dbgfR3BpEnsureInit(PUVM pUVM)
309{
310 /* If the L1 lookup table is allocated, initialization succeeded before. */
311 if (RT_LIKELY(pUVM->dbgf.s.paBpLocL1R3))
312 return VINF_SUCCESS;
313
314 /* Gather all EMTs and call into ring-0 to initialize the breakpoint manager. */
315 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpInitEmtWorker, NULL /*pvUser*/);
316}
317
318
319/**
320 * Returns the internal breakpoint state for the given handle.
321 *
322 * @returns Pointer to the internal breakpoint state or NULL if the handle is invalid.
323 * @param pUVM The user mode VM handle.
324 * @param hBp The breakpoint handle to resolve.
325 */
326DECLINLINE(PDBGFBPINT) dbgfR3BpGetByHnd(PUVM pUVM, DBGFBP hBp)
327{
328 uint32_t idChunk = DBGF_BP_HND_GET_CHUNK_ID(hBp);
329 uint32_t idxEntry = DBGF_BP_HND_GET_ENTRY(hBp);
330
331 AssertReturn(idChunk < DBGF_BP_CHUNK_COUNT, NULL);
332 AssertReturn(idxEntry < DBGF_BP_COUNT_PER_CHUNK, NULL);
333
334 PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[idChunk];
335 AssertReturn(pBpChunk->idChunk == idChunk, NULL);
336 AssertPtrReturn(pBpChunk->pbmAlloc, NULL);
337 AssertReturn(ASMBitTest(pBpChunk->pbmAlloc, idxEntry), NULL);
338
339 return &pBpChunk->pBpBaseR3[idxEntry];
340}
341
342
343/**
344 * Gets a breakpoint by address.
345 *
346 * @returns The breakpoint handle on success or NIL_DBGFBP if not found.
347 * @param pUVM The user mode VM handle.
348 * @param enmType The breakpoint type.
349 * @param GCPtr The breakpoint address.
350 * @param ppBp Where to store the pointer to the internal breakpoint state on success, optional.
351 */
352static DBGFBP dbgfR3BpGetByAddr(PUVM pUVM, DBGFBPTYPE enmType, RTGCUINTPTR GCPtr, PDBGFBPINT *ppBp)
353{
354 DBGFBP hBp = NIL_DBGFBP;
355
356 switch (enmType)
357 {
358 case DBGFBPTYPE_REG:
359 {
360 PVM pVM = pUVM->pVM;
361 VM_ASSERT_VALID_EXT_RETURN(pVM, NIL_DBGFBP);
362
363 for (uint32_t i = 0; i < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); i++)
364 {
365 PDBGFBPHW pHwBp = &pVM->dbgf.s.aHwBreakpoints[i];
366
367 AssertCompileSize(DBGFBP, sizeof(uint32_t));
368 DBGFBP hBpTmp = ASMAtomicReadU32(&pHwBp->hBp);
369 if ( pHwBp->GCPtr == GCPtr
370 && hBpTmp != NIL_DBGFBP)
371 {
372 hBp = hBpTmp;
373 break;
374 }
375 }
376
377 break;
378 }
379
380 case DBGFBPTYPE_INT3:
381 break;
382
383 default:
384 AssertMsgFailed(("enmType=%d\n", enmType));
385 break;
386 }
387
388 if ( hBp != NIL_DBGFBP
389 && ppBp)
390 *ppBp = dbgfR3BpGetByHnd(pUVM, hBp);
391 return hBp;
392}
393
394
395/**
396 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
397 */
398static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpChunkAllocEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
399{
400 uint32_t idChunk = (uint32_t)(uintptr_t)pvUser;
401
402 VMCPU_ASSERT_EMT(pVCpu);
403 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
404
405 AssertReturn(idChunk < DBGF_BP_CHUNK_COUNT, VERR_DBGF_BP_IPE_1);
406
407 PUVM pUVM = pVM->pUVM;
408 PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[idChunk];
409
410 AssertReturn( pBpChunk->idChunk == DBGF_BP_CHUNK_ID_INVALID
411 || pBpChunk->idChunk == idChunk,
412 VERR_DBGF_BP_IPE_2);
413
414 /*
415 * The initialization will be done on EMT(0). It is possible that multiple
416 * allocation attempts are done when multiple racing non EMT threads try to
417 * allocate a breakpoint and a new chunk needs to be allocated.
418 * Ignore the request and succeed if the chunk is allocated meaning that a
419 * previous rendezvous successfully allocated the chunk.
420 */
421 int rc = VINF_SUCCESS;
422 if ( pVCpu->idCpu == 0
423 && pBpChunk->idChunk == DBGF_BP_CHUNK_ID_INVALID)
424 {
425 /* Allocate the bitmap first so we can skip calling into VMMR0 if it fails. */
426 AssertCompile(!(DBGF_BP_COUNT_PER_CHUNK % 8));
427 volatile void *pbmAlloc = RTMemAllocZ(DBGF_BP_COUNT_PER_CHUNK / 8);
428 if (RT_LIKELY(pbmAlloc))
429 {
430 DBGFBPCHUNKALLOCREQ Req;
431 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
432 Req.Hdr.cbReq = sizeof(Req);
433 Req.idChunk = idChunk;
434 Req.pChunkBaseR3 = NULL;
435 rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_CHUNK_ALLOC, 0 /*u64Arg*/, &Req.Hdr);
436 AssertLogRelMsgRC(rc, ("VMMR0_DO_DBGF_BP_CHUNK_ALLOC failed: %Rrc\n", rc));
437 if (RT_SUCCESS(rc))
438 {
439 pBpChunk->pBpBaseR3 = (PDBGFBPINT)Req.pChunkBaseR3;
440 pBpChunk->pbmAlloc = pbmAlloc;
441 pBpChunk->cBpsFree = DBGF_BP_COUNT_PER_CHUNK;
442 pBpChunk->idChunk = idChunk;
443 return VINF_SUCCESS;
444 }
445
446 RTMemFree((void *)pbmAlloc);
447 }
448 else
449 rc = VERR_NO_MEMORY;
450 }
451
452 return rc;
453}
454
455
456/**
457 * Tries to allocate the given chunk which requires an EMT rendezvous.
458 *
459 * @returns VBox status code.
460 * @param pUVM The user mode VM handle.
461 * @param idChunk The chunk to allocate.
462 *
463 * @thread Any thread.
464 */
465DECLINLINE(int) dbgfR3BpChunkAlloc(PUVM pUVM, uint32_t idChunk)
466{
467 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpChunkAllocEmtWorker, (void *)(uintptr_t)idChunk);
468}
469
470
471/**
472 * Tries to allocate a new breakpoint of the given type.
473 *
474 * @returns VBox status code.
475 * @param pUVM The user mode VM handle.
476 * @param hOwner The owner handle, NIL_DBGFBPOWNER if none assigned.
477 * @param pvUser Opaque user data passed in the owner callback.
478 * @param enmType Breakpoint type to allocate.
479 * @param iHitTrigger The hit count at which the breakpoint starts triggering.
480 * Use 0 (or 1) if it's gonna trigger at once.
481 * @param iHitDisable The hit count which disables the breakpoint.
482 * Use ~(uint64_t)0 if it's never gonna be disabled.
483 * @param phBp Where to return the opaque breakpoint handle on success.
484 * @param ppBp Where to return the pointer to the internal breakpoint state on success.
485 *
486 * @thread Any thread.
487 */
488static int dbgfR3BpAlloc(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser, DBGFBPTYPE enmType,
489 uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp,
490 PDBGFBPINT *ppBp)
491{
492 /*
493 * Search for a chunk having a free entry, allocating new chunks
494 * if the encountered ones are full.
495 *
496 * This can be called from multiple threads at the same time so special care
497 * has to be taken to not require any locking here.
498 */
499 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpChunks); i++)
500 {
501 PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[i];
502
503 uint32_t idChunk = ASMAtomicReadU32(&pBpChunk->idChunk);
504 if (idChunk == DBGF_BP_CHUNK_ID_INVALID)
505 {
506 int rc = dbgfR3BpChunkAlloc(pUVM, i);
507 if (RT_FAILURE(rc))
508 {
509 LogRel(("DBGF/Bp: Allocating new breakpoint table chunk failed with %Rrc\n", rc));
510 break;
511 }
512
513 idChunk = ASMAtomicReadU32(&pBpChunk->idChunk);
514 Assert(idChunk == i);
515 }
516
517 /** @todo Optimize with some hinting if this turns out to be too slow. */
518 for (;;)
519 {
520 uint32_t cBpsFree = ASMAtomicReadU32(&pBpChunk->cBpsFree);
521 if (cBpsFree)
522 {
523 /*
524 * Scan the associated bitmap for a free entry, if none can be found another thread
525 * raced us and we go to the next chunk.
526 */
527 int32_t iClr = ASMBitFirstClear(pBpChunk->pbmAlloc, DBGF_BP_COUNT_PER_CHUNK);
528 if (iClr != -1)
529 {
530 /*
531 * Try to allocate, we could get raced here as well. In that case
532 * we try again.
533 */
534 if (!ASMAtomicBitTestAndSet(pBpChunk->pbmAlloc, iClr))
535 {
536 /* Success, immediately mark as allocated, initialize the breakpoint state and return. */
537 ASMAtomicDecU32(&pBpChunk->cBpsFree);
538
539 PDBGFBPINT pBp = &pBpChunk->pBpBaseR3[iClr];
540 pBp->Pub.cHits = 0;
541 pBp->Pub.iHitTrigger = iHitTrigger;
542 pBp->Pub.iHitDisable = iHitDisable;
543 pBp->Pub.hOwner = hOwner;
544 pBp->Pub.fFlagsAndType = DBGF_BP_PUB_SET_FLAGS_AND_TYPE(enmType, DBGF_BP_F_DEFAULT);
545 pBp->pvUserR3 = pvUser;
546
547 /** @todo Owner handling (reference and call ring-0 if it has an ring-0 callback). */
548
549 *phBp = DBGF_BP_HND_CREATE(idChunk, iClr);
550 *ppBp = pBp;
551 return VINF_SUCCESS;
552 }
553 /* else Retry with another spot. */
554 }
555 else /* no free entry in bitmap, go to the next chunk */
556 break;
557 }
558 else /* !cBpsFree, go to the next chunk */
559 break;
560 }
561 }
562
563 return VERR_DBGF_NO_MORE_BP_SLOTS;
564}
565
566
567/**
568 * Frees the given breakpoint handle.
569 *
570 * @returns nothing.
571 * @param pUVM The user mode VM handle.
572 * @param hBp The breakpoint handle to free.
573 * @param pBp The internal breakpoint state pointer.
574 */
575static void dbgfR3BpFree(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
576{
577 uint32_t idChunk = DBGF_BP_HND_GET_CHUNK_ID(hBp);
578 uint32_t idxEntry = DBGF_BP_HND_GET_ENTRY(hBp);
579
580 AssertReturnVoid(idChunk < DBGF_BP_CHUNK_COUNT);
581 AssertReturnVoid(idxEntry < DBGF_BP_COUNT_PER_CHUNK);
582
583 PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[idChunk];
584 AssertPtrReturnVoid(pBpChunk->pbmAlloc);
585 AssertReturnVoid(ASMBitTest(pBpChunk->pbmAlloc, idxEntry));
586
587 /** @todo Need a trip to Ring-0 if an owner is assigned with a Ring-0 part to clear the breakpoint. */
588 /** @todo Release owner. */
589 memset(pBp, 0, sizeof(*pBp));
590
591 ASMAtomicBitClear(pBpChunk->pbmAlloc, idxEntry);
592 ASMAtomicIncU32(&pBpChunk->cBpsFree);
593}
594
595
596/**
597 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
598 */
599static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpL2TblChunkAllocEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
600{
601 uint32_t idChunk = (uint32_t)(uintptr_t)pvUser;
602
603 VMCPU_ASSERT_EMT(pVCpu);
604 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
605
606 AssertReturn(idChunk < DBGF_BP_L2_TBL_CHUNK_COUNT, VERR_DBGF_BP_IPE_1);
607
608 PUVM pUVM = pVM->pUVM;
609 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[idChunk];
610
611 AssertReturn( pL2Chunk->idChunk == DBGF_BP_L2_IDX_CHUNK_ID_INVALID
612 || pL2Chunk->idChunk == idChunk,
613 VERR_DBGF_BP_IPE_2);
614
615 /*
616 * The initialization will be done on EMT(0). It is possible that multiple
617 * allocation attempts are done when multiple racing non EMT threads try to
618 * allocate a breakpoint and a new chunk needs to be allocated.
619 * Ignore the request and succeed if the chunk is allocated meaning that a
620 * previous rendezvous successfully allocated the chunk.
621 */
622 int rc = VINF_SUCCESS;
623 if ( pVCpu->idCpu == 0
624 && pL2Chunk->idChunk == DBGF_BP_L2_IDX_CHUNK_ID_INVALID)
625 {
626 /* Allocate the bitmap first so we can skip calling into VMMR0 if it fails. */
627 AssertCompile(!(DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK % 8));
628 volatile void *pbmAlloc = RTMemAllocZ(DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK / 8);
629 if (RT_LIKELY(pbmAlloc))
630 {
631 DBGFBPL2TBLCHUNKALLOCREQ Req;
632 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
633 Req.Hdr.cbReq = sizeof(Req);
634 Req.idChunk = idChunk;
635 Req.pChunkBaseR3 = NULL;
636 rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC, 0 /*u64Arg*/, &Req.Hdr);
637 AssertLogRelMsgRC(rc, ("VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC failed: %Rrc\n", rc));
638 if (RT_SUCCESS(rc))
639 {
640 pL2Chunk->pL2BaseR3 = (PDBGFBPL2ENTRY)Req.pChunkBaseR3;
641 pL2Chunk->pbmAlloc = pbmAlloc;
642 pL2Chunk->cFree = DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK;
643 pL2Chunk->idChunk = idChunk;
644 return VINF_SUCCESS;
645 }
646
647 RTMemFree((void *)pbmAlloc);
648 }
649 else
650 rc = VERR_NO_MEMORY;
651 }
652
653 return rc;
654}
655
656
657/**
658 * Tries to allocate the given L2 table chunk which requires an EMT rendezvous.
659 *
660 * @returns VBox status code.
661 * @param pUVM The user mode VM handle.
662 * @param idChunk The chunk to allocate.
663 *
664 * @thread Any thread.
665 */
666DECLINLINE(int) dbgfR3BpL2TblChunkAlloc(PUVM pUVM, uint32_t idChunk)
667{
668 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpL2TblChunkAllocEmtWorker, (void *)(uintptr_t)idChunk);
669}
670
671
672/**
673 * Tries to allocate a new L2 table entry.
674 *
675 * @returns VBox status code.
676 * @param pUVM The user mode VM handle.
677 * @param pidxL2Tbl Where to return the L2 table entry index on success.
678 * @param ppL2TblEntry Where to return the pointer to the L2 table entry on success.
679 *
680 * @thread Any thread.
681 */
682static int dbgfR3BpL2TblEntryAlloc(PUVM pUVM, uint32_t *pidxL2Tbl, PDBGFBPL2ENTRY *ppL2TblEntry)
683{
684 /*
685 * Search for a chunk having a free entry, allocating new chunks
686 * if the encountered ones are full.
687 *
688 * This can be called from multiple threads at the same time so special care
689 * has to be taken to not require any locking here.
690 */
691 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpL2TblChunks); i++)
692 {
693 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[i];
694
695 uint32_t idChunk = ASMAtomicReadU32(&pL2Chunk->idChunk);
696 if (idChunk == DBGF_BP_L2_IDX_CHUNK_ID_INVALID)
697 {
698 int rc = dbgfR3BpL2TblChunkAlloc(pUVM, i);
699 if (RT_FAILURE(rc))
700 {
701 LogRel(("DBGF/Bp: Allocating new breakpoint L2 lookup table chunk failed with %Rrc\n", rc));
702 break;
703 }
704
705 idChunk = ASMAtomicReadU32(&pL2Chunk->idChunk);
706 Assert(idChunk == i);
707 }
708
709 /** @todo Optimize with some hinting if this turns out to be too slow. */
710 for (;;)
711 {
712 uint32_t cFree = ASMAtomicReadU32(&pL2Chunk->cFree);
713 if (cFree)
714 {
715 /*
716 * Scan the associated bitmap for a free entry, if none can be found another thread
717 * raced us and we go to the next chunk.
718 */
719 int32_t iClr = ASMBitFirstClear(pL2Chunk->pbmAlloc, DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK);
720 if (iClr != -1)
721 {
722 /*
723 * Try to allocate, we could get raced here as well. In that case
724 * we try again.
725 */
726 if (!ASMAtomicBitTestAndSet(pL2Chunk->pbmAlloc, iClr))
727 {
728 /* Success, immediately mark as allocated, initialize the breakpoint state and return. */
729 ASMAtomicDecU32(&pL2Chunk->cFree);
730
731 PDBGFBPL2ENTRY pL2Entry = &pL2Chunk->pL2BaseR3[iClr];
732
733 *pidxL2Tbl = DBGF_BP_L2_IDX_CREATE(idChunk, iClr);
734 *ppL2TblEntry = pL2Entry;
735 return VINF_SUCCESS;
736 }
737 /* else Retry with another spot. */
738 }
739 else /* no free entry in bitmap, go to the next chunk */
740 break;
741 }
742 else /* !cFree, go to the next chunk */
743 break;
744 }
745 }
746
747 return VERR_DBGF_NO_MORE_BP_SLOTS;
748}
749
750
751/**
752 * Frees the given L2 table entry.
753 *
754 * @returns nothing.
755 * @param pUVM The user mode VM handle.
756 * @param idxL2Tbl The L2 table index to free.
757 * @param pL2TblEntry The L2 table entry pointer to free.
758 */
759static void dbgfR3BpL2TblEntryFree(PUVM pUVM, uint32_t idxL2Tbl, PDBGFBPL2ENTRY pL2TblEntry)
760{
761 uint32_t idChunk = DBGF_BP_L2_IDX_GET_CHUNK_ID(idxL2Tbl);
762 uint32_t idxEntry = DBGF_BP_L2_IDX_GET_ENTRY(idxL2Tbl);
763
764 AssertReturnVoid(idChunk < DBGF_BP_L2_TBL_CHUNK_COUNT);
765 AssertReturnVoid(idxEntry < DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK);
766
767 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[idChunk];
768 AssertPtrReturnVoid(pL2Chunk->pbmAlloc);
769 AssertReturnVoid(ASMBitTest(pL2Chunk->pbmAlloc, idxEntry));
770
771 memset(pL2TblEntry, 0, sizeof(*pL2TblEntry));
772
773 ASMAtomicBitClear(pL2Chunk->pbmAlloc, idxEntry);
774 ASMAtomicIncU32(&pL2Chunk->cFree);
775}
776
777
778/**
779 * Sets the enabled flag of the given breakpoint to the given value.
780 *
781 * @returns nothing.
782 * @param pBp The breakpoint to set the state.
783 * @param fEnabled Enabled status.
784 */
785DECLINLINE(void) dbgfR3BpSetEnabled(PDBGFBPINT pBp, bool fEnabled)
786{
787 DBGFBPTYPE enmType = DBGF_BP_PUB_GET_TYPE(pBp->Pub.fFlagsAndType);
788 if (fEnabled)
789 pBp->Pub.fFlagsAndType = DBGF_BP_PUB_SET_FLAGS_AND_TYPE(enmType, DBGF_BP_F_ENABLED);
790 else
791 pBp->Pub.fFlagsAndType = DBGF_BP_PUB_SET_FLAGS_AND_TYPE(enmType, 0 /*fFlags*/);
792}
793
794
795/**
796 * Assigns a hardware breakpoint state to the given register breakpoint.
797 *
798 * @returns VBox status code.
799 * @param pVM The cross-context VM structure pointer.
800 * @param hBp The breakpoint handle to assign.
801 * @param pBp The internal breakpoint state.
802 *
803 * @thread Any thread.
804 */
805static int dbgfR3BpRegAssign(PVM pVM, DBGFBP hBp, PDBGFBPINT pBp)
806{
807 AssertReturn(pBp->Pub.u.Reg.iReg == UINT8_MAX, VERR_DBGF_BP_IPE_3);
808
809 for (uint8_t i = 0; i < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); i++)
810 {
811 PDBGFBPHW pHwBp = &pVM->dbgf.s.aHwBreakpoints[i];
812
813 AssertCompileSize(DBGFBP, sizeof(uint32_t));
814 if (ASMAtomicCmpXchgU32(&pHwBp->hBp, hBp, NIL_DBGFBP))
815 {
816 pHwBp->GCPtr = pBp->Pub.u.Reg.GCPtr;
817 pHwBp->fType = pBp->Pub.u.Reg.fType;
818 pHwBp->cb = pBp->Pub.u.Reg.cb;
819 pHwBp->fEnabled = DBGF_BP_PUB_IS_ENABLED(pBp->Pub.fFlagsAndType);
820
821 pBp->Pub.u.Reg.iReg = i;
822 return VINF_SUCCESS;
823 }
824 }
825
826 return VERR_DBGF_NO_MORE_BP_SLOTS;
827}
828
829
830/**
831 * Removes the assigned hardware breakpoint state from the given register breakpoint.
832 *
833 * @returns VBox status code.
834 * @param pVM The cross-context VM structure pointer.
835 * @param hBp The breakpoint handle to remove.
836 * @param pBp The internal breakpoint state.
837 *
838 * @thread Any thread.
839 */
840static int dbgfR3BpRegRemove(PVM pVM, DBGFBP hBp, PDBGFBPINT pBp)
841{
842 AssertReturn(pBp->Pub.u.Reg.iReg < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints), VERR_DBGF_BP_IPE_3);
843
844 PDBGFBPHW pHwBp = &pVM->dbgf.s.aHwBreakpoints[pBp->Pub.u.Reg.iReg];
845 AssertReturn(pHwBp->hBp == hBp, VERR_DBGF_BP_IPE_4);
846 AssertReturn(!pHwBp->fEnabled, VERR_DBGF_BP_IPE_5);
847
848 pHwBp->GCPtr = 0;
849 pHwBp->fType = 0;
850 pHwBp->cb = 0;
851 ASMCompilerBarrier();
852
853 ASMAtomicWriteU32(&pHwBp->hBp, NIL_DBGFBP);
854 return VINF_SUCCESS;
855}
856
857
858/**
859 * Returns the pointer to the L2 table entry from the given index.
860 *
861 * @returns Current context pointer to the L2 table entry or NULL if the provided index value is invalid.
862 * @param pUVM The user mode VM handle.
863 * @param idxL2 The L2 table index to resolve.
864 *
865 * @note The content of the resolved L2 table entry is not validated!
866 */
867DECLINLINE(PDBGFBPL2ENTRY) dbgfR3BpL2GetByIdx(PUVM pUVM, uint32_t idxL2)
868{
869 uint32_t idChunk = DBGF_BP_L2_IDX_GET_CHUNK_ID(idxL2);
870 uint32_t idxEntry = DBGF_BP_L2_IDX_GET_ENTRY(idxL2);
871
872 AssertReturn(idChunk < DBGF_BP_L2_TBL_CHUNK_COUNT, NULL);
873 AssertReturn(idxEntry < DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK, NULL);
874
875 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[idChunk];
876 AssertPtrReturn(pL2Chunk->pbmAlloc, NULL);
877 AssertReturn(ASMBitTest(pL2Chunk->pbmAlloc, idxEntry), NULL);
878
879 return &pL2Chunk->CTX_SUFF(pL2Base)[idxEntry];
880}
881
882
883/**
884 * Creates a binary search tree with the given root and leaf nodes.
885 *
886 * @returns VBox status code.
887 * @param pUVM The user mode VM handle.
888 * @param idxL1 The index into the L1 table where the created tree should be linked into.
889 * @param u32EntryOld The old entry in the L1 table used to compare with in the atomic update.
890 * @param hBpRoot The root node DBGF handle to assign.
891 * @param GCPtrRoot The root node's GC pointer to use as a key.
892 * @param hBpLeaf The leaf node's DBGF handle to assign.
893 * @param GCPtrLeaf The leaf node's GC pointer to use as a key.
894 */
895static int dbgfR3BpInt3L2BstCreate(PUVM pUVM, uint32_t idxL1, uint32_t u32EntryOld,
896 DBGFBP hBpRoot, RTGCUINTPTR GCPtrRoot,
897 DBGFBP hBpLeaf, RTGCUINTPTR GCPtrLeaf)
898{
899 AssertReturn(GCPtrRoot != GCPtrLeaf, VERR_DBGF_BP_IPE_9);
900 Assert(DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(GCPtrRoot) == DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(GCPtrLeaf));
901
902 /* Allocate two nodes. */
903 uint32_t idxL2Root = 0;
904 PDBGFBPL2ENTRY pL2Root = NULL;
905 int rc = dbgfR3BpL2TblEntryAlloc(pUVM, &idxL2Root, &pL2Root);
906 if (RT_SUCCESS(rc))
907 {
908 uint32_t idxL2Leaf = 0;
909 PDBGFBPL2ENTRY pL2Leaf = NULL;
910 rc = dbgfR3BpL2TblEntryAlloc(pUVM, &idxL2Leaf, &pL2Leaf);
911 if (RT_SUCCESS(rc))
912 {
913 dbgfBpL2TblEntryInit(pL2Leaf, hBpLeaf, GCPtrLeaf, DBGF_BP_L2_ENTRY_IDX_END, DBGF_BP_L2_ENTRY_IDX_END, 0 /*iDepth*/);
914 if (GCPtrLeaf < GCPtrRoot)
915 dbgfBpL2TblEntryInit(pL2Root, hBpRoot, GCPtrRoot, idxL2Leaf, DBGF_BP_L2_ENTRY_IDX_END, 0 /*iDepth*/);
916 else
917 dbgfBpL2TblEntryInit(pL2Root, hBpRoot, GCPtrRoot, DBGF_BP_L2_ENTRY_IDX_END, idxL2Leaf, 0 /*iDepth*/);
918
919 uint32_t const u32Entry = DBGF_BP_INT3_L1_ENTRY_CREATE_L2_IDX(idxL2Root);
920 if (ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], u32Entry, u32EntryOld))
921 return VINF_SUCCESS;
922
923 /* The L1 entry has changed due to another thread racing us during insertion, free nodes and try again. */
924 rc = VINF_TRY_AGAIN;
925 dbgfR3BpL2TblEntryFree(pUVM, idxL2Leaf, pL2Leaf);
926 }
927
928 dbgfR3BpL2TblEntryFree(pUVM, idxL2Root, pL2Root);
929 }
930
931 return rc;
932}
933
934
935/**
936 * Inserts the given breakpoint handle into an existing binary search tree.
937 *
938 * @returns VBox status code.
939 * @param pUVM The user mode VM handle.
940 * @param idxL2Root The index of the tree root in the L2 table.
941 * @param hBp The node DBGF handle to insert.
942 * @param GCPtr The node's GC pointer to use as a key.
943 */
944static int dbgfR3BpInt2L2BstNodeInsert(PUVM pUVM, uint32_t idxL2Root, DBGFBP hBp, RTGCUINTPTR GCPtr)
945{
946 /* Allocate a new node first. */
947 uint32_t idxL2Nd = 0;
948 PDBGFBPL2ENTRY pL2Nd = NULL;
949 int rc = dbgfR3BpL2TblEntryAlloc(pUVM, &idxL2Nd, &pL2Nd);
950 if (RT_SUCCESS(rc))
951 {
952 /* Walk the tree and find the correct node to insert to. */
953 PDBGFBPL2ENTRY pL2Entry = dbgfR3BpL2GetByIdx(pUVM, idxL2Root);
954 while (RT_LIKELY(pL2Entry))
955 {
956 /* Make a copy of the entry. */
957 DBGFBPL2ENTRY L2Entry;
958 L2Entry.u64GCPtrKeyAndBpHnd1 = ASMAtomicReadU64((volatile uint64_t *)&pL2Entry->u64GCPtrKeyAndBpHnd1);
959 L2Entry.u64LeftRightIdxDepthBpHnd2 = ASMAtomicReadU64((volatile uint64_t *)&pL2Entry->u64LeftRightIdxDepthBpHnd2);
960
961 RTGCUINTPTR GCPtrL2Entry = DBGF_BP_L2_ENTRY_GET_GCPTR(L2Entry.u64GCPtrKeyAndBpHnd1);
962 AssertBreak(GCPtr != GCPtrL2Entry);
963
964 /* Not found, get to the next level. */
965 uint32_t idxL2Next = (GCPtr < GCPtrL2Entry)
966 ? DBGF_BP_L2_ENTRY_GET_IDX_LEFT(L2Entry.u64LeftRightIdxDepthBpHnd2)
967 : DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(L2Entry.u64LeftRightIdxDepthBpHnd2);
968 if (idxL2Next == DBGF_BP_L2_ENTRY_IDX_END)
969 {
970 /* Insert the new node here. */
971 dbgfBpL2TblEntryInit(pL2Nd, hBp, GCPtr, DBGF_BP_L2_ENTRY_IDX_END, DBGF_BP_L2_ENTRY_IDX_END, 0 /*iDepth*/);
972 if (GCPtr < GCPtrL2Entry)
973 dbgfBpL2TblEntryUpdateLeft(pL2Entry, idxL2Nd, 0 /*iDepth*/);
974 else
975 dbgfBpL2TblEntryUpdateRight(pL2Entry, idxL2Nd, 0 /*iDepth*/);
976 return VINF_SUCCESS;
977 }
978
979 pL2Entry = dbgfR3BpL2GetByIdx(pUVM, idxL2Next);
980 }
981
982 rc = VERR_DBGF_BP_L2_LOOKUP_FAILED;
983 dbgfR3BpL2TblEntryFree(pUVM, idxL2Nd, pL2Nd);
984 }
985
986 return rc;
987}
988
989
990/**
991 * Adds the given breakpoint handle keyed with the GC pointer to the proper L2 binary search tree
992 * possibly creating a new tree.
993 *
994 * @returns VBox status code.
995 * @param pUVM The user mode VM handle.
996 * @param idxL1 The index into the L1 table the breakpoint uses.
997 * @param hBp The breakpoint handle which is to be added.
998 * @param GCPtr The GC pointer the breakpoint is keyed with.
999 */
1000static int dbgfR3BpInt3L2BstNodeAdd(PUVM pUVM, uint32_t idxL1, DBGFBP hBp, RTGCUINTPTR GCPtr)
1001{
1002 int rc = RTSemFastMutexRequest(pUVM->dbgf.s.hMtxBpL2Wr); AssertRC(rc);
1003
1004 uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1]); /* Re-read, could get raced by a remove operation. */
1005 uint8_t u8Type = DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32Entry);
1006 if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND)
1007 {
1008 /* Create a new search tree, gather the necessary information first. */
1009 DBGFBP hBp2 = DBGF_BP_INT3_L1_ENTRY_GET_BP_HND(u32Entry);
1010 PDBGFBPINT pBp2 = dbgfR3BpGetByHnd(pUVM, hBp2);
1011 AssertStmt(VALID_PTR(pBp2), rc = VERR_DBGF_BP_IPE_7);
1012 if (RT_SUCCESS(rc))
1013 rc = dbgfR3BpInt3L2BstCreate(pUVM, idxL1, u32Entry, hBp, GCPtr, hBp2, pBp2->Pub.u.Int3.GCPtr);
1014 }
1015 else if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX)
1016 rc = dbgfR3BpInt2L2BstNodeInsert(pUVM, DBGF_BP_INT3_L1_ENTRY_GET_L2_IDX(u32Entry), hBp, GCPtr);
1017
1018 int rc2 = RTSemFastMutexRelease(pUVM->dbgf.s.hMtxBpL2Wr); AssertRC(rc2);
1019 return rc;
1020}
1021
1022
1023/**
1024 * Removes the given breakpoint handle keyed with the GC pointer from the L2 binary search tree
1025 * pointed to by the given L2 root index.
1026 *
1027 * @returns VBox status code.
1028 * @param pUVM The user mode VM handle.
1029 * @param idxL1 The index into the L1 table pointing to the binary search tree.
1030 * @param idxL2Root The L2 table index where the tree root is located.
1031 * @param hBp The breakpoint handle which is to be removed.
1032 * @param GCPtr The GC pointer the breakpoint is keyed with.
1033 */
1034static int dbgfR3BpInt2L2BstNodeRemove(PUVM pUVM, uint32_t idxL1, uint32_t idxL2Root, DBGFBP hBp, RTGCUINTPTR GCPtr)
1035{
1036 int rc = RTSemFastMutexRequest(pUVM->dbgf.s.hMtxBpL2Wr); AssertRC(rc);
1037
1038 RT_NOREF(idxL1, idxL2Root, hBp, GCPtr);
1039
1040 int rc2 = RTSemFastMutexRelease(pUVM->dbgf.s.hMtxBpL2Wr); AssertRC(rc2);
1041
1042 return rc;
1043}
1044
1045
1046/**
1047 * Adds the given int3 breakpoint to the appropriate lookup tables.
1048 *
1049 * @returns VBox status code.
1050 * @param pUVM The user mode VM handle.
1051 * @param hBp The breakpoint handle to add.
1052 * @param pBp The internal breakpoint state.
1053 */
1054static int dbgfR3BpInt3Add(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
1055{
1056 AssertReturn(DBGF_BP_PUB_GET_TYPE(pBp->Pub.fFlagsAndType) == DBGFBPTYPE_INT3, VERR_DBGF_BP_IPE_3);
1057
1058 int rc = VINF_SUCCESS;
1059 uint16_t idxL1 = DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(pBp->Pub.u.Int3.GCPtr);
1060 uint8_t cTries = 16;
1061
1062 while (cTries--)
1063 {
1064 uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1]);
1065
1066 if (u32Entry == DBGF_BP_INT3_L1_ENTRY_TYPE_NULL)
1067 {
1068 /*
1069 * No breakpoint assigned so far for this entry, create an entry containing
1070 * the direct breakpoint handle and try to exchange it atomically.
1071 */
1072 u32Entry = DBGF_BP_INT3_L1_ENTRY_CREATE_BP_HND(hBp);
1073 if (ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], u32Entry, DBGF_BP_INT3_L1_ENTRY_TYPE_NULL))
1074 break;
1075 }
1076 else
1077 {
1078 rc = dbgfR3BpInt3L2BstNodeAdd(pUVM, idxL1, hBp, pBp->Pub.u.Int3.GCPtr);
1079 if (rc == VINF_TRY_AGAIN)
1080 continue;
1081
1082 break;
1083 }
1084 }
1085
1086 if ( RT_SUCCESS(rc)
1087 && cTries == UINT8_MAX) /* cTries wrapped, i.e. all tries were used up due to contention; abort with an error. */
1088 rc = VERR_DBGF_BP_INT3_ADD_TRIES_REACHED;
1089
1090 return rc;
1091}
1092
1093
1094/**
1095 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
1096 */
1097static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpInt3RemoveEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
1098{
1099 DBGFBP hBp = (DBGFBP)(uintptr_t)pvUser;
1100
1101 VMCPU_ASSERT_EMT(pVCpu);
1102 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1103
1104 PUVM pUVM = pVM->pUVM;
1105 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
1106 AssertPtrReturn(pBp, VERR_DBGF_BP_IPE_8);
1107
1108 int rc = VINF_SUCCESS;
1109 if (pVCpu->idCpu == 0)
1110 {
1111 uint16_t idxL1 = DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(pBp->Pub.u.Int3.GCPtr);
1112 uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1]);
1113 AssertReturn(u32Entry != DBGF_BP_INT3_L1_ENTRY_TYPE_NULL, VERR_DBGF_BP_IPE_6);
1114
1115 uint8_t u8Type = DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32Entry);
1116 if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND)
1117 {
1118 /* Single breakpoint, just exchange atomically with the null value. */
1119 if (!ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], DBGF_BP_INT3_L1_ENTRY_TYPE_NULL, u32Entry))
1120 {
1121 /*
1122 * A breakpoint addition must have raced us converting the L1 entry to an L2 index type, re-read
1123 * and remove the node from the created binary search tree.
1124 *
1125 * This works because after the entry was converted to an L2 index it can only be converted back
1126 * to a direct handle by removing one or more nodes which always goes through the fast mutex
1127 * protecting the L2 table. Likewise adding a new breakpoint requires grabbing the mutex as well
1128 * so there is serialization here and the node can be removed safely without having to worry about
1129 * concurrent tree modifications.
1130 */
1131 u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1]);
1132 AssertReturn(DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32Entry) == DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX, VERR_DBGF_BP_IPE_9);
1133
1134 rc = dbgfR3BpInt2L2BstNodeRemove(pUVM, idxL1, DBGF_BP_INT3_L1_ENTRY_GET_L2_IDX(u32Entry),
1135 hBp, pBp->Pub.u.Int3.GCPtr);
1136 }
1137 }
1138 else if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX)
1139 rc = dbgfR3BpInt2L2BstNodeRemove(pUVM, idxL1, DBGF_BP_INT3_L1_ENTRY_GET_L2_IDX(u32Entry),
1140 hBp, pBp->Pub.u.Int3.GCPtr);
1141 }
1142
1143 return rc;
1144}
1145
1146
1147/**
1148 * Removes the given int3 breakpoint from all lookup tables.
1149 *
1150 * @returns VBox status code.
1151 * @param pUVM The user mode VM handle.
1152 * @param hBp The breakpoint handle to remove.
1153 * @param pBp The internal breakpoint state.
1154 */
1155static int dbgfR3BpInt3Remove(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
1156{
1157 AssertReturn(DBGF_BP_PUB_GET_TYPE(pBp->Pub.fFlagsAndType) == DBGFBPTYPE_INT3, VERR_DBGF_BP_IPE_3);
1158
1159 /*
1160 * This has to be done by an EMT rendezvous in order to not have an EMT traversing
1161 * any L2 trees while it is being removed.
1162 */
1163 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpInt3RemoveEmtWorker, (void *)(uintptr_t)hBp);
1164}
1165
1166
1167/**
1168 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
1169 */
1170static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpRegRecalcOnCpu(PVM pVM, PVMCPU pVCpu, void *pvUser)
1171{
1172 RT_NOREF(pvUser);
1173
1174 /*
1175 * CPU 0 updates the enabled hardware breakpoint counts.
1176 */
1177 if (pVCpu->idCpu == 0)
1178 {
1179 pVM->dbgf.s.cEnabledHwBreakpoints = 0;
1180 pVM->dbgf.s.cEnabledHwIoBreakpoints = 0;
1181
1182 for (uint32_t iBp = 0; iBp < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); iBp++)
1183 {
1184 if (pVM->dbgf.s.aHwBreakpoints[iBp].fEnabled)
1185 {
1186 pVM->dbgf.s.cEnabledHwBreakpoints += 1;
1187 pVM->dbgf.s.cEnabledHwIoBreakpoints += pVM->dbgf.s.aHwBreakpoints[iBp].fType == X86_DR7_RW_IO;
1188 }
1189 }
1190 }
1191
1192 return CPUMRecalcHyperDRx(pVCpu, UINT8_MAX, false);
1193}
1194
1195
1196/**
1197 * Arms the given breakpoint.
1198 *
1199 * @returns VBox status code.
1200 * @param pUVM The user mode VM handle.
1201 * @param hBp The breakpoint handle to arm.
1202 * @param pBp The internal breakpoint state pointer for the handle.
1203 *
1204 * @thread Any thread.
1205 */
1206static int dbgfR3BpArm(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
1207{
1208 int rc = VINF_SUCCESS;
1209 PVM pVM = pUVM->pVM;
1210
1211 Assert(!DBGF_BP_PUB_IS_ENABLED(pBp->Pub.fFlagsAndType));
1212 switch (DBGF_BP_PUB_GET_TYPE(pBp->Pub.fFlagsAndType))
1213 {
1214 case DBGFBPTYPE_REG:
1215 {
1216 Assert(pBp->Pub.u.Reg.iReg < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints));
1217 PDBGFBPHW pBpHw = &pVM->dbgf.s.aHwBreakpoints[pBp->Pub.u.Reg.iReg];
1218 Assert(pBpHw->hBp == hBp); RT_NOREF(hBp);
1219
1220 dbgfR3BpSetEnabled(pBp, true /*fEnabled*/);
1221 ASMAtomicWriteBool(&pBpHw->fEnabled, true);
1222 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpRegRecalcOnCpu, NULL);
1223 if (RT_FAILURE(rc))
1224 {
1225 ASMAtomicWriteBool(&pBpHw->fEnabled, false);
1226 dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
1227 }
1228 break;
1229 }
1230 case DBGFBPTYPE_INT3:
1231 {
1232 dbgfR3BpSetEnabled(pBp, true /*fEnabled*/);
1233
1234 /** @todo When we enable the first int3 breakpoint we should do this in an EMT rendezvous
1235 * as the VMX code intercepts #BP only when at least one int3 breakpoint is enabled.
1236 * A racing vCPU might trigger it and forward it to the guest causing panics/crashes/havoc. */
1237 /*
1238 * Save current byte and write the int3 instruction byte.
1239 */
1240 rc = PGMPhysSimpleReadGCPhys(pVM, &pBp->Pub.u.Int3.bOrg, pBp->Pub.u.Int3.PhysAddr, sizeof(pBp->Pub.u.Int3.bOrg));
1241 if (RT_SUCCESS(rc))
1242 {
1243 static const uint8_t s_bInt3 = 0xcc;
1244 rc = PGMPhysSimpleWriteGCPhys(pVM, pBp->Pub.u.Int3.PhysAddr, &s_bInt3, sizeof(s_bInt3));
1245 if (RT_SUCCESS(rc))
1246 {
1247 ASMAtomicIncU32(&pVM->dbgf.s.cEnabledInt3Breakpoints);
1248 Log(("DBGF: Set breakpoint at %RGv (Phys %RGp)\n", pBp->Pub.u.Int3.GCPtr, pBp->Pub.u.Int3.PhysAddr));
1249 }
1250 }
1251
1252 if (RT_FAILURE(rc))
1253 dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
1254
1255 break;
1256 }
1257 case DBGFBPTYPE_PORT_IO:
1258 case DBGFBPTYPE_MMIO:
1259 rc = VERR_NOT_IMPLEMENTED;
1260 break;
1261 default:
1262 AssertMsgFailedReturn(("Invalid breakpoint type %d\n", DBGF_BP_PUB_GET_TYPE(pBp->Pub.fFlagsAndType)),
1263 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1264 }
1265
1266 return rc;
1267}
1268
1269
1270/**
1271 * Disarms the given breakpoint.
1272 *
1273 * @returns VBox status code.
1274 * @param pUVM The user mode VM handle.
1275 * @param hBp The breakpoint handle to disarm.
1276 * @param pBp The internal breakpoint state pointer for the handle.
1277 *
1278 * @thread Any thread.
1279 */
1280static int dbgfR3BpDisarm(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
1281{
1282 int rc = VINF_SUCCESS;
1283 PVM pVM = pUVM->pVM;
1284
1285 Assert(DBGF_BP_PUB_IS_ENABLED(pBp->Pub.fFlagsAndType));
1286 switch (DBGF_BP_PUB_GET_TYPE(pBp->Pub.fFlagsAndType))
1287 {
1288 case DBGFBPTYPE_REG:
1289 {
1290 Assert(pBp->Pub.u.Reg.iReg < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints));
1291 PDBGFBPHW pBpHw = &pVM->dbgf.s.aHwBreakpoints[pBp->Pub.u.Reg.iReg];
1292 Assert(pBpHw->hBp == hBp); RT_NOREF(hBp);
1293
1294 dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
1295 ASMAtomicWriteBool(&pBpHw->fEnabled, false);
1296 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpRegRecalcOnCpu, NULL);
1297 if (RT_FAILURE(rc))
1298 {
1299 ASMAtomicWriteBool(&pBpHw->fEnabled, true);
1300 dbgfR3BpSetEnabled(pBp, true /*fEnabled*/);
1301 }
1302 break;
1303 }
1304 case DBGFBPTYPE_INT3:
1305 {
1306 /*
1307 * Check that the current byte is the int3 instruction, and restore the original one.
1308 * We currently ignore invalid bytes.
1309 */
1310 uint8_t bCurrent = 0;
1311 rc = PGMPhysSimpleReadGCPhys(pVM, &bCurrent, pBp->Pub.u.Int3.PhysAddr, sizeof(bCurrent));
1312 if ( RT_SUCCESS(rc)
1313 && bCurrent == 0xcc)
1314 {
1315 rc = PGMPhysSimpleWriteGCPhys(pVM, pBp->Pub.u.Int3.PhysAddr, &pBp->Pub.u.Int3.bOrg, sizeof(pBp->Pub.u.Int3.bOrg));
1316 if (RT_SUCCESS(rc))
1317 {
1318 ASMAtomicDecU32(&pVM->dbgf.s.cEnabledInt3Breakpoints);
1319 dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
1320 Log(("DBGF: Removed breakpoint at %RGv (Phys %RGp)\n", pBp->Pub.u.Int3.GCPtr, pBp->Pub.u.Int3.PhysAddr));
1321 }
1322 }
1323 break;
1324 }
1325 case DBGFBPTYPE_PORT_IO:
1326 case DBGFBPTYPE_MMIO:
1327 rc = VERR_NOT_IMPLEMENTED;
1328 break;
1329 default:
1330 AssertMsgFailedReturn(("Invalid breakpoint type %d\n", DBGF_BP_PUB_GET_TYPE(pBp->Pub.fFlagsAndType)),
1331 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1332 }
1333
1334 return rc;
1335}
1336
1337
1338/**
1339 * Creates a new breakpoint owner returning a handle which can be used when setting breakpoints.
1340 *
1341 * @returns VBox status code.
1342 * @param pUVM The user mode VM handle.
1343 * @param pfnBpHit The R3 callback which is called when a breakpoint with the owner handle is hit.
1344 * @param phBpOwner Where to store the owner handle on success.
1345 */
1346VMMR3DECL(int) DBGFR3BpOwnerCreate(PUVM pUVM, PFNDBGFBPHIT pfnBpHit, PDBGFBPOWNER phBpOwner)
1347{
1348 /*
1349 * Validate the input.
1350 */
1351 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1352 AssertPtrReturn(pfnBpHit, VERR_INVALID_PARAMETER);
1353 AssertPtrReturn(phBpOwner, VERR_INVALID_POINTER);
1354
1355 return VERR_NOT_IMPLEMENTED;
1356}
1357
1358
1359/**
1360 * Destroys the owner identified by the given handle.
1361 *
1362 * @returns VBox status code.
1363 * @retval VERR_DBGF_OWNER_BUSY if there are still breakpoints set with the given owner handle.
1364 * @param pUVM The user mode VM handle.
1365 * @param hBpOwner The breakpoint owner handle to destroy.
1366 */
1367VMMR3DECL(int) DBGFR3BpOwnerDestroy(PUVM pUVM, DBGFBPOWNER hBpOwner)
1368{
1369 /*
1370 * Validate the input.
1371 */
1372 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1373 AssertReturn(hBpOwner != NIL_DBGFBPOWNER, VERR_INVALID_HANDLE);
1374
1375 return VERR_NOT_IMPLEMENTED;
1376}
1377
1378
1379/**
1380 * Sets a breakpoint (int 3 based).
1381 *
1382 * @returns VBox status code.
1383 * @param pUVM The user mode VM handle.
1384 * @param idSrcCpu The ID of the virtual CPU used for the
1385 * breakpoint address resolution.
1386 * @param pAddress The address of the breakpoint.
1387 * @param iHitTrigger The hit count at which the breakpoint starts triggering.
1388 * Use 0 (or 1) if it's gonna trigger at once.
1389 * @param iHitDisable The hit count which disables the breakpoint.
1390 * Use ~(uint64_t)0 if it's never gonna be disabled.
1391 * @param phBp Where to store the breakpoint handle on success.
1392 *
1393 * @thread Any thread.
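 *
 * @par Example
 * A minimal usage sketch (error handling elided; the flat address is a made up value):
 * @code
 *     DBGFADDRESS Addr;
 *     DBGFR3AddrFromFlat(pUVM, &Addr, 0x80012345);          // made-up flat guest address
 *     DBGFBP hBp = NIL_DBGFBP;
 *     int rc = DBGFR3BpSetInt3(pUVM, 0, &Addr,              // idSrcCpu = 0
 *                              0, UINT64_MAX, &hBp);        // trigger at once, never auto-disable
 *     if (RT_SUCCESS(rc))
 *     {
 *         // hBp now refers to an armed int3 breakpoint.
 *     }
 * @endcode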
1394 */
1395VMMR3DECL(int) DBGFR3BpSetInt3(PUVM pUVM, VMCPUID idSrcCpu, PCDBGFADDRESS pAddress,
1396 uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
1397{
1398 return DBGFR3BpSetInt3Ex(pUVM, NIL_DBGFBPOWNER, NULL /*pvUser*/, idSrcCpu, pAddress,
1399 iHitTrigger, iHitDisable, phBp);
1400}
1401
1402
1403/**
1404 * Sets a breakpoint (int 3 based) - extended version.
1405 *
1406 * @returns VBox status code.
1407 * @param pUVM The user mode VM handle.
1408 * @param hOwner The owner handle, use NIL_DBGFBPOWNER if no special owner attached.
1409 * @param pvUser Opaque user data to pass in the owner callback.
1410 * @param idSrcCpu The ID of the virtual CPU used for the
1411 * breakpoint address resolution.
1412 * @param pAddress The address of the breakpoint.
1413 * @param iHitTrigger The hit count at which the breakpoint starts triggering.
1414 * Use 0 (or 1) if it's gonna trigger at once.
1415 * @param iHitDisable The hit count which disables the breakpoint.
1416 * Use ~(uint64_t)0 if it's never gonna be disabled.
1417 * @param phBp Where to store the breakpoint handle on success.
1418 *
1419 * @thread Any thread.
1420 */
1421VMMR3DECL(int) DBGFR3BpSetInt3Ex(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser,
1422 VMCPUID idSrcCpu, PCDBGFADDRESS pAddress,
1423 uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
1424{
1425 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1426 AssertReturn(hOwner != NIL_DBGFBPOWNER || pvUser == NULL, VERR_INVALID_PARAMETER);
1427 AssertReturn(DBGFR3AddrIsValid(pUVM, pAddress), VERR_INVALID_PARAMETER);
1428 AssertReturn(iHitTrigger <= iHitDisable, VERR_INVALID_PARAMETER);
1429 AssertPtrReturn(phBp, VERR_INVALID_POINTER);
1430
1431 int rc = dbgfR3BpEnsureInit(pUVM);
1432 AssertRCReturn(rc, rc);
1433
1434 DBGFBP hBp = NIL_DBGFBP;
1435 PDBGFBPINT pBp = NULL;
1436 rc = dbgfR3BpAlloc(pUVM, hOwner, pvUser, DBGFBPTYPE_INT3, iHitTrigger, iHitDisable, &hBp, &pBp);
1437 if (RT_SUCCESS(rc))
1438 {
1439 /*
1440 * Translate & save the breakpoint address into a guest-physical address.
1441 */
1442 rc = DBGFR3AddrToPhys(pUVM, idSrcCpu, pAddress, &pBp->Pub.u.Int3.PhysAddr);
1443 if (RT_SUCCESS(rc))
1444 {
1445 /*
1446 * The physical address from DBGFR3AddrToPhys() is the start of the page,
1447 * we need the exact byte offset into the page while writing to it in dbgfR3BpInt3Arm().
1448 */
1449 pBp->Pub.u.Int3.PhysAddr |= (pAddress->FlatPtr & X86_PAGE_OFFSET_MASK);
1450 pBp->Pub.u.Int3.GCPtr = pAddress->FlatPtr;
1451
1452 /* Add the breakpoint to the lookup tables. */
1453 rc = dbgfR3BpInt3Add(pUVM, hBp, pBp);
1454 if (RT_SUCCESS(rc))
1455 {
1456 /* Enable the breakpoint. */
1457 rc = dbgfR3BpArm(pUVM, hBp, pBp);
1458 if (RT_SUCCESS(rc))
1459 {
1460 *phBp = hBp;
1461 return VINF_SUCCESS;
1462 }
1463
1464 int rc2 = dbgfR3BpInt3Remove(pUVM, hBp, pBp); AssertRC(rc2);
1465 }
1466 }
1467
1468 dbgfR3BpFree(pUVM, hBp, pBp);
1469 }
1470
1471 return rc;
1472}
1473
1474
1475/**
1476 * Sets a register breakpoint.
1477 *
1478 * @returns VBox status code.
1479 * @param pUVM The user mode VM handle.
1480 * @param pAddress The address of the breakpoint.
1481 * @param iHitTrigger The hit count at which the breakpoint starts triggering.
1482 * Use 0 (or 1) if it's gonna trigger at once.
1483 * @param iHitDisable The hit count which disables the breakpoint.
1484 * Use ~(uint64_t)0 if it's never gonna be disabled.
1485 * @param fType The access type (one of the X86_DR7_RW_* defines).
1486 * @param cb The access size - 1, 2, 4 or 8 (the latter is AMD64 long mode only).
1487 * Must be 1 if fType is X86_DR7_RW_EO.
1488 * @param phBp Where to store the breakpoint handle.
1489 *
1490 * @thread Any thread.
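 *
 * @par Example
 * A minimal usage sketch for a 4 byte write-access breakpoint (error handling elided;
 * the flat address is a made up value):
 * @code
 *     DBGFADDRESS Addr;
 *     DBGFR3AddrFromFlat(pUVM, &Addr, 0x80012345);          // made-up flat guest address
 *     DBGFBP hBp = NIL_DBGFBP;
 *     int rc = DBGFR3BpSetReg(pUVM, &Addr, 0, UINT64_MAX,   // trigger at once, never auto-disable
 *                             X86_DR7_RW_WO, 4, &hBp);      // break on 4 byte writes
 * @endcode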
1491 */
1492VMMR3DECL(int) DBGFR3BpSetReg(PUVM pUVM, PCDBGFADDRESS pAddress, uint64_t iHitTrigger,
1493 uint64_t iHitDisable, uint8_t fType, uint8_t cb, PDBGFBP phBp)
1494{
1495 return DBGFR3BpSetRegEx(pUVM, NIL_DBGFBPOWNER, NULL /*pvUser*/, pAddress,
1496 iHitTrigger, iHitDisable, fType, cb, phBp);
1497}
1498
1499
1500/**
1501 * Sets a register breakpoint - extended version.
1502 *
1503 * @returns VBox status code.
1504 * @param pUVM The user mode VM handle.
1505 * @param hOwner The owner handle, use NIL_DBGFBPOWNER if no special owner attached.
1506 * @param pvUser Opaque user data to pass in the owner callback.
1507 * @param pAddress The address of the breakpoint.
1508 * @param iHitTrigger The hit count at which the breakpoint starts triggering.
1509 * Use 0 (or 1) if it's gonna trigger at once.
1510 * @param iHitDisable The hit count which disables the breakpoint.
1511 * Use ~(uint64_t)0 if it's never gonna be disabled.
1512 * @param fType The access type (one of the X86_DR7_RW_* defines).
1513 * @param cb The access size - 1, 2, 4 or 8 (the latter is AMD64 long mode only).
1514 * Must be 1 if fType is X86_DR7_RW_EO.
1515 * @param phBp Where to store the breakpoint handle.
1516 *
1517 * @thread Any thread.
1518 */
1519VMMR3DECL(int) DBGFR3BpSetRegEx(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser,
1520 PCDBGFADDRESS pAddress, uint64_t iHitTrigger, uint64_t iHitDisable,
1521 uint8_t fType, uint8_t cb, PDBGFBP phBp)
1522{
1523 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1524 AssertReturn(hOwner != NIL_DBGFBPOWNER || pvUser == NULL, VERR_INVALID_PARAMETER);
1525 AssertReturn(DBGFR3AddrIsValid(pUVM, pAddress), VERR_INVALID_PARAMETER);
1526 AssertReturn(iHitTrigger <= iHitDisable, VERR_INVALID_PARAMETER);
1527 AssertReturn(cb > 0 && cb <= 8 && RT_IS_POWER_OF_TWO(cb), VERR_INVALID_PARAMETER);
1528 AssertPtrReturn(phBp, VERR_INVALID_POINTER);
1529 switch (fType)
1530 {
1531 case X86_DR7_RW_EO:
1532 if (cb == 1)
1533 break;
1534 AssertMsgFailedReturn(("fType=%#x cb=%d != 1\n", fType, cb), VERR_INVALID_PARAMETER);
1535 case X86_DR7_RW_IO:
1536 case X86_DR7_RW_RW:
1537 case X86_DR7_RW_WO:
1538 break;
1539 default:
1540 AssertMsgFailedReturn(("fType=%#x\n", fType), VERR_INVALID_PARAMETER);
1541 }
1542
1543 int rc = dbgfR3BpEnsureInit(pUVM);
1544 AssertRCReturn(rc, rc);
1545
1546 PDBGFBPINT pBp = NULL;
1547 DBGFBP hBp = dbgfR3BpGetByAddr(pUVM, DBGFBPTYPE_REG, pAddress->FlatPtr, &pBp);
1548 if ( hBp != NIL_DBGFBP
1549 && pBp->Pub.u.Reg.cb == cb
1550 && pBp->Pub.u.Reg.fType == fType)
1551 {
1552 rc = VINF_SUCCESS;
1553 if (!DBGF_BP_PUB_IS_ENABLED(pBp->Pub.fFlagsAndType))
1554 rc = dbgfR3BpArm(pUVM, hBp, pBp);
1555 if (RT_SUCCESS(rc))
1556 {
1557 rc = VINF_DBGF_BP_ALREADY_EXIST;
1558 if (phBp)
1559 *phBp = hBp;
1560 }
1561 return rc;
1562 }
1563
1564 /* Allocate new breakpoint. */
1565 rc = dbgfR3BpAlloc(pUVM, hOwner, pvUser, DBGFBPTYPE_REG, iHitTrigger, iHitDisable, &hBp, &pBp);
1566 if (RT_SUCCESS(rc))
1567 {
1568 pBp->Pub.u.Reg.GCPtr = pAddress->FlatPtr;
1569 pBp->Pub.u.Reg.fType = fType;
1570 pBp->Pub.u.Reg.cb = cb;
1571 pBp->Pub.u.Reg.iReg = UINT8_MAX;
1572 ASMCompilerBarrier();
1573
1574 /* Assign the proper hardware breakpoint. */
1575 rc = dbgfR3BpRegAssign(pUVM->pVM, hBp, pBp);
1576 if (RT_SUCCESS(rc))
1577 {
1578 /* Arm the breakpoint. */
1579 rc = dbgfR3BpArm(pUVM, hBp, pBp);
1580 if (RT_SUCCESS(rc))
1581 {
1582 if (phBp)
1583 *phBp = hBp;
1584 return VINF_SUCCESS;
1585 }
1586 else
1587 {
1588 int rc2 = dbgfR3BpRegRemove(pUVM->pVM, hBp, pBp);
1589 AssertRC(rc2); RT_NOREF(rc2);
1590 }
1591 }
1592
1593 dbgfR3BpFree(pUVM, hBp, pBp);
1594 }
1595
1596 return rc;
1597}
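
/*
 * Illustrative usage sketch (not part of the original file): how a ring-3
 * debugger front-end might request a 4-byte hardware write breakpoint through
 * the DBGFR3BpSetReg wrapper above and treat an identical pre-existing
 * breakpoint as success.  The helper name dbgfR3BpExampleSetWriteWatch and the
 * GCPtrVar parameter are made up for the example.
 */
static int dbgfR3BpExampleSetWriteWatch(PUVM pUVM, RTGCPTR GCPtrVar, PDBGFBP phBp)
{
    /* Build a flat guest address for the location we want to watch. */
    DBGFADDRESS Addr;
    DBGFR3AddrFromFlat(pUVM, &Addr, GCPtrVar);

    /* Trigger on the first write (iHitTrigger=0) and never auto-disable. */
    int rc = DBGFR3BpSetReg(pUVM, &Addr, 0 /*iHitTrigger*/, ~(uint64_t)0 /*iHitDisable*/,
                            X86_DR7_RW_WO /*fType*/, 4 /*cb*/, phBp);
    if (rc == VINF_DBGF_BP_ALREADY_EXIST)
        rc = VINF_SUCCESS; /* An identical, already armed breakpoint was reused. */
    return rc;
}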
1598
1599
1600/**
1601 * This is only kept for now so we don't have to mess with the debugger implementation at this point.
1602 * Recompiler breakpoints are not supported anymore (IEM has an API for this, but it isn't implemented
1603 * and should probably be merged with the DBGF breakpoints).
1604 */
1605VMMR3DECL(int) DBGFR3BpSetREM(PUVM pUVM, PCDBGFADDRESS pAddress, uint64_t iHitTrigger,
1606 uint64_t iHitDisable, PDBGFBP phBp)
1607{
1608 RT_NOREF(pUVM, pAddress, iHitTrigger, iHitDisable, phBp);
1609 return VERR_NOT_SUPPORTED;
1610}
1611
1612
1613/**
1614 * Sets an I/O port breakpoint.
1615 *
1616 * @returns VBox status code.
1617 * @param pUVM The user mode VM handle.
1618 * @param uPort The first I/O port.
1619 * @param cPorts The number of I/O ports covered by the breakpoint.
1620 * @param fAccess The access we want to break on, see DBGFBPIOACCESS_XXX.
1621 * @param iHitTrigger The hit count at which the breakpoint starts
1622 * triggering. Use 0 (or 1) if it should trigger at
1623 * once.
1624 * @param iHitDisable The hit count which disables the breakpoint.
1625 * Use ~(uint64_t)0 if it should never be disabled.
1626 * @param phBp Where to store the breakpoint handle.
1627 *
1628 * @thread Any thread.
1629 */
1630VMMR3DECL(int) DBGFR3BpSetPortIo(PUVM pUVM, RTIOPORT uPort, RTIOPORT cPorts, uint32_t fAccess,
1631 uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
1632{
1633 return DBGFR3BpSetPortIoEx(pUVM, NIL_DBGFBPOWNER, NULL /*pvUser*/, uPort, cPorts,
1634 fAccess, iHitTrigger, iHitDisable, phBp);
1635}
1636
1637
1638/**
1639 * Sets an I/O port breakpoint - extended version.
1640 *
1641 * @returns VBox status code.
1642 * @param pUVM The user mode VM handle.
1643 * @param hOwner The owner handle, use NIL_DBGFBPOWNER if no special owner attached.
1644 * @param pvUser Opaque user data to pass in the owner callback.
1645 * @param uPort The first I/O port.
1646 * @param cPorts The number of I/O ports covered by the breakpoint.
1647 * @param fAccess The access we want to break on, see DBGFBPIOACCESS_XXX.
1648 * @param iHitTrigger The hit count at which the breakpoint starts
1649 * triggering. Use 0 (or 1) if it should trigger at
1650 * once.
1651 * @param iHitDisable The hit count which disables the breakpoint.
1652 * Use ~(uint64_t)0 if it should never be disabled.
1653 * @param phBp Where to store the breakpoint handle.
1654 *
1655 * @thread Any thread.
1656 */
1657VMMR3DECL(int) DBGFR3BpSetPortIoEx(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser,
1658 RTIOPORT uPort, RTIOPORT cPorts, uint32_t fAccess,
1659 uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
1660{
1661 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1662 AssertReturn(hOwner != NIL_DBGFBPOWNER || pvUser == NULL, VERR_INVALID_PARAMETER);
1663 AssertReturn(!(fAccess & ~DBGFBPIOACCESS_VALID_MASK_PORT_IO), VERR_INVALID_FLAGS);
1664 AssertReturn(fAccess, VERR_INVALID_FLAGS);
1665 AssertReturn(iHitTrigger <= iHitDisable, VERR_INVALID_PARAMETER);
1666 AssertPtrReturn(phBp, VERR_INVALID_POINTER);
1667 AssertReturn(cPorts > 0, VERR_OUT_OF_RANGE);
1668 AssertReturn((uint32_t)uPort + cPorts <= _64K, VERR_OUT_OF_RANGE); /* The range must not wrap around the end of the 16-bit I/O port space. */
1669
1670 int rc = dbgfR3BpEnsureInit(pUVM);
1671 AssertRCReturn(rc, rc);
1672
1673 return VERR_NOT_IMPLEMENTED;
1674}
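
/*
 * Illustrative sketch (not part of the original file): requesting a breakpoint
 * on writes to the eight ports of the primary ATA command block.  The port
 * range and the helper name dbgfR3BpExampleWatchAtaPorts are made up, and
 * DBGFBPIOACCESS_WRITE is assumed to be one of the DBGFBPIOACCESS_XXX flags
 * accepted in fAccess.  Note that the current implementation validates the
 * input and then returns VERR_NOT_IMPLEMENTED.
 */
static int dbgfR3BpExampleWatchAtaPorts(PUVM pUVM, PDBGFBP phBp)
{
    return DBGFR3BpSetPortIo(pUVM, 0x1f0 /*uPort*/, 8 /*cPorts*/, DBGFBPIOACCESS_WRITE,
                             0 /*iHitTrigger*/, ~(uint64_t)0 /*iHitDisable*/, phBp);
}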
1675
1676
1677/**
1678 * Sets a memory mapped I/O breakpoint.
1679 *
1680 * @returns VBox status code.
1681 * @param pUVM The user mode VM handle.
1682 * @param GCPhys The first MMIO address.
1683 * @param cb The size of the MMIO range to break on.
1684 * @param fAccess The access we want to break on, see DBGFBPIOACCESS_XXX.
1685 * @param iHitTrigger The hit count at which the breakpoint starts
1686 * triggering. Use 0 (or 1) if it should trigger at
1687 * once.
1688 * @param iHitDisable The hit count which disables the breakpoint.
1689 * Use ~(uint64_t)0 if it should never be disabled.
1690 * @param phBp Where to store the breakpoint handle.
1691 *
1692 * @thread Any thread.
1693 */
1694VMMR3DECL(int) DBGFR3BpSetMmio(PUVM pUVM, RTGCPHYS GCPhys, uint32_t cb, uint32_t fAccess,
1695 uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
1696{
1697 return DBGFR3BpSetMmioEx(pUVM, NIL_DBGFBPOWNER, NULL /*pvUser*/, GCPhys, cb, fAccess,
1698 iHitTrigger, iHitDisable, phBp);
1699}
1700
1701
1702/**
1703 * Sets a memory mapped I/O breakpoint - extended version.
1704 *
1705 * @returns VBox status code.
1706 * @param pUVM The user mode VM handle.
1707 * @param hOwner The owner handle, use NIL_DBGFBPOWNER if no special owner attached.
1708 * @param pvUser Opaque user data to pass in the owner callback.
1709 * @param GCPhys The first MMIO address.
1710 * @param cb The size of the MMIO range to break on.
1711 * @param fAccess The access we want to break on, see DBGFBPIOACCESS_XXX.
1712 * @param iHitTrigger The hit count at which the breakpoint starts
1713 * triggering. Use 0 (or 1) if it should trigger at
1714 * once.
1715 * @param iHitDisable The hit count which disables the breakpoint.
1716 * Use ~(uint64_t)0 if it should never be disabled.
1717 * @param phBp Where to store the breakpoint handle.
1718 *
1719 * @thread Any thread.
1720 */
1721VMMR3DECL(int) DBGFR3BpSetMmioEx(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser,
1722 RTGCPHYS GCPhys, uint32_t cb, uint32_t fAccess,
1723 uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
1724{
1725 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1726 AssertReturn(hOwner != NIL_DBGFBPOWNER || pvUser == NULL, VERR_INVALID_PARAMETER);
1727 AssertReturn(!(fAccess & ~DBGFBPIOACCESS_VALID_MASK_MMIO), VERR_INVALID_FLAGS);
1728 AssertReturn(fAccess, VERR_INVALID_FLAGS);
1729 AssertReturn(iHitTrigger <= iHitDisable, VERR_INVALID_PARAMETER);
1730 AssertPtrReturn(phBp, VERR_INVALID_POINTER);
1731 AssertReturn(cb, VERR_OUT_OF_RANGE);
1732 AssertReturn(GCPhys + cb > GCPhys, VERR_OUT_OF_RANGE); /* The range must not wrap around the end of the guest physical address space. */
1733
1734 int rc = dbgfR3BpEnsureInit(pUVM);
1735 AssertRCReturn(rc, rc);
1736
1737 return VERR_NOT_IMPLEMENTED;
1738}
1739
1740
1741/**
1742 * Clears a breakpoint.
1743 *
1744 * @returns VBox status code.
1745 * @param pUVM The user mode VM handle.
1746 * @param hBp The handle of the breakpoint which should be removed (cleared).
1747 *
1748 * @thread Any thread.
1749 */
1750VMMR3DECL(int) DBGFR3BpClear(PUVM pUVM, DBGFBP hBp)
1751{
1752 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1753 AssertReturn(hBp != NIL_DBGFBP, VERR_INVALID_HANDLE);
1754
1755 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
1756 AssertPtrReturn(pBp, VERR_DBGF_BP_NOT_FOUND);
1757
1758 /* Disarm the breakpoint when it is enabled. */
1759 if (DBGF_BP_PUB_IS_ENABLED(pBp->Pub.fFlagsAndType))
1760 {
1761 int rc = dbgfR3BpDisarm(pUVM, hBp, pBp);
1762 AssertRC(rc);
1763 }
1764
1765 switch (DBGF_BP_PUB_GET_TYPE(pBp->Pub.fFlagsAndType))
1766 {
1767 case DBGFBPTYPE_REG:
1768 {
1769 int rc = dbgfR3BpRegRemove(pUVM->pVM, hBp, pBp);
1770 AssertRC(rc);
1771 break;
1772 }
1773 default:
1774 break;
1775 }
1776
1777 dbgfR3BpFree(pUVM, hBp, pBp);
1778 return VINF_SUCCESS;
1779}
1780
1781
1782/**
1783 * Enables a breakpoint.
1784 *
1785 * @returns VBox status code.
1786 * @param pUVM The user mode VM handle.
1787 * @param hBp The handle of the breakpoint which should be enabled.
1788 *
1789 * @thread Any thread.
1790 */
1791VMMR3DECL(int) DBGFR3BpEnable(PUVM pUVM, DBGFBP hBp)
1792{
1793 /*
1794 * Validate the input.
1795 */
1796 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1797 AssertReturn(hBp != NIL_DBGFBP, VERR_INVALID_HANDLE);
1798
1799 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
1800 AssertPtrReturn(pBp, VERR_DBGF_BP_NOT_FOUND);
1801
1802 int rc = VINF_SUCCESS;
1803 if (!DBGF_BP_PUB_IS_ENABLED(pBp->Pub.fFlagsAndType))
1804 rc = dbgfR3BpArm(pUVM, hBp, pBp);
1805 else
1806 rc = VINF_DBGF_BP_ALREADY_ENABLED;
1807
1808 return rc;
1809}
1810
1811
1812/**
1813 * Disables a breakpoint.
1814 *
1815 * @returns VBox status code.
1816 * @param pUVM The user mode VM handle.
1817 * @param hBp The handle of the breakpoint which should be disabled.
1818 *
1819 * @thread Any thread.
1820 */
1821VMMR3DECL(int) DBGFR3BpDisable(PUVM pUVM, DBGFBP hBp)
1822{
1823 /*
1824 * Validate the input.
1825 */
1826 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1827 AssertReturn(hBp != NIL_DBGFBP, VERR_INVALID_HANDLE);
1828
1829 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
1830 AssertPtrReturn(pBp, VERR_DBGF_BP_NOT_FOUND);
1831
1832 int rc = VINF_SUCCESS;
1833 if (DBGF_BP_PUB_IS_ENABLED(pBp->Pub.fFlagsAndType))
1834 rc = dbgfR3BpDisarm(pUVM, hBp, pBp);
1835 else
1836 rc = VINF_DBGF_BP_ALREADY_DISABLED;
1837
1838 return rc;
1839}
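
/*
 * Illustrative sketch (not part of the original file): the typical life cycle
 * of a breakpoint handle as driven by a debugger front-end.  hBp is assumed to
 * originate from one of the DBGFR3BpSetXxx APIs above; the helper name is made
 * up for the example.
 */
static void dbgfR3BpExampleToggleAndClear(PUVM pUVM, DBGFBP hBp)
{
    /* Temporarily disable the breakpoint; VINF_DBGF_BP_ALREADY_DISABLED is an informational status. */
    int rc = DBGFR3BpDisable(pUVM, hBp);
    AssertRC(rc);

    /* Re-enable it again; VINF_DBGF_BP_ALREADY_ENABLED is likewise informational. */
    rc = DBGFR3BpEnable(pUVM, hBp);
    AssertRC(rc);

    /* Remove it for good; DBGFR3BpClear disarms an enabled breakpoint before freeing the handle. */
    rc = DBGFR3BpClear(pUVM, hBp);
    AssertRC(rc);
}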
1840
1841
1842/**
1843 * EMT worker for DBGFR3BpEnum().
1844 *
1845 * @returns VBox status code.
1846 * @param pUVM The user mode VM handle.
1847 * @param pfnCallback The callback function.
1848 * @param pvUser The user argument to pass to the callback.
1849 *
1850 * @thread EMT
1851 * @internal
1852 */
1853static DECLCALLBACK(int) dbgfR3BpEnum(PUVM pUVM, PFNDBGFBPENUM pfnCallback, void *pvUser)
1854{
1855 /*
1856 * Validate input.
1857 */
1858 PVM pVM = pUVM->pVM;
1859 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1860 AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
1861
1862 RT_NOREF(pvUser);
1863
1864 return VERR_NOT_IMPLEMENTED;
1865}
1866
1867
1868/**
1869 * Enumerate the breakpoints.
1870 *
1871 * @returns VBox status code.
1872 * @param pUVM The user mode VM handle.
1873 * @param pfnCallback The callback function.
1874 * @param pvUser The user argument to pass to the callback.
1875 *
1876 * @thread Any thread but the callback will be called from EMT.
1877 */
1878VMMR3DECL(int) DBGFR3BpEnum(PUVM pUVM, PFNDBGFBPENUM pfnCallback, void *pvUser)
1879{
1880 /*
1881 * This must be done on EMT.
1882 */
1883 int rc = VMR3ReqPriorityCallWaitU(pUVM, 0 /*idDstCpu*/, (PFNRT)dbgfR3BpEnum, 3, pUVM, pfnCallback, pvUser);
1884 LogFlow(("DBGFR3BpEnum: returns %Rrc\n", rc));
1885 return rc;
1886}
1887