VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/DBGFR3Bp.cpp@ 97169

Last change on this file since 97169 was 96407, checked in by vboxsync, 2 years ago

scm copyright and license note update

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 106.3 KB
Line 
1/* $Id: DBGFR3Bp.cpp 96407 2022-08-22 17:43:14Z vboxsync $ */
2/** @file
3 * DBGF - Debugger Facility, Breakpoint Management.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_dbgf_bp DBGF - The Debugger Facility, Breakpoint Management
30 *
31 * The debugger facility's breakpoint manager's purpose is to efficiently manage
32 * large amounts of breakpoints for various use cases like dtrace-like operations
33 * or execution flow tracing for instance. Especially execution flow tracing can
34 * require thousands of breakpoints which need to be managed efficiently to not slow
35 * down guest operation too much. Before the rewrite starting end of 2020, DBGF could
36 * only handle 32 breakpoints (+ 4 hardware assisted breakpoints). The new
37 * manager is supposed to be able to handle up to one million breakpoints.
38 *
39 * @see grp_dbgf
40 *
41 *
42 * @section sec_dbgf_bp_owner Breakpoint owners
43 *
44 * A single breakpoint owner has a mandatory ring-3 callback and an optional ring-0
45 * callback assigned which is called whenever a breakpoint with the owner assigned is hit.
46 * The common part of the owner is managed by a single table mapped into both ring-0
47 * and ring-3 and the handle being the index into the table. This allows resolving
48 * the handle to the internal structure efficiently. Searching for a free entry is
49 * done using a bitmap indicating free and occupied entries. For the optional
50 * ring-0 owner part there is a separate ring-0 only table for security reasons.
51 *
52 * The callback of the owner can be used to gather and log guest state information
53 * and decide whether to continue guest execution or stop and drop into the debugger.
54 * Breakpoints which don't have an owner assigned will always drop the VM right into
55 * the debugger.
56 *
57 *
58 * @section sec_dbgf_bp_bps Breakpoints
59 *
60 * Breakpoints are referenced by an opaque handle which acts as an index into a global table
61 * mapped into ring-3 and ring-0. Each entry contains the necessary state to manage the breakpoint
62 * like trigger conditions, type, owner, etc. If an owner is given an optional opaque user argument
63 * can be supplied which is passed in the respective owner callback. For owners with ring-0 callbacks
64 * a dedicated ring-0 table is held saving possible ring-0 user arguments.
65 *
66 * To keep memory consumption under control and still support large amounts of
67 * breakpoints the table is split into fixed sized chunks and the chunk index and index
68 * into the chunk can be derived from the handle with only a few logical operations.
69 *
70 *
71 * @section sec_dbgf_bp_resolv Resolving breakpoint addresses
72 *
73 * Whenever a \#BP(0) event is triggered DBGF needs to decide whether the event originated
74 * from within the guest or whether a DBGF breakpoint caused it. This has to happen as fast
75 * as possible. The following scheme is employed to achieve this:
76 *
77 * @verbatim
78 * 7 6 5 4 3 2 1 0
79 * +---+---+---+---+---+---+---+---+
80 * | | | | | | | | | BP address
81 * +---+---+---+---+---+---+---+---+
82 * \_____________________/ \_____/
83 * | |
84 * | +---------------+
85 * | |
86 * BP table | v
87 * +------------+ | +-----------+
88 * | hBp 0 | | X <- | 0 | xxxxx |
89 * | hBp 1 | <----------------+------------------------ | 1 | hBp 1 |
90 * | | | +--- | 2 | idxL2 |
91 * | hBp <m> | <---+ v | |...| ... |
92 * | | | +-----------+ | |...| ... |
93 * | | | | | | |...| ... |
94 * | hBp <n> | <-+ +----- | +> leaf | | | . |
95 * | | | | | | | | . |
96 * | | | | + root + | <------------+ | . |
97 * | | | | | | +-----------+
98 * | | +------- | leaf<+ | L1: 65536
99 * | . | | . |
100 * | . | | . |
101 * | . | | . |
102 * +------------+ +-----------+
103 * L2 idx BST
104 * @endverbatim
105 *
106 * -# Take the lowest 16 bits of the breakpoint address and use it as an direct index
107 * into the L1 table. The L1 table is contiguous and consists of 4 byte entries
108 * resulting in 256KiB of memory used. The topmost 4 bits indicate how to proceed
109 * and the meaning of the remaining 28bits depends on the topmost 4 bits:
110 * - A 0 type entry means no breakpoint is registered with the matching lowest 16bits,
111 * so forward the event to the guest.
112 * - A 1 in the topmost 4 bits means that the remaining 28bits directly denote a breakpoint
113 * handle which can be resolved by extracting the chunk index and index into the chunk
114 * of the global breakpoint table. If the address matches the breakpoint is processed
115 * according to the configuration. Otherwise the breakpoint is again forwarded to the guest.
116 * - A 2 in the topmost 4 bits means that there are multiple breakpoints registered
117 * matching the lowest 16bits and the search must continue in the L2 table with the
118 * remaining 28bits acting as an index into the L2 table indicating the search root.
119 * -# The L2 table consists of multiple index based binary search trees, there is one for each reference
120 * from the L1 table. The key for the table are the upper 6 bytes of the breakpoint address
121 * used for searching. This tree is traversed until either a matching address is found and
122 * the breakpoint is being processed or again forwarded to the guest if it isn't successful.
123 * Each entry in the L2 table is 16 bytes big and densely packed to avoid excessive memory usage.
124 *
125 * @section sec_dbgf_bp_ioport Handling I/O port breakpoints
126 *
127 * Because of the limited amount of I/O ports being available (65536) a single table with 65536 entries,
128 * each 4 byte big will be allocated. This amounts to 256KiB of memory being used additionally as soon as
129 * an I/O breakpoint is enabled. The entries contain the breakpoint handle directly allowing only one breakpoint
130 * per port right now, which is something we accept as a limitation right now to keep things relatively simple.
131 * When there is at least one I/O breakpoint active IOM will be notified and it will afterwards call the DBGF API
132 * whenever the guest does an I/O port access to decide whether a breakpoint was hit. This keeps the overhead small
133 * when there is no I/O port breakpoint enabled.
134 *
135 * @section sec_dbgf_bp_note Random thoughts and notes for the implementation
136 *
137 * - The assumption for this approach is that the lowest 16bits of the breakpoint address are
138 * hopefully the ones being the most varying ones across breakpoints so the traversal
139 * can skip the L2 table in most of the cases. Even if the L2 table must be taken the
140 * individual trees should be quite shallow resulting in low overhead when walking it
141 * (though only real world testing can assert this assumption).
142 * - Index based tables and trees are used instead of pointers because the tables
143 * are always mapped into ring-0 and ring-3 with different base addresses.
144 * - Efficent breakpoint allocation is done by having a global bitmap indicating free
145 * and occupied breakpoint entries. Same applies for the L2 BST table.
146 * - Special care must be taken when modifying the L1 and L2 tables as other EMTs
147 * might still access it (want to try a lockless approach first using
148 * atomic updates, have to resort to locking if that turns out to be too difficult).
149 * - Each BP entry is supposed to be 64 byte big and each chunk should contain 65536
150 * breakpoints which results in 4MiB for each chunk plus the allocation bitmap.
151 * - ring-0 has to take special care when traversing the L2 BST to not run into cycles
152 * and do strict bounds checking before accessing anything. The L1 and L2 table
153 * are written to from ring-3 only. Same goes for the breakpoint table with the
154 * exception being the opaque user argument for ring-0 which is stored in ring-0 only
155 * memory.
156 */
157
158
159/*********************************************************************************************************************************
160* Header Files *
161*********************************************************************************************************************************/
162#define LOG_GROUP LOG_GROUP_DBGF
163#define VMCPU_INCL_CPUM_GST_CTX
164#include <VBox/vmm/dbgf.h>
165#include <VBox/vmm/selm.h>
166#include <VBox/vmm/iem.h>
167#include <VBox/vmm/mm.h>
168#include <VBox/vmm/iom.h>
169#include <VBox/vmm/hm.h>
170#include "DBGFInternal.h"
171#include <VBox/vmm/vm.h>
172#include <VBox/vmm/uvm.h>
173
174#include <VBox/err.h>
175#include <VBox/log.h>
176#include <iprt/assert.h>
177#include <iprt/mem.h>
178
179#include "DBGFInline.h"
180
181
182/*********************************************************************************************************************************
183* Structures and Typedefs *
184*********************************************************************************************************************************/
185
186
187/*********************************************************************************************************************************
188* Internal Functions *
189*********************************************************************************************************************************/
190RT_C_DECLS_BEGIN
191RT_C_DECLS_END
192
193
194/**
195 * Initialize the breakpoint mangement.
196 *
197 * @returns VBox status code.
198 * @param pUVM The user mode VM handle.
199 */
200DECLHIDDEN(int) dbgfR3BpInit(PUVM pUVM)
201{
202 PVM pVM = pUVM->pVM;
203
204 //pUVM->dbgf.s.paBpOwnersR3 = NULL;
205 //pUVM->dbgf.s.pbmBpOwnersAllocR3 = NULL;
206
207 /* Init hardware breakpoint states. */
208 for (uint32_t i = 0; i < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); i++)
209 {
210 PDBGFBPHW pHwBp = &pVM->dbgf.s.aHwBreakpoints[i];
211
212 AssertCompileSize(DBGFBP, sizeof(uint32_t));
213 pHwBp->hBp = NIL_DBGFBP;
214 //pHwBp->fEnabled = false;
215 }
216
217 /* Now the global breakpoint table chunks. */
218 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpChunks); i++)
219 {
220 PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[i];
221
222 //pBpChunk->pBpBaseR3 = NULL;
223 //pBpChunk->pbmAlloc = NULL;
224 //pBpChunk->cBpsFree = 0;
225 pBpChunk->idChunk = DBGF_BP_CHUNK_ID_INVALID; /* Not allocated. */
226 }
227
228 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpL2TblChunks); i++)
229 {
230 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[i];
231
232 //pL2Chunk->pL2BaseR3 = NULL;
233 //pL2Chunk->pbmAlloc = NULL;
234 //pL2Chunk->cFree = 0;
235 pL2Chunk->idChunk = DBGF_BP_CHUNK_ID_INVALID; /* Not allocated. */
236 }
237
238 //pUVM->dbgf.s.paBpLocL1R3 = NULL;
239 //pUVM->dbgf.s.paBpLocPortIoR3 = NULL;
240 pUVM->dbgf.s.hMtxBpL2Wr = NIL_RTSEMFASTMUTEX;
241 return RTSemFastMutexCreate(&pUVM->dbgf.s.hMtxBpL2Wr);
242}
243
244
245/**
246 * Terminates the breakpoint mangement.
247 *
248 * @returns VBox status code.
249 * @param pUVM The user mode VM handle.
250 */
251DECLHIDDEN(int) dbgfR3BpTerm(PUVM pUVM)
252{
253 if (pUVM->dbgf.s.pbmBpOwnersAllocR3)
254 {
255 RTMemFree((void *)pUVM->dbgf.s.pbmBpOwnersAllocR3);
256 pUVM->dbgf.s.pbmBpOwnersAllocR3 = NULL;
257 }
258
259 /* Free all allocated chunk bitmaps (the chunks itself are destroyed during ring-0 VM destruction). */
260 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpChunks); i++)
261 {
262 PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[i];
263
264 if (pBpChunk->idChunk != DBGF_BP_CHUNK_ID_INVALID)
265 {
266 AssertPtr(pBpChunk->pbmAlloc);
267 RTMemFree((void *)pBpChunk->pbmAlloc);
268 pBpChunk->pbmAlloc = NULL;
269 pBpChunk->idChunk = DBGF_BP_CHUNK_ID_INVALID;
270 }
271 }
272
273 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpL2TblChunks); i++)
274 {
275 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[i];
276
277 if (pL2Chunk->idChunk != DBGF_BP_CHUNK_ID_INVALID)
278 {
279 AssertPtr(pL2Chunk->pbmAlloc);
280 RTMemFree((void *)pL2Chunk->pbmAlloc);
281 pL2Chunk->pbmAlloc = NULL;
282 pL2Chunk->idChunk = DBGF_BP_CHUNK_ID_INVALID;
283 }
284 }
285
286 if (pUVM->dbgf.s.hMtxBpL2Wr != NIL_RTSEMFASTMUTEX)
287 {
288 RTSemFastMutexDestroy(pUVM->dbgf.s.hMtxBpL2Wr);
289 pUVM->dbgf.s.hMtxBpL2Wr = NIL_RTSEMFASTMUTEX;
290 }
291
292 return VINF_SUCCESS;
293}
294
295
/**
 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
 *
 * Worker doing the one-time allocation of the L1 breakpoint location table on
 * EMT(0), either via a ring-0 request or directly when running driverless.
 */
static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpInitEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    RT_NOREF(pvUser);

    VMCPU_ASSERT_EMT(pVCpu);
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    /*
     * The initialization will be done on EMT(0). It is possible that multiple
     * initialization attempts are done because dbgfR3BpEnsureInit() can be called
     * from racing non EMT threads when trying to set a breakpoint for the first time.
     * Just fake success if the L1 is already present which means that a previous rendezvous
     * successfully initialized the breakpoint manager.
     */
    PUVM pUVM = pVM->pUVM;
    if (   pVCpu->idCpu == 0
        && !pUVM->dbgf.s.paBpLocL1R3)
    {
        if (!SUPR3IsDriverless())
        {
            /* Let ring-0 allocate the shared L1 table and hand back the ring-3 mapping. */
            DBGFBPINITREQ Req;
            Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
            Req.Hdr.cbReq = sizeof(Req);
            Req.paBpLocL1R3 = NULL;
            int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_INIT, 0 /*u64Arg*/, &Req.Hdr);
            AssertLogRelMsgRCReturn(rc, ("VMMR0_DO_DBGF_BP_INIT failed: %Rrc\n", rc), rc);
            pUVM->dbgf.s.paBpLocL1R3 = Req.paBpLocL1R3;
        }
        else
        {
            /* Driverless: Do dbgfR0BpInitWorker here, ring-3 style. */
            /* NOTE(review): UINT16_MAX * 4 is 4 bytes short of 65536 entries; the
               page alignment presumably absorbs that - confirm against the ring-0
               worker which uses the same expression. */
            uint32_t const cbL1Loc = RT_ALIGN_32(UINT16_MAX * sizeof(uint32_t), HOST_PAGE_SIZE);
            pUVM->dbgf.s.paBpLocL1R3 = (uint32_t *)RTMemPageAllocZ(cbL1Loc);
            AssertLogRelMsgReturn(pUVM->dbgf.s.paBpLocL1R3, ("cbL1Loc=%#x\n", cbL1Loc), VERR_NO_PAGE_MEMORY);
        }
    }

    /* Non-EMT(0) CPUs and repeat attempts simply report success. */
    return VINF_SUCCESS;
}
338
339
340/**
341 * Ensures that the breakpoint manager is fully initialized.
342 *
343 * @returns VBox status code.
344 * @param pUVM The user mode VM handle.
345 *
346 * @thread Any thread.
347 */
348static int dbgfR3BpEnsureInit(PUVM pUVM)
349{
350 /* If the L1 lookup table is allocated initialization succeeded before. */
351 if (RT_LIKELY(pUVM->dbgf.s.paBpLocL1R3))
352 return VINF_SUCCESS;
353
354 /* Gather all EMTs and call into ring-0 to initialize the breakpoint manager. */
355 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpInitEmtWorker, NULL /*pvUser*/);
356}
357
358
/**
 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
 *
 * Worker doing the one-time allocation of the I/O port breakpoint location
 * table on EMT(0), either via a ring-0 request or directly when driverless.
 */
static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpPortIoInitEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    RT_NOREF(pvUser);

    VMCPU_ASSERT_EMT(pVCpu);
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    /*
     * The initialization will be done on EMT(0). It is possible that multiple
     * initialization attempts are done because dbgfR3BpPortIoEnsureInit() can be called
     * from racing non EMT threads when trying to set a breakpoint for the first time.
     * Just fake success if the L1 is already present which means that a previous rendezvous
     * successfully initialized the breakpoint manager.
     */
    PUVM pUVM = pVM->pUVM;
    if (   pVCpu->idCpu == 0
        && !pUVM->dbgf.s.paBpLocPortIoR3)
    {
        if (!SUPR3IsDriverless())
        {
            /* The generic DBGFBPINITREQ is reused here: its paBpLocL1R3 member
               carries the port I/O location table for this request type. */
            DBGFBPINITREQ Req;
            Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
            Req.Hdr.cbReq = sizeof(Req);
            Req.paBpLocL1R3 = NULL;
            int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_PORTIO_INIT, 0 /*u64Arg*/, &Req.Hdr);
            AssertLogRelMsgRCReturn(rc, ("VMMR0_DO_DBGF_BP_PORTIO_INIT failed: %Rrc\n", rc), rc);
            pUVM->dbgf.s.paBpLocPortIoR3 = Req.paBpLocL1R3;
        }
        else
        {
            /* Driverless: Do dbgfR0BpPortIoInitWorker here, ring-3 style. */
            uint32_t const cbPortIoLoc = RT_ALIGN_32(UINT16_MAX * sizeof(uint32_t), HOST_PAGE_SIZE);
            pUVM->dbgf.s.paBpLocPortIoR3 = (uint32_t *)RTMemPageAllocZ(cbPortIoLoc);
            AssertLogRelMsgReturn(pUVM->dbgf.s.paBpLocPortIoR3, ("cbPortIoLoc=%#x\n", cbPortIoLoc), VERR_NO_PAGE_MEMORY);
        }
    }

    /* Non-EMT(0) CPUs and repeat attempts simply report success. */
    return VINF_SUCCESS;
}
401
402
403/**
404 * Ensures that the breakpoint manager is initialized to handle I/O port breakpoint.
405 *
406 * @returns VBox status code.
407 * @param pUVM The user mode VM handle.
408 *
409 * @thread Any thread.
410 */
411static int dbgfR3BpPortIoEnsureInit(PUVM pUVM)
412{
413 /* If the L1 lookup table is allocated initialization succeeded before. */
414 if (RT_LIKELY(pUVM->dbgf.s.paBpLocPortIoR3))
415 return VINF_SUCCESS;
416
417 /* Ensure that the breakpoint manager is initialized. */
418 int rc = dbgfR3BpEnsureInit(pUVM);
419 if (RT_FAILURE(rc))
420 return rc;
421
422 /* Gather all EMTs and call into ring-0 to initialize the breakpoint manager. */
423 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpPortIoInitEmtWorker, NULL /*pvUser*/);
424}
425
426
427/**
428 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
429 */
430static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpOwnerInitEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
431{
432 RT_NOREF(pvUser);
433
434 VMCPU_ASSERT_EMT(pVCpu);
435 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
436
437 /*
438 * The initialization will be done on EMT(0). It is possible that multiple
439 * initialization attempts are done because dbgfR3BpOwnerEnsureInit() can be called
440 * from racing non EMT threads when trying to create a breakpoint owner for the first time.
441 * Just fake success if the pointers are initialized already, meaning that a previous rendezvous
442 * successfully initialized the breakpoint owner table.
443 */
444 int rc = VINF_SUCCESS;
445 PUVM pUVM = pVM->pUVM;
446 if ( pVCpu->idCpu == 0
447 && !pUVM->dbgf.s.pbmBpOwnersAllocR3)
448 {
449 AssertCompile(!(DBGF_BP_OWNER_COUNT_MAX % 64));
450 pUVM->dbgf.s.pbmBpOwnersAllocR3 = RTMemAllocZ(DBGF_BP_OWNER_COUNT_MAX / 8);
451 if (pUVM->dbgf.s.pbmBpOwnersAllocR3)
452 {
453 if (!SUPR3IsDriverless())
454 {
455 DBGFBPOWNERINITREQ Req;
456 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
457 Req.Hdr.cbReq = sizeof(Req);
458 Req.paBpOwnerR3 = NULL;
459 rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_OWNER_INIT, 0 /*u64Arg*/, &Req.Hdr);
460 if (RT_SUCCESS(rc))
461 {
462 pUVM->dbgf.s.paBpOwnersR3 = (PDBGFBPOWNERINT)Req.paBpOwnerR3;
463 return VINF_SUCCESS;
464 }
465 AssertLogRelMsgRC(rc, ("VMMR0_DO_DBGF_BP_OWNER_INIT failed: %Rrc\n", rc));
466 }
467 else
468 {
469 /* Driverless: Do dbgfR0BpOwnerInitWorker here, ring-3 style. */
470 uint32_t const cbBpOwnerR3 = RT_ALIGN_32(DBGF_BP_OWNER_COUNT_MAX * sizeof(DBGFBPOWNERINT), HOST_PAGE_SIZE);
471 pUVM->dbgf.s.paBpLocPortIoR3 = (uint32_t *)RTMemPageAllocZ(cbBpOwnerR3);
472 if (pUVM->dbgf.s.paBpLocPortIoR3)
473 return VINF_SUCCESS;
474 AssertLogRelMsgFailed(("cbBpOwnerR3=%#x\n", cbBpOwnerR3));
475 rc = VERR_NO_PAGE_MEMORY;
476 }
477
478 RTMemFree((void *)pUVM->dbgf.s.pbmBpOwnersAllocR3);
479 pUVM->dbgf.s.pbmBpOwnersAllocR3 = NULL;
480 }
481 else
482 rc = VERR_NO_MEMORY;
483 }
484
485 return rc;
486}
487
488
489/**
490 * Ensures that the breakpoint manager is fully initialized.
491 *
492 * @returns VBox status code.
493 * @param pUVM The user mode VM handle.
494 *
495 * @thread Any thread.
496 */
497static int dbgfR3BpOwnerEnsureInit(PUVM pUVM)
498{
499 /* If the allocation bitmap is allocated initialization succeeded before. */
500 if (RT_LIKELY(pUVM->dbgf.s.pbmBpOwnersAllocR3))
501 return VINF_SUCCESS;
502
503 /* Gather all EMTs and call into ring-0 to initialize the breakpoint manager. */
504 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpOwnerInitEmtWorker, NULL /*pvUser*/);
505}
506
507
508/**
509 * Retains the given breakpoint owner handle for use.
510 *
511 * @returns VBox status code.
512 * @retval VERR_INVALID_HANDLE if the given breakpoint owner handle is invalid.
513 * @param pUVM The user mode VM handle.
514 * @param hBpOwner The breakpoint owner handle to retain, NIL_DBGFOWNER is accepted without doing anything.
515 * @param fIo Flag whether the owner must have the I/O handler set because it used by an I/O breakpoint.
516 */
517DECLINLINE(int) dbgfR3BpOwnerRetain(PUVM pUVM, DBGFBPOWNER hBpOwner, bool fIo)
518{
519 if (hBpOwner == NIL_DBGFBPOWNER)
520 return VINF_SUCCESS;
521
522 PDBGFBPOWNERINT pBpOwner = dbgfR3BpOwnerGetByHnd(pUVM, hBpOwner);
523 if (pBpOwner)
524 {
525 AssertReturn ( ( fIo
526 && pBpOwner->pfnBpIoHitR3)
527 || ( !fIo
528 && pBpOwner->pfnBpHitR3),
529 VERR_INVALID_HANDLE);
530 ASMAtomicIncU32(&pBpOwner->cRefs);
531 return VINF_SUCCESS;
532 }
533
534 return VERR_INVALID_HANDLE;
535}
536
537
538/**
539 * Releases the given breakpoint owner handle.
540 *
541 * @returns VBox status code.
542 * @retval VERR_INVALID_HANDLE if the given breakpoint owner handle is invalid.
543 * @param pUVM The user mode VM handle.
544 * @param hBpOwner The breakpoint owner handle to retain, NIL_DBGFOWNER is accepted without doing anything.
545 */
546DECLINLINE(int) dbgfR3BpOwnerRelease(PUVM pUVM, DBGFBPOWNER hBpOwner)
547{
548 if (hBpOwner == NIL_DBGFBPOWNER)
549 return VINF_SUCCESS;
550
551 PDBGFBPOWNERINT pBpOwner = dbgfR3BpOwnerGetByHnd(pUVM, hBpOwner);
552 if (pBpOwner)
553 {
554 Assert(pBpOwner->cRefs > 1);
555 ASMAtomicDecU32(&pBpOwner->cRefs);
556 return VINF_SUCCESS;
557 }
558
559 return VERR_INVALID_HANDLE;
560}
561
562
563/**
564 * Returns the internal breakpoint state for the given handle.
565 *
566 * @returns Pointer to the internal breakpoint state or NULL if the handle is invalid.
567 * @param pUVM The user mode VM handle.
568 * @param hBp The breakpoint handle to resolve.
569 */
570DECLINLINE(PDBGFBPINT) dbgfR3BpGetByHnd(PUVM pUVM, DBGFBP hBp)
571{
572 uint32_t idChunk = DBGF_BP_HND_GET_CHUNK_ID(hBp);
573 uint32_t idxEntry = DBGF_BP_HND_GET_ENTRY(hBp);
574
575 AssertReturn(idChunk < DBGF_BP_CHUNK_COUNT, NULL);
576 AssertReturn(idxEntry < DBGF_BP_COUNT_PER_CHUNK, NULL);
577
578 PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[idChunk];
579 AssertReturn(pBpChunk->idChunk == idChunk, NULL);
580 AssertPtrReturn(pBpChunk->pbmAlloc, NULL);
581 AssertReturn(ASMBitTest(pBpChunk->pbmAlloc, idxEntry), NULL);
582
583 return &pBpChunk->pBpBaseR3[idxEntry];
584}
585
586
/**
 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
 *
 * Worker allocating one global breakpoint table chunk on EMT(0), either via a
 * ring-0 request or directly when driverless.  The chunk index is passed in
 * @a pvUser.
 */
static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpChunkAllocEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    /* The requested chunk index travels in the opaque user argument. */
    uint32_t idChunk = (uint32_t)(uintptr_t)pvUser;

    VMCPU_ASSERT_EMT(pVCpu);
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    AssertReturn(idChunk < DBGF_BP_CHUNK_COUNT, VERR_DBGF_BP_IPE_1);

    PUVM pUVM = pVM->pUVM;
    PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[idChunk];

    /* The slot must either be unallocated or already carry the requested ID
       (a racing rendezvous got there first). */
    AssertReturn(   pBpChunk->idChunk == DBGF_BP_CHUNK_ID_INVALID
                 || pBpChunk->idChunk == idChunk,
                 VERR_DBGF_BP_IPE_2);

    /*
     * The initialization will be done on EMT(0). It is possible that multiple
     * allocation attempts are done when multiple racing non EMT threads try to
     * allocate a breakpoint and a new chunk needs to be allocated.
     * Ignore the request and succeed if the chunk is allocated meaning that a
     * previous rendezvous successfully allocated the chunk.
     */
    int rc = VINF_SUCCESS;
    if (   pVCpu->idCpu == 0
        && pBpChunk->idChunk == DBGF_BP_CHUNK_ID_INVALID)
    {
        /* Allocate the bitmap first so we can skip calling into VMMR0 if it fails. */
        AssertCompile(!(DBGF_BP_COUNT_PER_CHUNK % 64));
        void *pbmAlloc = RTMemAllocZ(DBGF_BP_COUNT_PER_CHUNK / 8);
        if (RT_LIKELY(pbmAlloc))
        {
            if (!SUPR3IsDriverless())
            {
                /* Ring-0 allocates the shared chunk memory and returns the ring-3 mapping. */
                DBGFBPCHUNKALLOCREQ Req;
                Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
                Req.Hdr.cbReq = sizeof(Req);
                Req.idChunk = idChunk;
                Req.pChunkBaseR3 = NULL;
                rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_CHUNK_ALLOC, 0 /*u64Arg*/, &Req.Hdr);
                if (RT_SUCCESS(rc))
                    pBpChunk->pBpBaseR3 = (PDBGFBPINT)Req.pChunkBaseR3;
                else
                    AssertLogRelMsgRC(rc, ("VMMR0_DO_DBGF_BP_CHUNK_ALLOC failed: %Rrc\n", rc));
            }
            else
            {
                /* Driverless: Do dbgfR0BpChunkAllocWorker here, ring-3 style. */
                uint32_t const cbShared = RT_ALIGN_32(DBGF_BP_COUNT_PER_CHUNK * sizeof(DBGFBPINT), HOST_PAGE_SIZE);
                pBpChunk->pBpBaseR3 = (PDBGFBPINT)RTMemPageAllocZ(cbShared);
                AssertLogRelMsgStmt(pBpChunk->pBpBaseR3, ("cbShared=%#x\n", cbShared), rc = VERR_NO_PAGE_MEMORY);
            }
            if (RT_SUCCESS(rc))
            {
                /* Commit: bitmap and free count before the chunk ID, which is
                   what dbgfR3BpAlloc() polls to detect an allocated chunk. */
                pBpChunk->pbmAlloc = (void volatile *)pbmAlloc;
                pBpChunk->cBpsFree = DBGF_BP_COUNT_PER_CHUNK;
                pBpChunk->idChunk  = idChunk;
                return VINF_SUCCESS;
            }

            /* Chunk memory allocation failed, drop the bitmap again. */
            RTMemFree(pbmAlloc);
        }
        else
            rc = VERR_NO_MEMORY;
    }

    return rc;
}
658
659
660/**
661 * Tries to allocate the given chunk which requires an EMT rendezvous.
662 *
663 * @returns VBox status code.
664 * @param pUVM The user mode VM handle.
665 * @param idChunk The chunk to allocate.
666 *
667 * @thread Any thread.
668 */
669DECLINLINE(int) dbgfR3BpChunkAlloc(PUVM pUVM, uint32_t idChunk)
670{
671 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpChunkAllocEmtWorker, (void *)(uintptr_t)idChunk);
672}
673
674
/**
 * Tries to allocate a new breakpoint of the given type.
 *
 * Lock-free: scans the chunk table for a free slot, triggering an EMT
 * rendezvous to allocate chunks on demand, and claims a slot with an atomic
 * bit test-and-set so racing callers never receive the same entry.
 *
 * @returns VBox status code.
 * @retval  VERR_DBGF_NO_MORE_BP_SLOTS if all chunks are exhausted or chunk
 *          allocation failed.
 * @param   pUVM        The user mode VM handle.
 * @param   hOwner      The owner handle, NIL_DBGFBPOWNER if none assigned.
 * @param   pvUser      Opaque user data passed in the owner callback.
 * @param   enmType     Breakpoint type to allocate.
 * @param   fFlags      Flags associated with the allocated breakpoint.
 * @param   iHitTrigger The hit count at which the breakpoint start triggering.
 *                      Use 0 (or 1) if it's gonna trigger at once.
 * @param   iHitDisable The hit count which disables the breakpoint.
 *                      Use ~(uint64_t)0 if it's never gonna be disabled.
 * @param   phBp        Where to return the opaque breakpoint handle on success.
 * @param   ppBp        Where to return the pointer to the internal breakpoint state on success.
 *
 * @thread Any thread.
 */
static int dbgfR3BpAlloc(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser, DBGFBPTYPE enmType,
                         uint16_t fFlags, uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp,
                         PDBGFBPINT *ppBp)
{
    /* I/O flavored breakpoints require the owner's I/O hit callback. */
    bool fIo =    enmType == DBGFBPTYPE_PORT_IO
               || enmType == DBGFBPTYPE_MMIO;
    int rc = dbgfR3BpOwnerRetain(pUVM, hOwner, fIo);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Search for a chunk having a free entry, allocating new chunks
     * if the encountered ones are full.
     *
     * This can be called from multiple threads at the same time so special care
     * has to be taken to not require any locking here.
     */
    for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpChunks); i++)
    {
        PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[i];

        /* Allocate the chunk on demand; the worker tolerates losing the race. */
        uint32_t idChunk = ASMAtomicReadU32(&pBpChunk->idChunk);
        if (idChunk == DBGF_BP_CHUNK_ID_INVALID)
        {
            rc = dbgfR3BpChunkAlloc(pUVM, i);
            if (RT_FAILURE(rc))
            {
                LogRel(("DBGF/Bp: Allocating new breakpoint table chunk failed with %Rrc\n", rc));
                break;
            }

            idChunk = ASMAtomicReadU32(&pBpChunk->idChunk);
            Assert(idChunk == i);
        }

        /** @todo Optimize with some hinting if this turns out to be too slow. */
        for (;;)
        {
            uint32_t cBpsFree = ASMAtomicReadU32(&pBpChunk->cBpsFree);
            if (cBpsFree)
            {
                /*
                 * Scan the associated bitmap for a free entry, if none can be found another thread
                 * raced us and we go to the next chunk.
                 */
                int32_t iClr = ASMBitFirstClear(pBpChunk->pbmAlloc, DBGF_BP_COUNT_PER_CHUNK);
                if (iClr != -1)
                {
                    /*
                     * Try to allocate, we could get raced here as well. In that case
                     * we try again.
                     */
                    if (!ASMAtomicBitTestAndSet(pBpChunk->pbmAlloc, iClr))
                    {
                        /* Success, immediately mark as allocated, initialize the breakpoint state and return. */
                        ASMAtomicDecU32(&pBpChunk->cBpsFree);

                        PDBGFBPINT pBp = &pBpChunk->pBpBaseR3[iClr];
                        pBp->Pub.cHits       = 0;
                        pBp->Pub.iHitTrigger = iHitTrigger;
                        pBp->Pub.iHitDisable = iHitDisable;
                        pBp->Pub.hOwner      = hOwner;
                        pBp->Pub.u16Type     = DBGF_BP_PUB_MAKE_TYPE(enmType);
                        pBp->Pub.fFlags      = fFlags & ~DBGF_BP_F_ENABLED; /* The enabled flag is handled in the respective APIs. */
                        pBp->pvUserR3        = pvUser;

                        /** @todo Owner handling (reference and call ring-0 if it has an ring-0 callback). */

                        *phBp = DBGF_BP_HND_CREATE(idChunk, iClr);
                        *ppBp = pBp;
                        return VINF_SUCCESS;
                    }
                    /* else Retry with another spot. */
                }
                else /* no free entry in bitmap, go to the next chunk */
                    break;
            }
            else /* !cBpsFree, go to the next chunk */
                break;
        }
    }

    /* All chunks exhausted or allocation failed: drop the owner reference again. */
    rc = dbgfR3BpOwnerRelease(pUVM, hOwner); AssertRC(rc);
    return VERR_DBGF_NO_MORE_BP_SLOTS;
}
778
779
/**
 * Frees the given breakpoint handle.
 *
 * @returns nothing.
 * @param   pUVM    The user mode VM handle.
 * @param   hBp     The breakpoint handle to free.
 * @param   pBp     The internal breakpoint state pointer.
 */
static void dbgfR3BpFree(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
{
    uint32_t idChunk  = DBGF_BP_HND_GET_CHUNK_ID(hBp);
    uint32_t idxEntry = DBGF_BP_HND_GET_ENTRY(hBp);

    /* Validate the handle decodes to an allocated entry before touching anything. */
    AssertReturnVoid(idChunk < DBGF_BP_CHUNK_COUNT);
    AssertReturnVoid(idxEntry < DBGF_BP_COUNT_PER_CHUNK);

    PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[idChunk];
    AssertPtrReturnVoid(pBpChunk->pbmAlloc);
    AssertReturnVoid(ASMBitTest(pBpChunk->pbmAlloc, idxEntry));

    /** @todo Need a trip to Ring-0 if an owner is assigned with a Ring-0 part to clear the breakpoint. */
    int rc = dbgfR3BpOwnerRelease(pUVM, pBp->Pub.hOwner); AssertRC(rc); RT_NOREF(rc);
    /* Scrub the entry before publishing the slot as free; the bitmap bit is
       cleared last so racing allocators never see stale state. */
    memset(pBp, 0, sizeof(*pBp));

    ASMAtomicBitClear(pBpChunk->pbmAlloc, idxEntry);
    ASMAtomicIncU32(&pBpChunk->cBpsFree);
}
807
808
/**
 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
 *
 * Worker allocating one L2 lookup table chunk on EMT(0), either via a ring-0
 * request or directly when driverless.  The chunk index is passed in
 * @a pvUser.
 */
static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpL2TblChunkAllocEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    /* The requested chunk index travels in the opaque user argument. */
    uint32_t idChunk = (uint32_t)(uintptr_t)pvUser;

    VMCPU_ASSERT_EMT(pVCpu);
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    AssertReturn(idChunk < DBGF_BP_L2_TBL_CHUNK_COUNT, VERR_DBGF_BP_IPE_1);

    PUVM pUVM = pVM->pUVM;
    PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[idChunk];

    /* The slot must either be unallocated or already carry the requested ID
       (a racing rendezvous got there first). */
    AssertReturn(   pL2Chunk->idChunk == DBGF_BP_L2_IDX_CHUNK_ID_INVALID
                 || pL2Chunk->idChunk == idChunk,
                 VERR_DBGF_BP_IPE_2);

    /*
     * The initialization will be done on EMT(0). It is possible that multiple
     * allocation attempts are done when multiple racing non EMT threads try to
     * allocate a breakpoint and a new chunk needs to be allocated.
     * Ignore the request and succeed if the chunk is allocated meaning that a
     * previous rendezvous successfully allocated the chunk.
     */
    int rc = VINF_SUCCESS;
    if (   pVCpu->idCpu == 0
        && pL2Chunk->idChunk == DBGF_BP_L2_IDX_CHUNK_ID_INVALID)
    {
        /* Allocate the bitmap first so we can skip calling into VMMR0 if it fails. */
        AssertCompile(!(DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK % 64));
        void *pbmAlloc = RTMemAllocZ(DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK / 8);
        if (RT_LIKELY(pbmAlloc))
        {
            if (!SUPR3IsDriverless())
            {
                /* Ring-0 allocates the shared chunk memory and returns the ring-3 mapping. */
                DBGFBPL2TBLCHUNKALLOCREQ Req;
                Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
                Req.Hdr.cbReq = sizeof(Req);
                Req.idChunk = idChunk;
                Req.pChunkBaseR3 = NULL;
                rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC, 0 /*u64Arg*/, &Req.Hdr);
                if (RT_SUCCESS(rc))
                    pL2Chunk->pL2BaseR3 = (PDBGFBPL2ENTRY)Req.pChunkBaseR3;
                else
                    AssertLogRelMsgRC(rc, ("VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC failed: %Rrc\n", rc));
            }
            else
            {
                /* Driverless: Do dbgfR0BpL2TblChunkAllocWorker here, ring-3 style. */
                uint32_t const cbTotal = RT_ALIGN_32(DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK * sizeof(DBGFBPL2ENTRY), HOST_PAGE_SIZE);
                pL2Chunk->pL2BaseR3 = (PDBGFBPL2ENTRY)RTMemPageAllocZ(cbTotal);
                AssertLogRelMsgStmt(pL2Chunk->pL2BaseR3, ("cbTotal=%#x\n", cbTotal), rc = VERR_NO_PAGE_MEMORY);
            }
            if (RT_SUCCESS(rc))
            {
                /* Commit: bitmap and free count before the chunk ID marking the
                   chunk as allocated. */
                pL2Chunk->pbmAlloc = (void volatile *)pbmAlloc;
                pL2Chunk->cFree    = DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK;
                pL2Chunk->idChunk  = idChunk;
                return VINF_SUCCESS;
            }

            /* Chunk memory allocation failed, drop the bitmap again. */
            RTMemFree(pbmAlloc);
        }
        else
            rc = VERR_NO_MEMORY;
    }

    return rc;
}
880
881
882/**
883 * Tries to allocate the given L2 table chunk which requires an EMT rendezvous.
884 *
885 * @returns VBox status code.
886 * @param pUVM The user mode VM handle.
887 * @param idChunk The chunk to allocate.
888 *
889 * @thread Any thread.
890 */
891DECLINLINE(int) dbgfR3BpL2TblChunkAlloc(PUVM pUVM, uint32_t idChunk)
892{
893 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpL2TblChunkAllocEmtWorker, (void *)(uintptr_t)idChunk);
894}
895
896
/**
 * Tries to allocate a new L2 lookup table entry.
 *
 * @returns VBox status code.
 * @retval  VERR_DBGF_NO_MORE_BP_SLOTS if all chunks are exhausted.
 * @param   pUVM            The user mode VM handle.
 * @param   pidxL2Tbl       Where to return the L2 table entry index on success.
 * @param   ppL2TblEntry    Where to return the pointer to the L2 table entry on success.
 *
 * @thread Any thread.
 */
static int dbgfR3BpL2TblEntryAlloc(PUVM pUVM, uint32_t *pidxL2Tbl, PDBGFBPL2ENTRY *ppL2TblEntry)
{
    /*
     * Search for a chunk having a free entry, allocating new chunks
     * if the encountered ones are full.
     *
     * This can be called from multiple threads at the same time so special care
     * has to be taken to not require any locking here.
     */
    for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpL2TblChunks); i++)
    {
        PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[i];

        /* idChunk doubles as the "fully initialized" flag; the alloc worker writes it last. */
        uint32_t idChunk = ASMAtomicReadU32(&pL2Chunk->idChunk);
        if (idChunk == DBGF_BP_L2_IDX_CHUNK_ID_INVALID)
        {
            int rc = dbgfR3BpL2TblChunkAlloc(pUVM, i);
            if (RT_FAILURE(rc))
            {
                LogRel(("DBGF/Bp: Allocating new breakpoint L2 lookup table chunk failed with %Rrc\n", rc));
                break;
            }

            /* Re-read; either we or a racing rendezvous initialized the chunk. */
            idChunk = ASMAtomicReadU32(&pL2Chunk->idChunk);
            Assert(idChunk == i);
        }

        /** @todo Optimize with some hinting if this turns out to be too slow. */
        for (;;)
        {
            uint32_t cFree = ASMAtomicReadU32(&pL2Chunk->cFree);
            if (cFree)
            {
                /*
                 * Scan the associated bitmap for a free entry, if none can be found another thread
                 * raced us and we go to the next chunk.
                 */
                int32_t iClr = ASMBitFirstClear(pL2Chunk->pbmAlloc, DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK);
                if (iClr != -1)
                {
                    /*
                     * Try to allocate, we could get raced here as well. In that case
                     * we try again.
                     */
                    if (!ASMAtomicBitTestAndSet(pL2Chunk->pbmAlloc, iClr))
                    {
                        /* Success, immediately mark as allocated, initialize the breakpoint state and return. */
                        ASMAtomicDecU32(&pL2Chunk->cFree);

                        PDBGFBPL2ENTRY pL2Entry = &pL2Chunk->pL2BaseR3[iClr];

                        *pidxL2Tbl    = DBGF_BP_L2_IDX_CREATE(idChunk, iClr);
                        *ppL2TblEntry = pL2Entry;
                        return VINF_SUCCESS;
                    }
                    /* else Retry with another spot. */
                }
                else /* no free entry in bitmap, go to the next chunk */
                    break;
            }
            else /* !cFree, go to the next chunk */
                break;
        }
    }

    return VERR_DBGF_NO_MORE_BP_SLOTS;
}
974
975
976/**
977 * Frees the given breakpoint handle.
978 *
979 * @returns nothing.
980 * @param pUVM The user mode VM handle.
981 * @param idxL2Tbl The L2 table index to free.
982 * @param pL2TblEntry The L2 table entry pointer to free.
983 */
984static void dbgfR3BpL2TblEntryFree(PUVM pUVM, uint32_t idxL2Tbl, PDBGFBPL2ENTRY pL2TblEntry)
985{
986 uint32_t idChunk = DBGF_BP_L2_IDX_GET_CHUNK_ID(idxL2Tbl);
987 uint32_t idxEntry = DBGF_BP_L2_IDX_GET_ENTRY(idxL2Tbl);
988
989 AssertReturnVoid(idChunk < DBGF_BP_L2_TBL_CHUNK_COUNT);
990 AssertReturnVoid(idxEntry < DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK);
991
992 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[idChunk];
993 AssertPtrReturnVoid(pL2Chunk->pbmAlloc);
994 AssertReturnVoid(ASMBitTest(pL2Chunk->pbmAlloc, idxEntry));
995
996 memset(pL2TblEntry, 0, sizeof(*pL2TblEntry));
997
998 ASMAtomicBitClear(pL2Chunk->pbmAlloc, idxEntry);
999 ASMAtomicIncU32(&pL2Chunk->cFree);
1000}
1001
1002
1003/**
1004 * Sets the enabled flag of the given breakpoint to the given value.
1005 *
1006 * @returns nothing.
1007 * @param pBp The breakpoint to set the state.
1008 * @param fEnabled Enabled status.
1009 */
1010DECLINLINE(void) dbgfR3BpSetEnabled(PDBGFBPINT pBp, bool fEnabled)
1011{
1012 if (fEnabled)
1013 pBp->Pub.fFlags |= DBGF_BP_F_ENABLED;
1014 else
1015 pBp->Pub.fFlags &= ~DBGF_BP_F_ENABLED;
1016}
1017
1018
/**
 * Assigns a hardware breakpoint state to the given register breakpoint.
 *
 * @returns VBox status code.
 * @retval  VERR_DBGF_NO_MORE_BP_SLOTS if all hardware debug registers are in use.
 * @param   pVM         The cross-context VM structure pointer.
 * @param   hBp         The breakpoint handle to assign.
 * @param   pBp         The internal breakpoint state.
 *
 * @thread Any thread.
 */
static int dbgfR3BpRegAssign(PVM pVM, DBGFBP hBp, PDBGFBPINT pBp)
{
    /* The breakpoint must not already have a hardware slot assigned. */
    AssertReturn(pBp->Pub.u.Reg.iReg == UINT8_MAX, VERR_DBGF_BP_IPE_3);

    for (uint8_t i = 0; i < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); i++)
    {
        PDBGFBPHW pHwBp = &pVM->dbgf.s.aHwBreakpoints[i];

        AssertCompileSize(DBGFBP, sizeof(uint32_t));
        /* Claim the slot atomically by swapping our handle into hBp; the slot's
           other fields are filled in only after the claim succeeded. */
        if (ASMAtomicCmpXchgU32(&pHwBp->hBp, hBp, NIL_DBGFBP))
        {
            pHwBp->GCPtr    = pBp->Pub.u.Reg.GCPtr;
            pHwBp->fType    = pBp->Pub.u.Reg.fType;
            pHwBp->cb       = pBp->Pub.u.Reg.cb;
            pHwBp->fEnabled = DBGF_BP_PUB_IS_ENABLED(&pBp->Pub);

            /* Remember which debug register slot backs this breakpoint. */
            pBp->Pub.u.Reg.iReg = i;
            return VINF_SUCCESS;
        }
    }

    return VERR_DBGF_NO_MORE_BP_SLOTS;
}
1052
1053
/**
 * Removes the assigned hardware breakpoint state from the given register breakpoint.
 *
 * @returns VBox status code.
 * @param   pVM         The cross-context VM structure pointer.
 * @param   hBp         The breakpoint handle to remove.
 * @param   pBp         The internal breakpoint state.
 *
 * @thread Any thread.
 */
static int dbgfR3BpRegRemove(PVM pVM, DBGFBP hBp, PDBGFBPINT pBp)
{
    /* A valid slot index must have been assigned by dbgfR3BpRegAssign(). */
    AssertReturn(pBp->Pub.u.Reg.iReg < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints), VERR_DBGF_BP_IPE_3);

    PDBGFBPHW pHwBp = &pVM->dbgf.s.aHwBreakpoints[pBp->Pub.u.Reg.iReg];
    AssertReturn(pHwBp->hBp == hBp, VERR_DBGF_BP_IPE_4);
    /* The breakpoint must be disabled before the slot may be recycled. */
    AssertReturn(!pHwBp->fEnabled, VERR_DBGF_BP_IPE_5);

    /* Clear the descriptive fields first; the barrier keeps the compiler from
       reordering these stores past the handle release below. */
    pHwBp->GCPtr = 0;
    pHwBp->fType = 0;
    pHwBp->cb    = 0;
    ASMCompilerBarrier();

    /* Releasing hBp last makes the slot available to dbgfR3BpRegAssign() again. */
    ASMAtomicWriteU32(&pHwBp->hBp, NIL_DBGFBP);
    return VINF_SUCCESS;
}
1080
1081
1082/**
1083 * Returns the pointer to the L2 table entry from the given index.
1084 *
1085 * @returns Current context pointer to the L2 table entry or NULL if the provided index value is invalid.
1086 * @param pUVM The user mode VM handle.
1087 * @param idxL2 The L2 table index to resolve.
1088 *
1089 * @note The content of the resolved L2 table entry is not validated!.
1090 */
1091DECLINLINE(PDBGFBPL2ENTRY) dbgfR3BpL2GetByIdx(PUVM pUVM, uint32_t idxL2)
1092{
1093 uint32_t idChunk = DBGF_BP_L2_IDX_GET_CHUNK_ID(idxL2);
1094 uint32_t idxEntry = DBGF_BP_L2_IDX_GET_ENTRY(idxL2);
1095
1096 AssertReturn(idChunk < DBGF_BP_L2_TBL_CHUNK_COUNT, NULL);
1097 AssertReturn(idxEntry < DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK, NULL);
1098
1099 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[idChunk];
1100 AssertPtrReturn(pL2Chunk->pbmAlloc, NULL);
1101 AssertReturn(ASMBitTest(pL2Chunk->pbmAlloc, idxEntry), NULL);
1102
1103 return &pL2Chunk->CTX_SUFF(pL2Base)[idxEntry];
1104}
1105
1106
/**
 * Creates a binary search tree with the given root and leaf nodes.
 *
 * @returns VBox status code.
 * @retval  VINF_TRY_AGAIN if another thread changed the L1 entry while we were
 *          building the tree; the caller should retry.
 * @param   pUVM            The user mode VM handle.
 * @param   idxL1           The index into the L1 table where the created tree should be linked into.
 * @param   u32EntryOld     The old entry in the L1 table used to compare with in the atomic update.
 * @param   hBpRoot         The root node DBGF handle to assign.
 * @param   GCPtrRoot       The root nodes GC pointer to use as a key.
 * @param   hBpLeaf         The leafs node DBGF handle to assign.
 * @param   GCPtrLeaf       The leafs node GC pointer to use as a key.
 */
static int dbgfR3BpInt3L2BstCreate(PUVM pUVM, uint32_t idxL1, uint32_t u32EntryOld,
                                   DBGFBP hBpRoot, RTGCUINTPTR GCPtrRoot,
                                   DBGFBP hBpLeaf, RTGCUINTPTR GCPtrLeaf)
{
    /* Two distinct addresses hashing to the same L1 slot are required. */
    AssertReturn(GCPtrRoot != GCPtrLeaf, VERR_DBGF_BP_IPE_9);
    Assert(DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(GCPtrRoot) == DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(GCPtrLeaf));

    /* Allocate two nodes. */
    uint32_t idxL2Root = 0;
    PDBGFBPL2ENTRY pL2Root = NULL;
    int rc = dbgfR3BpL2TblEntryAlloc(pUVM, &idxL2Root, &pL2Root);
    if (RT_SUCCESS(rc))
    {
        uint32_t idxL2Leaf = 0;
        PDBGFBPL2ENTRY pL2Leaf = NULL;
        rc = dbgfR3BpL2TblEntryAlloc(pUVM, &idxL2Leaf, &pL2Leaf);
        if (RT_SUCCESS(rc))
        {
            /* Link the leaf under the root on the side matching the key order
               (smaller keys go left). */
            dbgfBpL2TblEntryInit(pL2Leaf, hBpLeaf, GCPtrLeaf, DBGF_BP_L2_ENTRY_IDX_END, DBGF_BP_L2_ENTRY_IDX_END, 0 /*iDepth*/);
            if (GCPtrLeaf < GCPtrRoot)
                dbgfBpL2TblEntryInit(pL2Root, hBpRoot, GCPtrRoot, idxL2Leaf, DBGF_BP_L2_ENTRY_IDX_END, 0 /*iDepth*/);
            else
                dbgfBpL2TblEntryInit(pL2Root, hBpRoot, GCPtrRoot, DBGF_BP_L2_ENTRY_IDX_END, idxL2Leaf, 0 /*iDepth*/);

            /* Publish the tree by atomically swapping the L1 entry from the old
               direct-handle value to the new L2-index value. */
            uint32_t const u32Entry = DBGF_BP_INT3_L1_ENTRY_CREATE_L2_IDX(idxL2Root);
            if (ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], u32Entry, u32EntryOld))
                return VINF_SUCCESS;

            /* The L1 entry has changed due to another thread racing us during insertion, free nodes and try again. */
            dbgfR3BpL2TblEntryFree(pUVM, idxL2Leaf, pL2Leaf);
            rc = VINF_TRY_AGAIN;
        }

        dbgfR3BpL2TblEntryFree(pUVM, idxL2Root, pL2Root);
    }

    return rc;
}
1157
1158
1159/**
1160 * Inserts the given breakpoint handle into an existing binary search tree.
1161 *
1162 * @returns VBox status code.
1163 * @param pUVM The user mode VM handle.
1164 * @param idxL2Root The index of the tree root in the L2 table.
1165 * @param hBp The node DBGF handle to insert.
1166 * @param GCPtr The nodes GC pointer to use as a key.
1167 */
1168static int dbgfR3BpInt2L2BstNodeInsert(PUVM pUVM, uint32_t idxL2Root, DBGFBP hBp, RTGCUINTPTR GCPtr)
1169{
1170 GCPtr = DBGF_BP_INT3_L2_KEY_EXTRACT_FROM_ADDR(GCPtr);
1171
1172 /* Allocate a new node first. */
1173 uint32_t idxL2Nd = 0;
1174 PDBGFBPL2ENTRY pL2Nd = NULL;
1175 int rc = dbgfR3BpL2TblEntryAlloc(pUVM, &idxL2Nd, &pL2Nd);
1176 if (RT_SUCCESS(rc))
1177 {
1178 /* Walk the tree and find the correct node to insert to. */
1179 PDBGFBPL2ENTRY pL2Entry = dbgfR3BpL2GetByIdx(pUVM, idxL2Root);
1180 while (RT_LIKELY(pL2Entry))
1181 {
1182 /* Make a copy of the entry. */
1183 DBGFBPL2ENTRY L2Entry;
1184 L2Entry.u64GCPtrKeyAndBpHnd1 = ASMAtomicReadU64(&pL2Entry->u64GCPtrKeyAndBpHnd1);
1185 L2Entry.u64LeftRightIdxDepthBpHnd2 = ASMAtomicReadU64(&pL2Entry->u64LeftRightIdxDepthBpHnd2);
1186
1187 RTGCUINTPTR GCPtrL2Entry = DBGF_BP_L2_ENTRY_GET_GCPTR(L2Entry.u64GCPtrKeyAndBpHnd1);
1188 AssertBreak(GCPtr != GCPtrL2Entry);
1189
1190 /* Not found, get to the next level. */
1191 uint32_t idxL2Next = GCPtr < GCPtrL2Entry
1192 ? DBGF_BP_L2_ENTRY_GET_IDX_LEFT(L2Entry.u64LeftRightIdxDepthBpHnd2)
1193 : DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(L2Entry.u64LeftRightIdxDepthBpHnd2);
1194 if (idxL2Next == DBGF_BP_L2_ENTRY_IDX_END)
1195 {
1196 /* Insert the new node here. */
1197 dbgfBpL2TblEntryInit(pL2Nd, hBp, GCPtr, DBGF_BP_L2_ENTRY_IDX_END, DBGF_BP_L2_ENTRY_IDX_END, 0 /*iDepth*/);
1198 if (GCPtr < GCPtrL2Entry)
1199 dbgfBpL2TblEntryUpdateLeft(pL2Entry, idxL2Next, 0 /*iDepth*/);
1200 else
1201 dbgfBpL2TblEntryUpdateRight(pL2Entry, idxL2Next, 0 /*iDepth*/);
1202 return VINF_SUCCESS;
1203 }
1204
1205 pL2Entry = dbgfR3BpL2GetByIdx(pUVM, idxL2Next);
1206 }
1207
1208 dbgfR3BpL2TblEntryFree(pUVM, idxL2Nd, pL2Nd);
1209 rc = VERR_DBGF_BP_L2_LOOKUP_FAILED;
1210 }
1211
1212 return rc;
1213}
1214
1215
1216/**
1217 * Adds the given breakpoint handle keyed with the GC pointer to the proper L2 binary search tree
1218 * possibly creating a new tree.
1219 *
1220 * @returns VBox status code.
1221 * @param pUVM The user mode VM handle.
1222 * @param idxL1 The index into the L1 table the breakpoint uses.
1223 * @param hBp The breakpoint handle which is to be added.
1224 * @param GCPtr The GC pointer the breakpoint is keyed with.
1225 */
1226static int dbgfR3BpInt3L2BstNodeAdd(PUVM pUVM, uint32_t idxL1, DBGFBP hBp, RTGCUINTPTR GCPtr)
1227{
1228 int rc = RTSemFastMutexRequest(pUVM->dbgf.s.hMtxBpL2Wr); AssertRC(rc);
1229
1230 uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1]); /* Re-read, could get raced by a remove operation. */
1231 uint8_t u8Type = DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32Entry);
1232 if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND)
1233 {
1234 /* Create a new search tree, gather the necessary information first. */
1235 DBGFBP hBp2 = DBGF_BP_INT3_L1_ENTRY_GET_BP_HND(u32Entry);
1236 PDBGFBPINT pBp2 = dbgfR3BpGetByHnd(pUVM, hBp2);
1237 AssertStmt(RT_VALID_PTR(pBp2), rc = VERR_DBGF_BP_IPE_7);
1238 if (RT_SUCCESS(rc))
1239 rc = dbgfR3BpInt3L2BstCreate(pUVM, idxL1, u32Entry, hBp, GCPtr, hBp2, pBp2->Pub.u.Int3.GCPtr);
1240 }
1241 else if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX)
1242 rc = dbgfR3BpInt2L2BstNodeInsert(pUVM, DBGF_BP_INT3_L1_ENTRY_GET_L2_IDX(u32Entry), hBp, GCPtr);
1243
1244 int rc2 = RTSemFastMutexRelease(pUVM->dbgf.s.hMtxBpL2Wr); AssertRC(rc2);
1245 return rc;
1246}
1247
1248
1249/**
1250 * Gets the leftmost from the given tree node start index.
1251 *
1252 * @returns VBox status code.
1253 * @param pUVM The user mode VM handle.
1254 * @param idxL2Start The start index to walk from.
1255 * @param pidxL2Leftmost Where to store the L2 table index of the leftmost entry.
1256 * @param ppL2NdLeftmost Where to store the pointer to the leftmost L2 table entry.
1257 * @param pidxL2NdLeftParent Where to store the L2 table index of the leftmost entries parent.
1258 * @param ppL2NdLeftParent Where to store the pointer to the leftmost L2 table entries parent.
1259 */
1260static int dbgfR33BpInt3BstGetLeftmostEntryFromNode(PUVM pUVM, uint32_t idxL2Start,
1261 uint32_t *pidxL2Leftmost, PDBGFBPL2ENTRY *ppL2NdLeftmost,
1262 uint32_t *pidxL2NdLeftParent, PDBGFBPL2ENTRY *ppL2NdLeftParent)
1263{
1264 uint32_t idxL2Parent = DBGF_BP_L2_ENTRY_IDX_END;
1265 PDBGFBPL2ENTRY pL2NdParent = NULL;
1266
1267 for (;;)
1268 {
1269 PDBGFBPL2ENTRY pL2Entry = dbgfR3BpL2GetByIdx(pUVM, idxL2Start);
1270 AssertPtr(pL2Entry);
1271
1272 uint32_t idxL2Left = DBGF_BP_L2_ENTRY_GET_IDX_LEFT(pL2Entry->u64LeftRightIdxDepthBpHnd2);
1273 if (idxL2Start == DBGF_BP_L2_ENTRY_IDX_END)
1274 {
1275 *pidxL2Leftmost = idxL2Start;
1276 *ppL2NdLeftmost = pL2Entry;
1277 *pidxL2NdLeftParent = idxL2Parent;
1278 *ppL2NdLeftParent = pL2NdParent;
1279 break;
1280 }
1281
1282 idxL2Parent = idxL2Start;
1283 idxL2Start = idxL2Left;
1284 pL2NdParent = pL2Entry;
1285 }
1286
1287 return VINF_SUCCESS;
1288}
1289
1290
1291/**
1292 * Removes the given node rearranging the tree.
1293 *
1294 * @returns VBox status code.
1295 * @param pUVM The user mode VM handle.
1296 * @param idxL1 The index into the L1 table pointing to the binary search tree containing the node.
1297 * @param idxL2Root The L2 table index where the tree root is located.
1298 * @param idxL2Nd The node index to remove.
1299 * @param pL2Nd The L2 table entry to remove.
1300 * @param idxL2NdParent The parents index, can be DBGF_BP_L2_ENTRY_IDX_END if the root is about to be removed.
1301 * @param pL2NdParent The parents L2 table entry, can be NULL if the root is about to be removed.
1302 * @param fLeftChild Flag whether the node is the left child of the parent or the right one.
1303 */
1304static int dbgfR3BpInt3BstNodeRemove(PUVM pUVM, uint32_t idxL1, uint32_t idxL2Root,
1305 uint32_t idxL2Nd, PDBGFBPL2ENTRY pL2Nd,
1306 uint32_t idxL2NdParent, PDBGFBPL2ENTRY pL2NdParent,
1307 bool fLeftChild)
1308{
1309 /*
1310 * If there are only two nodes remaining the tree will get destroyed and the
1311 * L1 entry will be converted to the direct handle type.
1312 */
1313 uint32_t idxL2Left = DBGF_BP_L2_ENTRY_GET_IDX_LEFT(pL2Nd->u64LeftRightIdxDepthBpHnd2);
1314 uint32_t idxL2Right = DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(pL2Nd->u64LeftRightIdxDepthBpHnd2);
1315
1316 Assert(idxL2NdParent != DBGF_BP_L2_ENTRY_IDX_END || !pL2NdParent); RT_NOREF(idxL2NdParent);
1317 uint32_t idxL2ParentNew = DBGF_BP_L2_ENTRY_IDX_END;
1318 if (idxL2Right == DBGF_BP_L2_ENTRY_IDX_END)
1319 idxL2ParentNew = idxL2Left;
1320 else
1321 {
1322 /* Find the leftmost entry of the right subtree and move it to the to be removed nodes location in the tree. */
1323 PDBGFBPL2ENTRY pL2NdLeftmostParent = NULL;
1324 PDBGFBPL2ENTRY pL2NdLeftmost = NULL;
1325 uint32_t idxL2NdLeftmostParent = DBGF_BP_L2_ENTRY_IDX_END;
1326 uint32_t idxL2Leftmost = DBGF_BP_L2_ENTRY_IDX_END;
1327 int rc = dbgfR33BpInt3BstGetLeftmostEntryFromNode(pUVM, idxL2Right, &idxL2Leftmost ,&pL2NdLeftmost,
1328 &idxL2NdLeftmostParent, &pL2NdLeftmostParent);
1329 AssertRCReturn(rc, rc);
1330
1331 if (pL2NdLeftmostParent)
1332 {
1333 /* Rearrange the leftmost entries parents pointer. */
1334 dbgfBpL2TblEntryUpdateLeft(pL2NdLeftmostParent, DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(pL2NdLeftmost->u64LeftRightIdxDepthBpHnd2), 0 /*iDepth*/);
1335 dbgfBpL2TblEntryUpdateRight(pL2NdLeftmost, idxL2Right, 0 /*iDepth*/);
1336 }
1337
1338 dbgfBpL2TblEntryUpdateLeft(pL2NdLeftmost, idxL2Left, 0 /*iDepth*/);
1339
1340 /* Update the remove nodes parent to point to the new node. */
1341 idxL2ParentNew = idxL2Leftmost;
1342 }
1343
1344 if (pL2NdParent)
1345 {
1346 /* Asssign the new L2 index to proper parents left or right pointer. */
1347 if (fLeftChild)
1348 dbgfBpL2TblEntryUpdateLeft(pL2NdParent, idxL2ParentNew, 0 /*iDepth*/);
1349 else
1350 dbgfBpL2TblEntryUpdateRight(pL2NdParent, idxL2ParentNew, 0 /*iDepth*/);
1351 }
1352 else
1353 {
1354 /* The root node is removed, set the new root in the L1 table. */
1355 Assert(idxL2ParentNew != DBGF_BP_L2_ENTRY_IDX_END);
1356 idxL2Root = idxL2ParentNew;
1357 ASMAtomicXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], DBGF_BP_INT3_L1_ENTRY_CREATE_L2_IDX(idxL2Left));
1358 }
1359
1360 /* Free the node. */
1361 dbgfR3BpL2TblEntryFree(pUVM, idxL2Nd, pL2Nd);
1362
1363 /*
1364 * Check whether the old/new root is the only node remaining and convert the L1
1365 * table entry to a direct breakpoint handle one in that case.
1366 */
1367 pL2Nd = dbgfR3BpL2GetByIdx(pUVM, idxL2Root);
1368 AssertPtr(pL2Nd);
1369 if ( DBGF_BP_L2_ENTRY_GET_IDX_LEFT(pL2Nd->u64LeftRightIdxDepthBpHnd2) == DBGF_BP_L2_ENTRY_IDX_END
1370 && DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(pL2Nd->u64LeftRightIdxDepthBpHnd2) == DBGF_BP_L2_ENTRY_IDX_END)
1371 {
1372 DBGFBP hBp = DBGF_BP_L2_ENTRY_GET_BP_HND(pL2Nd->u64GCPtrKeyAndBpHnd1, pL2Nd->u64LeftRightIdxDepthBpHnd2);
1373 dbgfR3BpL2TblEntryFree(pUVM, idxL2Root, pL2Nd);
1374 ASMAtomicXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], DBGF_BP_INT3_L1_ENTRY_CREATE_BP_HND(hBp));
1375 }
1376
1377 return VINF_SUCCESS;
1378}
1379
1380
1381/**
1382 * Removes the given breakpoint handle keyed with the GC pointer from the L2 binary search tree
1383 * pointed to by the given L2 root index.
1384 *
1385 * @returns VBox status code.
1386 * @param pUVM The user mode VM handle.
1387 * @param idxL1 The index into the L1 table pointing to the binary search tree.
1388 * @param idxL2Root The L2 table index where the tree root is located.
1389 * @param hBp The breakpoint handle which is to be removed.
1390 * @param GCPtr The GC pointer the breakpoint is keyed with.
1391 */
1392static int dbgfR3BpInt3L2BstRemove(PUVM pUVM, uint32_t idxL1, uint32_t idxL2Root, DBGFBP hBp, RTGCUINTPTR GCPtr)
1393{
1394 GCPtr = DBGF_BP_INT3_L2_KEY_EXTRACT_FROM_ADDR(GCPtr);
1395
1396 int rc = RTSemFastMutexRequest(pUVM->dbgf.s.hMtxBpL2Wr); AssertRC(rc);
1397
1398 uint32_t idxL2Cur = idxL2Root;
1399 uint32_t idxL2Parent = DBGF_BP_L2_ENTRY_IDX_END;
1400 bool fLeftChild = false;
1401 PDBGFBPL2ENTRY pL2EntryParent = NULL;
1402 for (;;)
1403 {
1404 PDBGFBPL2ENTRY pL2Entry = dbgfR3BpL2GetByIdx(pUVM, idxL2Cur);
1405 AssertPtr(pL2Entry);
1406
1407 /* Check whether this node is to be removed.. */
1408 RTGCUINTPTR GCPtrL2Entry = DBGF_BP_L2_ENTRY_GET_GCPTR(pL2Entry->u64GCPtrKeyAndBpHnd1);
1409 if (GCPtrL2Entry == GCPtr)
1410 {
1411 Assert(DBGF_BP_L2_ENTRY_GET_BP_HND(pL2Entry->u64GCPtrKeyAndBpHnd1, pL2Entry->u64LeftRightIdxDepthBpHnd2) == hBp); RT_NOREF(hBp);
1412
1413 rc = dbgfR3BpInt3BstNodeRemove(pUVM, idxL1, idxL2Root, idxL2Cur, pL2Entry, idxL2Parent, pL2EntryParent, fLeftChild);
1414 break;
1415 }
1416
1417 pL2EntryParent = pL2Entry;
1418 idxL2Parent = idxL2Cur;
1419
1420 if (GCPtrL2Entry < GCPtr)
1421 {
1422 fLeftChild = true;
1423 idxL2Cur = DBGF_BP_L2_ENTRY_GET_IDX_LEFT(pL2Entry->u64LeftRightIdxDepthBpHnd2);
1424 }
1425 else
1426 {
1427 fLeftChild = false;
1428 idxL2Cur = DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(pL2Entry->u64LeftRightIdxDepthBpHnd2);
1429 }
1430
1431 AssertBreakStmt(idxL2Cur != DBGF_BP_L2_ENTRY_IDX_END, rc = VERR_DBGF_BP_L2_LOOKUP_FAILED);
1432 }
1433
1434 int rc2 = RTSemFastMutexRelease(pUVM->dbgf.s.hMtxBpL2Wr); AssertRC(rc2);
1435
1436 return rc;
1437}
1438
1439
1440/**
1441 * Adds the given int3 breakpoint to the appropriate lookup tables.
1442 *
1443 * @returns VBox status code.
1444 * @param pUVM The user mode VM handle.
1445 * @param hBp The breakpoint handle to add.
1446 * @param pBp The internal breakpoint state.
1447 */
1448static int dbgfR3BpInt3Add(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
1449{
1450 AssertReturn(DBGF_BP_PUB_GET_TYPE(&pBp->Pub) == DBGFBPTYPE_INT3, VERR_DBGF_BP_IPE_3);
1451
1452 int rc = VINF_SUCCESS;
1453 uint16_t idxL1 = DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(pBp->Pub.u.Int3.GCPtr);
1454 uint8_t cTries = 16;
1455
1456 while (cTries--)
1457 {
1458 uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1]);
1459 if (u32Entry == DBGF_BP_INT3_L1_ENTRY_TYPE_NULL)
1460 {
1461 /*
1462 * No breakpoint assigned so far for this entry, create an entry containing
1463 * the direct breakpoint handle and try to exchange it atomically.
1464 */
1465 u32Entry = DBGF_BP_INT3_L1_ENTRY_CREATE_BP_HND(hBp);
1466 if (ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], u32Entry, DBGF_BP_INT3_L1_ENTRY_TYPE_NULL))
1467 break;
1468 }
1469 else
1470 {
1471 rc = dbgfR3BpInt3L2BstNodeAdd(pUVM, idxL1, hBp, pBp->Pub.u.Int3.GCPtr);
1472 if (rc != VINF_TRY_AGAIN)
1473 break;
1474 }
1475 }
1476
1477 if ( RT_SUCCESS(rc)
1478 && !cTries) /* Too much contention, abort with an error. */
1479 rc = VERR_DBGF_BP_INT3_ADD_TRIES_REACHED;
1480
1481 return rc;
1482}
1483
1484
1485/**
1486 * Adds the given port I/O breakpoint to the appropriate lookup tables.
1487 *
1488 * @returns VBox status code.
1489 * @param pUVM The user mode VM handle.
1490 * @param hBp The breakpoint handle to add.
1491 * @param pBp The internal breakpoint state.
1492 */
1493static int dbgfR3BpPortIoAdd(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
1494{
1495 AssertReturn(DBGF_BP_PUB_GET_TYPE(&pBp->Pub) == DBGFBPTYPE_PORT_IO, VERR_DBGF_BP_IPE_3);
1496
1497 uint16_t uPortExcl = pBp->Pub.u.PortIo.uPort + pBp->Pub.u.PortIo.cPorts;
1498 uint32_t u32Entry = DBGF_BP_INT3_L1_ENTRY_CREATE_BP_HND(hBp);
1499 for (uint16_t idxPort = pBp->Pub.u.PortIo.uPort; idxPort < uPortExcl; idxPort++)
1500 {
1501 bool fXchg = ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocPortIoR3[idxPort], u32Entry, DBGF_BP_INT3_L1_ENTRY_TYPE_NULL);
1502 if (!fXchg)
1503 {
1504 /* Something raced us, so roll back the other registrations. */
1505 while (idxPort > pBp->Pub.u.PortIo.uPort)
1506 {
1507 fXchg = ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocPortIoR3[idxPort], DBGF_BP_INT3_L1_ENTRY_TYPE_NULL, u32Entry);
1508 Assert(fXchg); RT_NOREF(fXchg);
1509 }
1510
1511 return VERR_DBGF_BP_INT3_ADD_TRIES_REACHED; /** @todo New status code */
1512 }
1513 }
1514
1515 return VINF_SUCCESS;
1516}
1517
1518
/**
 * Get a breakpoint given by address.
 *
 * @returns The breakpoint handle on success or NIL_DBGFBP if not found.
 * @param   pUVM        The user mode VM handle.
 * @param   enmType     The breakpoint type (only DBGFBPTYPE_REG and DBGFBPTYPE_INT3 are supported).
 * @param   GCPtr       The breakpoint address.
 * @param   ppBp        Where to store the pointer to the internal breakpoint state on success, optional.
 */
static DBGFBP dbgfR3BpGetByAddr(PUVM pUVM, DBGFBPTYPE enmType, RTGCUINTPTR GCPtr, PDBGFBPINT *ppBp)
{
    DBGFBP hBp = NIL_DBGFBP;

    switch (enmType)
    {
        case DBGFBPTYPE_REG:
        {
            PVM pVM = pUVM->pVM;
            VM_ASSERT_VALID_EXT_RETURN(pVM, NIL_DBGFBP);

            /* Linear scan of the (small) hardware breakpoint slot array. */
            for (uint32_t i = 0; i < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); i++)
            {
                PDBGFBPHW pHwBp = &pVM->dbgf.s.aHwBreakpoints[i];

                AssertCompileSize(DBGFBP, sizeof(uint32_t));
                DBGFBP hBpTmp = ASMAtomicReadU32(&pHwBp->hBp);
                if (   pHwBp->GCPtr == GCPtr
                    && hBpTmp != NIL_DBGFBP)
                {
                    hBp = hBpTmp;
                    break;
                }
            }
            break;
        }

        case DBGFBPTYPE_INT3:
        {
            /* Look up the L1 table slot the address hashes to. */
            const uint16_t idxL1      = DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(GCPtr);
            const uint32_t u32L1Entry = ASMAtomicReadU32(&pUVM->dbgf.s.CTX_SUFF(paBpLocL1)[idxL1]);

            if (u32L1Entry != DBGF_BP_INT3_L1_ENTRY_TYPE_NULL)
            {
                uint8_t u8Type = DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32L1Entry);
                if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND)
                    /* Single breakpoint: the handle is stored directly in the L1 entry. */
                    hBp = DBGF_BP_INT3_L1_ENTRY_GET_BP_HND(u32L1Entry);
                else if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX)
                {
                    /* Multiple breakpoints hash here: walk the L2 binary search
                       tree (smaller keys left, larger keys right). */
                    RTGCUINTPTR GCPtrKey = DBGF_BP_INT3_L2_KEY_EXTRACT_FROM_ADDR(GCPtr);
                    PDBGFBPL2ENTRY pL2Nd = dbgfR3BpL2GetByIdx(pUVM, DBGF_BP_INT3_L1_ENTRY_GET_L2_IDX(u32L1Entry));

                    for (;;)
                    {
                        AssertPtr(pL2Nd);

                        RTGCUINTPTR GCPtrL2Entry = DBGF_BP_L2_ENTRY_GET_GCPTR(pL2Nd->u64GCPtrKeyAndBpHnd1);
                        if (GCPtrKey == GCPtrL2Entry)
                        {
                            hBp = DBGF_BP_L2_ENTRY_GET_BP_HND(pL2Nd->u64GCPtrKeyAndBpHnd1, pL2Nd->u64LeftRightIdxDepthBpHnd2);
                            break;
                        }

                        /* Not found, get to the next level. */
                        uint32_t idxL2Next = GCPtrKey < GCPtrL2Entry
                                           ? DBGF_BP_L2_ENTRY_GET_IDX_LEFT(pL2Nd->u64LeftRightIdxDepthBpHnd2)
                                           : DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(pL2Nd->u64LeftRightIdxDepthBpHnd2);
                        /* Address not found if the entry denotes the end. */
                        if (idxL2Next == DBGF_BP_L2_ENTRY_IDX_END)
                            break;

                        pL2Nd = dbgfR3BpL2GetByIdx(pUVM, idxL2Next);
                    }
                }
            }
            break;
        }

        default:
            AssertMsgFailed(("enmType=%d\n", enmType));
            break;
    }

    /* Optionally resolve the handle to the internal breakpoint state. */
    if (   hBp != NIL_DBGFBP
        && ppBp)
        *ppBp = dbgfR3BpGetByHnd(pUVM, hBp);
    return hBp;
}
1606
1607
1608/**
1609 * Get a port I/O breakpoint given by the range.
1610 *
1611 * @returns The breakpoint handle on success or NIL_DBGF if not found.
1612 * @param pUVM The user mode VM handle.
1613 * @param uPort First port in the range.
1614 * @param cPorts Number of ports in the range.
1615 * @param ppBp Where to store the pointer to the internal breakpoint state on success, optional.
1616 */
1617static DBGFBP dbgfR3BpPortIoGetByRange(PUVM pUVM, RTIOPORT uPort, RTIOPORT cPorts, PDBGFBPINT *ppBp)
1618{
1619 DBGFBP hBp = NIL_DBGFBP;
1620
1621 for (RTIOPORT idxPort = uPort; idxPort < uPort + cPorts; idxPort++)
1622 {
1623 const uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.CTX_SUFF(paBpLocPortIo)[idxPort]);
1624 if (u32Entry != DBGF_BP_INT3_L1_ENTRY_TYPE_NULL)
1625 {
1626 hBp = DBGF_BP_INT3_L1_ENTRY_GET_BP_HND(u32Entry);
1627 break;
1628 }
1629 }
1630
1631 if ( hBp != NIL_DBGFBP
1632 && ppBp)
1633 *ppBp = dbgfR3BpGetByHnd(pUVM, hBp);
1634 return hBp;
1635}
1636
1637
/**
 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
 *
 * Rendezvous worker removing an int3 breakpoint from the lookup tables.  Only
 * EMT(0) does the work; the rendezvous guarantees no EMT is traversing an L2
 * tree while nodes are being removed.
 */
static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpInt3RemoveEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    /* The breakpoint handle travels in the user argument. */
    DBGFBP hBp = (DBGFBP)(uintptr_t)pvUser;

    VMCPU_ASSERT_EMT(pVCpu);
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    PUVM pUVM = pVM->pUVM;
    PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
    AssertPtrReturn(pBp, VERR_DBGF_BP_IPE_8);

    int rc = VINF_SUCCESS;
    if (pVCpu->idCpu == 0)
    {
        uint16_t idxL1 = DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(pBp->Pub.u.Int3.GCPtr);
        uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1]);
        AssertReturn(u32Entry != DBGF_BP_INT3_L1_ENTRY_TYPE_NULL, VERR_DBGF_BP_IPE_6);

        uint8_t u8Type = DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32Entry);
        if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND)
        {
            /* Single breakpoint, just exchange atomically with the null value. */
            if (!ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], DBGF_BP_INT3_L1_ENTRY_TYPE_NULL, u32Entry))
            {
                /*
                 * A breakpoint addition must have raced us converting the L1 entry to an L2 index type, re-read
                 * and remove the node from the created binary search tree.
                 *
                 * This works because after the entry was converted to an L2 index it can only be converted back
                 * to a direct handle by removing one or more nodes which always goes through the fast mutex
                 * protecting the L2 table. Likewise adding a new breakpoint requires grabbing the mutex as well
                 * so there is serialization here and the node can be removed safely without having to worry about
                 * concurrent tree modifications.
                 */
                u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1]);
                AssertReturn(DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32Entry) == DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX, VERR_DBGF_BP_IPE_9);

                rc = dbgfR3BpInt3L2BstRemove(pUVM, idxL1, DBGF_BP_INT3_L1_ENTRY_GET_L2_IDX(u32Entry),
                                             hBp, pBp->Pub.u.Int3.GCPtr);
            }
        }
        else if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX)
            /* Multiple breakpoints in this slot: remove our node from the tree. */
            rc = dbgfR3BpInt3L2BstRemove(pUVM, idxL1, DBGF_BP_INT3_L1_ENTRY_GET_L2_IDX(u32Entry),
                                         hBp, pBp->Pub.u.Int3.GCPtr);
    }

    return rc;
}
1689
1690
1691/**
1692 * Removes the given int3 breakpoint from all lookup tables.
1693 *
1694 * @returns VBox status code.
1695 * @param pUVM The user mode VM handle.
1696 * @param hBp The breakpoint handle to remove.
1697 * @param pBp The internal breakpoint state.
1698 */
1699static int dbgfR3BpInt3Remove(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
1700{
1701 AssertReturn(DBGF_BP_PUB_GET_TYPE(&pBp->Pub) == DBGFBPTYPE_INT3, VERR_DBGF_BP_IPE_3);
1702
1703 /*
1704 * This has to be done by an EMT rendezvous in order to not have an EMT traversing
1705 * any L2 trees while it is being removed.
1706 */
1707 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpInt3RemoveEmtWorker, (void *)(uintptr_t)hBp);
1708}
1709
1710
1711/**
1712 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
1713 */
1714static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpPortIoRemoveEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
1715{
1716 DBGFBP hBp = (DBGFBP)(uintptr_t)pvUser;
1717
1718 VMCPU_ASSERT_EMT(pVCpu);
1719 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1720
1721 PUVM pUVM = pVM->pUVM;
1722 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
1723 AssertPtrReturn(pBp, VERR_DBGF_BP_IPE_8);
1724
1725 int rc = VINF_SUCCESS;
1726 if (pVCpu->idCpu == 0)
1727 {
1728 /*
1729 * Remove the whole range, there shouldn't be any other breakpoint configured for this range as this is not
1730 * allowed right now.
1731 */
1732 uint16_t uPortExcl = pBp->Pub.u.PortIo.uPort + pBp->Pub.u.PortIo.cPorts;
1733 for (uint16_t idxPort = pBp->Pub.u.PortIo.uPort; idxPort < uPortExcl; idxPort++)
1734 {
1735 uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocPortIoR3[idxPort]);
1736 AssertReturn(u32Entry != DBGF_BP_INT3_L1_ENTRY_TYPE_NULL, VERR_DBGF_BP_IPE_6);
1737
1738 uint8_t u8Type = DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32Entry);
1739 AssertReturn(u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND, VERR_DBGF_BP_IPE_7);
1740
1741 bool fXchg = ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocPortIoR3[idxPort], DBGF_BP_INT3_L1_ENTRY_TYPE_NULL, u32Entry);
1742 Assert(fXchg); RT_NOREF(fXchg);
1743 }
1744 }
1745
1746 return rc;
1747}
1748
1749
1750/**
1751 * Removes the given port I/O breakpoint from all lookup tables.
1752 *
1753 * @returns VBox status code.
1754 * @param pUVM The user mode VM handle.
1755 * @param hBp The breakpoint handle to remove.
1756 * @param pBp The internal breakpoint state.
1757 */
1758static int dbgfR3BpPortIoRemove(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
1759{
1760 AssertReturn(DBGF_BP_PUB_GET_TYPE(&pBp->Pub) == DBGFBPTYPE_PORT_IO, VERR_DBGF_BP_IPE_3);
1761
1762 /*
1763 * This has to be done by an EMT rendezvous in order to not have an EMT accessing
1764 * the breakpoint while it is removed.
1765 */
1766 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpPortIoRemoveEmtWorker, (void *)(uintptr_t)hBp);
1767}
1768
1769
1770/**
1771 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
1772 */
1773static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpRegRecalcOnCpu(PVM pVM, PVMCPU pVCpu, void *pvUser)
1774{
1775 RT_NOREF(pvUser);
1776
1777 /*
1778 * CPU 0 updates the enabled hardware breakpoint counts.
1779 */
1780 if (pVCpu->idCpu == 0)
1781 {
1782 pVM->dbgf.s.cEnabledHwBreakpoints = 0;
1783 pVM->dbgf.s.cEnabledHwIoBreakpoints = 0;
1784
1785 for (uint32_t iBp = 0; iBp < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); iBp++)
1786 {
1787 if (pVM->dbgf.s.aHwBreakpoints[iBp].fEnabled)
1788 {
1789 pVM->dbgf.s.cEnabledHwBreakpoints += 1;
1790 pVM->dbgf.s.cEnabledHwIoBreakpoints += pVM->dbgf.s.aHwBreakpoints[iBp].fType == X86_DR7_RW_IO;
1791 }
1792 }
1793 }
1794
1795 return CPUMRecalcHyperDRx(pVCpu, UINT8_MAX);
1796}
1797
1798
1799/**
1800 * Arms the given breakpoint.
1801 *
1802 * @returns VBox status code.
1803 * @param pUVM The user mode VM handle.
1804 * @param hBp The breakpoint handle to arm.
1805 * @param pBp The internal breakpoint state pointer for the handle.
1806 *
1807 * @thread Any thread.
1808 */
1809static int dbgfR3BpArm(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
1810{
1811 int rc;
1812 PVM pVM = pUVM->pVM;
1813
1814 Assert(!DBGF_BP_PUB_IS_ENABLED(&pBp->Pub));
1815 switch (DBGF_BP_PUB_GET_TYPE(&pBp->Pub))
1816 {
1817 case DBGFBPTYPE_REG:
1818 {
1819 Assert(pBp->Pub.u.Reg.iReg < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints));
1820 PDBGFBPHW pBpHw = &pVM->dbgf.s.aHwBreakpoints[pBp->Pub.u.Reg.iReg];
1821 Assert(pBpHw->hBp == hBp); RT_NOREF(hBp);
1822
1823 dbgfR3BpSetEnabled(pBp, true /*fEnabled*/);
1824 ASMAtomicWriteBool(&pBpHw->fEnabled, true);
1825 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpRegRecalcOnCpu, NULL);
1826 if (RT_FAILURE(rc))
1827 {
1828 ASMAtomicWriteBool(&pBpHw->fEnabled, false);
1829 dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
1830 }
1831 break;
1832 }
1833 case DBGFBPTYPE_INT3:
1834 {
1835 dbgfR3BpSetEnabled(pBp, true /*fEnabled*/);
1836
1837 /** @todo When we enable the first int3 breakpoint we should do this in an EMT rendezvous
1838 * as the VMX code intercepts #BP only when at least one int3 breakpoint is enabled.
1839 * A racing vCPU might trigger it and forward it to the guest causing panics/crashes/havoc. */
1840 /*
1841 * Save current byte and write the int3 instruction byte.
1842 */
1843 rc = PGMPhysSimpleReadGCPhys(pVM, &pBp->Pub.u.Int3.bOrg, pBp->Pub.u.Int3.PhysAddr, sizeof(pBp->Pub.u.Int3.bOrg));
1844 if (RT_SUCCESS(rc))
1845 {
1846 static const uint8_t s_bInt3 = 0xcc;
1847 rc = PGMPhysSimpleWriteGCPhys(pVM, pBp->Pub.u.Int3.PhysAddr, &s_bInt3, sizeof(s_bInt3));
1848 if (RT_SUCCESS(rc))
1849 {
1850 ASMAtomicIncU32(&pVM->dbgf.s.cEnabledInt3Breakpoints);
1851 Log(("DBGF: Set breakpoint at %RGv (Phys %RGp)\n", pBp->Pub.u.Int3.GCPtr, pBp->Pub.u.Int3.PhysAddr));
1852 }
1853 }
1854
1855 if (RT_FAILURE(rc))
1856 dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
1857
1858 break;
1859 }
1860 case DBGFBPTYPE_PORT_IO:
1861 {
1862 dbgfR3BpSetEnabled(pBp, true /*fEnabled*/);
1863 ASMAtomicIncU32(&pUVM->dbgf.s.cPortIoBps);
1864 IOMR3NotifyBreakpointCountChange(pVM, true /*fPortIo*/, false /*fMmio*/);
1865 rc = VINF_SUCCESS;
1866 break;
1867 }
1868 case DBGFBPTYPE_MMIO:
1869 rc = VERR_NOT_IMPLEMENTED;
1870 break;
1871 default:
1872 AssertMsgFailedReturn(("Invalid breakpoint type %d\n", DBGF_BP_PUB_GET_TYPE(&pBp->Pub)),
1873 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1874 }
1875
1876 return rc;
1877}
1878
1879
1880/**
1881 * Disarms the given breakpoint.
1882 *
1883 * @returns VBox status code.
1884 * @param pUVM The user mode VM handle.
1885 * @param hBp The breakpoint handle to disarm.
1886 * @param pBp The internal breakpoint state pointer for the handle.
1887 *
1888 * @thread Any thread.
1889 */
1890static int dbgfR3BpDisarm(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
1891{
1892 int rc;
1893 PVM pVM = pUVM->pVM;
1894
1895 Assert(DBGF_BP_PUB_IS_ENABLED(&pBp->Pub));
1896 switch (DBGF_BP_PUB_GET_TYPE(&pBp->Pub))
1897 {
1898 case DBGFBPTYPE_REG:
1899 {
1900 Assert(pBp->Pub.u.Reg.iReg < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints));
1901 PDBGFBPHW pBpHw = &pVM->dbgf.s.aHwBreakpoints[pBp->Pub.u.Reg.iReg];
1902 Assert(pBpHw->hBp == hBp); RT_NOREF(hBp);
1903
1904 dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
1905 ASMAtomicWriteBool(&pBpHw->fEnabled, false);
1906 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpRegRecalcOnCpu, NULL);
1907 if (RT_FAILURE(rc))
1908 {
1909 ASMAtomicWriteBool(&pBpHw->fEnabled, true);
1910 dbgfR3BpSetEnabled(pBp, true /*fEnabled*/);
1911 }
1912 break;
1913 }
1914 case DBGFBPTYPE_INT3:
1915 {
1916 /*
1917 * Check that the current byte is the int3 instruction, and restore the original one.
1918 * We currently ignore invalid bytes.
1919 */
1920 uint8_t bCurrent = 0;
1921 rc = PGMPhysSimpleReadGCPhys(pVM, &bCurrent, pBp->Pub.u.Int3.PhysAddr, sizeof(bCurrent));
1922 if ( RT_SUCCESS(rc)
1923 && bCurrent == 0xcc)
1924 {
1925 rc = PGMPhysSimpleWriteGCPhys(pVM, pBp->Pub.u.Int3.PhysAddr, &pBp->Pub.u.Int3.bOrg, sizeof(pBp->Pub.u.Int3.bOrg));
1926 if (RT_SUCCESS(rc))
1927 {
1928 ASMAtomicDecU32(&pVM->dbgf.s.cEnabledInt3Breakpoints);
1929 dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
1930 Log(("DBGF: Removed breakpoint at %RGv (Phys %RGp)\n", pBp->Pub.u.Int3.GCPtr, pBp->Pub.u.Int3.PhysAddr));
1931 }
1932 }
1933 break;
1934 }
1935 case DBGFBPTYPE_PORT_IO:
1936 {
1937 dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
1938 uint32_t cPortIoBps = ASMAtomicDecU32(&pUVM->dbgf.s.cPortIoBps);
1939 if (!cPortIoBps) /** @todo Need to gather all EMTs to not have a stray EMT accessing BP data when it might go away. */
1940 IOMR3NotifyBreakpointCountChange(pVM, false /*fPortIo*/, false /*fMmio*/);
1941 rc = VINF_SUCCESS;
1942 break;
1943 }
1944 case DBGFBPTYPE_MMIO:
1945 rc = VERR_NOT_IMPLEMENTED;
1946 break;
1947 default:
1948 AssertMsgFailedReturn(("Invalid breakpoint type %d\n", DBGF_BP_PUB_GET_TYPE(&pBp->Pub)),
1949 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1950 }
1951
1952 return rc;
1953}
1954
1955
1956/**
1957 * Worker for DBGFR3BpHit() differnetiating on the breakpoint type.
1958 *
1959 * @returns Strict VBox status code.
1960 * @param pVM The cross context VM structure.
1961 * @param pVCpu The vCPU the breakpoint event happened on.
1962 * @param hBp The breakpoint handle.
1963 * @param pBp The breakpoint data.
1964 * @param pBpOwner The breakpoint owner data.
1965 *
1966 * @thread EMT
1967 */
1968static VBOXSTRICTRC dbgfR3BpHit(PVM pVM, PVMCPU pVCpu, DBGFBP hBp, PDBGFBPINT pBp, PCDBGFBPOWNERINT pBpOwner)
1969{
1970 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1971
1972 switch (DBGF_BP_PUB_GET_TYPE(&pBp->Pub))
1973 {
1974 case DBGFBPTYPE_REG:
1975 case DBGFBPTYPE_INT3:
1976 {
1977 if (DBGF_BP_PUB_IS_EXEC_BEFORE(&pBp->Pub))
1978 rcStrict = pBpOwner->pfnBpHitR3(pVM, pVCpu->idCpu, pBp->pvUserR3, hBp, &pBp->Pub, DBGF_BP_F_HIT_EXEC_BEFORE);
1979 if (rcStrict == VINF_SUCCESS)
1980 {
1981 uint8_t abInstr[DBGF_BP_INSN_MAX];
1982 RTGCPTR const GCPtrInstr = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base;
1983 int rc = PGMPhysSimpleReadGCPtr(pVCpu, &abInstr[0], GCPtrInstr, sizeof(abInstr));
1984 AssertRC(rc);
1985 if (RT_SUCCESS(rc))
1986 {
1987 /* Replace the int3 with the original instruction byte. */
1988 abInstr[0] = pBp->Pub.u.Int3.bOrg;
1989 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), GCPtrInstr,
1990 &abInstr[0], sizeof(abInstr));
1991 if ( rcStrict == VINF_SUCCESS
1992 && DBGF_BP_PUB_IS_EXEC_AFTER(&pBp->Pub))
1993 {
1994 VBOXSTRICTRC rcStrict2 = pBpOwner->pfnBpHitR3(pVM, pVCpu->idCpu, pBp->pvUserR3, hBp, &pBp->Pub,
1995 DBGF_BP_F_HIT_EXEC_AFTER);
1996 if (rcStrict2 == VINF_SUCCESS)
1997 return VBOXSTRICTRC_VAL(rcStrict);
1998 if (rcStrict2 != VINF_DBGF_BP_HALT)
1999 return VERR_DBGF_BP_OWNER_CALLBACK_WRONG_STATUS;
2000 }
2001 else
2002 return VBOXSTRICTRC_VAL(rcStrict);
2003 }
2004 }
2005 break;
2006 }
2007 case DBGFBPTYPE_PORT_IO:
2008 case DBGFBPTYPE_MMIO:
2009 {
2010 pVCpu->dbgf.s.fBpIoActive = false;
2011 rcStrict = pBpOwner->pfnBpIoHitR3(pVM, pVCpu->idCpu, pBp->pvUserR3, hBp, &pBp->Pub,
2012 pVCpu->dbgf.s.fBpIoBefore
2013 ? DBGF_BP_F_HIT_EXEC_BEFORE
2014 : DBGF_BP_F_HIT_EXEC_AFTER,
2015 pVCpu->dbgf.s.fBpIoAccess, pVCpu->dbgf.s.uBpIoAddress,
2016 pVCpu->dbgf.s.uBpIoValue);
2017
2018 break;
2019 }
2020 default:
2021 AssertMsgFailedReturn(("Invalid breakpoint type %d\n", DBGF_BP_PUB_GET_TYPE(&pBp->Pub)),
2022 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
2023 }
2024
2025 return rcStrict;
2026}
2027
2028
2029/**
2030 * Creates a new breakpoint owner returning a handle which can be used when setting breakpoints.
2031 *
2032 * @returns VBox status code.
2033 * @retval VERR_DBGF_BP_OWNER_NO_MORE_HANDLES if there are no more free owner handles available.
2034 * @param pUVM The user mode VM handle.
2035 * @param pfnBpHit The R3 callback which is called when a breakpoint with the owner handle is hit.
2036 * @param pfnBpIoHit The R3 callback which is called when a I/O breakpoint with the owner handle is hit.
2037 * @param phBpOwner Where to store the owner handle on success.
2038 *
2039 * @thread Any thread but might defer work to EMT on the first call.
2040 */
2041VMMR3DECL(int) DBGFR3BpOwnerCreate(PUVM pUVM, PFNDBGFBPHIT pfnBpHit, PFNDBGFBPIOHIT pfnBpIoHit, PDBGFBPOWNER phBpOwner)
2042{
2043 /*
2044 * Validate the input.
2045 */
2046 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2047 AssertReturn(pfnBpHit || pfnBpIoHit, VERR_INVALID_PARAMETER);
2048 AssertPtrReturn(phBpOwner, VERR_INVALID_POINTER);
2049
2050 int rc = dbgfR3BpOwnerEnsureInit(pUVM);
2051 AssertRCReturn(rc ,rc);
2052
2053 /* Try to find a free entry in the owner table. */
2054 for (;;)
2055 {
2056 /* Scan the associated bitmap for a free entry. */
2057 int32_t iClr = ASMBitFirstClear(pUVM->dbgf.s.pbmBpOwnersAllocR3, DBGF_BP_OWNER_COUNT_MAX);
2058 if (iClr != -1)
2059 {
2060 /*
2061 * Try to allocate, we could get raced here as well. In that case
2062 * we try again.
2063 */
2064 if (!ASMAtomicBitTestAndSet(pUVM->dbgf.s.pbmBpOwnersAllocR3, iClr))
2065 {
2066 PDBGFBPOWNERINT pBpOwner = &pUVM->dbgf.s.paBpOwnersR3[iClr];
2067 pBpOwner->cRefs = 1;
2068 pBpOwner->pfnBpHitR3 = pfnBpHit;
2069 pBpOwner->pfnBpIoHitR3 = pfnBpIoHit;
2070
2071 *phBpOwner = (DBGFBPOWNER)iClr;
2072 return VINF_SUCCESS;
2073 }
2074 /* else Retry with another spot. */
2075 }
2076 else /* no free entry in bitmap, out of entries. */
2077 {
2078 rc = VERR_DBGF_BP_OWNER_NO_MORE_HANDLES;
2079 break;
2080 }
2081 }
2082
2083 return rc;
2084}
2085
2086
2087/**
2088 * Destroys the owner identified by the given handle.
2089 *
2090 * @returns VBox status code.
2091 * @retval VERR_INVALID_HANDLE if the given owner handle is invalid.
2092 * @retval VERR_DBGF_OWNER_BUSY if there are still breakpoints set with the given owner handle.
2093 * @param pUVM The user mode VM handle.
2094 * @param hBpOwner The breakpoint owner handle to destroy.
2095 */
2096VMMR3DECL(int) DBGFR3BpOwnerDestroy(PUVM pUVM, DBGFBPOWNER hBpOwner)
2097{
2098 /*
2099 * Validate the input.
2100 */
2101 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2102 AssertReturn(hBpOwner != NIL_DBGFBPOWNER, VERR_INVALID_HANDLE);
2103
2104 int rc = dbgfR3BpOwnerEnsureInit(pUVM);
2105 AssertRCReturn(rc ,rc);
2106
2107 PDBGFBPOWNERINT pBpOwner = dbgfR3BpOwnerGetByHnd(pUVM, hBpOwner);
2108 if (RT_LIKELY(pBpOwner))
2109 {
2110 if (ASMAtomicReadU32(&pBpOwner->cRefs) == 1)
2111 {
2112 pBpOwner->pfnBpHitR3 = NULL;
2113 ASMAtomicDecU32(&pBpOwner->cRefs);
2114 ASMAtomicBitClear(pUVM->dbgf.s.pbmBpOwnersAllocR3, hBpOwner);
2115 }
2116 else
2117 rc = VERR_DBGF_OWNER_BUSY;
2118 }
2119 else
2120 rc = VERR_INVALID_HANDLE;
2121
2122 return rc;
2123}
2124
2125
2126/**
2127 * Sets a breakpoint (int 3 based).
2128 *
2129 * @returns VBox status code.
2130 * @param pUVM The user mode VM handle.
2131 * @param idSrcCpu The ID of the virtual CPU used for the
2132 * breakpoint address resolution.
2133 * @param pAddress The address of the breakpoint.
2134 * @param iHitTrigger The hit count at which the breakpoint start triggering.
2135 * Use 0 (or 1) if it's gonna trigger at once.
2136 * @param iHitDisable The hit count which disables the breakpoint.
2137 * Use ~(uint64_t) if it's never gonna be disabled.
2138 * @param phBp Where to store the breakpoint handle on success.
2139 *
2140 * @thread Any thread.
2141 */
2142VMMR3DECL(int) DBGFR3BpSetInt3(PUVM pUVM, VMCPUID idSrcCpu, PCDBGFADDRESS pAddress,
2143 uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
2144{
2145 return DBGFR3BpSetInt3Ex(pUVM, NIL_DBGFBPOWNER, NULL /*pvUser*/, idSrcCpu, pAddress,
2146 DBGF_BP_F_DEFAULT, iHitTrigger, iHitDisable, phBp);
2147}
2148
2149
2150/**
2151 * Sets a breakpoint (int 3 based) - extended version.
2152 *
2153 * @returns VBox status code.
2154 * @param pUVM The user mode VM handle.
2155 * @param hOwner The owner handle, use NIL_DBGFBPOWNER if no special owner attached.
2156 * @param pvUser Opaque user data to pass in the owner callback.
2157 * @param idSrcCpu The ID of the virtual CPU used for the
2158 * breakpoint address resolution.
2159 * @param pAddress The address of the breakpoint.
2160 * @param fFlags Combination of DBGF_BP_F_XXX.
2161 * @param iHitTrigger The hit count at which the breakpoint start triggering.
2162 * Use 0 (or 1) if it's gonna trigger at once.
2163 * @param iHitDisable The hit count which disables the breakpoint.
2164 * Use ~(uint64_t) if it's never gonna be disabled.
2165 * @param phBp Where to store the breakpoint handle on success.
2166 *
2167 * @thread Any thread.
2168 */
2169VMMR3DECL(int) DBGFR3BpSetInt3Ex(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser,
2170 VMCPUID idSrcCpu, PCDBGFADDRESS pAddress, uint16_t fFlags,
2171 uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
2172{
2173 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2174 AssertReturn(hOwner != NIL_DBGFBPOWNER || pvUser == NULL, VERR_INVALID_PARAMETER);
2175 AssertReturn(DBGFR3AddrIsValid(pUVM, pAddress), VERR_INVALID_PARAMETER);
2176 AssertReturn(iHitTrigger <= iHitDisable, VERR_INVALID_PARAMETER);
2177 AssertPtrReturn(phBp, VERR_INVALID_POINTER);
2178
2179 int rc = dbgfR3BpEnsureInit(pUVM);
2180 AssertRCReturn(rc, rc);
2181
2182 /*
2183 * Translate & save the breakpoint address into a guest-physical address.
2184 */
2185 RTGCPHYS GCPhysBpAddr = NIL_RTGCPHYS;
2186 rc = DBGFR3AddrToPhys(pUVM, idSrcCpu, pAddress, &GCPhysBpAddr);
2187 if (RT_SUCCESS(rc))
2188 {
2189 /*
2190 * The physical address from DBGFR3AddrToPhys() is the start of the page,
2191 * we need the exact byte offset into the page while writing to it in dbgfR3BpInt3Arm().
2192 */
2193 GCPhysBpAddr |= (pAddress->FlatPtr & X86_PAGE_OFFSET_MASK);
2194
2195 PDBGFBPINT pBp = NULL;
2196 DBGFBP hBp = dbgfR3BpGetByAddr(pUVM, DBGFBPTYPE_INT3, pAddress->FlatPtr, &pBp);
2197 if ( hBp != NIL_DBGFBP
2198 && pBp->Pub.u.Int3.PhysAddr == GCPhysBpAddr)
2199 {
2200 rc = VINF_SUCCESS;
2201 if (!DBGF_BP_PUB_IS_ENABLED(&pBp->Pub))
2202 rc = dbgfR3BpArm(pUVM, hBp, pBp);
2203 if (RT_SUCCESS(rc))
2204 {
2205 rc = VINF_DBGF_BP_ALREADY_EXIST;
2206 if (phBp)
2207 *phBp = hBp;
2208 }
2209 return rc;
2210 }
2211
2212 rc = dbgfR3BpAlloc(pUVM, hOwner, pvUser, DBGFBPTYPE_INT3, fFlags, iHitTrigger, iHitDisable, &hBp, &pBp);
2213 if (RT_SUCCESS(rc))
2214 {
2215 pBp->Pub.u.Int3.PhysAddr = GCPhysBpAddr;
2216 pBp->Pub.u.Int3.GCPtr = pAddress->FlatPtr;
2217
2218 /* Add the breakpoint to the lookup tables. */
2219 rc = dbgfR3BpInt3Add(pUVM, hBp, pBp);
2220 if (RT_SUCCESS(rc))
2221 {
2222 /* Enable the breakpoint if requested. */
2223 if (fFlags & DBGF_BP_F_ENABLED)
2224 rc = dbgfR3BpArm(pUVM, hBp, pBp);
2225 if (RT_SUCCESS(rc))
2226 {
2227 *phBp = hBp;
2228 return VINF_SUCCESS;
2229 }
2230
2231 int rc2 = dbgfR3BpInt3Remove(pUVM, hBp, pBp); AssertRC(rc2);
2232 }
2233
2234 dbgfR3BpFree(pUVM, hBp, pBp);
2235 }
2236 }
2237
2238 return rc;
2239}
2240
2241
2242/**
2243 * Sets a register breakpoint.
2244 *
2245 * @returns VBox status code.
2246 * @param pUVM The user mode VM handle.
2247 * @param pAddress The address of the breakpoint.
2248 * @param iHitTrigger The hit count at which the breakpoint start triggering.
2249 * Use 0 (or 1) if it's gonna trigger at once.
2250 * @param iHitDisable The hit count which disables the breakpoint.
2251 * Use ~(uint64_t) if it's never gonna be disabled.
2252 * @param fType The access type (one of the X86_DR7_RW_* defines).
2253 * @param cb The access size - 1,2,4 or 8 (the latter is AMD64 long mode only.
2254 * Must be 1 if fType is X86_DR7_RW_EO.
2255 * @param phBp Where to store the breakpoint handle.
2256 *
2257 * @thread Any thread.
2258 */
2259VMMR3DECL(int) DBGFR3BpSetReg(PUVM pUVM, PCDBGFADDRESS pAddress, uint64_t iHitTrigger,
2260 uint64_t iHitDisable, uint8_t fType, uint8_t cb, PDBGFBP phBp)
2261{
2262 return DBGFR3BpSetRegEx(pUVM, NIL_DBGFBPOWNER, NULL /*pvUser*/, pAddress,
2263 DBGF_BP_F_DEFAULT, iHitTrigger, iHitDisable, fType, cb, phBp);
2264}
2265
2266
2267/**
2268 * Sets a register breakpoint - extended version.
2269 *
2270 * @returns VBox status code.
2271 * @param pUVM The user mode VM handle.
2272 * @param hOwner The owner handle, use NIL_DBGFBPOWNER if no special owner attached.
2273 * @param pvUser Opaque user data to pass in the owner callback.
2274 * @param pAddress The address of the breakpoint.
2275 * @param fFlags Combination of DBGF_BP_F_XXX.
2276 * @param iHitTrigger The hit count at which the breakpoint start triggering.
2277 * Use 0 (or 1) if it's gonna trigger at once.
2278 * @param iHitDisable The hit count which disables the breakpoint.
2279 * Use ~(uint64_t) if it's never gonna be disabled.
2280 * @param fType The access type (one of the X86_DR7_RW_* defines).
2281 * @param cb The access size - 1,2,4 or 8 (the latter is AMD64 long mode only.
2282 * Must be 1 if fType is X86_DR7_RW_EO.
2283 * @param phBp Where to store the breakpoint handle.
2284 *
2285 * @thread Any thread.
2286 */
2287VMMR3DECL(int) DBGFR3BpSetRegEx(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser,
2288 PCDBGFADDRESS pAddress, uint16_t fFlags,
2289 uint64_t iHitTrigger, uint64_t iHitDisable,
2290 uint8_t fType, uint8_t cb, PDBGFBP phBp)
2291{
2292 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2293 AssertReturn(hOwner != NIL_DBGFBPOWNER || pvUser == NULL, VERR_INVALID_PARAMETER);
2294 AssertReturn(DBGFR3AddrIsValid(pUVM, pAddress), VERR_INVALID_PARAMETER);
2295 AssertReturn(iHitTrigger <= iHitDisable, VERR_INVALID_PARAMETER);
2296 AssertReturn(cb > 0 && cb <= 8 && RT_IS_POWER_OF_TWO(cb), VERR_INVALID_PARAMETER);
2297 AssertPtrReturn(phBp, VERR_INVALID_POINTER);
2298 switch (fType)
2299 {
2300 case X86_DR7_RW_EO:
2301 if (cb == 1)
2302 break;
2303 AssertMsgFailedReturn(("fType=%#x cb=%d != 1\n", fType, cb), VERR_INVALID_PARAMETER);
2304 case X86_DR7_RW_IO:
2305 case X86_DR7_RW_RW:
2306 case X86_DR7_RW_WO:
2307 break;
2308 default:
2309 AssertMsgFailedReturn(("fType=%#x\n", fType), VERR_INVALID_PARAMETER);
2310 }
2311
2312 int rc = dbgfR3BpEnsureInit(pUVM);
2313 AssertRCReturn(rc, rc);
2314
2315 PDBGFBPINT pBp = NULL;
2316 DBGFBP hBp = dbgfR3BpGetByAddr(pUVM, DBGFBPTYPE_REG, pAddress->FlatPtr, &pBp);
2317 if ( hBp != NIL_DBGFBP
2318 && pBp->Pub.u.Reg.cb == cb
2319 && pBp->Pub.u.Reg.fType == fType)
2320 {
2321 rc = VINF_SUCCESS;
2322 if (!DBGF_BP_PUB_IS_ENABLED(&pBp->Pub))
2323 rc = dbgfR3BpArm(pUVM, hBp, pBp);
2324 if (RT_SUCCESS(rc))
2325 {
2326 rc = VINF_DBGF_BP_ALREADY_EXIST;
2327 if (phBp)
2328 *phBp = hBp;
2329 }
2330 return rc;
2331 }
2332
2333 /* Allocate new breakpoint. */
2334 rc = dbgfR3BpAlloc(pUVM, hOwner, pvUser, DBGFBPTYPE_REG, fFlags,
2335 iHitTrigger, iHitDisable, &hBp, &pBp);
2336 if (RT_SUCCESS(rc))
2337 {
2338 pBp->Pub.u.Reg.GCPtr = pAddress->FlatPtr;
2339 pBp->Pub.u.Reg.fType = fType;
2340 pBp->Pub.u.Reg.cb = cb;
2341 pBp->Pub.u.Reg.iReg = UINT8_MAX;
2342 ASMCompilerBarrier();
2343
2344 /* Assign the proper hardware breakpoint. */
2345 rc = dbgfR3BpRegAssign(pUVM->pVM, hBp, pBp);
2346 if (RT_SUCCESS(rc))
2347 {
2348 /* Arm the breakpoint. */
2349 if (fFlags & DBGF_BP_F_ENABLED)
2350 rc = dbgfR3BpArm(pUVM, hBp, pBp);
2351 if (RT_SUCCESS(rc))
2352 {
2353 if (phBp)
2354 *phBp = hBp;
2355 return VINF_SUCCESS;
2356 }
2357
2358 int rc2 = dbgfR3BpRegRemove(pUVM->pVM, hBp, pBp);
2359 AssertRC(rc2); RT_NOREF(rc2);
2360 }
2361
2362 dbgfR3BpFree(pUVM, hBp, pBp);
2363 }
2364
2365 return rc;
2366}
2367
2368
2369/**
2370 * This is only kept for now to not mess with the debugger implementation at this point,
2371 * recompiler breakpoints are not supported anymore (IEM has some API but it isn't implemented
2372 * and should probably be merged with the DBGF breakpoints).
2373 */
2374VMMR3DECL(int) DBGFR3BpSetREM(PUVM pUVM, PCDBGFADDRESS pAddress, uint64_t iHitTrigger,
2375 uint64_t iHitDisable, PDBGFBP phBp)
2376{
2377 RT_NOREF(pUVM, pAddress, iHitTrigger, iHitDisable, phBp);
2378 return VERR_NOT_SUPPORTED;
2379}
2380
2381
2382/**
2383 * Sets an I/O port breakpoint.
2384 *
2385 * @returns VBox status code.
2386 * @param pUVM The user mode VM handle.
2387 * @param uPort The first I/O port.
2388 * @param cPorts The number of I/O ports, see DBGFBPIOACCESS_XXX.
2389 * @param fAccess The access we want to break on.
2390 * @param iHitTrigger The hit count at which the breakpoint start
2391 * triggering. Use 0 (or 1) if it's gonna trigger at
2392 * once.
2393 * @param iHitDisable The hit count which disables the breakpoint.
2394 * Use ~(uint64_t) if it's never gonna be disabled.
2395 * @param phBp Where to store the breakpoint handle.
2396 *
2397 * @thread Any thread.
2398 */
2399VMMR3DECL(int) DBGFR3BpSetPortIo(PUVM pUVM, RTIOPORT uPort, RTIOPORT cPorts, uint32_t fAccess,
2400 uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
2401{
2402 return DBGFR3BpSetPortIoEx(pUVM, NIL_DBGFBPOWNER, NULL /*pvUser*/, uPort, cPorts, fAccess,
2403 DBGF_BP_F_DEFAULT, iHitTrigger, iHitDisable, phBp);
2404}
2405
2406
2407/**
2408 * Sets an I/O port breakpoint - extended version.
2409 *
2410 * @returns VBox status code.
2411 * @param pUVM The user mode VM handle.
2412 * @param hOwner The owner handle, use NIL_DBGFBPOWNER if no special owner attached.
2413 * @param pvUser Opaque user data to pass in the owner callback.
2414 * @param uPort The first I/O port.
2415 * @param cPorts The number of I/O ports, see DBGFBPIOACCESS_XXX.
2416 * @param fAccess The access we want to break on.
2417 * @param fFlags Combination of DBGF_BP_F_XXX.
2418 * @param iHitTrigger The hit count at which the breakpoint start
2419 * triggering. Use 0 (or 1) if it's gonna trigger at
2420 * once.
2421 * @param iHitDisable The hit count which disables the breakpoint.
2422 * Use ~(uint64_t) if it's never gonna be disabled.
2423 * @param phBp Where to store the breakpoint handle.
2424 *
2425 * @thread Any thread.
2426 */
2427VMMR3DECL(int) DBGFR3BpSetPortIoEx(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser,
2428 RTIOPORT uPort, RTIOPORT cPorts, uint32_t fAccess,
2429 uint32_t fFlags, uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
2430{
2431 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2432 AssertReturn(hOwner != NIL_DBGFBPOWNER || pvUser == NULL, VERR_INVALID_PARAMETER);
2433 AssertReturn(!(fAccess & ~DBGFBPIOACCESS_VALID_MASK_PORT_IO), VERR_INVALID_FLAGS);
2434 AssertReturn(fAccess, VERR_INVALID_FLAGS);
2435 AssertReturn(!(fFlags & ~DBGF_BP_F_VALID_MASK), VERR_INVALID_FLAGS);
2436 AssertReturn(fFlags, VERR_INVALID_FLAGS);
2437 AssertReturn(iHitTrigger <= iHitDisable, VERR_INVALID_PARAMETER);
2438 AssertPtrReturn(phBp, VERR_INVALID_POINTER);
2439 AssertReturn(cPorts > 0, VERR_OUT_OF_RANGE);
2440 AssertReturn((RTIOPORT)(uPort + (cPorts - 1)) >= uPort, VERR_OUT_OF_RANGE);
2441
2442 int rc = dbgfR3BpPortIoEnsureInit(pUVM);
2443 AssertRCReturn(rc, rc);
2444
2445 PDBGFBPINT pBp = NULL;
2446 DBGFBP hBp = dbgfR3BpPortIoGetByRange(pUVM, uPort, cPorts, &pBp);
2447 if ( hBp != NIL_DBGFBP
2448 && pBp->Pub.u.PortIo.uPort == uPort
2449 && pBp->Pub.u.PortIo.cPorts == cPorts
2450 && pBp->Pub.u.PortIo.fAccess == fAccess)
2451 {
2452 rc = VINF_SUCCESS;
2453 if (!DBGF_BP_PUB_IS_ENABLED(&pBp->Pub))
2454 rc = dbgfR3BpArm(pUVM, hBp, pBp);
2455 if (RT_SUCCESS(rc))
2456 {
2457 rc = VINF_DBGF_BP_ALREADY_EXIST;
2458 if (phBp)
2459 *phBp = hBp;
2460 }
2461 return rc;
2462 }
2463
2464 rc = dbgfR3BpAlloc(pUVM, hOwner, pvUser, DBGFBPTYPE_PORT_IO, fFlags, iHitTrigger, iHitDisable, &hBp, &pBp);
2465 if (RT_SUCCESS(rc))
2466 {
2467 pBp->Pub.u.PortIo.uPort = uPort;
2468 pBp->Pub.u.PortIo.cPorts = cPorts;
2469 pBp->Pub.u.PortIo.fAccess = fAccess;
2470
2471 /* Add the breakpoint to the lookup tables. */
2472 rc = dbgfR3BpPortIoAdd(pUVM, hBp, pBp);
2473 if (RT_SUCCESS(rc))
2474 {
2475 /* Enable the breakpoint if requested. */
2476 if (fFlags & DBGF_BP_F_ENABLED)
2477 rc = dbgfR3BpArm(pUVM, hBp, pBp);
2478 if (RT_SUCCESS(rc))
2479 {
2480 *phBp = hBp;
2481 return VINF_SUCCESS;
2482 }
2483
2484 int rc2 = dbgfR3BpPortIoRemove(pUVM, hBp, pBp); AssertRC(rc2);
2485 }
2486
2487 dbgfR3BpFree(pUVM, hBp, pBp);
2488 }
2489
2490 return rc;
2491}
2492
2493
2494/**
2495 * Sets a memory mapped I/O breakpoint.
2496 *
2497 * @returns VBox status code.
2498 * @param pUVM The user mode VM handle.
2499 * @param GCPhys The first MMIO address.
2500 * @param cb The size of the MMIO range to break on.
2501 * @param fAccess The access we want to break on.
2502 * @param iHitTrigger The hit count at which the breakpoint start
2503 * triggering. Use 0 (or 1) if it's gonna trigger at
2504 * once.
2505 * @param iHitDisable The hit count which disables the breakpoint.
2506 * Use ~(uint64_t) if it's never gonna be disabled.
2507 * @param phBp Where to store the breakpoint handle.
2508 *
2509 * @thread Any thread.
2510 */
2511VMMR3DECL(int) DBGFR3BpSetMmio(PUVM pUVM, RTGCPHYS GCPhys, uint32_t cb, uint32_t fAccess,
2512 uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
2513{
2514 return DBGFR3BpSetMmioEx(pUVM, NIL_DBGFBPOWNER, NULL /*pvUser*/, GCPhys, cb, fAccess,
2515 DBGF_BP_F_DEFAULT, iHitTrigger, iHitDisable, phBp);
2516}
2517
2518
2519/**
2520 * Sets a memory mapped I/O breakpoint - extended version.
2521 *
2522 * @returns VBox status code.
2523 * @param pUVM The user mode VM handle.
2524 * @param hOwner The owner handle, use NIL_DBGFBPOWNER if no special owner attached.
2525 * @param pvUser Opaque user data to pass in the owner callback.
2526 * @param GCPhys The first MMIO address.
2527 * @param cb The size of the MMIO range to break on.
2528 * @param fAccess The access we want to break on.
2529 * @param fFlags Combination of DBGF_BP_F_XXX.
2530 * @param iHitTrigger The hit count at which the breakpoint start
2531 * triggering. Use 0 (or 1) if it's gonna trigger at
2532 * once.
2533 * @param iHitDisable The hit count which disables the breakpoint.
2534 * Use ~(uint64_t) if it's never gonna be disabled.
2535 * @param phBp Where to store the breakpoint handle.
2536 *
2537 * @thread Any thread.
2538 */
2539VMMR3DECL(int) DBGFR3BpSetMmioEx(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser,
2540 RTGCPHYS GCPhys, uint32_t cb, uint32_t fAccess,
2541 uint32_t fFlags, uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
2542{
2543 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2544 AssertReturn(hOwner != NIL_DBGFBPOWNER || pvUser == NULL, VERR_INVALID_PARAMETER);
2545 AssertReturn(!(fAccess & ~DBGFBPIOACCESS_VALID_MASK_MMIO), VERR_INVALID_FLAGS);
2546 AssertReturn(fAccess, VERR_INVALID_FLAGS);
2547 AssertReturn(!(fFlags & ~DBGF_BP_F_VALID_MASK), VERR_INVALID_FLAGS);
2548 AssertReturn(fFlags, VERR_INVALID_FLAGS);
2549 AssertReturn(iHitTrigger <= iHitDisable, VERR_INVALID_PARAMETER);
2550 AssertPtrReturn(phBp, VERR_INVALID_POINTER);
2551 AssertReturn(cb, VERR_OUT_OF_RANGE);
2552 AssertReturn(GCPhys + cb < GCPhys, VERR_OUT_OF_RANGE);
2553
2554 int rc = dbgfR3BpEnsureInit(pUVM);
2555 AssertRCReturn(rc, rc);
2556
2557 return VERR_NOT_IMPLEMENTED;
2558}
2559
2560
2561/**
2562 * Clears a breakpoint.
2563 *
2564 * @returns VBox status code.
2565 * @param pUVM The user mode VM handle.
2566 * @param hBp The handle of the breakpoint which should be removed (cleared).
2567 *
2568 * @thread Any thread.
2569 */
2570VMMR3DECL(int) DBGFR3BpClear(PUVM pUVM, DBGFBP hBp)
2571{
2572 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2573 AssertReturn(hBp != NIL_DBGFBPOWNER, VERR_INVALID_HANDLE);
2574
2575 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
2576 AssertPtrReturn(pBp, VERR_DBGF_BP_NOT_FOUND);
2577
2578 /* Disarm the breakpoint when it is enabled. */
2579 if (DBGF_BP_PUB_IS_ENABLED(&pBp->Pub))
2580 {
2581 int rc = dbgfR3BpDisarm(pUVM, hBp, pBp);
2582 AssertRC(rc);
2583 }
2584
2585 switch (DBGF_BP_PUB_GET_TYPE(&pBp->Pub))
2586 {
2587 case DBGFBPTYPE_REG:
2588 {
2589 int rc = dbgfR3BpRegRemove(pUVM->pVM, hBp, pBp);
2590 AssertRC(rc);
2591 break;
2592 }
2593 case DBGFBPTYPE_INT3:
2594 {
2595 int rc = dbgfR3BpInt3Remove(pUVM, hBp, pBp);
2596 AssertRC(rc);
2597 break;
2598 }
2599 case DBGFBPTYPE_PORT_IO:
2600 {
2601 int rc = dbgfR3BpPortIoRemove(pUVM, hBp, pBp);
2602 AssertRC(rc);
2603 break;
2604 }
2605 default:
2606 break;
2607 }
2608
2609 dbgfR3BpFree(pUVM, hBp, pBp);
2610 return VINF_SUCCESS;
2611}
2612
2613
2614/**
2615 * Enables a breakpoint.
2616 *
2617 * @returns VBox status code.
2618 * @param pUVM The user mode VM handle.
2619 * @param hBp The handle of the breakpoint which should be enabled.
2620 *
2621 * @thread Any thread.
2622 */
2623VMMR3DECL(int) DBGFR3BpEnable(PUVM pUVM, DBGFBP hBp)
2624{
2625 /*
2626 * Validate the input.
2627 */
2628 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2629 AssertReturn(hBp != NIL_DBGFBPOWNER, VERR_INVALID_HANDLE);
2630
2631 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
2632 AssertPtrReturn(pBp, VERR_DBGF_BP_NOT_FOUND);
2633
2634 int rc;
2635 if (!DBGF_BP_PUB_IS_ENABLED(&pBp->Pub))
2636 rc = dbgfR3BpArm(pUVM, hBp, pBp);
2637 else
2638 rc = VINF_DBGF_BP_ALREADY_ENABLED;
2639
2640 return rc;
2641}
2642
2643
2644/**
2645 * Disables a breakpoint.
2646 *
2647 * @returns VBox status code.
2648 * @param pUVM The user mode VM handle.
2649 * @param hBp The handle of the breakpoint which should be disabled.
2650 *
2651 * @thread Any thread.
2652 */
2653VMMR3DECL(int) DBGFR3BpDisable(PUVM pUVM, DBGFBP hBp)
2654{
2655 /*
2656 * Validate the input.
2657 */
2658 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2659 AssertReturn(hBp != NIL_DBGFBPOWNER, VERR_INVALID_HANDLE);
2660
2661 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
2662 AssertPtrReturn(pBp, VERR_DBGF_BP_NOT_FOUND);
2663
2664 int rc;
2665 if (DBGF_BP_PUB_IS_ENABLED(&pBp->Pub))
2666 rc = dbgfR3BpDisarm(pUVM, hBp, pBp);
2667 else
2668 rc = VINF_DBGF_BP_ALREADY_DISABLED;
2669
2670 return rc;
2671}
2672
2673
/**
 * Enumerate the breakpoints.
 *
 * Invokes pfnCallback for every allocated breakpoint, handing it a private
 * snapshot of the breakpoint's public data.  The enumeration is lock-free
 * and may race with concurrent allocation/removal; a removal race is
 * detected by re-testing the allocation bitmap after the snapshot was taken
 * (see below), in which case the entry is silently skipped.
 *
 * @returns VBox status code.  Enumeration stops early and returns the
 *          callback's status when it is a failure or VINF_CALLBACK_RETURN.
 * @param   pUVM            The user mode VM handle.
 * @param   pfnCallback     The callback function.
 * @param   pvUser          The user argument to pass to the callback.
 *
 * @thread Any thread.
 */
VMMR3DECL(int) DBGFR3BpEnum(PUVM pUVM, PFNDBGFBPENUM pfnCallback, void *pvUser)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);

    for (uint32_t idChunk = 0; idChunk < RT_ELEMENTS(pUVM->dbgf.s.aBpChunks); idChunk++)
    {
        PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[idChunk];

        if (pBpChunk->idChunk == DBGF_BP_CHUNK_ID_INVALID)
            break; /* Stop here as the first non allocated chunk means there is no one allocated afterwards as well. */

        /* A completely free chunk has nothing to enumerate. */
        if (pBpChunk->cBpsFree < DBGF_BP_COUNT_PER_CHUNK)
        {
            /* Scan the bitmap for allocated entries. */
            int32_t iAlloc = ASMBitFirstSet(pBpChunk->pbmAlloc, DBGF_BP_COUNT_PER_CHUNK);
            if (iAlloc != -1)
            {
                do
                {
                    /* Handles encode the chunk id and the entry index within the chunk. */
                    DBGFBP hBp = DBGF_BP_HND_CREATE(idChunk, (uint32_t)iAlloc);
                    PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);

                    /* Make a copy of the breakpoints public data to have a consistent view. */
                    DBGFBPPUB BpPub;
                    BpPub.cHits = ASMAtomicReadU64((volatile uint64_t *)&pBp->Pub.cHits);
                    BpPub.iHitTrigger = ASMAtomicReadU64((volatile uint64_t *)&pBp->Pub.iHitTrigger);
                    BpPub.iHitDisable = ASMAtomicReadU64((volatile uint64_t *)&pBp->Pub.iHitDisable);
                    BpPub.hOwner = ASMAtomicReadU32((volatile uint32_t *)&pBp->Pub.hOwner);
                    BpPub.u16Type = ASMAtomicReadU16((volatile uint16_t *)&pBp->Pub.u16Type); /* Actually constant. */
                    BpPub.fFlags = ASMAtomicReadU16((volatile uint16_t *)&pBp->Pub.fFlags);
                    memcpy(&BpPub.u, &pBp->Pub.u, sizeof(pBp->Pub.u)); /* Is constant after allocation. */

                    /* Check if a removal raced us. */
                    /* NOTE: this bitmap re-test must stay AFTER the snapshot copy above —
                       it is what validates that the copied data belonged to a live entry. */
                    if (ASMBitTest(pBpChunk->pbmAlloc, iAlloc))
                    {
                        int rc = pfnCallback(pUVM, pvUser, hBp, &BpPub);
                        if (RT_FAILURE(rc) || rc == VINF_CALLBACK_RETURN)
                            return rc;
                    }

                    iAlloc = ASMBitNextSet(pBpChunk->pbmAlloc, DBGF_BP_COUNT_PER_CHUNK, iAlloc);
                } while (iAlloc != -1);
            }
        }
    }

    return VINF_SUCCESS;
}
2732
2733
2734/**
2735 * Called whenever a breakpoint event needs to be serviced in ring-3 to decide what to do.
2736 *
2737 * @returns VBox status code.
2738 * @param pVM The cross context VM structure.
2739 * @param pVCpu The vCPU the breakpoint event happened on.
2740 *
2741 * @thread EMT
2742 */
2743VMMR3_INT_DECL(int) DBGFR3BpHit(PVM pVM, PVMCPU pVCpu)
2744{
2745 /* Send it straight into the debugger?. */
2746 if (pVCpu->dbgf.s.fBpInvokeOwnerCallback)
2747 {
2748 DBGFBP hBp = pVCpu->dbgf.s.hBpActive;
2749 pVCpu->dbgf.s.fBpInvokeOwnerCallback = false;
2750
2751 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pVM->pUVM, hBp);
2752 AssertReturn(pBp, VERR_DBGF_BP_IPE_9);
2753
2754 /* Resolve owner (can be NIL_DBGFBPOWNER) and invoke callback if there is one. */
2755 if (pBp->Pub.hOwner != NIL_DBGFBPOWNER)
2756 {
2757 PCDBGFBPOWNERINT pBpOwner = dbgfR3BpOwnerGetByHnd(pVM->pUVM, pBp->Pub.hOwner);
2758 if (pBpOwner)
2759 {
2760 VBOXSTRICTRC rcStrict = dbgfR3BpHit(pVM, pVCpu, hBp, pBp, pBpOwner);
2761 if (VBOXSTRICTRC_VAL(rcStrict) == VINF_SUCCESS)
2762 {
2763 pVCpu->dbgf.s.hBpActive = NIL_DBGFBP;
2764 return VINF_SUCCESS;
2765 }
2766 if (VBOXSTRICTRC_VAL(rcStrict) != VINF_DBGF_BP_HALT) /* Guru meditation. */
2767 return VERR_DBGF_BP_OWNER_CALLBACK_WRONG_STATUS;
2768 /* else: Halt in the debugger. */
2769 }
2770 }
2771 }
2772
2773 return DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
2774}
2775
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette