VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/DBGFR3Bp.cpp@ 107227

Last change on this file since 107227 was 107227, checked in by vboxsync, 8 weeks ago

VMM: Cleaning up ARMv8 / x86 split. jiraref:VBP-1470

1/* $Id: DBGFR3Bp.cpp 107227 2024-12-04 15:20:14Z vboxsync $ */
2/** @file
3 * DBGF - Debugger Facility, Breakpoint Management.
4 */
5
6/*
7 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_dbgf_bp DBGF - The Debugger Facility, Breakpoint Management
30 *
31 * The purpose of the debugger facility's breakpoint manager is to efficiently manage
32 * large numbers of breakpoints for various use cases, like dtrace-like operations
33 * or execution flow tracing. Especially execution flow tracing can
34 * require thousands of breakpoints which need to be managed efficiently to not slow
35 * down guest operation too much. Before the rewrite, started at the end of 2020, DBGF could
36 * only handle 32 breakpoints (+ 4 hardware assisted breakpoints). The new
37 * manager is supposed to be able to handle up to one million breakpoints.
38 *
39 * @see grp_dbgf
40 *
41 *
42 * @section sec_dbgf_bp_owner Breakpoint owners
43 *
44 * A single breakpoint owner has a mandatory ring-3 callback and an optional ring-0
45 * callback assigned, which is invoked whenever a breakpoint assigned to that owner is hit.
46 * The common part of the owner is managed in a single table mapped into both ring-0
47 * and ring-3, with the handle being the index into the table. This allows resolving
48 * the handle to the internal structure efficiently. Searching for a free entry is
49 * done using a bitmap indicating free and occupied entries. For the optional
50 * ring-0 owner part there is a separate ring-0 only table for security reasons.
51 *
52 * The callback of the owner can be used to gather and log guest state information
53 * and decide whether to continue guest execution or stop and drop into the debugger.
54 * Breakpoints which don't have an owner assigned will always drop the VM right into
55 * the debugger.
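 *
 * A purely illustrative sketch of such a ring-3 owner callback follows. The actual
 * callback type, its exact signature and the status codes it may return are defined in
 * VBox/vmm/dbgf.h; everything named "example" below, as well as the halt status used,
 * is an assumption made for illustration only:
 *
 * @code
 *   // Hypothetical ring-3 hit callback: log some state and decide how to continue.
 *   static DECLCALLBACK(VBOXSTRICTRC) exampleBpHit(PVM pVM, VMCPUID idCpu, void *pvUserBp, DBGFBP hBp)
 *   {
 *       RT_NOREF(pVM);
 *       LogRel(("example: breakpoint %#x hit on VCPU %u\n", hBp, idCpu));
 *       bool const fJustLog = pvUserBp && *(bool *)pvUserBp;   // opaque user argument supplied at creation
 *       if (fJustLog)
 *           return VINF_SUCCESS;                               // continue guest execution
 *       return VINF_DBGF_BP_HALT;                              // assumed status: stop and enter the debugger
 *   }
 * @endcode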
56 *
57 *
58 * @section sec_dbgf_bp_bps Breakpoints
59 *
60 * Breakpoints are referenced by an opaque handle which acts as an index into a global table
61 * mapped into ring-3 and ring-0. Each entry contains the necessary state to manage the breakpoint,
62 * like trigger conditions, type, owner, etc. If an owner is given, an optional opaque user argument
63 * can be supplied which is passed to the respective owner callback. For owners with ring-0 callbacks
64 * a dedicated ring-0 table holds the possible ring-0 user arguments.
65 *
66 * To keep memory consumption under control while still supporting large numbers of
67 * breakpoints, the table is split into fixed-size chunks; the chunk index and the index
68 * into the chunk can be derived from the handle with only a few logical operations.
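 *
 * A minimal sketch of such a split; the real layout and the DBGF_BP_HND_CREATE /
 * DBGF_BP_HND_GET_CHUNK_ID / DBGF_BP_HND_GET_ENTRY macros live in DBGFInternal.h, the
 * 16-bit entry width below is merely assumed for illustration:
 *
 * @code
 *   // Hypothetical handle layout: low 16 bits = entry index within the chunk, upper bits = chunk id.
 *   #define EXAMPLE_BP_HND_CREATE(idChunk, idxEntry)   (((idChunk) << 16) | ((idxEntry) & UINT32_C(0xffff)))
 *   #define EXAMPLE_BP_HND_GET_CHUNK_ID(hBp)           ((hBp) >> 16)
 *   #define EXAMPLE_BP_HND_GET_ENTRY(hBp)              ((hBp) & UINT32_C(0xffff))
 * @endcode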
69 *
70 *
71 * @section sec_dbgf_bp_resolv Resolving breakpoint addresses
72 *
73 * Whenever a \#BP(0) event is triggered DBGF needs to decide whether the event originated
74 * from within the guest or whether a DBGF breakpoint caused it. This has to happen as fast
75 * as possible. The following scheme is employed to achieve this:
76 *
77 * @verbatim
78 * 7 6 5 4 3 2 1 0
79 * +---+---+---+---+---+---+---+---+
80 * | | | | | | | | | BP address
81 * +---+---+---+---+---+---+---+---+
82 * \_____________________/ \_____/
83 * | |
84 * | +---------------+
85 * | |
86 * BP table | v
87 * +------------+ | +-----------+
88 * | hBp 0 | | X <- | 0 | xxxxx |
89 * | hBp 1 | <----------------+------------------------ | 1 | hBp 1 |
90 * | | | +--- | 2 | idxL2 |
91 * | hBp <m> | <---+ v | |...| ... |
92 * | | | +-----------+ | |...| ... |
93 * | | | | | | |...| ... |
94 * | hBp <n> | <-+ +----- | +> leaf | | | . |
95 * | | | | | | | | . |
96 * | | | | + root + | <------------+ | . |
97 * | | | | | | +-----------+
98 * | | +------- | leaf<+ | L1: 65536
99 * | . | | . |
100 * | . | | . |
101 * | . | | . |
102 * +------------+ +-----------+
103 * L2 idx BST
104 * @endverbatim
105 *
106 * -# Take the lowest 16 bits of the breakpoint address and use them as a direct index
107 * into the L1 table. The L1 table is contiguous and consists of 4 byte entries,
108 * resulting in 256KiB of memory used. The topmost 4 bits indicate how to proceed
109 * and the meaning of the remaining 28 bits depends on the topmost 4 bits:
110 * - A 0 type entry means no breakpoint is registered with the matching lowest 16 bits,
111 * so forward the event to the guest.
112 * - A 1 in the topmost 4 bits means that the remaining 28 bits directly denote a breakpoint
113 * handle which can be resolved by extracting the chunk index and the index into the chunk
114 * of the global breakpoint table. If the address matches, the breakpoint is processed
115 * according to its configuration; otherwise the event is again forwarded to the guest.
116 * - A 2 in the topmost 4 bits means that there are multiple breakpoints registered
117 * matching the lowest 16 bits and the search must continue in the L2 table, with the
118 * remaining 28 bits acting as an index into the L2 table indicating the search root.
119 * -# The L2 table consists of multiple index based binary search trees, one for each reference
120 * from the L1 table. The search key is formed from the upper 6 bytes of the breakpoint
121 * address. The tree is traversed until either a matching address is found and the breakpoint
122 * is processed, or the event is forwarded to the guest if the search isn't successful.
123 * Each entry in the L2 table is 16 bytes big and densely packed to avoid excessive memory usage (see the sketch below).
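 *
 * A minimal sketch of that lookup; the real code uses the DBGF_BP_INT3_L1_* macros from
 * DBGFInternal.h, so the helper name, the raw shifts and the masks below are assumptions
 * made purely for illustration:
 *
 * @code
 *   // Hypothetical helper illustrating only the L1 decision, not the actual DBGF code.
 *   static int exampleBpLocL1Decide(uint32_t const *paBpLocL1, uint64_t GCPtrBp)
 *   {
 *       uint32_t const idxL1   = (uint32_t)(GCPtrBp & 0xffff);    // lowest 16 bits of the address
 *       uint32_t const u32L1   = paBpLocL1[idxL1];                // 4 byte L1 entry
 *       uint32_t const u4Type  = u32L1 >> 28;                     // topmost 4 bits: entry type
 *       uint32_t const u28Data = u32L1 & UINT32_C(0x0fffffff);    // remaining 28 bits
 *       if (u4Type == 0)
 *           return -1;                  // no breakpoint for these low 16 bits -> guest event
 *       if (u4Type == 1)
 *           return (int)u28Data;        // direct breakpoint handle -> verify the full address matches
 *       return (int)u28Data;            // type 2: index of the L2 binary search tree root
 *   }
 * @endcode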
124 *
125 * @section sec_dbgf_bp_ioport Handling I/O port breakpoints
126 *
127 * Because only a limited number of I/O ports is available (65536), a single table with 65536 entries,
128 * each 4 bytes in size, will be allocated. This amounts to an additional 256KiB of memory being used as soon as
129 * an I/O breakpoint is enabled. The entries contain the breakpoint handle directly, allowing only one breakpoint
130 * per port, which is a limitation we accept for now to keep things relatively simple.
131 * When at least one I/O breakpoint is active IOM will be notified, and it will afterwards call the DBGF API
132 * whenever the guest does an I/O port access to decide whether a breakpoint was hit. This keeps the overhead small
133 * when no I/O port breakpoint is enabled (see the sketch below).
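 *
 * A minimal sketch of that per-access check; the helper name and the "no breakpoint"
 * encoding below are assumptions for illustration only, the real code uses the DBGF
 * port I/O table entry macros:
 *
 * @code
 *   // Hypothetical check done for every guest I/O port access while I/O breakpoints are armed.
 *   static bool exampleIsPortIoBpArmed(uint32_t const *paBpLocPortIo, uint16_t uPort)
 *   {
 *       uint32_t const u32Entry = paBpLocPortIo[uPort];   // one 4 byte entry per port
 *       return u32Entry != 0;                             // a non-zero entry holds the breakpoint handle
 *   }
 * @endcode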
134 *
135 * @section sec_dbgf_bp_note Random thoughts and notes for the implementation
136 *
137 * - The assumption for this approach is that the lowest 16 bits of the breakpoint address are
138 * hopefully the most varying ones across breakpoints, so the traversal
139 * can skip the L2 table in most cases. Even if the L2 table must be consulted, the
140 * individual trees should be quite shallow, resulting in low overhead when walking them
141 * (though only real world testing can assert this assumption).
142 * - Index based tables and trees are used instead of pointers because the tables
143 * are always mapped into ring-0 and ring-3 with different base addresses.
144 * - Efficient breakpoint allocation is done by having a global bitmap indicating free
145 * and occupied breakpoint entries. The same applies to the L2 BST table.
146 * - Special care must be taken when modifying the L1 and L2 tables as other EMTs
147 * might still access them (we want to try a lockless approach first using
148 * atomic updates, resorting to locking if that turns out to be too difficult).
149 * - Each BP entry is supposed to be 64 bytes big and each chunk should contain 65536
150 * breakpoints, which results in 4MiB for each chunk plus the allocation bitmap.
151 * - ring-0 has to take special care when traversing the L2 BST to not run into cycles
152 * and to do strict bounds checking before accessing anything. The L1 and L2 tables
153 * are written to from ring-3 only. The same goes for the breakpoint table, with the
154 * exception being the opaque user argument for ring-0 which is stored in ring-0 only
155 * memory.
156 */
157
158
159/*********************************************************************************************************************************
160* Header Files *
161*********************************************************************************************************************************/
162#define LOG_GROUP LOG_GROUP_DBGF
163#define VMCPU_INCL_CPUM_GST_CTX
164#include <VBox/vmm/cpum.h>
165#include <VBox/vmm/dbgf.h>
166#include <VBox/vmm/selm.h>
167#include <VBox/vmm/iem.h>
168#include <VBox/vmm/mm.h>
169#include <VBox/vmm/iom.h>
170#include <VBox/vmm/hm.h>
171#include "DBGFInternal.h"
172#include <VBox/vmm/vm.h>
173#include <VBox/vmm/uvm.h>
174
175#include <VBox/err.h>
176#include <VBox/log.h>
177#include <iprt/assert.h>
178#include <iprt/mem.h>
179#ifdef VBOX_VMM_TARGET_ARMV8
180# include <iprt/armv8.h>
181#endif
182
183#include "DBGFInline.h"
184
185
186/*********************************************************************************************************************************
187* Structures and Typedefs *
188*********************************************************************************************************************************/
189
190
191/*********************************************************************************************************************************
192* Internal Functions *
193*********************************************************************************************************************************/
194RT_C_DECLS_BEGIN
195RT_C_DECLS_END
196
197
198/**
199 * Initializes the breakpoint management.
200 *
201 * @returns VBox status code.
202 * @param pUVM The user mode VM handle.
203 */
204DECLHIDDEN(int) dbgfR3BpInit(PUVM pUVM)
205{
206 PVM pVM = pUVM->pVM;
207
208 //pUVM->dbgf.s.paBpOwnersR3 = NULL;
209 //pUVM->dbgf.s.pbmBpOwnersAllocR3 = NULL;
210
211 /* Init hardware breakpoint states. */
212 for (uint32_t i = 0; i < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); i++)
213 {
214 PDBGFBPHW pHwBp = &pVM->dbgf.s.aHwBreakpoints[i];
215
216 AssertCompileSize(DBGFBP, sizeof(uint32_t));
217 pHwBp->hBp = NIL_DBGFBP;
218 //pHwBp->fEnabled = false;
219 }
220
221 /* Now the global breakpoint table chunks. */
222 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpChunks); i++)
223 {
224 PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[i];
225
226 //pBpChunk->pBpBaseR3 = NULL;
227 //pBpChunk->pbmAlloc = NULL;
228 //pBpChunk->cBpsFree = 0;
229 pBpChunk->idChunk = DBGF_BP_CHUNK_ID_INVALID; /* Not allocated. */
230 }
231
232 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpL2TblChunks); i++)
233 {
234 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[i];
235
236 //pL2Chunk->pL2BaseR3 = NULL;
237 //pL2Chunk->pbmAlloc = NULL;
238 //pL2Chunk->cFree = 0;
239 pL2Chunk->idChunk = DBGF_BP_CHUNK_ID_INVALID; /* Not allocated. */
240 }
241
242 //pUVM->dbgf.s.paBpLocL1R3 = NULL;
243 //pUVM->dbgf.s.paBpLocPortIoR3 = NULL;
244 pUVM->dbgf.s.hMtxBpL2Wr = NIL_RTSEMFASTMUTEX;
245 return RTSemFastMutexCreate(&pUVM->dbgf.s.hMtxBpL2Wr);
246}
247
248
249/**
250 * Terminates the breakpoint management.
251 *
252 * @returns VBox status code.
253 * @param pUVM The user mode VM handle.
254 */
255DECLHIDDEN(int) dbgfR3BpTerm(PUVM pUVM)
256{
257 if (pUVM->dbgf.s.pbmBpOwnersAllocR3)
258 {
259 RTMemFree((void *)pUVM->dbgf.s.pbmBpOwnersAllocR3);
260 pUVM->dbgf.s.pbmBpOwnersAllocR3 = NULL;
261 }
262
263 /* Free all allocated chunk bitmaps (the chunks themselves are destroyed during ring-0 VM destruction). */
264 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpChunks); i++)
265 {
266 PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[i];
267
268 if (pBpChunk->idChunk != DBGF_BP_CHUNK_ID_INVALID)
269 {
270 AssertPtr(pBpChunk->pbmAlloc);
271 RTMemFree((void *)pBpChunk->pbmAlloc);
272 pBpChunk->pbmAlloc = NULL;
273 pBpChunk->idChunk = DBGF_BP_CHUNK_ID_INVALID;
274 }
275 }
276
277 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpL2TblChunks); i++)
278 {
279 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[i];
280
281 if (pL2Chunk->idChunk != DBGF_BP_CHUNK_ID_INVALID)
282 {
283 AssertPtr(pL2Chunk->pbmAlloc);
284 RTMemFree((void *)pL2Chunk->pbmAlloc);
285 pL2Chunk->pbmAlloc = NULL;
286 pL2Chunk->idChunk = DBGF_BP_CHUNK_ID_INVALID;
287 }
288 }
289
290 if (pUVM->dbgf.s.hMtxBpL2Wr != NIL_RTSEMFASTMUTEX)
291 {
292 RTSemFastMutexDestroy(pUVM->dbgf.s.hMtxBpL2Wr);
293 pUVM->dbgf.s.hMtxBpL2Wr = NIL_RTSEMFASTMUTEX;
294 }
295
296 return VINF_SUCCESS;
297}
298
299
300/**
301 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
302 */
303static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpInitEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
304{
305 RT_NOREF(pvUser);
306
307 VMCPU_ASSERT_EMT(pVCpu);
308 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
309
310 /*
311 * The initialization will be done on EMT(0). It is possible that multiple
312 * initialization attempts are done because dbgfR3BpEnsureInit() can be called
313 * from racing non EMT threads when trying to set a breakpoint for the first time.
314 * Just fake success if the L1 is already present which means that a previous rendezvous
315 * successfully initialized the breakpoint manager.
316 */
317 PUVM pUVM = pVM->pUVM;
318 if ( pVCpu->idCpu == 0
319 && !pUVM->dbgf.s.paBpLocL1R3)
320 {
321#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
322 if (!SUPR3IsDriverless())
323 {
324 DBGFBPINITREQ Req;
325 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
326 Req.Hdr.cbReq = sizeof(Req);
327 Req.paBpLocL1R3 = NULL;
328 int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_INIT, 0 /*u64Arg*/, &Req.Hdr);
329 AssertLogRelMsgRCReturn(rc, ("VMMR0_DO_DBGF_BP_INIT failed: %Rrc\n", rc), rc);
330 pUVM->dbgf.s.paBpLocL1R3 = Req.paBpLocL1R3;
331 }
332 else
333#endif
334 {
335 /* Driverless: Do dbgfR0BpInitWorker here, ring-3 style. */
336 uint32_t const cbL1Loc = RT_ALIGN_32(UINT16_MAX * sizeof(uint32_t), HOST_PAGE_SIZE);
337 pUVM->dbgf.s.paBpLocL1R3 = (uint32_t *)RTMemPageAllocZ(cbL1Loc);
338 AssertLogRelMsgReturn(pUVM->dbgf.s.paBpLocL1R3, ("cbL1Loc=%#x\n", cbL1Loc), VERR_NO_PAGE_MEMORY);
339 }
340 }
341
342 return VINF_SUCCESS;
343}
344
345
346/**
347 * Ensures that the breakpoint manager is fully initialized.
348 *
349 * @returns VBox status code.
350 * @param pUVM The user mode VM handle.
351 *
352 * @thread Any thread.
353 */
354static int dbgfR3BpEnsureInit(PUVM pUVM)
355{
356 /* If the L1 lookup table is allocated initialization succeeded before. */
357 if (RT_LIKELY(pUVM->dbgf.s.paBpLocL1R3))
358 return VINF_SUCCESS;
359
360 /* Gather all EMTs and call into ring-0 to initialize the breakpoint manager. */
361 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpInitEmtWorker, NULL /*pvUser*/);
362}
363
364
365/**
366 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
367 */
368static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpPortIoInitEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
369{
370 RT_NOREF(pvUser);
371
372 VMCPU_ASSERT_EMT(pVCpu);
373 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
374
375 /*
376 * The initialization will be done on EMT(0). It is possible that multiple
377 * initialization attempts are done because dbgfR3BpPortIoEnsureInit() can be called
378 * from racing non EMT threads when trying to set a breakpoint for the first time.
379 * Just fake success if the L1 is already present which means that a previous rendezvous
380 * successfully initialized the breakpoint manager.
381 */
382 PUVM pUVM = pVM->pUVM;
383 if ( pVCpu->idCpu == 0
384 && !pUVM->dbgf.s.paBpLocPortIoR3)
385 {
386#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
387 if (!SUPR3IsDriverless())
388 {
389 DBGFBPINITREQ Req;
390 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
391 Req.Hdr.cbReq = sizeof(Req);
392 Req.paBpLocL1R3 = NULL;
393 int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_PORTIO_INIT, 0 /*u64Arg*/, &Req.Hdr);
394 AssertLogRelMsgRCReturn(rc, ("VMMR0_DO_DBGF_BP_PORTIO_INIT failed: %Rrc\n", rc), rc);
395 pUVM->dbgf.s.paBpLocPortIoR3 = Req.paBpLocL1R3;
396 }
397 else
398#endif
399 {
400 /* Driverless: Do dbgfR0BpPortIoInitWorker here, ring-3 style. */
401 uint32_t const cbPortIoLoc = RT_ALIGN_32(UINT16_MAX * sizeof(uint32_t), HOST_PAGE_SIZE);
402 pUVM->dbgf.s.paBpLocPortIoR3 = (uint32_t *)RTMemPageAllocZ(cbPortIoLoc);
403 AssertLogRelMsgReturn(pUVM->dbgf.s.paBpLocPortIoR3, ("cbPortIoLoc=%#x\n", cbPortIoLoc), VERR_NO_PAGE_MEMORY);
404 }
405 }
406
407 return VINF_SUCCESS;
408}
409
410
411/**
412 * Ensures that the breakpoint manager is initialized to handle I/O port breakpoints.
413 *
414 * @returns VBox status code.
415 * @param pUVM The user mode VM handle.
416 *
417 * @thread Any thread.
418 */
419static int dbgfR3BpPortIoEnsureInit(PUVM pUVM)
420{
421 /* If the L1 lookup table is allocated initialization succeeded before. */
422 if (RT_LIKELY(pUVM->dbgf.s.paBpLocPortIoR3))
423 return VINF_SUCCESS;
424
425 /* Ensure that the breakpoint manager is initialized. */
426 int rc = dbgfR3BpEnsureInit(pUVM);
427 if (RT_FAILURE(rc))
428 return rc;
429
430 /* Gather all EMTs and call into ring-0 to initialize the breakpoint manager. */
431 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpPortIoInitEmtWorker, NULL /*pvUser*/);
432}
433
434
435/**
436 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
437 */
438static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpOwnerInitEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
439{
440 RT_NOREF(pvUser);
441
442 VMCPU_ASSERT_EMT(pVCpu);
443 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
444
445 /*
446 * The initialization will be done on EMT(0). It is possible that multiple
447 * initialization attempts are done because dbgfR3BpOwnerEnsureInit() can be called
448 * from racing non EMT threads when trying to create a breakpoint owner for the first time.
449 * Just fake success if the pointers are initialized already, meaning that a previous rendezvous
450 * successfully initialized the breakpoint owner table.
451 */
452 int rc = VINF_SUCCESS;
453 PUVM pUVM = pVM->pUVM;
454 if ( pVCpu->idCpu == 0
455 && !pUVM->dbgf.s.pbmBpOwnersAllocR3)
456 {
457 AssertCompile(!(DBGF_BP_OWNER_COUNT_MAX % 64));
458 pUVM->dbgf.s.pbmBpOwnersAllocR3 = RTMemAllocZ(DBGF_BP_OWNER_COUNT_MAX / 8);
459 if (pUVM->dbgf.s.pbmBpOwnersAllocR3)
460 {
461#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
462 if (!SUPR3IsDriverless())
463 {
464 DBGFBPOWNERINITREQ Req;
465 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
466 Req.Hdr.cbReq = sizeof(Req);
467 Req.paBpOwnerR3 = NULL;
468 rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_OWNER_INIT, 0 /*u64Arg*/, &Req.Hdr);
469 if (RT_SUCCESS(rc))
470 {
471 pUVM->dbgf.s.paBpOwnersR3 = (PDBGFBPOWNERINT)Req.paBpOwnerR3;
472 return VINF_SUCCESS;
473 }
474 AssertLogRelMsgRC(rc, ("VMMR0_DO_DBGF_BP_OWNER_INIT failed: %Rrc\n", rc));
475 }
476 else
477#endif
478 {
479 /* Driverless: Do dbgfR0BpOwnerInitWorker here, ring-3 style. */
480 uint32_t const cbBpOwnerR3 = RT_ALIGN_32(DBGF_BP_OWNER_COUNT_MAX * sizeof(DBGFBPOWNERINT), HOST_PAGE_SIZE);
481 pUVM->dbgf.s.paBpOwnersR3 = (PDBGFBPOWNERINT)RTMemPageAllocZ(cbBpOwnerR3);
482 if (pUVM->dbgf.s.paBpOwnersR3)
483 return VINF_SUCCESS;
484 AssertLogRelMsgFailed(("cbBpOwnerR3=%#x\n", cbBpOwnerR3));
485 rc = VERR_NO_PAGE_MEMORY;
486 }
487
488 RTMemFree((void *)pUVM->dbgf.s.pbmBpOwnersAllocR3);
489 pUVM->dbgf.s.pbmBpOwnersAllocR3 = NULL;
490 }
491 else
492 rc = VERR_NO_MEMORY;
493 }
494
495 return rc;
496}
497
498
499/**
500 * Ensures that the breakpoint manager is fully initialized.
501 *
502 * @returns VBox status code.
503 * @param pUVM The user mode VM handle.
504 *
505 * @thread Any thread.
506 */
507static int dbgfR3BpOwnerEnsureInit(PUVM pUVM)
508{
509 /* If the allocation bitmap is allocated initialization succeeded before. */
510 if (RT_LIKELY(pUVM->dbgf.s.pbmBpOwnersAllocR3))
511 return VINF_SUCCESS;
512
513 /* Gather all EMTs and call into ring-0 to initialize the breakpoint manager. */
514 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpOwnerInitEmtWorker, NULL /*pvUser*/);
515}
516
517
518/**
519 * Retains the given breakpoint owner handle for use.
520 *
521 * @returns VBox status code.
522 * @retval VERR_INVALID_HANDLE if the given breakpoint owner handle is invalid.
523 * @param pUVM The user mode VM handle.
524 * @param hBpOwner The breakpoint owner handle to retain, NIL_DBGFBPOWNER is accepted without doing anything.
525 * @param fIo Flag whether the owner must have the I/O handler set because it is used by an I/O breakpoint.
526 */
527DECLINLINE(int) dbgfR3BpOwnerRetain(PUVM pUVM, DBGFBPOWNER hBpOwner, bool fIo)
528{
529 if (hBpOwner == NIL_DBGFBPOWNER)
530 return VINF_SUCCESS;
531
532 PDBGFBPOWNERINT pBpOwner = dbgfR3BpOwnerGetByHnd(pUVM, hBpOwner);
533 if (pBpOwner)
534 {
535 AssertReturn ( ( fIo
536 && pBpOwner->pfnBpIoHitR3)
537 || ( !fIo
538 && pBpOwner->pfnBpHitR3),
539 VERR_INVALID_HANDLE);
540 ASMAtomicIncU32(&pBpOwner->cRefs);
541 return VINF_SUCCESS;
542 }
543
544 return VERR_INVALID_HANDLE;
545}
546
547
548/**
549 * Releases the given breakpoint owner handle.
550 *
551 * @returns VBox status code.
552 * @retval VERR_INVALID_HANDLE if the given breakpoint owner handle is invalid.
553 * @param pUVM The user mode VM handle.
554 * @param hBpOwner The breakpoint owner handle to release, NIL_DBGFBPOWNER is accepted without doing anything.
555 */
556DECLINLINE(int) dbgfR3BpOwnerRelease(PUVM pUVM, DBGFBPOWNER hBpOwner)
557{
558 if (hBpOwner == NIL_DBGFBPOWNER)
559 return VINF_SUCCESS;
560
561 PDBGFBPOWNERINT pBpOwner = dbgfR3BpOwnerGetByHnd(pUVM, hBpOwner);
562 if (pBpOwner)
563 {
564 Assert(pBpOwner->cRefs > 1);
565 ASMAtomicDecU32(&pBpOwner->cRefs);
566 return VINF_SUCCESS;
567 }
568
569 return VERR_INVALID_HANDLE;
570}
571
572
573/**
574 * Returns the internal breakpoint state for the given handle.
575 *
576 * @returns Pointer to the internal breakpoint state or NULL if the handle is invalid.
577 * @param pUVM The user mode VM handle.
578 * @param hBp The breakpoint handle to resolve.
579 */
580DECLINLINE(PDBGFBPINT) dbgfR3BpGetByHnd(PUVM pUVM, DBGFBP hBp)
581{
582 uint32_t idChunk = DBGF_BP_HND_GET_CHUNK_ID(hBp);
583 uint32_t idxEntry = DBGF_BP_HND_GET_ENTRY(hBp);
584
585 AssertReturn(idChunk < DBGF_BP_CHUNK_COUNT, NULL);
586 AssertReturn(idxEntry < DBGF_BP_COUNT_PER_CHUNK, NULL);
587
588 PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[idChunk];
589 AssertReturn(pBpChunk->idChunk == idChunk, NULL);
590 AssertPtrReturn(pBpChunk->pbmAlloc, NULL);
591 AssertReturn(ASMBitTest(pBpChunk->pbmAlloc, idxEntry), NULL);
592
593 return &pBpChunk->pBpBaseR3[idxEntry];
594}
595
596
597/**
598 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
599 */
600static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpChunkAllocEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
601{
602 uint32_t idChunk = (uint32_t)(uintptr_t)pvUser;
603
604 VMCPU_ASSERT_EMT(pVCpu);
605 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
606
607 AssertReturn(idChunk < DBGF_BP_CHUNK_COUNT, VERR_DBGF_BP_IPE_1);
608
609 PUVM pUVM = pVM->pUVM;
610 PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[idChunk];
611
612 AssertReturn( pBpChunk->idChunk == DBGF_BP_CHUNK_ID_INVALID
613 || pBpChunk->idChunk == idChunk,
614 VERR_DBGF_BP_IPE_2);
615
616 /*
617 * The initialization will be done on EMT(0). It is possible that multiple
618 * allocation attempts are done when multiple racing non EMT threads try to
619 * allocate a breakpoint and a new chunk needs to be allocated.
620 * Ignore the request and succeed if the chunk is allocated meaning that a
621 * previous rendezvous successfully allocated the chunk.
622 */
623 int rc = VINF_SUCCESS;
624 if ( pVCpu->idCpu == 0
625 && pBpChunk->idChunk == DBGF_BP_CHUNK_ID_INVALID)
626 {
627 /* Allocate the bitmap first so we can skip calling into VMMR0 if it fails. */
628 AssertCompile(!(DBGF_BP_COUNT_PER_CHUNK % 64));
629 void *pbmAlloc = RTMemAllocZ(DBGF_BP_COUNT_PER_CHUNK / 8);
630 if (RT_LIKELY(pbmAlloc))
631 {
632#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
633 if (!SUPR3IsDriverless())
634 {
635 DBGFBPCHUNKALLOCREQ Req;
636 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
637 Req.Hdr.cbReq = sizeof(Req);
638 Req.idChunk = idChunk;
639 Req.pChunkBaseR3 = NULL;
640 rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_CHUNK_ALLOC, 0 /*u64Arg*/, &Req.Hdr);
641 if (RT_SUCCESS(rc))
642 pBpChunk->pBpBaseR3 = (PDBGFBPINT)Req.pChunkBaseR3;
643 else
644 AssertLogRelMsgRC(rc, ("VMMR0_DO_DBGF_BP_CHUNK_ALLOC failed: %Rrc\n", rc));
645 }
646 else
647#endif
648 {
649 /* Driverless: Do dbgfR0BpChunkAllocWorker here, ring-3 style. */
650 uint32_t const cbShared = RT_ALIGN_32(DBGF_BP_COUNT_PER_CHUNK * sizeof(DBGFBPINT), HOST_PAGE_SIZE);
651 pBpChunk->pBpBaseR3 = (PDBGFBPINT)RTMemPageAllocZ(cbShared);
652 AssertLogRelMsgStmt(pBpChunk->pBpBaseR3, ("cbShared=%#x\n", cbShared), rc = VERR_NO_PAGE_MEMORY);
653 }
654 if (RT_SUCCESS(rc))
655 {
656 pBpChunk->pbmAlloc = (void volatile *)pbmAlloc;
657 pBpChunk->cBpsFree = DBGF_BP_COUNT_PER_CHUNK;
658 pBpChunk->idChunk = idChunk;
659 return VINF_SUCCESS;
660 }
661
662 RTMemFree(pbmAlloc);
663 }
664 else
665 rc = VERR_NO_MEMORY;
666 }
667
668 return rc;
669}
670
671
672/**
673 * Tries to allocate the given chunk which requires an EMT rendezvous.
674 *
675 * @returns VBox status code.
676 * @param pUVM The user mode VM handle.
677 * @param idChunk The chunk to allocate.
678 *
679 * @thread Any thread.
680 */
681DECLINLINE(int) dbgfR3BpChunkAlloc(PUVM pUVM, uint32_t idChunk)
682{
683 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpChunkAllocEmtWorker, (void *)(uintptr_t)idChunk);
684}
685
686
687/**
688 * Tries to allocate a new breakpoint of the given type.
689 *
690 * @returns VBox status code.
691 * @param pUVM The user mode VM handle.
692 * @param hOwner The owner handle, NIL_DBGFBPOWNER if none assigned.
693 * @param pvUser Opaque user data passed in the owner callback.
694 * @param enmType Breakpoint type to allocate.
695 * @param fFlags Flags associated with the allocated breakpoint.
696 * @param iHitTrigger The hit count at which the breakpoint starts triggering.
697 * Use 0 (or 1) if it's gonna trigger at once.
698 * @param iHitDisable The hit count which disables the breakpoint.
699 * Use ~(uint64_t)0 if it's never gonna be disabled.
700 * @param phBp Where to return the opaque breakpoint handle on success.
701 * @param ppBp Where to return the pointer to the internal breakpoint state on success.
702 *
703 * @thread Any thread.
704 */
705static int dbgfR3BpAlloc(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser, DBGFBPTYPE enmType,
706 uint16_t fFlags, uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp,
707 PDBGFBPINT *ppBp)
708{
709 bool fIo = enmType == DBGFBPTYPE_PORT_IO
710 || enmType == DBGFBPTYPE_MMIO;
711 int rc = dbgfR3BpOwnerRetain(pUVM, hOwner, fIo);
712 if (RT_FAILURE(rc))
713 return rc;
714
715 /*
716 * Search for a chunk having a free entry, allocating new chunks
717 * if the encountered ones are full.
718 *
719 * This can be called from multiple threads at the same time so special care
720 * has to be taken to not require any locking here.
721 */
722 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpChunks); i++)
723 {
724 PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[i];
725
726 uint32_t idChunk = ASMAtomicReadU32(&pBpChunk->idChunk);
727 if (idChunk == DBGF_BP_CHUNK_ID_INVALID)
728 {
729 rc = dbgfR3BpChunkAlloc(pUVM, i);
730 if (RT_FAILURE(rc))
731 {
732 LogRel(("DBGF/Bp: Allocating new breakpoint table chunk failed with %Rrc\n", rc));
733 break;
734 }
735
736 idChunk = ASMAtomicReadU32(&pBpChunk->idChunk);
737 Assert(idChunk == i);
738 }
739
740 /** @todo Optimize with some hinting if this turns out to be too slow. */
741 for (;;)
742 {
743 uint32_t cBpsFree = ASMAtomicReadU32(&pBpChunk->cBpsFree);
744 if (cBpsFree)
745 {
746 /*
747 * Scan the associated bitmap for a free entry, if none can be found another thread
748 * raced us and we go to the next chunk.
749 */
750 int32_t iClr = ASMBitFirstClear(pBpChunk->pbmAlloc, DBGF_BP_COUNT_PER_CHUNK);
751 if (iClr != -1)
752 {
753 /*
754 * Try to allocate, we could get raced here as well. In that case
755 * we try again.
756 */
757 if (!ASMAtomicBitTestAndSet(pBpChunk->pbmAlloc, iClr))
758 {
759 /* Success, immediately mark as allocated, initialize the breakpoint state and return. */
760 ASMAtomicDecU32(&pBpChunk->cBpsFree);
761
762 PDBGFBPINT pBp = &pBpChunk->pBpBaseR3[iClr];
763 pBp->Pub.cHits = 0;
764 pBp->Pub.iHitTrigger = iHitTrigger;
765 pBp->Pub.iHitDisable = iHitDisable;
766 pBp->Pub.hOwner = hOwner;
767 pBp->Pub.u16Type = DBGF_BP_PUB_MAKE_TYPE(enmType);
768 pBp->Pub.fFlags = fFlags & ~DBGF_BP_F_ENABLED; /* The enabled flag is handled in the respective APIs. */
769 pBp->pvUserR3 = pvUser;
770
771 /** @todo Owner handling (reference and call ring-0 if it has a ring-0 callback). */
772
773 *phBp = DBGF_BP_HND_CREATE(idChunk, iClr);
774 *ppBp = pBp;
775 return VINF_SUCCESS;
776 }
777 /* else Retry with another spot. */
778 }
779 else /* no free entry in bitmap, go to the next chunk */
780 break;
781 }
782 else /* !cBpsFree, go to the next chunk */
783 break;
784 }
785 }
786
787 rc = dbgfR3BpOwnerRelease(pUVM, hOwner); AssertRC(rc);
788 return VERR_DBGF_NO_MORE_BP_SLOTS;
789}
790
791
792/**
793 * Frees the given breakpoint handle.
794 *
795 * @param pUVM The user mode VM handle.
796 * @param hBp The breakpoint handle to free.
797 * @param pBp The internal breakpoint state pointer.
798 */
799static void dbgfR3BpFree(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
800{
801 uint32_t idChunk = DBGF_BP_HND_GET_CHUNK_ID(hBp);
802 uint32_t idxEntry = DBGF_BP_HND_GET_ENTRY(hBp);
803
804 AssertReturnVoid(idChunk < DBGF_BP_CHUNK_COUNT);
805 AssertReturnVoid(idxEntry < DBGF_BP_COUNT_PER_CHUNK);
806
807 PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[idChunk];
808 AssertPtrReturnVoid(pBpChunk->pbmAlloc);
809 AssertReturnVoid(ASMBitTest(pBpChunk->pbmAlloc, idxEntry));
810
811 /** @todo Need a trip to Ring-0 if an owner is assigned with a Ring-0 part to clear the breakpoint. */
812 int rc = dbgfR3BpOwnerRelease(pUVM, pBp->Pub.hOwner); AssertRC(rc); RT_NOREF(rc);
813 memset(pBp, 0, sizeof(*pBp));
814
815 ASMAtomicBitClear(pBpChunk->pbmAlloc, idxEntry);
816 ASMAtomicIncU32(&pBpChunk->cBpsFree);
817}
818
819
820/**
821 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
822 */
823static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpL2TblChunkAllocEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
824{
825 uint32_t idChunk = (uint32_t)(uintptr_t)pvUser;
826
827 VMCPU_ASSERT_EMT(pVCpu);
828 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
829
830 AssertReturn(idChunk < DBGF_BP_L2_TBL_CHUNK_COUNT, VERR_DBGF_BP_IPE_1);
831
832 PUVM pUVM = pVM->pUVM;
833 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[idChunk];
834
835 AssertReturn( pL2Chunk->idChunk == DBGF_BP_L2_IDX_CHUNK_ID_INVALID
836 || pL2Chunk->idChunk == idChunk,
837 VERR_DBGF_BP_IPE_2);
838
839 /*
840 * The initialization will be done on EMT(0). It is possible that multiple
841 * allocation attempts are done when multiple racing non EMT threads try to
842 * allocate a breakpoint and a new chunk needs to be allocated.
843 * Ignore the request and succeed if the chunk is allocated meaning that a
844 * previous rendezvous successfully allocated the chunk.
845 */
846 int rc = VINF_SUCCESS;
847 if ( pVCpu->idCpu == 0
848 && pL2Chunk->idChunk == DBGF_BP_L2_IDX_CHUNK_ID_INVALID)
849 {
850 /* Allocate the bitmap first so we can skip calling into VMMR0 if it fails. */
851 AssertCompile(!(DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK % 64));
852 void *pbmAlloc = RTMemAllocZ(DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK / 8);
853 if (RT_LIKELY(pbmAlloc))
854 {
855#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
856 if (!SUPR3IsDriverless())
857 {
858 DBGFBPL2TBLCHUNKALLOCREQ Req;
859 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
860 Req.Hdr.cbReq = sizeof(Req);
861 Req.idChunk = idChunk;
862 Req.pChunkBaseR3 = NULL;
863 rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC, 0 /*u64Arg*/, &Req.Hdr);
864 if (RT_SUCCESS(rc))
865 pL2Chunk->pL2BaseR3 = (PDBGFBPL2ENTRY)Req.pChunkBaseR3;
866 else
867 AssertLogRelMsgRC(rc, ("VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC failed: %Rrc\n", rc));
868 }
869 else
870#endif
871 {
872 /* Driverless: Do dbgfR0BpL2TblChunkAllocWorker here, ring-3 style. */
873 uint32_t const cbTotal = RT_ALIGN_32(DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK * sizeof(DBGFBPL2ENTRY), HOST_PAGE_SIZE);
874 pL2Chunk->pL2BaseR3 = (PDBGFBPL2ENTRY)RTMemPageAllocZ(cbTotal);
875 AssertLogRelMsgStmt(pL2Chunk->pL2BaseR3, ("cbTotal=%#x\n", cbTotal), rc = VERR_NO_PAGE_MEMORY);
876 }
877 if (RT_SUCCESS(rc))
878 {
879 pL2Chunk->pbmAlloc = (void volatile *)pbmAlloc;
880 pL2Chunk->cFree = DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK;
881 pL2Chunk->idChunk = idChunk;
882 return VINF_SUCCESS;
883 }
884
885 RTMemFree(pbmAlloc);
886 }
887 else
888 rc = VERR_NO_MEMORY;
889 }
890
891 return rc;
892}
893
894
895/**
896 * Tries to allocate the given L2 table chunk which requires an EMT rendezvous.
897 *
898 * @returns VBox status code.
899 * @param pUVM The user mode VM handle.
900 * @param idChunk The chunk to allocate.
901 *
902 * @thread Any thread.
903 */
904DECLINLINE(int) dbgfR3BpL2TblChunkAlloc(PUVM pUVM, uint32_t idChunk)
905{
906 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpL2TblChunkAllocEmtWorker, (void *)(uintptr_t)idChunk);
907}
908
909
910/**
911 * Tries to allocate a new entry in the L2 lookup table.
912 *
913 * @returns VBox status code.
914 * @param pUVM The user mode VM handle.
915 * @param pidxL2Tbl Where to return the L2 table entry index on success.
916 * @param ppL2TblEntry Where to return the pointer to the L2 table entry on success.
917 *
918 * @thread Any thread.
919 */
920static int dbgfR3BpL2TblEntryAlloc(PUVM pUVM, uint32_t *pidxL2Tbl, PDBGFBPL2ENTRY *ppL2TblEntry)
921{
922 /*
923 * Search for a chunk having a free entry, allocating new chunks
924 * if the encountered ones are full.
925 *
926 * This can be called from multiple threads at the same time so special care
927 * has to be taken to not require any locking here.
928 */
929 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpL2TblChunks); i++)
930 {
931 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[i];
932
933 uint32_t idChunk = ASMAtomicReadU32(&pL2Chunk->idChunk);
934 if (idChunk == DBGF_BP_L2_IDX_CHUNK_ID_INVALID)
935 {
936 int rc = dbgfR3BpL2TblChunkAlloc(pUVM, i);
937 if (RT_FAILURE(rc))
938 {
939 LogRel(("DBGF/Bp: Allocating new breakpoint L2 lookup table chunk failed with %Rrc\n", rc));
940 break;
941 }
942
943 idChunk = ASMAtomicReadU32(&pL2Chunk->idChunk);
944 Assert(idChunk == i);
945 }
946
947 /** @todo Optimize with some hinting if this turns out to be too slow. */
948 for (;;)
949 {
950 uint32_t cFree = ASMAtomicReadU32(&pL2Chunk->cFree);
951 if (cFree)
952 {
953 /*
954 * Scan the associated bitmap for a free entry, if none can be found another thread
955 * raced us and we go to the next chunk.
956 */
957 int32_t iClr = ASMBitFirstClear(pL2Chunk->pbmAlloc, DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK);
958 if (iClr != -1)
959 {
960 /*
961 * Try to allocate, we could get raced here as well. In that case
962 * we try again.
963 */
964 if (!ASMAtomicBitTestAndSet(pL2Chunk->pbmAlloc, iClr))
965 {
966 /* Success, immediately mark as allocated, initialize the breakpoint state and return. */
967 ASMAtomicDecU32(&pL2Chunk->cFree);
968
969 PDBGFBPL2ENTRY pL2Entry = &pL2Chunk->pL2BaseR3[iClr];
970
971 *pidxL2Tbl = DBGF_BP_L2_IDX_CREATE(idChunk, iClr);
972 *ppL2TblEntry = pL2Entry;
973 return VINF_SUCCESS;
974 }
975 /* else Retry with another spot. */
976 }
977 else /* no free entry in bitmap, go to the next chunk */
978 break;
979 }
980 else /* !cFree, go to the next chunk */
981 break;
982 }
983 }
984
985 return VERR_DBGF_NO_MORE_BP_SLOTS;
986}
987
988
989/**
990 * Frees the given L2 table entry.
991 *
992 * @param pUVM The user mode VM handle.
993 * @param idxL2Tbl The L2 table index to free.
994 * @param pL2TblEntry The L2 table entry pointer to free.
995 */
996static void dbgfR3BpL2TblEntryFree(PUVM pUVM, uint32_t idxL2Tbl, PDBGFBPL2ENTRY pL2TblEntry)
997{
998 uint32_t idChunk = DBGF_BP_L2_IDX_GET_CHUNK_ID(idxL2Tbl);
999 uint32_t idxEntry = DBGF_BP_L2_IDX_GET_ENTRY(idxL2Tbl);
1000
1001 AssertReturnVoid(idChunk < DBGF_BP_L2_TBL_CHUNK_COUNT);
1002 AssertReturnVoid(idxEntry < DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK);
1003
1004 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[idChunk];
1005 AssertPtrReturnVoid(pL2Chunk->pbmAlloc);
1006 AssertReturnVoid(ASMBitTest(pL2Chunk->pbmAlloc, idxEntry));
1007
1008 memset(pL2TblEntry, 0, sizeof(*pL2TblEntry));
1009
1010 ASMAtomicBitClear(pL2Chunk->pbmAlloc, idxEntry);
1011 ASMAtomicIncU32(&pL2Chunk->cFree);
1012}
1013
1014
1015/**
1016 * Sets the enabled flag of the given breakpoint to the given value.
1017 *
1018 * @param pBp The breakpoint to set the state.
1019 * @param fEnabled Enabled status.
1020 */
1021DECLINLINE(void) dbgfR3BpSetEnabled(PDBGFBPINT pBp, bool fEnabled)
1022{
1023 if (fEnabled)
1024 pBp->Pub.fFlags |= DBGF_BP_F_ENABLED;
1025 else
1026 pBp->Pub.fFlags &= ~DBGF_BP_F_ENABLED;
1027}
1028
1029
1030/**
1031 * Assigns a hardware breakpoint state to the given register breakpoint.
1032 *
1033 * @returns VBox status code.
1034 * @param pVM The cross-context VM structure pointer.
1035 * @param hBp The breakpoint handle to assign.
1036 * @param pBp The internal breakpoint state.
1037 *
1038 * @thread Any thread.
1039 */
1040static int dbgfR3BpRegAssign(PVM pVM, DBGFBP hBp, PDBGFBPINT pBp)
1041{
1042 AssertReturn(pBp->Pub.u.Reg.iReg == UINT8_MAX, VERR_DBGF_BP_IPE_3);
1043
1044 for (uint8_t i = 0; i < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); i++)
1045 {
1046 PDBGFBPHW pHwBp = &pVM->dbgf.s.aHwBreakpoints[i];
1047
1048 AssertCompileSize(DBGFBP, sizeof(uint32_t));
1049 if (ASMAtomicCmpXchgU32(&pHwBp->hBp, hBp, NIL_DBGFBP))
1050 {
1051 pHwBp->GCPtr = pBp->Pub.u.Reg.GCPtr;
1052 pHwBp->fType = pBp->Pub.u.Reg.fType;
1053 pHwBp->cb = pBp->Pub.u.Reg.cb;
1054 pHwBp->fEnabled = DBGF_BP_PUB_IS_ENABLED(&pBp->Pub);
1055
1056 pBp->Pub.u.Reg.iReg = i;
1057 return VINF_SUCCESS;
1058 }
1059 }
1060
1061 return VERR_DBGF_NO_MORE_BP_SLOTS;
1062}
1063
1064
1065/**
1066 * Removes the assigned hardware breakpoint state from the given register breakpoint.
1067 *
1068 * @returns VBox status code.
1069 * @param pVM The cross-context VM structure pointer.
1070 * @param hBp The breakpoint handle to remove.
1071 * @param pBp The internal breakpoint state.
1072 *
1073 * @thread Any thread.
1074 */
1075static int dbgfR3BpRegRemove(PVM pVM, DBGFBP hBp, PDBGFBPINT pBp)
1076{
1077 AssertReturn(pBp->Pub.u.Reg.iReg < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints), VERR_DBGF_BP_IPE_3);
1078
1079 PDBGFBPHW pHwBp = &pVM->dbgf.s.aHwBreakpoints[pBp->Pub.u.Reg.iReg];
1080 AssertReturn(pHwBp->hBp == hBp, VERR_DBGF_BP_IPE_4);
1081 AssertReturn(!pHwBp->fEnabled, VERR_DBGF_BP_IPE_5);
1082
1083 pHwBp->GCPtr = 0;
1084 pHwBp->fType = 0;
1085 pHwBp->cb = 0;
1086 ASMCompilerBarrier();
1087
1088 ASMAtomicWriteU32(&pHwBp->hBp, NIL_DBGFBP);
1089 return VINF_SUCCESS;
1090}
1091
1092
1093/**
1094 * Returns the pointer to the L2 table entry from the given index.
1095 *
1096 * @returns Current context pointer to the L2 table entry or NULL if the provided index value is invalid.
1097 * @param pUVM The user mode VM handle.
1098 * @param idxL2 The L2 table index to resolve.
1099 *
1100 * @note The content of the resolved L2 table entry is not validated!
1101 */
1102DECLINLINE(PDBGFBPL2ENTRY) dbgfR3BpL2GetByIdx(PUVM pUVM, uint32_t idxL2)
1103{
1104 uint32_t idChunk = DBGF_BP_L2_IDX_GET_CHUNK_ID(idxL2);
1105 uint32_t idxEntry = DBGF_BP_L2_IDX_GET_ENTRY(idxL2);
1106
1107 AssertReturn(idChunk < DBGF_BP_L2_TBL_CHUNK_COUNT, NULL);
1108 AssertReturn(idxEntry < DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK, NULL);
1109
1110 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[idChunk];
1111 AssertPtrReturn(pL2Chunk->pbmAlloc, NULL);
1112 AssertReturn(ASMBitTest(pL2Chunk->pbmAlloc, idxEntry), NULL);
1113
1114 return &pL2Chunk->CTX_SUFF(pL2Base)[idxEntry];
1115}
1116
1117
1118/**
1119 * Creates a binary search tree with the given root and leaf nodes.
1120 *
1121 * @returns VBox status code.
1122 * @param pUVM The user mode VM handle.
1123 * @param idxL1 The index into the L1 table where the created tree should be linked into.
1124 * @param u32EntryOld The old entry in the L1 table used to compare with in the atomic update.
1125 * @param hBpRoot The root node DBGF handle to assign.
1126 * @param GCPtrRoot The root node's GC pointer to use as a key.
1127 * @param hBpLeaf The leaf node's DBGF handle to assign.
1128 * @param GCPtrLeaf The leaf node's GC pointer to use as a key.
1129 */
1130static int dbgfR3BpInt3L2BstCreate(PUVM pUVM, uint32_t idxL1, uint32_t u32EntryOld,
1131 DBGFBP hBpRoot, RTGCUINTPTR GCPtrRoot,
1132 DBGFBP hBpLeaf, RTGCUINTPTR GCPtrLeaf)
1133{
1134 AssertReturn(GCPtrRoot != GCPtrLeaf, VERR_DBGF_BP_IPE_9);
1135 Assert(DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(GCPtrRoot) == DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(GCPtrLeaf));
1136
1137 /* Allocate two nodes. */
1138 uint32_t idxL2Root = 0;
1139 PDBGFBPL2ENTRY pL2Root = NULL;
1140 int rc = dbgfR3BpL2TblEntryAlloc(pUVM, &idxL2Root, &pL2Root);
1141 if (RT_SUCCESS(rc))
1142 {
1143 uint32_t idxL2Leaf = 0;
1144 PDBGFBPL2ENTRY pL2Leaf = NULL;
1145 rc = dbgfR3BpL2TblEntryAlloc(pUVM, &idxL2Leaf, &pL2Leaf);
1146 if (RT_SUCCESS(rc))
1147 {
1148 dbgfBpL2TblEntryInit(pL2Leaf, hBpLeaf, GCPtrLeaf, DBGF_BP_L2_ENTRY_IDX_END, DBGF_BP_L2_ENTRY_IDX_END, 0 /*iDepth*/);
1149 if (GCPtrLeaf < GCPtrRoot)
1150 dbgfBpL2TblEntryInit(pL2Root, hBpRoot, GCPtrRoot, idxL2Leaf, DBGF_BP_L2_ENTRY_IDX_END, 0 /*iDepth*/);
1151 else
1152 dbgfBpL2TblEntryInit(pL2Root, hBpRoot, GCPtrRoot, DBGF_BP_L2_ENTRY_IDX_END, idxL2Leaf, 0 /*iDepth*/);
1153
1154 uint32_t const u32Entry = DBGF_BP_INT3_L1_ENTRY_CREATE_L2_IDX(idxL2Root);
1155 if (ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], u32Entry, u32EntryOld))
1156 return VINF_SUCCESS;
1157
1158 /* The L1 entry has changed due to another thread racing us during insertion, free nodes and try again. */
1159 dbgfR3BpL2TblEntryFree(pUVM, idxL2Leaf, pL2Leaf);
1160 rc = VINF_TRY_AGAIN;
1161 }
1162
1163 dbgfR3BpL2TblEntryFree(pUVM, idxL2Root, pL2Root);
1164 }
1165
1166 return rc;
1167}
1168
1169
1170/**
1171 * Inserts the given breakpoint handle into an existing binary search tree.
1172 *
1173 * @returns VBox status code.
1174 * @param pUVM The user mode VM handle.
1175 * @param idxL2Root The index of the tree root in the L2 table.
1176 * @param hBp The node DBGF handle to insert.
1178 * @param GCPtr The node's GC pointer to use as a key.
1178 */
1179static int dbgfR3BpInt2L2BstNodeInsert(PUVM pUVM, uint32_t idxL2Root, DBGFBP hBp, RTGCUINTPTR GCPtr)
1180{
1181 GCPtr = DBGF_BP_INT3_L2_KEY_EXTRACT_FROM_ADDR(GCPtr);
1182
1183 /* Allocate a new node first. */
1184 uint32_t idxL2Nd = 0;
1185 PDBGFBPL2ENTRY pL2Nd = NULL;
1186 int rc = dbgfR3BpL2TblEntryAlloc(pUVM, &idxL2Nd, &pL2Nd);
1187 if (RT_SUCCESS(rc))
1188 {
1189 /* Walk the tree and find the correct node to insert to. */
1190 PDBGFBPL2ENTRY pL2Entry = dbgfR3BpL2GetByIdx(pUVM, idxL2Root);
1191 while (RT_LIKELY(pL2Entry))
1192 {
1193 /* Make a copy of the entry. */
1194 DBGFBPL2ENTRY L2Entry;
1195 L2Entry.u64GCPtrKeyAndBpHnd1 = ASMAtomicReadU64(&pL2Entry->u64GCPtrKeyAndBpHnd1);
1196 L2Entry.u64LeftRightIdxDepthBpHnd2 = ASMAtomicReadU64(&pL2Entry->u64LeftRightIdxDepthBpHnd2);
1197
1198 RTGCUINTPTR GCPtrL2Entry = DBGF_BP_L2_ENTRY_GET_GCPTR(L2Entry.u64GCPtrKeyAndBpHnd1);
1199 AssertBreak(GCPtr != GCPtrL2Entry);
1200
1201 /* Not found, get to the next level. */
1202 uint32_t idxL2Next = GCPtr < GCPtrL2Entry
1203 ? DBGF_BP_L2_ENTRY_GET_IDX_LEFT(L2Entry.u64LeftRightIdxDepthBpHnd2)
1204 : DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(L2Entry.u64LeftRightIdxDepthBpHnd2);
1205 if (idxL2Next == DBGF_BP_L2_ENTRY_IDX_END)
1206 {
1207 /* Insert the new node here. */
1208 dbgfBpL2TblEntryInit(pL2Nd, hBp, GCPtr, DBGF_BP_L2_ENTRY_IDX_END, DBGF_BP_L2_ENTRY_IDX_END, 0 /*iDepth*/);
1209 if (GCPtr < GCPtrL2Entry)
1210 dbgfBpL2TblEntryUpdateLeft(pL2Entry, idxL2Nd, 0 /*iDepth*/);
1211 else
1212 dbgfBpL2TblEntryUpdateRight(pL2Entry, idxL2Nd, 0 /*iDepth*/);
1213 return VINF_SUCCESS;
1214 }
1215
1216 pL2Entry = dbgfR3BpL2GetByIdx(pUVM, idxL2Next);
1217 }
1218
1219 dbgfR3BpL2TblEntryFree(pUVM, idxL2Nd, pL2Nd);
1220 rc = VERR_DBGF_BP_L2_LOOKUP_FAILED;
1221 }
1222
1223 return rc;
1224}
1225
1226
1227/**
1228 * Adds the given breakpoint handle keyed with the GC pointer to the proper L2 binary search tree
1229 * possibly creating a new tree.
1230 *
1231 * @returns VBox status code.
1232 * @param pUVM The user mode VM handle.
1233 * @param idxL1 The index into the L1 table the breakpoint uses.
1234 * @param hBp The breakpoint handle which is to be added.
1235 * @param GCPtr The GC pointer the breakpoint is keyed with.
1236 */
1237static int dbgfR3BpInt3L2BstNodeAdd(PUVM pUVM, uint32_t idxL1, DBGFBP hBp, RTGCUINTPTR GCPtr)
1238{
1239 int rc = RTSemFastMutexRequest(pUVM->dbgf.s.hMtxBpL2Wr); AssertRC(rc);
1240
1241 uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1]); /* Re-read, could get raced by a remove operation. */
1242 uint8_t u8Type = DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32Entry);
1243 if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND)
1244 {
1245 /* Create a new search tree, gather the necessary information first. */
1246 DBGFBP hBp2 = DBGF_BP_INT3_L1_ENTRY_GET_BP_HND(u32Entry);
1247 PDBGFBPINT pBp2 = dbgfR3BpGetByHnd(pUVM, hBp2);
1248 AssertStmt(RT_VALID_PTR(pBp2), rc = VERR_DBGF_BP_IPE_7);
1249 if (RT_SUCCESS(rc))
1250 rc = dbgfR3BpInt3L2BstCreate(pUVM, idxL1, u32Entry, hBp, GCPtr, hBp2, pBp2->Pub.u.Sw.GCPtr);
1251 }
1252 else if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX)
1253 rc = dbgfR3BpInt2L2BstNodeInsert(pUVM, DBGF_BP_INT3_L1_ENTRY_GET_L2_IDX(u32Entry), hBp, GCPtr);
1254
1255 int rc2 = RTSemFastMutexRelease(pUVM->dbgf.s.hMtxBpL2Wr); AssertRC(rc2);
1256 return rc;
1257}
1258
1259
1260/**
1261 * Gets the leftmost from the given tree node start index.
1262 *
1263 * @returns VBox status code.
1264 * @param pUVM The user mode VM handle.
1265 * @param idxL2Start The start index to walk from.
1266 * @param pidxL2Leftmost Where to store the L2 table index of the leftmost entry.
1267 * @param ppL2NdLeftmost Where to store the pointer to the leftmost L2 table entry.
1268 * @param pidxL2NdLeftParent Where to store the L2 table index of the leftmost entry's parent.
1269 * @param ppL2NdLeftParent Where to store the pointer to the leftmost L2 table entry's parent.
1270 */
1271static int dbgfR33BpInt3BstGetLeftmostEntryFromNode(PUVM pUVM, uint32_t idxL2Start,
1272 uint32_t *pidxL2Leftmost, PDBGFBPL2ENTRY *ppL2NdLeftmost,
1273 uint32_t *pidxL2NdLeftParent, PDBGFBPL2ENTRY *ppL2NdLeftParent)
1274{
1275 uint32_t idxL2Parent = DBGF_BP_L2_ENTRY_IDX_END;
1276 PDBGFBPL2ENTRY pL2NdParent = NULL;
1277
1278 for (;;)
1279 {
1280 PDBGFBPL2ENTRY pL2Entry = dbgfR3BpL2GetByIdx(pUVM, idxL2Start);
1281 AssertPtr(pL2Entry);
1282
1283 uint32_t idxL2Left = DBGF_BP_L2_ENTRY_GET_IDX_LEFT(pL2Entry->u64LeftRightIdxDepthBpHnd2);
1284 if (idxL2Left == DBGF_BP_L2_ENTRY_IDX_END)
1285 {
1286 *pidxL2Leftmost = idxL2Start;
1287 *ppL2NdLeftmost = pL2Entry;
1288 *pidxL2NdLeftParent = idxL2Parent;
1289 *ppL2NdLeftParent = pL2NdParent;
1290 break;
1291 }
1292
1293 idxL2Parent = idxL2Start;
1294 idxL2Start = idxL2Left;
1295 pL2NdParent = pL2Entry;
1296 }
1297
1298 return VINF_SUCCESS;
1299}
1300
1301
1302/**
1303 * Removes the given node rearranging the tree.
1304 *
1305 * @returns VBox status code.
1306 * @param pUVM The user mode VM handle.
1307 * @param idxL1 The index into the L1 table pointing to the binary search tree containing the node.
1308 * @param idxL2Root The L2 table index where the tree root is located.
1309 * @param idxL2Nd The node index to remove.
1310 * @param pL2Nd The L2 table entry to remove.
1311 * @param idxL2NdParent The parent's index, can be DBGF_BP_L2_ENTRY_IDX_END if the root is about to be removed.
1312 * @param pL2NdParent The parent's L2 table entry, can be NULL if the root is about to be removed.
1313 * @param fLeftChild Flag whether the node is the left child of the parent or the right one.
1314 */
1315static int dbgfR3BpInt3BstNodeRemove(PUVM pUVM, uint32_t idxL1, uint32_t idxL2Root,
1316 uint32_t idxL2Nd, PDBGFBPL2ENTRY pL2Nd,
1317 uint32_t idxL2NdParent, PDBGFBPL2ENTRY pL2NdParent,
1318 bool fLeftChild)
1319{
1320 /*
1321 * If there are only two nodes remaining the tree will get destroyed and the
1322 * L1 entry will be converted to the direct handle type.
1323 */
1324 uint32_t idxL2Left = DBGF_BP_L2_ENTRY_GET_IDX_LEFT(pL2Nd->u64LeftRightIdxDepthBpHnd2);
1325 uint32_t idxL2Right = DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(pL2Nd->u64LeftRightIdxDepthBpHnd2);
1326
1327 Assert(idxL2NdParent != DBGF_BP_L2_ENTRY_IDX_END || !pL2NdParent); RT_NOREF(idxL2NdParent);
1328 uint32_t idxL2ParentNew = DBGF_BP_L2_ENTRY_IDX_END;
1329 if (idxL2Right == DBGF_BP_L2_ENTRY_IDX_END)
1330 idxL2ParentNew = idxL2Left;
1331 else
1332 {
1333 /* Find the leftmost entry of the right subtree and move it to the to-be-removed node's location in the tree. */
1334 PDBGFBPL2ENTRY pL2NdLeftmostParent = NULL;
1335 PDBGFBPL2ENTRY pL2NdLeftmost = NULL;
1336 uint32_t idxL2NdLeftmostParent = DBGF_BP_L2_ENTRY_IDX_END;
1337 uint32_t idxL2Leftmost = DBGF_BP_L2_ENTRY_IDX_END;
1338 int rc = dbgfR33BpInt3BstGetLeftmostEntryFromNode(pUVM, idxL2Right, &idxL2Leftmost, &pL2NdLeftmost,
1339 &idxL2NdLeftmostParent, &pL2NdLeftmostParent);
1340 AssertRCReturn(rc, rc);
1341
1342 if (pL2NdLeftmostParent)
1343 {
1344 /* Rearrange the leftmost entry's parent's pointer. */
1345 dbgfBpL2TblEntryUpdateLeft(pL2NdLeftmostParent, DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(pL2NdLeftmost->u64LeftRightIdxDepthBpHnd2), 0 /*iDepth*/);
1346 dbgfBpL2TblEntryUpdateRight(pL2NdLeftmost, idxL2Right, 0 /*iDepth*/);
1347 }
1348
1349 dbgfBpL2TblEntryUpdateLeft(pL2NdLeftmost, idxL2Left, 0 /*iDepth*/);
1350
1351 /* Update the removed node's parent to point to the new node. */
1352 idxL2ParentNew = idxL2Leftmost;
1353 }
1354
1355 if (pL2NdParent)
1356 {
1357 /* Assign the new L2 index to the proper parent's left or right pointer. */
1358 if (fLeftChild)
1359 dbgfBpL2TblEntryUpdateLeft(pL2NdParent, idxL2ParentNew, 0 /*iDepth*/);
1360 else
1361 dbgfBpL2TblEntryUpdateRight(pL2NdParent, idxL2ParentNew, 0 /*iDepth*/);
1362 }
1363 else
1364 {
1365 /* The root node is removed, set the new root in the L1 table. */
1366 Assert(idxL2ParentNew != DBGF_BP_L2_ENTRY_IDX_END);
1367 idxL2Root = idxL2ParentNew;
1368 ASMAtomicXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], DBGF_BP_INT3_L1_ENTRY_CREATE_L2_IDX(idxL2Left));
1369 }
1370
1371 /* Free the node. */
1372 dbgfR3BpL2TblEntryFree(pUVM, idxL2Nd, pL2Nd);
1373
1374 /*
1375 * Check whether the old/new root is the only node remaining and convert the L1
1376 * table entry to a direct breakpoint handle one in that case.
1377 */
1378 pL2Nd = dbgfR3BpL2GetByIdx(pUVM, idxL2Root);
1379 AssertPtr(pL2Nd);
1380 if ( DBGF_BP_L2_ENTRY_GET_IDX_LEFT(pL2Nd->u64LeftRightIdxDepthBpHnd2) == DBGF_BP_L2_ENTRY_IDX_END
1381 && DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(pL2Nd->u64LeftRightIdxDepthBpHnd2) == DBGF_BP_L2_ENTRY_IDX_END)
1382 {
1383 DBGFBP hBp = DBGF_BP_L2_ENTRY_GET_BP_HND(pL2Nd->u64GCPtrKeyAndBpHnd1, pL2Nd->u64LeftRightIdxDepthBpHnd2);
1384 dbgfR3BpL2TblEntryFree(pUVM, idxL2Root, pL2Nd);
1385 ASMAtomicXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], DBGF_BP_INT3_L1_ENTRY_CREATE_BP_HND(hBp));
1386 }
1387
1388 return VINF_SUCCESS;
1389}
1390
1391
1392/**
1393 * Removes the given breakpoint handle keyed with the GC pointer from the L2 binary search tree
1394 * pointed to by the given L2 root index.
1395 *
1396 * @returns VBox status code.
1397 * @param pUVM The user mode VM handle.
1398 * @param idxL1 The index into the L1 table pointing to the binary search tree.
1399 * @param idxL2Root The L2 table index where the tree root is located.
1400 * @param hBp The breakpoint handle which is to be removed.
1401 * @param GCPtr The GC pointer the breakpoint is keyed with.
1402 */
1403static int dbgfR3BpInt3L2BstRemove(PUVM pUVM, uint32_t idxL1, uint32_t idxL2Root, DBGFBP hBp, RTGCUINTPTR GCPtr)
1404{
1405 GCPtr = DBGF_BP_INT3_L2_KEY_EXTRACT_FROM_ADDR(GCPtr);
1406
1407 int rc = RTSemFastMutexRequest(pUVM->dbgf.s.hMtxBpL2Wr); AssertRC(rc);
1408
1409 uint32_t idxL2Cur = idxL2Root;
1410 uint32_t idxL2Parent = DBGF_BP_L2_ENTRY_IDX_END;
1411 bool fLeftChild = false;
1412 PDBGFBPL2ENTRY pL2EntryParent = NULL;
1413 for (;;)
1414 {
1415 PDBGFBPL2ENTRY pL2Entry = dbgfR3BpL2GetByIdx(pUVM, idxL2Cur);
1416 AssertPtr(pL2Entry);
1417
1418 /* Check whether this node is to be removed. */
1419 RTGCUINTPTR GCPtrL2Entry = DBGF_BP_L2_ENTRY_GET_GCPTR(pL2Entry->u64GCPtrKeyAndBpHnd1);
1420 if (GCPtrL2Entry == GCPtr)
1421 {
1422 Assert(DBGF_BP_L2_ENTRY_GET_BP_HND(pL2Entry->u64GCPtrKeyAndBpHnd1, pL2Entry->u64LeftRightIdxDepthBpHnd2) == hBp); RT_NOREF(hBp);
1423
1424 rc = dbgfR3BpInt3BstNodeRemove(pUVM, idxL1, idxL2Root, idxL2Cur, pL2Entry, idxL2Parent, pL2EntryParent, fLeftChild);
1425 break;
1426 }
1427
1428 pL2EntryParent = pL2Entry;
1429 idxL2Parent = idxL2Cur;
1430
1431 if (GCPtrL2Entry < GCPtr)
1432 {
1433 fLeftChild = true;
1434 idxL2Cur = DBGF_BP_L2_ENTRY_GET_IDX_LEFT(pL2Entry->u64LeftRightIdxDepthBpHnd2);
1435 }
1436 else
1437 {
1438 fLeftChild = false;
1439 idxL2Cur = DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(pL2Entry->u64LeftRightIdxDepthBpHnd2);
1440 }
1441
1442 AssertBreakStmt(idxL2Cur != DBGF_BP_L2_ENTRY_IDX_END, rc = VERR_DBGF_BP_L2_LOOKUP_FAILED);
1443 }
1444
1445 int rc2 = RTSemFastMutexRelease(pUVM->dbgf.s.hMtxBpL2Wr); AssertRC(rc2);
1446
1447 return rc;
1448}
1449
1450
1451/**
1452 * Adds the given int3 breakpoint to the appropriate lookup tables.
1453 *
1454 * @returns VBox status code.
1455 * @param pUVM The user mode VM handle.
1456 * @param hBp The breakpoint handle to add.
1457 * @param pBp The internal breakpoint state.
1458 */
1459static int dbgfR3BpInt3Add(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
1460{
1461 AssertReturn(DBGF_BP_PUB_GET_TYPE(&pBp->Pub) == DBGFBPTYPE_SOFTWARE, VERR_DBGF_BP_IPE_3);
1462
1463 int rc = VINF_SUCCESS;
1464 uint16_t idxL1 = DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(pBp->Pub.u.Sw.GCPtr);
1465 uint8_t cTries = 16;
1466
1467 while (cTries--)
1468 {
1469 uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1]);
1470 if (u32Entry == DBGF_BP_INT3_L1_ENTRY_TYPE_NULL)
1471 {
1472 /*
1473 * No breakpoint assigned so far for this entry, create an entry containing
1474 * the direct breakpoint handle and try to exchange it atomically.
1475 */
1476 u32Entry = DBGF_BP_INT3_L1_ENTRY_CREATE_BP_HND(hBp);
1477 if (ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], u32Entry, DBGF_BP_INT3_L1_ENTRY_TYPE_NULL))
1478 break;
1479 }
1480 else
1481 {
1482 rc = dbgfR3BpInt3L2BstNodeAdd(pUVM, idxL1, hBp, pBp->Pub.u.Sw.GCPtr);
1483 if (rc != VINF_TRY_AGAIN)
1484 break;
1485 }
1486 }
1487
1488 if ( RT_SUCCESS(rc)
1489 && !cTries) /* Too much contention, abort with an error. */
1490 rc = VERR_DBGF_BP_INT3_ADD_TRIES_REACHED;
1491
1492 return rc;
1493}
1494
1495
1496/**
1497 * Adds the given port I/O breakpoint to the appropriate lookup tables.
1498 *
1499 * @returns VBox status code.
1500 * @param pUVM The user mode VM handle.
1501 * @param hBp The breakpoint handle to add.
1502 * @param pBp The internal breakpoint state.
1503 */
1504static int dbgfR3BpPortIoAdd(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
1505{
1506 AssertReturn(DBGF_BP_PUB_GET_TYPE(&pBp->Pub) == DBGFBPTYPE_PORT_IO, VERR_DBGF_BP_IPE_3);
1507
1508 uint16_t uPortExcl = pBp->Pub.u.PortIo.uPort + pBp->Pub.u.PortIo.cPorts;
1509 uint32_t u32Entry = DBGF_BP_INT3_L1_ENTRY_CREATE_BP_HND(hBp);
1510 for (uint16_t idxPort = pBp->Pub.u.PortIo.uPort; idxPort < uPortExcl; idxPort++)
1511 {
1512 bool fXchg = ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocPortIoR3[idxPort], u32Entry, DBGF_BP_INT3_L1_ENTRY_TYPE_NULL);
1513 if (!fXchg)
1514 {
1515 /* Something raced us, so roll back the other registrations. */
1516 while (idxPort-- > pBp->Pub.u.PortIo.uPort) /* Step back past the failed slot and undo the earlier registrations. */
1517 {
1518 fXchg = ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocPortIoR3[idxPort], DBGF_BP_INT3_L1_ENTRY_TYPE_NULL, u32Entry);
1519 Assert(fXchg); RT_NOREF(fXchg);
1520 }
1521
1522 return VERR_DBGF_BP_INT3_ADD_TRIES_REACHED; /** @todo New status code */
1523 }
1524 }
1525
1526 return VINF_SUCCESS;
1527}
1528
1529
1530/**
1531 * Get a breakpoint given by address.
1532 *
1533 * @returns The breakpoint handle on success or NIL_DBGFBP if not found.
1534 * @param pUVM The user mode VM handle.
1535 * @param enmType The breakpoint type.
1536 * @param GCPtr The breakpoint address.
1537 * @param ppBp Where to store the pointer to the internal breakpoint state on success, optional.
1538 */
1539static DBGFBP dbgfR3BpGetByAddr(PUVM pUVM, DBGFBPTYPE enmType, RTGCUINTPTR GCPtr, PDBGFBPINT *ppBp)
1540{
1541 DBGFBP hBp = NIL_DBGFBP;
1542
1543 switch (enmType)
1544 {
1545 case DBGFBPTYPE_REG:
1546 {
1547 PVM pVM = pUVM->pVM;
1548 VM_ASSERT_VALID_EXT_RETURN(pVM, NIL_DBGFBP);
1549
1550 for (uint32_t i = 0; i < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); i++)
1551 {
1552 PDBGFBPHW pHwBp = &pVM->dbgf.s.aHwBreakpoints[i];
1553
1554 AssertCompileSize(DBGFBP, sizeof(uint32_t));
1555 DBGFBP hBpTmp = ASMAtomicReadU32(&pHwBp->hBp);
1556 if ( pHwBp->GCPtr == GCPtr
1557 && hBpTmp != NIL_DBGFBP)
1558 {
1559 hBp = hBpTmp;
1560 break;
1561 }
1562 }
1563 break;
1564 }
1565
1566 case DBGFBPTYPE_SOFTWARE:
1567 {
1568 const uint16_t idxL1 = DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(GCPtr);
1569 const uint32_t u32L1Entry = ASMAtomicReadU32(&pUVM->dbgf.s.CTX_SUFF(paBpLocL1)[idxL1]);
1570
1571 if (u32L1Entry != DBGF_BP_INT3_L1_ENTRY_TYPE_NULL)
1572 {
1573 uint8_t u8Type = DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32L1Entry);
1574 if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND)
1575 hBp = DBGF_BP_INT3_L1_ENTRY_GET_BP_HND(u32L1Entry);
1576 else if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX)
1577 {
1578 RTGCUINTPTR GCPtrKey = DBGF_BP_INT3_L2_KEY_EXTRACT_FROM_ADDR(GCPtr);
1579 PDBGFBPL2ENTRY pL2Nd = dbgfR3BpL2GetByIdx(pUVM, DBGF_BP_INT3_L1_ENTRY_GET_L2_IDX(u32L1Entry));
1580
1581 for (;;)
1582 {
1583 AssertPtr(pL2Nd);
1584
1585 RTGCUINTPTR GCPtrL2Entry = DBGF_BP_L2_ENTRY_GET_GCPTR(pL2Nd->u64GCPtrKeyAndBpHnd1);
1586 if (GCPtrKey == GCPtrL2Entry)
1587 {
1588 hBp = DBGF_BP_L2_ENTRY_GET_BP_HND(pL2Nd->u64GCPtrKeyAndBpHnd1, pL2Nd->u64LeftRightIdxDepthBpHnd2);
1589 break;
1590 }
1591
1592 /* Not found, get to the next level. */
1593 uint32_t idxL2Next = GCPtrKey < GCPtrL2Entry
1594 ? DBGF_BP_L2_ENTRY_GET_IDX_LEFT(pL2Nd->u64LeftRightIdxDepthBpHnd2)
1595 : DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(pL2Nd->u64LeftRightIdxDepthBpHnd2);
1596 /* Address not found if the entry denotes the end. */
1597 if (idxL2Next == DBGF_BP_L2_ENTRY_IDX_END)
1598 break;
1599
1600 pL2Nd = dbgfR3BpL2GetByIdx(pUVM, idxL2Next);
1601 }
1602 }
1603 }
1604 break;
1605 }
1606
1607 default:
1608 AssertMsgFailed(("enmType=%d\n", enmType));
1609 break;
1610 }
1611
1612 if ( hBp != NIL_DBGFBP
1613 && ppBp)
1614 *ppBp = dbgfR3BpGetByHnd(pUVM, hBp);
1615 return hBp;
1616}
1617
1618
1619/**
1620 * Get a port I/O breakpoint given by the range.
1621 *
1622 * @returns The breakpoint handle on success or NIL_DBGFBP if not found.
1623 * @param pUVM The user mode VM handle.
1624 * @param uPort First port in the range.
1625 * @param cPorts Number of ports in the range.
1626 * @param ppBp Where to store the pointer to the internal breakpoint state on success, optional.
1627 */
1628static DBGFBP dbgfR3BpPortIoGetByRange(PUVM pUVM, RTIOPORT uPort, RTIOPORT cPorts, PDBGFBPINT *ppBp)
1629{
1630 DBGFBP hBp = NIL_DBGFBP;
1631
1632 for (RTIOPORT idxPort = uPort; idxPort < uPort + cPorts; idxPort++)
1633 {
1634 const uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.CTX_SUFF(paBpLocPortIo)[idxPort]);
1635 if (u32Entry != DBGF_BP_INT3_L1_ENTRY_TYPE_NULL)
1636 {
1637 hBp = DBGF_BP_INT3_L1_ENTRY_GET_BP_HND(u32Entry);
1638 break;
1639 }
1640 }
1641
1642 if ( hBp != NIL_DBGFBP
1643 && ppBp)
1644 *ppBp = dbgfR3BpGetByHnd(pUVM, hBp);
1645 return hBp;
1646}
1647
1648
1649/**
1650 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
1651 */
1652static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpInt3RemoveEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
1653{
1654 DBGFBP hBp = (DBGFBP)(uintptr_t)pvUser;
1655
1656 VMCPU_ASSERT_EMT(pVCpu);
1657 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1658
1659 PUVM pUVM = pVM->pUVM;
1660 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
1661 AssertPtrReturn(pBp, VERR_DBGF_BP_IPE_8);
1662
1663 int rc = VINF_SUCCESS;
1664 if (pVCpu->idCpu == 0)
1665 {
1666 uint16_t idxL1 = DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(pBp->Pub.u.Sw.GCPtr);
1667 uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1]);
1668 AssertReturn(u32Entry != DBGF_BP_INT3_L1_ENTRY_TYPE_NULL, VERR_DBGF_BP_IPE_6);
1669
1670 uint8_t u8Type = DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32Entry);
1671 if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND)
1672 {
1673 /* Single breakpoint, just exchange atomically with the null value. */
1674 if (!ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], DBGF_BP_INT3_L1_ENTRY_TYPE_NULL, u32Entry))
1675 {
1676 /*
1677 * A breakpoint addition must have raced us converting the L1 entry to an L2 index type, re-read
1678 * and remove the node from the created binary search tree.
1679 *
1680 * This works because after the entry was converted to an L2 index it can only be converted back
1681 * to a direct handle by removing one or more nodes which always goes through the fast mutex
1682 * protecting the L2 table. Likewise adding a new breakpoint requires grabbing the mutex as well
1683 * so there is serialization here and the node can be removed safely without having to worry about
1684 * concurrent tree modifications.
1685 */
1686 u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1]);
1687 AssertReturn(DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32Entry) == DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX, VERR_DBGF_BP_IPE_9);
1688
1689 rc = dbgfR3BpInt3L2BstRemove(pUVM, idxL1, DBGF_BP_INT3_L1_ENTRY_GET_L2_IDX(u32Entry),
1690 hBp, pBp->Pub.u.Sw.GCPtr);
1691 }
1692 }
1693 else if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX)
1694 rc = dbgfR3BpInt3L2BstRemove(pUVM, idxL1, DBGF_BP_INT3_L1_ENTRY_GET_L2_IDX(u32Entry),
1695 hBp, pBp->Pub.u.Sw.GCPtr);
1696 }
1697
1698 return rc;
1699}
1700
1701
1702/**
1703 * Removes the given int3 breakpoint from all lookup tables.
1704 *
1705 * @returns VBox status code.
1706 * @param pUVM The user mode VM handle.
1707 * @param hBp The breakpoint handle to remove.
1708 * @param pBp The internal breakpoint state.
1709 */
1710static int dbgfR3BpInt3Remove(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
1711{
1712 AssertReturn(DBGF_BP_PUB_GET_TYPE(&pBp->Pub) == DBGFBPTYPE_SOFTWARE, VERR_DBGF_BP_IPE_3);
1713
1714 /*
1715 * This has to be done by an EMT rendezvous in order to not have any EMT traversing
1716 * an L2 tree while the node for this breakpoint is being removed.
1717 */
1718 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpInt3RemoveEmtWorker, (void *)(uintptr_t)hBp);
1719}
1720
1721
1722/**
1723 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
1724 */
1725static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpPortIoRemoveEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
1726{
1727 DBGFBP hBp = (DBGFBP)(uintptr_t)pvUser;
1728
1729 VMCPU_ASSERT_EMT(pVCpu);
1730 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1731
1732 PUVM pUVM = pVM->pUVM;
1733 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
1734 AssertPtrReturn(pBp, VERR_DBGF_BP_IPE_8);
1735
1736 int rc = VINF_SUCCESS;
1737 if (pVCpu->idCpu == 0)
1738 {
1739 /*
1740 * Remove the whole range; there shouldn't be any other breakpoint configured for this range, as overlapping
1741 * port I/O breakpoints are not allowed right now.
1742 */
1743 uint32_t uPortExcl = (uint32_t)pBp->Pub.u.PortIo.uPort + pBp->Pub.u.PortIo.cPorts; /* 32-bit so a range ending at port 0xffff doesn't wrap to 0. */
1744 for (uint32_t idxPort = pBp->Pub.u.PortIo.uPort; idxPort < uPortExcl; idxPort++)
1745 {
1746 uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocPortIoR3[idxPort]);
1747 AssertReturn(u32Entry != DBGF_BP_INT3_L1_ENTRY_TYPE_NULL, VERR_DBGF_BP_IPE_6);
1748
1749 uint8_t u8Type = DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32Entry);
1750 AssertReturn(u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND, VERR_DBGF_BP_IPE_7);
1751
1752 bool fXchg = ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocPortIoR3[idxPort], DBGF_BP_INT3_L1_ENTRY_TYPE_NULL, u32Entry);
1753 Assert(fXchg); RT_NOREF(fXchg);
1754 }
1755 }
1756
1757 return rc;
1758}
1759
1760
1761/**
1762 * Removes the given port I/O breakpoint from all lookup tables.
1763 *
1764 * @returns VBox status code.
1765 * @param pUVM The user mode VM handle.
1766 * @param hBp The breakpoint handle to remove.
1767 * @param pBp The internal breakpoint state.
1768 */
1769static int dbgfR3BpPortIoRemove(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
1770{
1771 AssertReturn(DBGF_BP_PUB_GET_TYPE(&pBp->Pub) == DBGFBPTYPE_PORT_IO, VERR_DBGF_BP_IPE_3);
1772
1773 /*
1774 * This has to be done by an EMT rendezvous in order to not have an EMT accessing
1775 * the breakpoint while it is being removed.
1776 */
1777 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpPortIoRemoveEmtWorker, (void *)(uintptr_t)hBp);
1778}
1779
1780
1781/**
1782 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
1783 */
1784static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpRegRecalcOnCpu(PVM pVM, PVMCPU pVCpu, void *pvUser)
1785{
1786 RT_NOREF(pvUser);
1787
1788#ifdef VBOX_VMM_TARGET_X86
1789 /*
1790 * CPU 0 updates the enabled hardware breakpoint counts.
1791 */
1792 if (pVCpu->idCpu == 0)
1793 {
1794 pVM->dbgf.s.cEnabledHwBreakpoints = 0;
1795 pVM->dbgf.s.cEnabledHwIoBreakpoints = 0;
1796
1797 for (uint32_t iBp = 0; iBp < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); iBp++)
1798 {
1799 if (pVM->dbgf.s.aHwBreakpoints[iBp].fEnabled)
1800 {
1801 pVM->dbgf.s.cEnabledHwBreakpoints += 1;
1802 pVM->dbgf.s.cEnabledHwIoBreakpoints += pVM->dbgf.s.aHwBreakpoints[iBp].fType == X86_DR7_RW_IO;
1803 }
1804 }
1805 }
1806
1807 return CPUMRecalcHyperDRx(pVCpu, UINT8_MAX);
1808
1809#else
1810 /** @todo arm64: hardware breakpoints. */
1811 RT_NOREF(pVM, pVCpu);
1812 AssertReleaseFailed();
1813 return VERR_NOT_IMPLEMENTED;
1814#endif
1815}
1816
1817
1818/**
1819 * Arms the given breakpoint.
1820 *
1821 * @returns VBox status code.
1822 * @param pUVM The user mode VM handle.
1823 * @param hBp The breakpoint handle to arm.
1824 * @param pBp The internal breakpoint state pointer for the handle.
1825 *
1826 * @thread Any thread.
1827 */
1828static int dbgfR3BpArm(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
1829{
1830 int rc;
1831 PVM pVM = pUVM->pVM;
1832
1833 Assert(!DBGF_BP_PUB_IS_ENABLED(&pBp->Pub));
1834 switch (DBGF_BP_PUB_GET_TYPE(&pBp->Pub))
1835 {
1836 case DBGFBPTYPE_REG:
1837 {
1838 Assert(pBp->Pub.u.Reg.iReg < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints));
1839 PDBGFBPHW pBpHw = &pVM->dbgf.s.aHwBreakpoints[pBp->Pub.u.Reg.iReg];
1840 Assert(pBpHw->hBp == hBp); RT_NOREF(hBp);
1841
1842 dbgfR3BpSetEnabled(pBp, true /*fEnabled*/);
1843 ASMAtomicWriteBool(&pBpHw->fEnabled, true);
1844 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpRegRecalcOnCpu, NULL);
1845 if (RT_FAILURE(rc))
1846 {
1847 ASMAtomicWriteBool(&pBpHw->fEnabled, false);
1848 dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
1849 }
1850 break;
1851 }
1852 case DBGFBPTYPE_SOFTWARE:
1853 {
1854 dbgfR3BpSetEnabled(pBp, true /*fEnabled*/);
1855
1856 /** @todo When we enable the first software breakpoint we should do this in an EMT rendezvous
1857 * as the VMX code intercepts #BP only when at least one int3 breakpoint is enabled.
1858 * A racing vCPU might trigger it and forward it to the guest causing panics/crashes/havoc. */
1859 /*
1860 * Save the original instruction and replace it with a breakpoint instruction.
1861 */
1862#ifdef VBOX_VMM_TARGET_ARMV8
1863 static const uint32_t s_BreakpointInstr = Armv8A64MkInstrBrk(0xc0de);
1864 rc = PGMPhysSimpleReadGCPhys(pVM, &pBp->Pub.u.Sw.Arch.armv8.u32Org, pBp->Pub.u.Sw.PhysAddr,
1865 sizeof(pBp->Pub.u.Sw.Arch.armv8.u32Org));
1866#elif defined(VBOX_VMM_TARGET_X86)
1867 static const uint8_t s_BreakpointInstr = 0xcc;
1868 rc = PGMPhysSimpleReadGCPhys(pVM, &pBp->Pub.u.Sw.Arch.x86.bOrg, pBp->Pub.u.Sw.PhysAddr,
1869 sizeof(pBp->Pub.u.Sw.Arch.x86.bOrg));
1870#else
1871# error "port me"
1872#endif
1873 if (RT_SUCCESS(rc))
1874 {
1875 rc = PGMPhysSimpleWriteGCPhys(pVM, pBp->Pub.u.Sw.PhysAddr, &s_BreakpointInstr, sizeof(s_BreakpointInstr));
1876 if (RT_SUCCESS(rc))
1877 {
1878 ASMAtomicIncU32(&pVM->dbgf.s.cEnabledSwBreakpoints);
1879 Log(("DBGF: Set breakpoint at %RGv (Phys %RGp)\n", pBp->Pub.u.Sw.GCPtr, pBp->Pub.u.Sw.PhysAddr));
1880 break;
1881 }
1882 }
1883 dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
1884 break;
1885 }
1886 case DBGFBPTYPE_PORT_IO:
1887 {
1888 dbgfR3BpSetEnabled(pBp, true /*fEnabled*/);
1889 ASMAtomicIncU32(&pUVM->dbgf.s.cPortIoBps);
1890 IOMR3NotifyBreakpointCountChange(pVM, true /*fPortIo*/, false /*fMmio*/);
1891 rc = VINF_SUCCESS;
1892 break;
1893 }
1894 case DBGFBPTYPE_MMIO:
1895 rc = VERR_NOT_IMPLEMENTED;
1896 break;
1897 default:
1898 AssertMsgFailedReturn(("Invalid breakpoint type %d\n", DBGF_BP_PUB_GET_TYPE(&pBp->Pub)),
1899 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1900 }
1901
1902 return rc;
1903}
1904
1905
1906/**
1907 * Disarms the given breakpoint.
1908 *
1909 * @returns VBox status code.
1910 * @param pUVM The user mode VM handle.
1911 * @param hBp The breakpoint handle to disarm.
1912 * @param pBp The internal breakpoint state pointer for the handle.
1913 *
1914 * @thread Any thread.
1915 */
1916static int dbgfR3BpDisarm(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
1917{
1918 int rc;
1919 PVM pVM = pUVM->pVM;
1920
1921 Assert(DBGF_BP_PUB_IS_ENABLED(&pBp->Pub));
1922 switch (DBGF_BP_PUB_GET_TYPE(&pBp->Pub))
1923 {
1924 case DBGFBPTYPE_REG:
1925 {
1926 Assert(pBp->Pub.u.Reg.iReg < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints));
1927 PDBGFBPHW pBpHw = &pVM->dbgf.s.aHwBreakpoints[pBp->Pub.u.Reg.iReg];
1928 Assert(pBpHw->hBp == hBp); RT_NOREF(hBp);
1929
1930 dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
1931 ASMAtomicWriteBool(&pBpHw->fEnabled, false);
1932 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpRegRecalcOnCpu, NULL);
1933 if (RT_FAILURE(rc))
1934 {
1935 ASMAtomicWriteBool(&pBpHw->fEnabled, true);
1936 dbgfR3BpSetEnabled(pBp, true /*fEnabled*/);
1937 }
1938 break;
1939 }
1940 case DBGFBPTYPE_SOFTWARE:
1941 {
1942 /*
1943 * Check that the current opcode is still the breakpoint instruction (int3 on x86, brk on ARMv8)
1944 * and restore the original one if so. We currently ignore unexpected opcodes.
1945 */
1946#ifdef VBOX_VMM_TARGET_ARMV8
1947 uint32_t u32Current = 0;
1948 rc = PGMPhysSimpleReadGCPhys(pVM, &u32Current, pBp->Pub.u.Sw.PhysAddr, sizeof(u32Current));
1949 if ( RT_SUCCESS(rc)
1950 && u32Current == Armv8A64MkInstrBrk(0xc0de))
1951 rc = PGMPhysSimpleWriteGCPhys(pVM, pBp->Pub.u.Sw.PhysAddr, &pBp->Pub.u.Sw.Arch.armv8.u32Org,
1952 sizeof(pBp->Pub.u.Sw.Arch.armv8.u32Org));
1953#else
1954 uint8_t bCurrent = 0;
1955 rc = PGMPhysSimpleReadGCPhys(pVM, &bCurrent, pBp->Pub.u.Sw.PhysAddr, sizeof(bCurrent));
1956 if ( RT_SUCCESS(rc)
1957 && bCurrent == 0xcc)
1958 rc = PGMPhysSimpleWriteGCPhys(pVM, pBp->Pub.u.Sw.PhysAddr, &pBp->Pub.u.Sw.Arch.x86.bOrg,
1959 sizeof(pBp->Pub.u.Sw.Arch.x86.bOrg));
1960#endif
1961 if (RT_SUCCESS(rc))
1962 {
1963 ASMAtomicDecU32(&pVM->dbgf.s.cEnabledSwBreakpoints);
1964 dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
1965 Log(("DBGF: Removed breakpoint at %RGv (Phys %RGp)\n", pBp->Pub.u.Sw.GCPtr, pBp->Pub.u.Sw.PhysAddr));
1966 }
1967 break;
1968 }
1969 case DBGFBPTYPE_PORT_IO:
1970 {
1971 dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
1972 uint32_t cPortIoBps = ASMAtomicDecU32(&pUVM->dbgf.s.cPortIoBps);
1973 if (!cPortIoBps) /** @todo Need to gather all EMTs to not have a stray EMT accessing BP data when it might go away. */
1974 IOMR3NotifyBreakpointCountChange(pVM, false /*fPortIo*/, false /*fMmio*/);
1975 rc = VINF_SUCCESS;
1976 break;
1977 }
1978 case DBGFBPTYPE_MMIO:
1979 rc = VERR_NOT_IMPLEMENTED;
1980 break;
1981 default:
1982 AssertMsgFailedReturn(("Invalid breakpoint type %d\n", DBGF_BP_PUB_GET_TYPE(&pBp->Pub)),
1983 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1984 }
1985
1986 return rc;
1987}
1988
1989
1990/**
1991 * Worker for DBGFR3BpHit() differentiating on the breakpoint type.
1992 *
1993 * @returns Strict VBox status code.
1994 * @param pVM The cross context VM structure.
1995 * @param pVCpu The vCPU the breakpoint event happened on.
1996 * @param hBp The breakpoint handle.
1997 * @param pBp The breakpoint data.
1998 * @param pBpOwner The breakpoint owner data.
1999 *
2000 * @thread EMT
2001 */
2002static VBOXSTRICTRC dbgfR3BpHit(PVM pVM, PVMCPU pVCpu, DBGFBP hBp, PDBGFBPINT pBp, PCDBGFBPOWNERINT pBpOwner)
2003{
2004 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2005
2006 switch (DBGF_BP_PUB_GET_TYPE(&pBp->Pub))
2007 {
2008 case DBGFBPTYPE_REG:
2009 case DBGFBPTYPE_SOFTWARE:
2010 {
2011 if (DBGF_BP_PUB_IS_EXEC_BEFORE(&pBp->Pub))
2012 rcStrict = pBpOwner->pfnBpHitR3(pVM, pVCpu->idCpu, pBp->pvUserR3, hBp, &pBp->Pub, DBGF_BP_F_HIT_EXEC_BEFORE);
2013 if (rcStrict == VINF_SUCCESS)
2014 {
2015 /** @todo Need to take more care with the reading there if the breakpoint is
2016 * on the edge of a page. */
2017 uint8_t abInstr[DBGF_BP_INSN_MAX];
2018 RTGCPTR const GCPtrInstr = CPUMGetGuestFlatPC(pVCpu);
2019 rcStrict = PGMPhysSimpleReadGCPtr(pVCpu, &abInstr[0], GCPtrInstr, sizeof(abInstr));
2020 if (rcStrict == VINF_SUCCESS)
2021 {
2022#ifdef VBOX_VMM_TARGET_X86
2023 /* Replace the int3 with the original instruction byte. */
2024 abInstr[0] = pBp->Pub.u.Sw.Arch.x86.bOrg;
2025 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, GCPtrInstr, &abInstr[0], sizeof(abInstr));
2026#else
2027 /** @todo arm64: implement stepping over breakpoint. Fix unnecessary opcode reading. */
2028 AssertFailed();
2029 rcStrict = VERR_NOT_IMPLEMENTED;
2030#endif
2031 if ( rcStrict == VINF_SUCCESS
2032 && DBGF_BP_PUB_IS_EXEC_AFTER(&pBp->Pub))
2033 {
2034 VBOXSTRICTRC rcStrict2 = pBpOwner->pfnBpHitR3(pVM, pVCpu->idCpu, pBp->pvUserR3, hBp, &pBp->Pub,
2035 DBGF_BP_F_HIT_EXEC_AFTER);
2036 if (rcStrict2 == VINF_SUCCESS)
2037 return rcStrict;
2038 if (rcStrict2 != VINF_DBGF_BP_HALT)
2039 return VERR_DBGF_BP_OWNER_CALLBACK_WRONG_STATUS;
2040 }
2041 else
2042 return rcStrict;
2043 }
2044 }
2045 break;
2046 }
2047 case DBGFBPTYPE_PORT_IO:
2048 case DBGFBPTYPE_MMIO:
2049 {
2050 pVCpu->dbgf.s.fBpIoActive = false;
2051 rcStrict = pBpOwner->pfnBpIoHitR3(pVM, pVCpu->idCpu, pBp->pvUserR3, hBp, &pBp->Pub,
2052 pVCpu->dbgf.s.fBpIoBefore
2053 ? DBGF_BP_F_HIT_EXEC_BEFORE
2054 : DBGF_BP_F_HIT_EXEC_AFTER,
2055 pVCpu->dbgf.s.fBpIoAccess, pVCpu->dbgf.s.uBpIoAddress,
2056 pVCpu->dbgf.s.uBpIoValue);
2057
2058 break;
2059 }
2060 default:
2061 AssertMsgFailedReturn(("Invalid breakpoint type %d\n", DBGF_BP_PUB_GET_TYPE(&pBp->Pub)),
2062 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
2063 }
2064
2065 return rcStrict;
2066}
2067
2068
2069/**
2070 * Creates a new breakpoint owner returning a handle which can be used when setting breakpoints.
2071 *
2072 * @returns VBox status code.
2073 * @retval VERR_DBGF_BP_OWNER_NO_MORE_HANDLES if there are no more free owner handles available.
2074 * @param pUVM The user mode VM handle.
2075 * @param pfnBpHit The R3 callback which is called when a breakpoint with the owner handle is hit.
2076 * @param pfnBpIoHit The R3 callback which is called when an I/O breakpoint with the owner handle is hit.
2077 * @param phBpOwner Where to store the owner handle on success.
2078 *
2079 * @thread Any thread but might defer work to EMT on the first call.
2080 */
2081VMMR3DECL(int) DBGFR3BpOwnerCreate(PUVM pUVM, PFNDBGFBPHIT pfnBpHit, PFNDBGFBPIOHIT pfnBpIoHit, PDBGFBPOWNER phBpOwner)
2082{
2083 /*
2084 * Validate the input.
2085 */
2086 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2087 AssertReturn(pfnBpHit || pfnBpIoHit, VERR_INVALID_PARAMETER);
2088 AssertPtrReturn(phBpOwner, VERR_INVALID_POINTER);
2089
2090 int rc = dbgfR3BpOwnerEnsureInit(pUVM);
2091 AssertRCReturn(rc, rc);
2092
2093 /* Try to find a free entry in the owner table. */
2094 for (;;)
2095 {
2096 /* Scan the associated bitmap for a free entry. */
2097 int32_t iClr = ASMBitFirstClear(pUVM->dbgf.s.pbmBpOwnersAllocR3, DBGF_BP_OWNER_COUNT_MAX);
2098 if (iClr != -1)
2099 {
2100 /*
2101 * Try to allocate, we could get raced here as well. In that case
2102 * we try again.
2103 */
2104 if (!ASMAtomicBitTestAndSet(pUVM->dbgf.s.pbmBpOwnersAllocR3, iClr))
2105 {
2106 PDBGFBPOWNERINT pBpOwner = &pUVM->dbgf.s.paBpOwnersR3[iClr];
2107 pBpOwner->cRefs = 1;
2108 pBpOwner->pfnBpHitR3 = pfnBpHit;
2109 pBpOwner->pfnBpIoHitR3 = pfnBpIoHit;
2110
2111 *phBpOwner = (DBGFBPOWNER)iClr;
2112 return VINF_SUCCESS;
2113 }
2114 /* else Retry with another spot. */
2115 }
2116 else /* no free entry in bitmap, out of entries. */
2117 {
2118 rc = VERR_DBGF_BP_OWNER_NO_MORE_HANDLES;
2119 break;
2120 }
2121 }
2122
2123 return rc;
2124}
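
/*
 * Example (informal sketch): registering a breakpoint owner. The callback shape below
 * mirrors the invocation in dbgfR3BpHit(); the function and variable names are
 * illustrative only and not part of this file.
 *
 *  static DECLCALLBACK(VBOXSTRICTRC) myBpHit(PVM pVM, VMCPUID idCpu, void *pvUser,
 *                                            DBGFBP hBp, PCDBGFBPPUB pBpPub, uint16_t fFlags)
 *  {
 *      RT_NOREF(pVM, idCpu, pvUser, hBp, pBpPub, fFlags);
 *      return VINF_DBGF_BP_HALT;   // VINF_SUCCESS would resume guest execution instead.
 *  }
 *
 *  DBGFBPOWNER hBpOwner = NIL_DBGFBPOWNER;
 *  int rc = DBGFR3BpOwnerCreate(pUVM, myBpHit, NULL, &hBpOwner);
 */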
2125
2126
2127/**
2128 * Destroys the owner identified by the given handle.
2129 *
2130 * @returns VBox status code.
2131 * @retval VERR_INVALID_HANDLE if the given owner handle is invalid.
2132 * @retval VERR_DBGF_OWNER_BUSY if there are still breakpoints set with the given owner handle.
2133 * @param pUVM The user mode VM handle.
2134 * @param hBpOwner The breakpoint owner handle to destroy.
2135 */
2136VMMR3DECL(int) DBGFR3BpOwnerDestroy(PUVM pUVM, DBGFBPOWNER hBpOwner)
2137{
2138 /*
2139 * Validate the input.
2140 */
2141 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2142 AssertReturn(hBpOwner != NIL_DBGFBPOWNER, VERR_INVALID_HANDLE);
2143
2144 int rc = dbgfR3BpOwnerEnsureInit(pUVM);
2145 AssertRCReturn(rc, rc);
2146
2147 PDBGFBPOWNERINT pBpOwner = dbgfR3BpOwnerGetByHnd(pUVM, hBpOwner);
2148 if (RT_LIKELY(pBpOwner))
2149 {
2150 if (ASMAtomicReadU32(&pBpOwner->cRefs) == 1)
2151 {
2152 pBpOwner->pfnBpHitR3 = NULL;
2153 ASMAtomicDecU32(&pBpOwner->cRefs);
2154 ASMAtomicBitClear(pUVM->dbgf.s.pbmBpOwnersAllocR3, hBpOwner);
2155 }
2156 else
2157 rc = VERR_DBGF_OWNER_BUSY;
2158 }
2159 else
2160 rc = VERR_INVALID_HANDLE;
2161
2162 return rc;
2163}
2164
2165
2166/**
2167 * Sets a breakpoint (int 3 based).
2168 *
2169 * @returns VBox status code.
2170 * @param pUVM The user mode VM handle.
2171 * @param idSrcCpu The ID of the virtual CPU used for the
2172 * breakpoint address resolution.
2173 * @param pAddress The address of the breakpoint.
2174 * @param iHitTrigger The hit count at which the breakpoint starts triggering.
2175 * Use 0 (or 1) if it's gonna trigger at once.
2176 * @param iHitDisable The hit count which disables the breakpoint.
2177 * Use ~(uint64_t)0 if it's never gonna be disabled.
2178 * @param phBp Where to store the breakpoint handle on success.
2179 *
2180 * @thread Any thread.
2181 */
2182VMMR3DECL(int) DBGFR3BpSetInt3(PUVM pUVM, VMCPUID idSrcCpu, PCDBGFADDRESS pAddress,
2183 uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
2184{
2185 return DBGFR3BpSetInt3Ex(pUVM, NIL_DBGFBPOWNER, NULL /*pvUser*/, idSrcCpu, pAddress,
2186 DBGF_BP_F_DEFAULT, iHitTrigger, iHitDisable, phBp);
2187}
2188
2189
2190/**
2191 * Sets a breakpoint (int 3 based) - extended version.
2192 *
2193 * @returns VBox status code.
2194 * @param pUVM The user mode VM handle.
2195 * @param hOwner The owner handle, use NIL_DBGFBPOWNER if no special owner attached.
2196 * @param pvUser Opaque user data to pass in the owner callback.
2197 * @param idSrcCpu The ID of the virtual CPU used for the
2198 * breakpoint address resolution.
2199 * @param pAddress The address of the breakpoint.
2200 * @param fFlags Combination of DBGF_BP_F_XXX.
2201 * @param iHitTrigger The hit count at which the breakpoint starts triggering.
2202 * Use 0 (or 1) if it's gonna trigger at once.
2203 * @param iHitDisable The hit count which disables the breakpoint.
2204 * Use ~(uint64_t)0 if it's never gonna be disabled.
2205 * @param phBp Where to store the breakpoint handle on success.
2206 *
2207 * @thread Any thread.
2208 */
2209VMMR3DECL(int) DBGFR3BpSetInt3Ex(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser,
2210 VMCPUID idSrcCpu, PCDBGFADDRESS pAddress, uint16_t fFlags,
2211 uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
2212{
2213 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2214 AssertReturn(hOwner != NIL_DBGFBPOWNER || pvUser == NULL, VERR_INVALID_PARAMETER);
2215 AssertReturn(DBGFR3AddrIsValid(pUVM, pAddress), VERR_INVALID_PARAMETER);
2216 AssertReturn(iHitTrigger <= iHitDisable, VERR_INVALID_PARAMETER);
2217 AssertPtrReturn(phBp, VERR_INVALID_POINTER);
2218
2219 int rc = dbgfR3BpEnsureInit(pUVM);
2220 AssertRCReturn(rc, rc);
2221
2222 /*
2223 * Translate & save the breakpoint address into a guest-physical address.
2224 */
2225 RTGCPHYS GCPhysBpAddr = NIL_RTGCPHYS;
2226 rc = DBGFR3AddrToPhys(pUVM, idSrcCpu, pAddress, &GCPhysBpAddr);
2227 if (RT_SUCCESS(rc))
2228 {
2229 /*
2230 * The physical address from DBGFR3AddrToPhys() is the start of the page;
2231 * we need the exact byte offset into the page when writing to it in dbgfR3BpArm().
2232 */
2233 GCPhysBpAddr |= (pAddress->FlatPtr & X86_PAGE_OFFSET_MASK);
2234
2235 PDBGFBPINT pBp = NULL;
2236 DBGFBP hBp = dbgfR3BpGetByAddr(pUVM, DBGFBPTYPE_SOFTWARE, pAddress->FlatPtr, &pBp);
2237 if ( hBp != NIL_DBGFBP
2238 && pBp->Pub.u.Sw.PhysAddr == GCPhysBpAddr)
2239 {
2240 rc = VINF_SUCCESS;
2241 if ( !DBGF_BP_PUB_IS_ENABLED(&pBp->Pub)
2242 && (fFlags & DBGF_BP_F_ENABLED))
2243 rc = dbgfR3BpArm(pUVM, hBp, pBp);
2244 if (RT_SUCCESS(rc))
2245 {
2246 rc = VINF_DBGF_BP_ALREADY_EXIST;
2247 *phBp = hBp;
2248 }
2249 return rc;
2250 }
2251
2252 rc = dbgfR3BpAlloc(pUVM, hOwner, pvUser, DBGFBPTYPE_SOFTWARE, fFlags, iHitTrigger, iHitDisable, &hBp, &pBp);
2253 if (RT_SUCCESS(rc))
2254 {
2255 pBp->Pub.u.Sw.PhysAddr = GCPhysBpAddr;
2256 pBp->Pub.u.Sw.GCPtr = pAddress->FlatPtr;
2257
2258 /* Add the breakpoint to the lookup tables. */
2259 rc = dbgfR3BpInt3Add(pUVM, hBp, pBp);
2260 if (RT_SUCCESS(rc))
2261 {
2262 /* Enable the breakpoint if requested. */
2263 if (fFlags & DBGF_BP_F_ENABLED)
2264 rc = dbgfR3BpArm(pUVM, hBp, pBp);
2265 if (RT_SUCCESS(rc))
2266 {
2267 *phBp = hBp;
2268 return VINF_SUCCESS;
2269 }
2270
2271 int rc2 = dbgfR3BpInt3Remove(pUVM, hBp, pBp); AssertRC(rc2);
2272 }
2273
2274 dbgfR3BpFree(pUVM, hBp, pBp);
2275 }
2276 }
2277
2278 return rc;
2279}
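
/*
 * Example (informal sketch): planting an int3 breakpoint that is armed right away.
 * The flat address, owner handle and user data are placeholders, and DBGFR3AddrFromFlat()
 * is the usual address helper assumed to be available from the DBGF address API.
 *
 *  DBGFADDRESS Addr;
 *  DBGFR3AddrFromFlat(pUVM, &Addr, 0x00401000);
 *  VMCPUID const idSrcCpu = 0;
 *  DBGFBP hBp = NIL_DBGFBP;
 *  int rc = DBGFR3BpSetInt3Ex(pUVM, hBpOwner, pvUser, idSrcCpu, &Addr,
 *                             DBGF_BP_F_DEFAULT | DBGF_BP_F_ENABLED, 0, UINT64_MAX, &hBp);
 *  // rc == VINF_DBGF_BP_ALREADY_EXIST indicates an identical breakpoint was already registered.
 */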
2280
2281
2282/**
2283 * Sets a register breakpoint.
2284 *
2285 * @returns VBox status code.
2286 * @param pUVM The user mode VM handle.
2287 * @param pAddress The address of the breakpoint.
2288 * @param iHitTrigger The hit count at which the breakpoint starts triggering.
2289 * Use 0 (or 1) if it's gonna trigger at once.
2290 * @param iHitDisable The hit count which disables the breakpoint.
2291 * Use ~(uint64_t)0 if it's never gonna be disabled.
2292 * @param fType The access type (one of the X86_DR7_RW_* defines).
2293 * @param cb The access size - 1, 2, 4 or 8 (the latter is AMD64 long mode only).
2294 * Must be 1 if fType is X86_DR7_RW_EO.
2295 * @param phBp Where to store the breakpoint handle.
2296 *
2297 * @thread Any thread.
2298 */
2299VMMR3DECL(int) DBGFR3BpSetReg(PUVM pUVM, PCDBGFADDRESS pAddress, uint64_t iHitTrigger,
2300 uint64_t iHitDisable, uint8_t fType, uint8_t cb, PDBGFBP phBp)
2301{
2302 return DBGFR3BpSetRegEx(pUVM, NIL_DBGFBPOWNER, NULL /*pvUser*/, pAddress,
2303 DBGF_BP_F_DEFAULT, iHitTrigger, iHitDisable, fType, cb, phBp);
2304}
2305
2306
2307/**
2308 * Sets a register breakpoint - extended version.
2309 *
2310 * @returns VBox status code.
2311 * @param pUVM The user mode VM handle.
2312 * @param hOwner The owner handle, use NIL_DBGFBPOWNER if no special owner attached.
2313 * @param pvUser Opaque user data to pass in the owner callback.
2314 * @param pAddress The address of the breakpoint.
2315 * @param fFlags Combination of DBGF_BP_F_XXX.
2316 * @param iHitTrigger The hit count at which the breakpoint starts triggering.
2317 * Use 0 (or 1) if it's gonna trigger at once.
2318 * @param iHitDisable The hit count which disables the breakpoint.
2319 * Use ~(uint64_t)0 if it's never gonna be disabled.
2320 * @param fType The access type (one of the X86_DR7_RW_* defines).
2321 * @param cb The access size - 1, 2, 4 or 8 (the latter is AMD64 long mode only).
2322 * Must be 1 if fType is X86_DR7_RW_EO.
2323 * @param phBp Where to store the breakpoint handle.
2324 *
2325 * @thread Any thread.
2326 */
2327VMMR3DECL(int) DBGFR3BpSetRegEx(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser,
2328 PCDBGFADDRESS pAddress, uint16_t fFlags,
2329 uint64_t iHitTrigger, uint64_t iHitDisable,
2330 uint8_t fType, uint8_t cb, PDBGFBP phBp)
2331{
2332 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2333 AssertReturn(hOwner != NIL_DBGFBPOWNER || pvUser == NULL, VERR_INVALID_PARAMETER);
2334 AssertReturn(DBGFR3AddrIsValid(pUVM, pAddress), VERR_INVALID_PARAMETER);
2335 AssertReturn(iHitTrigger <= iHitDisable, VERR_INVALID_PARAMETER);
2336 AssertReturn(cb > 0 && cb <= 8 && RT_IS_POWER_OF_TWO(cb), VERR_INVALID_PARAMETER);
2337 AssertPtrReturn(phBp, VERR_INVALID_POINTER);
2338 switch (fType)
2339 {
2340 case X86_DR7_RW_EO:
2341 AssertMsgReturn(cb == 1, ("fType=%#x cb=%d != 1\n", fType, cb), VERR_INVALID_PARAMETER);
2342 break;
2343 case X86_DR7_RW_IO:
2344 case X86_DR7_RW_RW:
2345 case X86_DR7_RW_WO:
2346 break;
2347 default:
2348 AssertMsgFailedReturn(("fType=%#x\n", fType), VERR_INVALID_PARAMETER);
2349 }
2350
2351 int rc = dbgfR3BpEnsureInit(pUVM);
2352 AssertRCReturn(rc, rc);
2353
2354 /*
2355 * Check if we've already got a matching breakpoint for that address.
2356 */
2357 PDBGFBPINT pBp = NULL;
2358 DBGFBP hBp = dbgfR3BpGetByAddr(pUVM, DBGFBPTYPE_REG, pAddress->FlatPtr, &pBp);
2359 if ( hBp != NIL_DBGFBP
2360 && pBp->Pub.u.Reg.cb == cb
2361 && pBp->Pub.u.Reg.fType == fType)
2362 {
2363 rc = VINF_SUCCESS;
2364 if (!DBGF_BP_PUB_IS_ENABLED(&pBp->Pub) && (fFlags & DBGF_BP_F_ENABLED))
2365 rc = dbgfR3BpArm(pUVM, hBp, pBp);
2366 /* else: We don't disable it when DBGF_BP_F_ENABLED isn't given. */
2367 if (RT_SUCCESS(rc))
2368 {
2369 rc = VINF_DBGF_BP_ALREADY_EXIST;
2370 *phBp = hBp;
2371 }
2372 return rc;
2373 }
2374
2375 /*
2376 * Allocate new breakpoint.
2377 */
2378 rc = dbgfR3BpAlloc(pUVM, hOwner, pvUser, DBGFBPTYPE_REG, fFlags, iHitTrigger, iHitDisable, &hBp, &pBp);
2379 if (RT_SUCCESS(rc))
2380 {
2381 pBp->Pub.u.Reg.GCPtr = pAddress->FlatPtr;
2382 pBp->Pub.u.Reg.fType = fType;
2383 pBp->Pub.u.Reg.cb = cb;
2384 pBp->Pub.u.Reg.iReg = UINT8_MAX;
2385 ASMCompilerBarrier();
2386
2387 /* Assign the proper hardware breakpoint. */
2388 rc = dbgfR3BpRegAssign(pUVM->pVM, hBp, pBp);
2389 if (RT_SUCCESS(rc))
2390 {
2391 /* Arm the breakpoint. */
2392 if (fFlags & DBGF_BP_F_ENABLED)
2393 rc = dbgfR3BpArm(pUVM, hBp, pBp);
2394 if (RT_SUCCESS(rc))
2395 {
2396 *phBp = hBp;
2397 return VINF_SUCCESS;
2398 }
2399
2400 int rc2 = dbgfR3BpRegRemove(pUVM->pVM, hBp, pBp);
2401 AssertRC(rc2); RT_NOREF(rc2);
2402 }
2403
2404 dbgfR3BpFree(pUVM, hBp, pBp);
2405 }
2406
2407 return rc;
2408}
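
/*
 * Example (informal sketch): a 4 byte hardware write breakpoint using the plain wrapper above;
 * the flat address is a placeholder and DBGFR3AddrFromFlat() is the assumed address helper.
 *
 *  DBGFADDRESS Addr;
 *  DBGFR3AddrFromFlat(pUVM, &Addr, 0x00500000);
 *  DBGFBP hBp = NIL_DBGFBP;
 *  int rc = DBGFR3BpSetReg(pUVM, &Addr, 0, UINT64_MAX, X86_DR7_RW_WO, 4, &hBp);
 *
 * Only RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints) hardware slots exist, so dbgfR3BpRegAssign()
 * can fail when all debug registers are already taken.
 */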
2409
2410
2411/**
2412 * This is only kept for now so as not to mess with the debugger implementation at this point;
2413 * recompiler breakpoints are not supported anymore (IEM has some API but it isn't implemented
2414 * and should probably be merged with the DBGF breakpoints).
2415 */
2416VMMR3DECL(int) DBGFR3BpSetREM(PUVM pUVM, PCDBGFADDRESS pAddress, uint64_t iHitTrigger,
2417 uint64_t iHitDisable, PDBGFBP phBp)
2418{
2419 RT_NOREF(pUVM, pAddress, iHitTrigger, iHitDisable, phBp);
2420 return VERR_NOT_SUPPORTED;
2421}
2422
2423
2424/**
2425 * Sets an I/O port breakpoint.
2426 *
2427 * @returns VBox status code.
2428 * @param pUVM The user mode VM handle.
2429 * @param uPort The first I/O port.
2430 * @param cPorts The number of I/O ports in the range.
2431 * @param fAccess The access we want to break on, see DBGFBPIOACCESS_XXX.
2432 * @param iHitTrigger The hit count at which the breakpoint starts
2433 * triggering. Use 0 (or 1) if it's gonna trigger at
2434 * once.
2435 * @param iHitDisable The hit count which disables the breakpoint.
2436 * Use ~(uint64_t)0 if it's never gonna be disabled.
2437 * @param phBp Where to store the breakpoint handle.
2438 *
2439 * @thread Any thread.
2440 */
2441VMMR3DECL(int) DBGFR3BpSetPortIo(PUVM pUVM, RTIOPORT uPort, RTIOPORT cPorts, uint32_t fAccess,
2442 uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
2443{
2444 return DBGFR3BpSetPortIoEx(pUVM, NIL_DBGFBPOWNER, NULL /*pvUser*/, uPort, cPorts, fAccess,
2445 DBGF_BP_F_DEFAULT, iHitTrigger, iHitDisable, phBp);
2446}
2447
2448
2449/**
2450 * Sets an I/O port breakpoint - extended version.
2451 *
2452 * @returns VBox status code.
2453 * @param pUVM The user mode VM handle.
2454 * @param hOwner The owner handle, use NIL_DBGFBPOWNER if no special owner attached.
2455 * @param pvUser Opaque user data to pass in the owner callback.
2456 * @param uPort The first I/O port.
2457 * @param cPorts The number of I/O ports in the range.
2458 * @param fAccess The access we want to break on, see DBGFBPIOACCESS_XXX.
2459 * @param fFlags Combination of DBGF_BP_F_XXX.
2460 * @param iHitTrigger The hit count at which the breakpoint starts
2461 * triggering. Use 0 (or 1) if it's gonna trigger at
2462 * once.
2463 * @param iHitDisable The hit count which disables the breakpoint.
2464 * Use ~(uint64_t)0 if it's never gonna be disabled.
2465 * @param phBp Where to store the breakpoint handle.
2466 *
2467 * @thread Any thread.
2468 */
2469VMMR3DECL(int) DBGFR3BpSetPortIoEx(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser,
2470 RTIOPORT uPort, RTIOPORT cPorts, uint32_t fAccess,
2471 uint32_t fFlags, uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
2472{
2473 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2474 AssertReturn(hOwner != NIL_DBGFBPOWNER || pvUser == NULL, VERR_INVALID_PARAMETER);
2475 AssertReturn(!(fAccess & ~DBGFBPIOACCESS_VALID_MASK_PORT_IO), VERR_INVALID_FLAGS);
2476 AssertReturn(fAccess, VERR_INVALID_FLAGS);
2477 AssertReturn(!(fFlags & ~DBGF_BP_F_VALID_MASK), VERR_INVALID_FLAGS);
2478 AssertReturn(fFlags, VERR_INVALID_FLAGS);
2479 AssertReturn(iHitTrigger <= iHitDisable, VERR_INVALID_PARAMETER);
2480 AssertPtrReturn(phBp, VERR_INVALID_POINTER);
2481 AssertReturn(cPorts > 0, VERR_OUT_OF_RANGE);
2482 AssertReturn((RTIOPORT)(uPort + (cPorts - 1)) >= uPort, VERR_OUT_OF_RANGE);
2483
2484 int rc = dbgfR3BpPortIoEnsureInit(pUVM);
2485 AssertRCReturn(rc, rc);
2486
2487 PDBGFBPINT pBp = NULL;
2488 DBGFBP hBp = dbgfR3BpPortIoGetByRange(pUVM, uPort, cPorts, &pBp);
2489 if ( hBp != NIL_DBGFBP
2490 && pBp->Pub.u.PortIo.uPort == uPort
2491 && pBp->Pub.u.PortIo.cPorts == cPorts
2492 && pBp->Pub.u.PortIo.fAccess == fAccess)
2493 {
2494 rc = VINF_SUCCESS;
2495 if (!DBGF_BP_PUB_IS_ENABLED(&pBp->Pub))
2496 rc = dbgfR3BpArm(pUVM, hBp, pBp);
2497 if (RT_SUCCESS(rc))
2498 {
2499 rc = VINF_DBGF_BP_ALREADY_EXIST;
2500 *phBp = hBp;
2501 }
2502 return rc;
2503 }
2504
2505 rc = dbgfR3BpAlloc(pUVM, hOwner, pvUser, DBGFBPTYPE_PORT_IO, fFlags, iHitTrigger, iHitDisable, &hBp, &pBp);
2506 if (RT_SUCCESS(rc))
2507 {
2508 pBp->Pub.u.PortIo.uPort = uPort;
2509 pBp->Pub.u.PortIo.cPorts = cPorts;
2510 pBp->Pub.u.PortIo.fAccess = fAccess;
2511
2512 /* Add the breakpoint to the lookup tables. */
2513 rc = dbgfR3BpPortIoAdd(pUVM, hBp, pBp);
2514 if (RT_SUCCESS(rc))
2515 {
2516 /* Enable the breakpoint if requested. */
2517 if (fFlags & DBGF_BP_F_ENABLED)
2518 rc = dbgfR3BpArm(pUVM, hBp, pBp);
2519 if (RT_SUCCESS(rc))
2520 {
2521 *phBp = hBp;
2522 return VINF_SUCCESS;
2523 }
2524
2525 int rc2 = dbgfR3BpPortIoRemove(pUVM, hBp, pBp); AssertRC(rc2);
2526 }
2527
2528 dbgfR3BpFree(pUVM, hBp, pBp);
2529 }
2530
2531 return rc;
2532}
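
/*
 * Example (informal sketch): breaking on guest writes to the first serial port's I/O range.
 * The port constants are placeholders and the DBGFBPIOACCESS_WRITE flag is assumed to be the
 * usual write-access flag from dbgf.h.
 *
 *  RTIOPORT const uPort  = 0x3f8;   // COM1 base
 *  RTIOPORT const cPorts = 8;
 *  DBGFBP hBp = NIL_DBGFBP;
 *  int rc = DBGFR3BpSetPortIo(pUVM, uPort, cPorts, DBGFBPIOACCESS_WRITE, 0, UINT64_MAX, &hBp);
 */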
2533
2534
2535/**
2536 * Sets a memory mapped I/O breakpoint.
2537 *
2538 * @returns VBox status code.
2539 * @param pUVM The user mode VM handle.
2540 * @param GCPhys The first MMIO address.
2541 * @param cb The size of the MMIO range to break on.
2542 * @param fAccess The access we want to break on.
2543 * @param iHitTrigger The hit count at which the breakpoint starts
2544 * triggering. Use 0 (or 1) if it's gonna trigger at
2545 * once.
2546 * @param iHitDisable The hit count which disables the breakpoint.
2547 * Use ~(uint64_t)0 if it's never gonna be disabled.
2548 * @param phBp Where to store the breakpoint handle.
2549 *
2550 * @thread Any thread.
2551 */
2552VMMR3DECL(int) DBGFR3BpSetMmio(PUVM pUVM, RTGCPHYS GCPhys, uint32_t cb, uint32_t fAccess,
2553 uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
2554{
2555 return DBGFR3BpSetMmioEx(pUVM, NIL_DBGFBPOWNER, NULL /*pvUser*/, GCPhys, cb, fAccess,
2556 DBGF_BP_F_DEFAULT, iHitTrigger, iHitDisable, phBp);
2557}
2558
2559
2560/**
2561 * Sets a memory mapped I/O breakpoint - extended version.
2562 *
2563 * @returns VBox status code.
2564 * @param pUVM The user mode VM handle.
2565 * @param hOwner The owner handle, use NIL_DBGFBPOWNER if no special owner attached.
2566 * @param pvUser Opaque user data to pass in the owner callback.
2567 * @param GCPhys The first MMIO address.
2568 * @param cb The size of the MMIO range to break on.
2569 * @param fAccess The access we want to break on.
2570 * @param fFlags Combination of DBGF_BP_F_XXX.
2571 * @param iHitTrigger The hit count at which the breakpoint starts
2572 * triggering. Use 0 (or 1) if it's gonna trigger at
2573 * once.
2574 * @param iHitDisable The hit count which disables the breakpoint.
2575 * Use ~(uint64_t)0 if it's never gonna be disabled.
2576 * @param phBp Where to store the breakpoint handle.
2577 *
2578 * @thread Any thread.
2579 */
2580VMMR3DECL(int) DBGFR3BpSetMmioEx(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser,
2581 RTGCPHYS GCPhys, uint32_t cb, uint32_t fAccess,
2582 uint32_t fFlags, uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
2583{
2584 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2585 AssertReturn(hOwner != NIL_DBGFBPOWNER || pvUser == NULL, VERR_INVALID_PARAMETER);
2586 AssertReturn(!(fAccess & ~DBGFBPIOACCESS_VALID_MASK_MMIO), VERR_INVALID_FLAGS);
2587 AssertReturn(fAccess, VERR_INVALID_FLAGS);
2588 AssertReturn(!(fFlags & ~DBGF_BP_F_VALID_MASK), VERR_INVALID_FLAGS);
2589 AssertReturn(fFlags, VERR_INVALID_FLAGS);
2590 AssertReturn(iHitTrigger <= iHitDisable, VERR_INVALID_PARAMETER);
2591 AssertPtrReturn(phBp, VERR_INVALID_POINTER);
2592 AssertReturn(cb, VERR_OUT_OF_RANGE);
2593 AssertReturn(GCPhys + cb > GCPhys, VERR_OUT_OF_RANGE);
2594
2595 int rc = dbgfR3BpEnsureInit(pUVM);
2596 AssertRCReturn(rc, rc);
2597
2598 return VERR_NOT_IMPLEMENTED;
2599}
2600
2601
2602/**
2603 * Clears a breakpoint.
2604 *
2605 * @returns VBox status code.
2606 * @param pUVM The user mode VM handle.
2607 * @param hBp The handle of the breakpoint which should be removed (cleared).
2608 *
2609 * @thread Any thread.
2610 */
2611VMMR3DECL(int) DBGFR3BpClear(PUVM pUVM, DBGFBP hBp)
2612{
2613 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2614 AssertReturn(hBp != NIL_DBGFBP, VERR_INVALID_HANDLE);
2615
2616 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
2617 AssertPtrReturn(pBp, VERR_DBGF_BP_NOT_FOUND);
2618
2619 /* Disarm the breakpoint when it is enabled. */
2620 if (DBGF_BP_PUB_IS_ENABLED(&pBp->Pub))
2621 {
2622 int rc = dbgfR3BpDisarm(pUVM, hBp, pBp);
2623 AssertRC(rc);
2624 }
2625
2626 switch (DBGF_BP_PUB_GET_TYPE(&pBp->Pub))
2627 {
2628 case DBGFBPTYPE_REG:
2629 {
2630 int rc = dbgfR3BpRegRemove(pUVM->pVM, hBp, pBp);
2631 AssertRC(rc);
2632 break;
2633 }
2634 case DBGFBPTYPE_SOFTWARE:
2635 {
2636 int rc = dbgfR3BpInt3Remove(pUVM, hBp, pBp);
2637 AssertRC(rc);
2638 break;
2639 }
2640 case DBGFBPTYPE_PORT_IO:
2641 {
2642 int rc = dbgfR3BpPortIoRemove(pUVM, hBp, pBp);
2643 AssertRC(rc);
2644 break;
2645 }
2646 default:
2647 break;
2648 }
2649
2650 dbgfR3BpFree(pUVM, hBp, pBp);
2651 return VINF_SUCCESS;
2652}
2653
2654
2655/**
2656 * Enables a breakpoint.
2657 *
2658 * @returns VBox status code.
2659 * @param pUVM The user mode VM handle.
2660 * @param hBp The handle of the breakpoint which should be enabled.
2661 *
2662 * @thread Any thread.
2663 */
2664VMMR3DECL(int) DBGFR3BpEnable(PUVM pUVM, DBGFBP hBp)
2665{
2666 /*
2667 * Validate the input.
2668 */
2669 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2670 AssertReturn(hBp != NIL_DBGFBP, VERR_INVALID_HANDLE);
2671
2672 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
2673 AssertPtrReturn(pBp, VERR_DBGF_BP_NOT_FOUND);
2674
2675 int rc;
2676 if (!DBGF_BP_PUB_IS_ENABLED(&pBp->Pub))
2677 rc = dbgfR3BpArm(pUVM, hBp, pBp);
2678 else
2679 rc = VINF_DBGF_BP_ALREADY_ENABLED;
2680
2681 return rc;
2682}
2683
2684
2685/**
2686 * Disables a breakpoint.
2687 *
2688 * @returns VBox status code.
2689 * @param pUVM The user mode VM handle.
2690 * @param hBp The handle of the breakpoint which should be disabled.
2691 *
2692 * @thread Any thread.
2693 */
2694VMMR3DECL(int) DBGFR3BpDisable(PUVM pUVM, DBGFBP hBp)
2695{
2696 /*
2697 * Validate the input.
2698 */
2699 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2700 AssertReturn(hBp != NIL_DBGFBP, VERR_INVALID_HANDLE);
2701
2702 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
2703 AssertPtrReturn(pBp, VERR_DBGF_BP_NOT_FOUND);
2704
2705 int rc;
2706 if (DBGF_BP_PUB_IS_ENABLED(&pBp->Pub))
2707 rc = dbgfR3BpDisarm(pUVM, hBp, pBp);
2708 else
2709 rc = VINF_DBGF_BP_ALREADY_DISABLED;
2710
2711 return rc;
2712}
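
/*
 * Example (informal sketch): typical life cycle of a handle obtained from one of the
 * DBGFR3BpSetXxx() calls above.
 *
 *  rc = DBGFR3BpDisable(pUVM, hBp);    // disarm; returns VINF_DBGF_BP_ALREADY_DISABLED if it wasn't armed
 *  rc = DBGFR3BpEnable(pUVM, hBp);     // re-arm it again
 *  rc = DBGFR3BpClear(pUVM, hBp);      // disarm if necessary, drop it from the lookup tables and free the handle
 */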
2713
2714
2715/**
2716 * Enumerate the breakpoints.
2717 *
2718 * @returns VBox status code.
2719 * @param pUVM The user mode VM handle.
2720 * @param pfnCallback The callback function.
2721 * @param pvUser The user argument to pass to the callback.
2722 *
2723 * @thread Any thread.
2724 */
2725VMMR3DECL(int) DBGFR3BpEnum(PUVM pUVM, PFNDBGFBPENUM pfnCallback, void *pvUser)
2726{
2727 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2728
2729 for (uint32_t idChunk = 0; idChunk < RT_ELEMENTS(pUVM->dbgf.s.aBpChunks); idChunk++)
2730 {
2731 PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[idChunk];
2732
2733 if (pBpChunk->idChunk == DBGF_BP_CHUNK_ID_INVALID)
2734 break; /* Stop here as the first unallocated chunk means no chunk after it is allocated either. */
2735
2736 if (pBpChunk->cBpsFree < DBGF_BP_COUNT_PER_CHUNK)
2737 {
2738 /* Scan the bitmap for allocated entries. */
2739 int32_t iAlloc = ASMBitFirstSet(pBpChunk->pbmAlloc, DBGF_BP_COUNT_PER_CHUNK);
2740 if (iAlloc != -1)
2741 {
2742 do
2743 {
2744 DBGFBP hBp = DBGF_BP_HND_CREATE(idChunk, (uint32_t)iAlloc);
2745 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
2746
2747 /* Make a copy of the breakpoint's public data to have a consistent view. */
2748 DBGFBPPUB BpPub;
2749 BpPub.cHits = ASMAtomicReadU64((volatile uint64_t *)&pBp->Pub.cHits);
2750 BpPub.iHitTrigger = ASMAtomicReadU64((volatile uint64_t *)&pBp->Pub.iHitTrigger);
2751 BpPub.iHitDisable = ASMAtomicReadU64((volatile uint64_t *)&pBp->Pub.iHitDisable);
2752 BpPub.hOwner = ASMAtomicReadU32((volatile uint32_t *)&pBp->Pub.hOwner);
2753 BpPub.u16Type = ASMAtomicReadU16((volatile uint16_t *)&pBp->Pub.u16Type); /* Actually constant. */
2754 BpPub.fFlags = ASMAtomicReadU16((volatile uint16_t *)&pBp->Pub.fFlags);
2755 memcpy(&BpPub.u, &pBp->Pub.u, sizeof(pBp->Pub.u)); /* Is constant after allocation. */
2756
2757 /* Check if a removal raced us. */
2758 if (ASMBitTest(pBpChunk->pbmAlloc, iAlloc))
2759 {
2760 int rc = pfnCallback(pUVM, pvUser, hBp, &BpPub);
2761 if (RT_FAILURE(rc) || rc == VINF_CALLBACK_RETURN)
2762 return rc;
2763 }
2764
2765 iAlloc = ASMBitNextSet(pBpChunk->pbmAlloc, DBGF_BP_COUNT_PER_CHUNK, iAlloc);
2766 } while (iAlloc != -1);
2767 }
2768 }
2769 }
2770
2771 return VINF_SUCCESS;
2772}
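
/*
 * Example (informal sketch): dumping all breakpoints with their hit counts. The callback
 * shape follows the pfnCallback invocation above; names are illustrative only.
 *
 *  static DECLCALLBACK(int) myBpEnumCallback(PUVM pUVM, void *pvUser, DBGFBP hBp, PCDBGFBPPUB pBpPub)
 *  {
 *      RT_NOREF(pUVM, pvUser);
 *      LogRel(("bp %#x: %RU64 hits\n", hBp, pBpPub->cHits));
 *      return VINF_SUCCESS;            // VINF_CALLBACK_RETURN stops the enumeration early.
 *  }
 *
 *  int rc = DBGFR3BpEnum(pUVM, myBpEnumCallback, NULL);
 */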
2773
2774
2775/**
2776 * Called whenever a breakpoint event needs to be serviced in ring-3 to decide what to do.
2777 *
2778 * @returns VBox status code.
2779 * @param pVM The cross context VM structure.
2780 * @param pVCpu The vCPU the breakpoint event happened on.
2781 *
2782 * @thread EMT
2783 */
2784VMMR3_INT_DECL(int) DBGFR3BpHit(PVM pVM, PVMCPU pVCpu)
2785{
2786 /* Should the owner callback be invoked first, or should the event go straight to the debugger? */
2787 if (pVCpu->dbgf.s.fBpInvokeOwnerCallback)
2788 {
2789 DBGFBP hBp = pVCpu->dbgf.s.hBpActive;
2790 pVCpu->dbgf.s.fBpInvokeOwnerCallback = false;
2791
2792 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pVM->pUVM, hBp);
2793 AssertReturn(pBp, VERR_DBGF_BP_IPE_9);
2794
2795 /* Resolve owner (can be NIL_DBGFBPOWNER) and invoke callback if there is one. */
2796 if (pBp->Pub.hOwner != NIL_DBGFBPOWNER)
2797 {
2798 PCDBGFBPOWNERINT pBpOwner = dbgfR3BpOwnerGetByHnd(pVM->pUVM, pBp->Pub.hOwner);
2799 if (pBpOwner)
2800 {
2801 VBOXSTRICTRC rcStrict = dbgfR3BpHit(pVM, pVCpu, hBp, pBp, pBpOwner);
2802 if (VBOXSTRICTRC_VAL(rcStrict) == VINF_SUCCESS)
2803 {
2804 pVCpu->dbgf.s.hBpActive = NIL_DBGFBP;
2805 return VINF_SUCCESS;
2806 }
2807 if (VBOXSTRICTRC_VAL(rcStrict) != VINF_DBGF_BP_HALT) /* Guru meditation. */
2808 return VERR_DBGF_BP_OWNER_CALLBACK_WRONG_STATUS;
2809 /* else: Halt in the debugger. */
2810 }
2811 }
2812 }
2813
2814 return DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
2815}
2816