VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/DBGFR3Bp.cpp@107200

Last change on this file since 107200 was 107194, checked in by vboxsync, 2 months ago

VMM: More adjustments for VBOX_WITH_ONLY_PGM_NEM_MODE, VBOX_WITH_MINIMAL_R0, VBOX_WITH_HWVIRT and such. jiraref:VBP-1466

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 107.8 KB
1/* $Id: DBGFR3Bp.cpp 107194 2024-11-29 14:47:06Z vboxsync $ */
2/** @file
3 * DBGF - Debugger Facility, Breakpoint Management.
4 */
5
6/*
7 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_dbgf_bp DBGF - The Debugger Facility, Breakpoint Management
30 *
31 * The debugger facility's breakpoint manager's purpose is to efficiently manage
32 * large numbers of breakpoints for various use cases, for instance dtrace-like operations
33 * or execution flow tracing. Especially execution flow tracing can
34 * require thousands of breakpoints which need to be managed efficiently so as not to slow
35 * down guest operation too much. Before the rewrite started at the end of 2020, DBGF could
36 * only handle 32 breakpoints (+ 4 hardware assisted breakpoints). The new
37 * manager is supposed to be able to handle up to one million breakpoints.
38 *
39 * @see grp_dbgf
40 *
41 *
42 * @section sec_dbgf_bp_owner Breakpoint owners
43 *
44 * A single breakpoint owner has a mandatory ring-3 callback and an optional ring-0
45 * callback assigned, which are called whenever a breakpoint with that owner assigned is hit.
46 * The common part of the owner is managed by a single table mapped into both ring-0
47 * and ring-3, with the handle being the index into the table. This allows resolving
48 * the handle to the internal structure efficiently. Searching for a free entry is
49 * done using a bitmap indicating free and occupied entries. For the optional
50 * ring-0 owner part there is a separate ring-0 only table for security reasons.
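 *
 * A minimal sketch of what resolving an owner handle amounts to (illustrative only; the
 * real lookup is done by the dbgfR3BpOwnerGetByHnd() helper used further down in this file):
 * @verbatim
 *   // Handle == index: check the allocation bitmap, then index the shared owner table.
 *   PDBGFBPOWNERINT pBpOwner = NULL;
 *   if (   hBpOwner < DBGF_BP_OWNER_COUNT_MAX
 *       && ASMBitTest(pUVM->dbgf.s.pbmBpOwnersAllocR3, hBpOwner))
 *       pBpOwner = &pUVM->dbgf.s.paBpOwnersR3[hBpOwner];
 * @endverbatim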
51 *
52 * The callback of the owner can be used to gather and log guest state information
53 * and decide whether to continue guest execution or stop and drop into the debugger.
54 * Breakpoints which don't have an owner assigned will always drop the VM right into
55 * the debugger.
56 *
57 *
58 * @section sec_dbgf_bp_bps Breakpoints
59 *
60 * Breakpoints are referenced by an opaque handle which acts as an index into a global table
61 * mapped into ring-3 and ring-0. Each entry contains the necessary state to manage the breakpoint
62 * like trigger conditions, type, owner, etc. If an owner is given, an optional opaque user argument
63 * can be supplied which is passed to the respective owner callback. For owners with ring-0 callbacks
64 * a dedicated ring-0 table is kept for storing the optional ring-0 user arguments.
65 *
66 * To keep memory consumption under control and still support large numbers of
67 * breakpoints, the table is split into fixed-size chunks, and the chunk index and the index
68 * into the chunk can be derived from the handle with only a few logical operations (sketched below).
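 *
 * A rough sketch of the handle decomposition; the real bit layout is defined by the
 * DBGF_BP_HND_GET_CHUNK_ID()/DBGF_BP_HND_GET_ENTRY() macros, the 16-bit split shown here is
 * merely an illustrative assumption based on the 65536 breakpoints-per-chunk figure below:
 * @verbatim
 *   // Illustrative only -- always use the DBGF_BP_HND_* macros in real code.
 *   uint32_t const idChunk  = hBp >> 16;               // which chunk of the global table
 *   uint32_t const idxEntry = hBp & UINT32_C(0xffff);  // slot within that chunk
 *   PDBGFBPINT     pBp      = &pUVM->dbgf.s.aBpChunks[idChunk].pBpBaseR3[idxEntry];
 * @endverbatim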
69 *
70 *
71 * @section sec_dbgf_bp_resolv Resolving breakpoint addresses
72 *
73 * Whenever a \#BP(0) event is triggered DBGF needs to decide whether the event originated
74 * from within the guest or whether a DBGF breakpoint caused it. This has to happen as fast
75 * as possible. The following scheme is employed to achieve this:
76 *
77 * @verbatim
78 *                        7   6   5   4   3   2   1   0
79 *                      +---+---+---+---+---+---+---+---+
80 *                      |   |   |   |   |   |   |   |   |  BP address
81 *                      +---+---+---+---+---+---+---+---+
82 *                       \_____________________/ \_____/
83 *                                  |               |
84 *                                  |               +-------------+
85 *                                  |                             |
86 *      BP table                    |                             v
87 *     +------------+               |                       +-----------+
88 *     |   hBp 0    |               |          X <--------- | 0 | xxxxx |
89 *     |   hBp 1    | <-------------+---------------------- | 1 | hBp 1 |
90 *     |            |               |                 +---- | 2 | idxL2 |
91 *     |   hBp <m>  | <---+         v                 |     |...|  ...  |
92 *     |            |     |  +-----------+            |     |...|  ...  |
93 *     |            |     |  |           |            |     |...|  ...  |
94 *     |   hBp <n>  | <-+ +--| +> leaf   |            |     |     .     |
95 *     |            |   |    | |         |            |     |     .     |
96 *     |            |   |    | + root +  | <----------+     |     .     |
97 *     |            |   |    |        |  |                  +-----------+
98 *     |            |   +----|   leaf<+  |                    L1: 65536
99 *     |     .      |        |     .     |
100 *    |     .      |        |     .     |
101 *    |     .      |        |     .     |
102 *    +------------+        +-----------+
103 *                            L2 idx BST
104 * @endverbatim
105 *
106 * -# Take the lowest 16 bits of the breakpoint address and use it as a direct index
107 * into the L1 table. The L1 table is contiguous and consists of 4-byte entries
108 * resulting in 256KiB of memory used. The topmost 4 bits indicate how to proceed
109 * and the meaning of the remaining 28 bits depends on them:
110 * - A type 0 entry means no breakpoint is registered with the matching lowest 16 bits,
111 * so forward the event to the guest.
112 * - A 1 in the topmost 4 bits means that the remaining 28 bits directly denote a breakpoint
113 * handle which can be resolved by extracting the chunk index and the index into the chunk
114 * of the global breakpoint table. If the address matches, the breakpoint is processed
115 * according to its configuration. Otherwise the event is again forwarded to the guest.
116 * - A 2 in the topmost 4 bits means that there are multiple breakpoints registered
117 * matching the lowest 16 bits and the search must continue in the L2 table with the
118 * remaining 28 bits acting as an index into the L2 table denoting the search root.
119 * -# The L2 table consists of multiple index-based binary search trees, one for each such reference
120 * from the L1 table. The key for the tree is formed from the upper 6 bytes of the breakpoint address
121 * used for searching. The tree is traversed until either a matching address is found and
122 * the breakpoint is processed, or the event is forwarded to the guest if the search isn't successful.
123 * Each entry in the L2 table is 16 bytes in size and densely packed to avoid excessive memory usage (see the lookup sketch below).
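 *
 * A condensed sketch of the lookup steps above (illustrative; the real code uses the
 * DBGF_BP_INT3_L1_* macros, see for example dbgfR3BpGetByAddr() further down):
 * @verbatim
 *   uint16_t const idxL1    = DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(GCPtrBp); // lowest 16 bits
 *   uint32_t const u32Entry = ASMAtomicReadU32(&paBpLocL1[idxL1]);            // 4 byte L1 entry
 *   uint8_t  const u8Type   = DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32Entry);       // topmost 4 bits
 *   if (u32Entry == DBGF_BP_INT3_L1_ENTRY_TYPE_NULL)
 *       forwardToGuest();                                   // 0: nothing registered here
 *   else if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND)
 *       hBp = DBGF_BP_INT3_L1_ENTRY_GET_BP_HND(u32Entry);   // 1: remaining 28 bits = handle
 *   else if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX)
 *       idxL2 = DBGF_BP_INT3_L1_ENTRY_GET_L2_IDX(u32Entry); // 2: remaining 28 bits = L2 root
 * @endverbatim
 * (forwardToGuest() and paBpLocL1 are placeholders for the real, context-specific variants.)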
124 *
125 * @section sec_dbgf_bp_ioport Handling I/O port breakpoints
126 *
127 * Because of the limited number of I/O ports available (65536), a single table with 65536 entries,
128 * each 4 bytes in size, is allocated. This amounts to an additional 256KiB of memory used as soon as
129 * an I/O breakpoint is enabled. The entries contain the breakpoint handle directly, allowing only one breakpoint
130 * per port, which is a limitation we accept for now to keep things relatively simple.
131 * When there is at least one I/O breakpoint active, IOM is notified and will afterwards call the DBGF API
132 * whenever the guest does an I/O port access, to decide whether a breakpoint was hit. This keeps the overhead small
133 * when no I/O port breakpoint is enabled (see the sketch below).
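 *
 * A sketch of the per-access check this enables (illustrative; the real lookup sits behind
 * the DBGF API that IOM calls, paBpLocPortIo stands in for the mapped per-context table):
 * @verbatim
 *   uint32_t const u32Entry = ASMAtomicReadU32(&paBpLocPortIo[uPort]); // one 4 byte entry per port
 *   if (u32Entry != DBGF_BP_INT3_L1_ENTRY_TYPE_NULL)
 *       hBp = DBGF_BP_INT3_L1_ENTRY_GET_BP_HND(u32Entry);              // entry holds the handle directly
 * @endverbatim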
134 *
135 * @section sec_dbgf_bp_note Random thoughts and notes for the implementation
136 *
137 * - The assumption for this approach is that the lowest 16 bits of the breakpoint address are
138 * hopefully the most varying ones across breakpoints, so the traversal
139 * can skip the L2 table in most cases. Even if the L2 table must be consulted, the
140 * individual trees should be quite shallow, resulting in low overhead when walking them
141 * (though only real world testing can confirm this assumption).
142 * - Index based tables and trees are used instead of pointers because the tables
143 * are always mapped into ring-0 and ring-3 with different base addresses.
144 * - Efficient breakpoint allocation is done by having a global bitmap indicating free
145 * and occupied breakpoint entries. The same applies to the L2 BST table.
146 * - Special care must be taken when modifying the L1 and L2 tables as other EMTs
147 * might still access them (the plan is to try a lockless approach first using
148 * atomic updates, falling back to locking if that turns out to be too difficult; see the sketch below).
149 * - Each BP entry is supposed to be 64 bytes in size and each chunk should contain 65536
150 * breakpoints, which results in 4MiB for each chunk plus the allocation bitmap.
151 * - ring-0 has to take special care when traversing the L2 BST to not run into cycles
152 * and must do strict bounds checking before accessing anything. The L1 and L2 tables
153 * are written to from ring-3 only. The same goes for the breakpoint table, with the
154 * exception being the opaque user argument for ring-0 which is stored in ring-0 only
155 * memory.
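 *
 * A sketch of the lockless L1 update mentioned above; this is essentially what
 * dbgfR3BpInt3Add() below does when claiming a free L1 slot:
 * @verbatim
 *   uint32_t const u32New = DBGF_BP_INT3_L1_ENTRY_CREATE_BP_HND(hBp);
 *   if (ASMAtomicCmpXchgU32(&paBpLocL1[idxL1], u32New, DBGF_BP_INT3_L1_ENTRY_TYPE_NULL))
 *       rc = VINF_SUCCESS;                  // won the race: the L1 entry now holds the handle directly
 *   else
 *       rc = dbgfR3BpInt3L2BstNodeAdd(pUVM, idxL1, hBp, GCPtr); // raced: grow/create the L2 BST instead
 * @endverbatim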
156 */
157
158
159/*********************************************************************************************************************************
160* Header Files *
161*********************************************************************************************************************************/
162#define LOG_GROUP LOG_GROUP_DBGF
163#define VMCPU_INCL_CPUM_GST_CTX
164#include <VBox/vmm/cpum.h>
165#include <VBox/vmm/dbgf.h>
166#include <VBox/vmm/selm.h>
167#include <VBox/vmm/iem.h>
168#include <VBox/vmm/mm.h>
169#include <VBox/vmm/iom.h>
170#include <VBox/vmm/hm.h>
171#include "DBGFInternal.h"
172#include <VBox/vmm/vm.h>
173#include <VBox/vmm/uvm.h>
174
175#include <VBox/err.h>
176#include <VBox/log.h>
177#include <iprt/assert.h>
178#include <iprt/mem.h>
179#if defined(VBOX_VMM_TARGET_ARMV8)
180# include <iprt/armv8.h>
181#endif
182
183#include "DBGFInline.h"
184
185
186/*********************************************************************************************************************************
187* Structures and Typedefs *
188*********************************************************************************************************************************/
189
190
191/*********************************************************************************************************************************
192* Internal Functions *
193*********************************************************************************************************************************/
194RT_C_DECLS_BEGIN
195RT_C_DECLS_END
196
197
198/**
199 * Initializes the breakpoint management.
200 *
201 * @returns VBox status code.
202 * @param pUVM The user mode VM handle.
203 */
204DECLHIDDEN(int) dbgfR3BpInit(PUVM pUVM)
205{
206 PVM pVM = pUVM->pVM;
207
208 //pUVM->dbgf.s.paBpOwnersR3 = NULL;
209 //pUVM->dbgf.s.pbmBpOwnersAllocR3 = NULL;
210
211 /* Init hardware breakpoint states. */
212 for (uint32_t i = 0; i < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); i++)
213 {
214 PDBGFBPHW pHwBp = &pVM->dbgf.s.aHwBreakpoints[i];
215
216 AssertCompileSize(DBGFBP, sizeof(uint32_t));
217 pHwBp->hBp = NIL_DBGFBP;
218 //pHwBp->fEnabled = false;
219 }
220
221 /* Now the global breakpoint table chunks. */
222 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpChunks); i++)
223 {
224 PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[i];
225
226 //pBpChunk->pBpBaseR3 = NULL;
227 //pBpChunk->pbmAlloc = NULL;
228 //pBpChunk->cBpsFree = 0;
229 pBpChunk->idChunk = DBGF_BP_CHUNK_ID_INVALID; /* Not allocated. */
230 }
231
232 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpL2TblChunks); i++)
233 {
234 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[i];
235
236 //pL2Chunk->pL2BaseR3 = NULL;
237 //pL2Chunk->pbmAlloc = NULL;
238 //pL2Chunk->cFree = 0;
239 pL2Chunk->idChunk = DBGF_BP_CHUNK_ID_INVALID; /* Not allocated. */
240 }
241
242 //pUVM->dbgf.s.paBpLocL1R3 = NULL;
243 //pUVM->dbgf.s.paBpLocPortIoR3 = NULL;
244 pUVM->dbgf.s.hMtxBpL2Wr = NIL_RTSEMFASTMUTEX;
245 return RTSemFastMutexCreate(&pUVM->dbgf.s.hMtxBpL2Wr);
246}
247
248
249/**
250 * Terminates the breakpoint management.
251 *
252 * @returns VBox status code.
253 * @param pUVM The user mode VM handle.
254 */
255DECLHIDDEN(int) dbgfR3BpTerm(PUVM pUVM)
256{
257 if (pUVM->dbgf.s.pbmBpOwnersAllocR3)
258 {
259 RTMemFree((void *)pUVM->dbgf.s.pbmBpOwnersAllocR3);
260 pUVM->dbgf.s.pbmBpOwnersAllocR3 = NULL;
261 }
262
263 /* Free all allocated chunk bitmaps (the chunks itself are destroyed during ring-0 VM destruction). */
264 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpChunks); i++)
265 {
266 PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[i];
267
268 if (pBpChunk->idChunk != DBGF_BP_CHUNK_ID_INVALID)
269 {
270 AssertPtr(pBpChunk->pbmAlloc);
271 RTMemFree((void *)pBpChunk->pbmAlloc);
272 pBpChunk->pbmAlloc = NULL;
273 pBpChunk->idChunk = DBGF_BP_CHUNK_ID_INVALID;
274 }
275 }
276
277 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpL2TblChunks); i++)
278 {
279 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[i];
280
281 if (pL2Chunk->idChunk != DBGF_BP_CHUNK_ID_INVALID)
282 {
283 AssertPtr(pL2Chunk->pbmAlloc);
284 RTMemFree((void *)pL2Chunk->pbmAlloc);
285 pL2Chunk->pbmAlloc = NULL;
286 pL2Chunk->idChunk = DBGF_BP_CHUNK_ID_INVALID;
287 }
288 }
289
290 if (pUVM->dbgf.s.hMtxBpL2Wr != NIL_RTSEMFASTMUTEX)
291 {
292 RTSemFastMutexDestroy(pUVM->dbgf.s.hMtxBpL2Wr);
293 pUVM->dbgf.s.hMtxBpL2Wr = NIL_RTSEMFASTMUTEX;
294 }
295
296 return VINF_SUCCESS;
297}
298
299
300/**
301 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
302 */
303static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpInitEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
304{
305 RT_NOREF(pvUser);
306
307 VMCPU_ASSERT_EMT(pVCpu);
308 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
309
310 /*
311 * The initialization will be done on EMT(0). It is possible that multiple
312 * initialization attempts are done because dbgfR3BpEnsureInit() can be called
313 * from racing non EMT threads when trying to set a breakpoint for the first time.
314 * Just fake success if the L1 is already present which means that a previous rendezvous
315 * successfully initialized the breakpoint manager.
316 */
317 PUVM pUVM = pVM->pUVM;
318 if ( pVCpu->idCpu == 0
319 && !pUVM->dbgf.s.paBpLocL1R3)
320 {
321#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
322 if (!SUPR3IsDriverless())
323 {
324 DBGFBPINITREQ Req;
325 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
326 Req.Hdr.cbReq = sizeof(Req);
327 Req.paBpLocL1R3 = NULL;
328 int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_INIT, 0 /*u64Arg*/, &Req.Hdr);
329 AssertLogRelMsgRCReturn(rc, ("VMMR0_DO_DBGF_BP_INIT failed: %Rrc\n", rc), rc);
330 pUVM->dbgf.s.paBpLocL1R3 = Req.paBpLocL1R3;
331 }
332 else
333#endif
334 {
335 /* Driverless: Do dbgfR0BpInitWorker here, ring-3 style. */
336 uint32_t const cbL1Loc = RT_ALIGN_32(UINT16_MAX * sizeof(uint32_t), HOST_PAGE_SIZE);
337 pUVM->dbgf.s.paBpLocL1R3 = (uint32_t *)RTMemPageAllocZ(cbL1Loc);
338 AssertLogRelMsgReturn(pUVM->dbgf.s.paBpLocL1R3, ("cbL1Loc=%#x\n", cbL1Loc), VERR_NO_PAGE_MEMORY);
339 }
340 }
341
342 return VINF_SUCCESS;
343}
344
345
346/**
347 * Ensures that the breakpoint manager is fully initialized.
348 *
349 * @returns VBox status code.
350 * @param pUVM The user mode VM handle.
351 *
352 * @thread Any thread.
353 */
354static int dbgfR3BpEnsureInit(PUVM pUVM)
355{
356 /* If the L1 lookup table is allocated initialization succeeded before. */
357 if (RT_LIKELY(pUVM->dbgf.s.paBpLocL1R3))
358 return VINF_SUCCESS;
359
360 /* Gather all EMTs and call into ring-0 to initialize the breakpoint manager. */
361 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpInitEmtWorker, NULL /*pvUser*/);
362}
363
364
365/**
366 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
367 */
368static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpPortIoInitEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
369{
370 RT_NOREF(pvUser);
371
372 VMCPU_ASSERT_EMT(pVCpu);
373 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
374
375 /*
376 * The initialization will be done on EMT(0). It is possible that multiple
377 * initialization attempts are done because dbgfR3BpPortIoEnsureInit() can be called
378 * from racing non EMT threads when trying to set a breakpoint for the first time.
379 * Just fake success if the L1 is already present which means that a previous rendezvous
380 * successfully initialized the breakpoint manager.
381 */
382 PUVM pUVM = pVM->pUVM;
383 if ( pVCpu->idCpu == 0
384 && !pUVM->dbgf.s.paBpLocPortIoR3)
385 {
386#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
387 if (!SUPR3IsDriverless())
388 {
389 DBGFBPINITREQ Req;
390 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
391 Req.Hdr.cbReq = sizeof(Req);
392 Req.paBpLocL1R3 = NULL;
393 int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_PORTIO_INIT, 0 /*u64Arg*/, &Req.Hdr);
394 AssertLogRelMsgRCReturn(rc, ("VMMR0_DO_DBGF_BP_PORTIO_INIT failed: %Rrc\n", rc), rc);
395 pUVM->dbgf.s.paBpLocPortIoR3 = Req.paBpLocL1R3;
396 }
397 else
398#endif
399 {
400 /* Driverless: Do dbgfR0BpPortIoInitWorker here, ring-3 style. */
401 uint32_t const cbPortIoLoc = RT_ALIGN_32(UINT16_MAX * sizeof(uint32_t), HOST_PAGE_SIZE);
402 pUVM->dbgf.s.paBpLocPortIoR3 = (uint32_t *)RTMemPageAllocZ(cbPortIoLoc);
403 AssertLogRelMsgReturn(pUVM->dbgf.s.paBpLocPortIoR3, ("cbPortIoLoc=%#x\n", cbPortIoLoc), VERR_NO_PAGE_MEMORY);
404 }
405 }
406
407 return VINF_SUCCESS;
408}
409
410
411/**
412 * Ensures that the breakpoint manager is initialized to handle I/O port breakpoints.
413 *
414 * @returns VBox status code.
415 * @param pUVM The user mode VM handle.
416 *
417 * @thread Any thread.
418 */
419static int dbgfR3BpPortIoEnsureInit(PUVM pUVM)
420{
421 /* If the L1 lookup table is allocated initialization succeeded before. */
422 if (RT_LIKELY(pUVM->dbgf.s.paBpLocPortIoR3))
423 return VINF_SUCCESS;
424
425 /* Ensure that the breakpoint manager is initialized. */
426 int rc = dbgfR3BpEnsureInit(pUVM);
427 if (RT_FAILURE(rc))
428 return rc;
429
430 /* Gather all EMTs and call into ring-0 to initialize the breakpoint manager. */
431 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpPortIoInitEmtWorker, NULL /*pvUser*/);
432}
433
434
435/**
436 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
437 */
438static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpOwnerInitEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
439{
440 RT_NOREF(pvUser);
441
442 VMCPU_ASSERT_EMT(pVCpu);
443 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
444
445 /*
446 * The initialization will be done on EMT(0). It is possible that multiple
447 * initialization attempts are done because dbgfR3BpOwnerEnsureInit() can be called
448 * from racing non EMT threads when trying to create a breakpoint owner for the first time.
449 * Just fake success if the pointers are initialized already, meaning that a previous rendezvous
450 * successfully initialized the breakpoint owner table.
451 */
452 int rc = VINF_SUCCESS;
453 PUVM pUVM = pVM->pUVM;
454 if ( pVCpu->idCpu == 0
455 && !pUVM->dbgf.s.pbmBpOwnersAllocR3)
456 {
457 AssertCompile(!(DBGF_BP_OWNER_COUNT_MAX % 64));
458 pUVM->dbgf.s.pbmBpOwnersAllocR3 = RTMemAllocZ(DBGF_BP_OWNER_COUNT_MAX / 8);
459 if (pUVM->dbgf.s.pbmBpOwnersAllocR3)
460 {
461#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
462 if (!SUPR3IsDriverless())
463 {
464 DBGFBPOWNERINITREQ Req;
465 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
466 Req.Hdr.cbReq = sizeof(Req);
467 Req.paBpOwnerR3 = NULL;
468 rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_OWNER_INIT, 0 /*u64Arg*/, &Req.Hdr);
469 if (RT_SUCCESS(rc))
470 {
471 pUVM->dbgf.s.paBpOwnersR3 = (PDBGFBPOWNERINT)Req.paBpOwnerR3;
472 return VINF_SUCCESS;
473 }
474 AssertLogRelMsgRC(rc, ("VMMR0_DO_DBGF_BP_OWNER_INIT failed: %Rrc\n", rc));
475 }
476 else
477#endif
478 {
479 /* Driverless: Do dbgfR0BpOwnerInitWorker here, ring-3 style. */
480 uint32_t const cbBpOwnerR3 = RT_ALIGN_32(DBGF_BP_OWNER_COUNT_MAX * sizeof(DBGFBPOWNERINT), HOST_PAGE_SIZE);
481 pUVM->dbgf.s.paBpOwnersR3 = (PDBGFBPOWNERINT)RTMemPageAllocZ(cbBpOwnerR3);
482 if (pUVM->dbgf.s.paBpOwnersR3)
483 return VINF_SUCCESS;
484 AssertLogRelMsgFailed(("cbBpOwnerR3=%#x\n", cbBpOwnerR3));
485 rc = VERR_NO_PAGE_MEMORY;
486 }
487
488 RTMemFree((void *)pUVM->dbgf.s.pbmBpOwnersAllocR3);
489 pUVM->dbgf.s.pbmBpOwnersAllocR3 = NULL;
490 }
491 else
492 rc = VERR_NO_MEMORY;
493 }
494
495 return rc;
496}
497
498
499/**
500 * Ensures that the breakpoint owner table is fully initialized.
501 *
502 * @returns VBox status code.
503 * @param pUVM The user mode VM handle.
504 *
505 * @thread Any thread.
506 */
507static int dbgfR3BpOwnerEnsureInit(PUVM pUVM)
508{
509 /* If the allocation bitmap is allocated initialization succeeded before. */
510 if (RT_LIKELY(pUVM->dbgf.s.pbmBpOwnersAllocR3))
511 return VINF_SUCCESS;
512
513 /* Gather all EMTs and call into ring-0 to initialize the breakpoint manager. */
514 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpOwnerInitEmtWorker, NULL /*pvUser*/);
515}
516
517
518/**
519 * Retains the given breakpoint owner handle for use.
520 *
521 * @returns VBox status code.
522 * @retval VERR_INVALID_HANDLE if the given breakpoint owner handle is invalid.
523 * @param pUVM The user mode VM handle.
524 * @param hBpOwner The breakpoint owner handle to retain, NIL_DBGFBPOWNER is accepted without doing anything.
525 * @param fIo Flag whether the owner must have the I/O handler set because it is used by an I/O breakpoint.
526 */
527DECLINLINE(int) dbgfR3BpOwnerRetain(PUVM pUVM, DBGFBPOWNER hBpOwner, bool fIo)
528{
529 if (hBpOwner == NIL_DBGFBPOWNER)
530 return VINF_SUCCESS;
531
532 PDBGFBPOWNERINT pBpOwner = dbgfR3BpOwnerGetByHnd(pUVM, hBpOwner);
533 if (pBpOwner)
534 {
535 AssertReturn ( ( fIo
536 && pBpOwner->pfnBpIoHitR3)
537 || ( !fIo
538 && pBpOwner->pfnBpHitR3),
539 VERR_INVALID_HANDLE);
540 ASMAtomicIncU32(&pBpOwner->cRefs);
541 return VINF_SUCCESS;
542 }
543
544 return VERR_INVALID_HANDLE;
545}
546
547
548/**
549 * Releases the given breakpoint owner handle.
550 *
551 * @returns VBox status code.
552 * @retval VERR_INVALID_HANDLE if the given breakpoint owner handle is invalid.
553 * @param pUVM The user mode VM handle.
554 * @param hBpOwner The breakpoint owner handle to release, NIL_DBGFBPOWNER is accepted without doing anything.
555 */
556DECLINLINE(int) dbgfR3BpOwnerRelease(PUVM pUVM, DBGFBPOWNER hBpOwner)
557{
558 if (hBpOwner == NIL_DBGFBPOWNER)
559 return VINF_SUCCESS;
560
561 PDBGFBPOWNERINT pBpOwner = dbgfR3BpOwnerGetByHnd(pUVM, hBpOwner);
562 if (pBpOwner)
563 {
564 Assert(pBpOwner->cRefs > 1);
565 ASMAtomicDecU32(&pBpOwner->cRefs);
566 return VINF_SUCCESS;
567 }
568
569 return VERR_INVALID_HANDLE;
570}
571
572
573/**
574 * Returns the internal breakpoint state for the given handle.
575 *
576 * @returns Pointer to the internal breakpoint state or NULL if the handle is invalid.
577 * @param pUVM The user mode VM handle.
578 * @param hBp The breakpoint handle to resolve.
579 */
580DECLINLINE(PDBGFBPINT) dbgfR3BpGetByHnd(PUVM pUVM, DBGFBP hBp)
581{
582 uint32_t idChunk = DBGF_BP_HND_GET_CHUNK_ID(hBp);
583 uint32_t idxEntry = DBGF_BP_HND_GET_ENTRY(hBp);
584
585 AssertReturn(idChunk < DBGF_BP_CHUNK_COUNT, NULL);
586 AssertReturn(idxEntry < DBGF_BP_COUNT_PER_CHUNK, NULL);
587
588 PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[idChunk];
589 AssertReturn(pBpChunk->idChunk == idChunk, NULL);
590 AssertPtrReturn(pBpChunk->pbmAlloc, NULL);
591 AssertReturn(ASMBitTest(pBpChunk->pbmAlloc, idxEntry), NULL);
592
593 return &pBpChunk->pBpBaseR3[idxEntry];
594}
595
596
597/**
598 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
599 */
600static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpChunkAllocEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
601{
602 uint32_t idChunk = (uint32_t)(uintptr_t)pvUser;
603
604 VMCPU_ASSERT_EMT(pVCpu);
605 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
606
607 AssertReturn(idChunk < DBGF_BP_CHUNK_COUNT, VERR_DBGF_BP_IPE_1);
608
609 PUVM pUVM = pVM->pUVM;
610 PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[idChunk];
611
612 AssertReturn( pBpChunk->idChunk == DBGF_BP_CHUNK_ID_INVALID
613 || pBpChunk->idChunk == idChunk,
614 VERR_DBGF_BP_IPE_2);
615
616 /*
617 * The initialization will be done on EMT(0). It is possible that multiple
618 * allocation attempts are done when multiple racing non EMT threads try to
619 * allocate a breakpoint and a new chunk needs to be allocated.
620 * Ignore the request and succeed if the chunk is allocated meaning that a
621 * previous rendezvous successfully allocated the chunk.
622 */
623 int rc = VINF_SUCCESS;
624 if ( pVCpu->idCpu == 0
625 && pBpChunk->idChunk == DBGF_BP_CHUNK_ID_INVALID)
626 {
627 /* Allocate the bitmap first so we can skip calling into VMMR0 if it fails. */
628 AssertCompile(!(DBGF_BP_COUNT_PER_CHUNK % 64));
629 void *pbmAlloc = RTMemAllocZ(DBGF_BP_COUNT_PER_CHUNK / 8);
630 if (RT_LIKELY(pbmAlloc))
631 {
632#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
633 if (!SUPR3IsDriverless())
634 {
635 DBGFBPCHUNKALLOCREQ Req;
636 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
637 Req.Hdr.cbReq = sizeof(Req);
638 Req.idChunk = idChunk;
639 Req.pChunkBaseR3 = NULL;
640 rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_CHUNK_ALLOC, 0 /*u64Arg*/, &Req.Hdr);
641 if (RT_SUCCESS(rc))
642 pBpChunk->pBpBaseR3 = (PDBGFBPINT)Req.pChunkBaseR3;
643 else
644 AssertLogRelMsgRC(rc, ("VMMR0_DO_DBGF_BP_CHUNK_ALLOC failed: %Rrc\n", rc));
645 }
646 else
647#endif
648 {
649 /* Driverless: Do dbgfR0BpChunkAllocWorker here, ring-3 style. */
650 uint32_t const cbShared = RT_ALIGN_32(DBGF_BP_COUNT_PER_CHUNK * sizeof(DBGFBPINT), HOST_PAGE_SIZE);
651 pBpChunk->pBpBaseR3 = (PDBGFBPINT)RTMemPageAllocZ(cbShared);
652 AssertLogRelMsgStmt(pBpChunk->pBpBaseR3, ("cbShared=%#x\n", cbShared), rc = VERR_NO_PAGE_MEMORY);
653 }
654 if (RT_SUCCESS(rc))
655 {
656 pBpChunk->pbmAlloc = (void volatile *)pbmAlloc;
657 pBpChunk->cBpsFree = DBGF_BP_COUNT_PER_CHUNK;
658 pBpChunk->idChunk = idChunk;
659 return VINF_SUCCESS;
660 }
661
662 RTMemFree(pbmAlloc);
663 }
664 else
665 rc = VERR_NO_MEMORY;
666 }
667
668 return rc;
669}
670
671
672/**
673 * Tries to allocate the given chunk which requires an EMT rendezvous.
674 *
675 * @returns VBox status code.
676 * @param pUVM The user mode VM handle.
677 * @param idChunk The chunk to allocate.
678 *
679 * @thread Any thread.
680 */
681DECLINLINE(int) dbgfR3BpChunkAlloc(PUVM pUVM, uint32_t idChunk)
682{
683 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpChunkAllocEmtWorker, (void *)(uintptr_t)idChunk);
684}
685
686
687/**
688 * Tries to allocate a new breakpoint of the given type.
689 *
690 * @returns VBox status code.
691 * @param pUVM The user mode VM handle.
692 * @param hOwner The owner handle, NIL_DBGFBPOWNER if none assigned.
693 * @param pvUser Opaque user data passed in the owner callback.
694 * @param enmType Breakpoint type to allocate.
695 * @param fFlags Flags associated with the allocated breakpoint.
696 * @param iHitTrigger The hit count at which the breakpoint starts triggering.
697 * Use 0 (or 1) if it's gonna trigger at once.
698 * @param iHitDisable The hit count which disables the breakpoint.
699 * Use ~(uint64_t)0 if it's never gonna be disabled.
700 * @param phBp Where to return the opaque breakpoint handle on success.
701 * @param ppBp Where to return the pointer to the internal breakpoint state on success.
702 *
703 * @thread Any thread.
704 */
705static int dbgfR3BpAlloc(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser, DBGFBPTYPE enmType,
706 uint16_t fFlags, uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp,
707 PDBGFBPINT *ppBp)
708{
709 bool fIo = enmType == DBGFBPTYPE_PORT_IO
710 || enmType == DBGFBPTYPE_MMIO;
711 int rc = dbgfR3BpOwnerRetain(pUVM, hOwner, fIo);
712 if (RT_FAILURE(rc))
713 return rc;
714
715 /*
716 * Search for a chunk having a free entry, allocating new chunks
717 * if the encountered ones are full.
718 *
719 * This can be called from multiple threads at the same time so special care
720 * has to be taken to not require any locking here.
721 */
722 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpChunks); i++)
723 {
724 PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[i];
725
726 uint32_t idChunk = ASMAtomicReadU32(&pBpChunk->idChunk);
727 if (idChunk == DBGF_BP_CHUNK_ID_INVALID)
728 {
729 rc = dbgfR3BpChunkAlloc(pUVM, i);
730 if (RT_FAILURE(rc))
731 {
732 LogRel(("DBGF/Bp: Allocating new breakpoint table chunk failed with %Rrc\n", rc));
733 break;
734 }
735
736 idChunk = ASMAtomicReadU32(&pBpChunk->idChunk);
737 Assert(idChunk == i);
738 }
739
740 /** @todo Optimize with some hinting if this turns out to be too slow. */
741 for (;;)
742 {
743 uint32_t cBpsFree = ASMAtomicReadU32(&pBpChunk->cBpsFree);
744 if (cBpsFree)
745 {
746 /*
747 * Scan the associated bitmap for a free entry, if none can be found another thread
748 * raced us and we go to the next chunk.
749 */
750 int32_t iClr = ASMBitFirstClear(pBpChunk->pbmAlloc, DBGF_BP_COUNT_PER_CHUNK);
751 if (iClr != -1)
752 {
753 /*
754 * Try to allocate, we could get raced here as well. In that case
755 * we try again.
756 */
757 if (!ASMAtomicBitTestAndSet(pBpChunk->pbmAlloc, iClr))
758 {
759 /* Success, immediately mark as allocated, initialize the breakpoint state and return. */
760 ASMAtomicDecU32(&pBpChunk->cBpsFree);
761
762 PDBGFBPINT pBp = &pBpChunk->pBpBaseR3[iClr];
763 pBp->Pub.cHits = 0;
764 pBp->Pub.iHitTrigger = iHitTrigger;
765 pBp->Pub.iHitDisable = iHitDisable;
766 pBp->Pub.hOwner = hOwner;
767 pBp->Pub.u16Type = DBGF_BP_PUB_MAKE_TYPE(enmType);
768 pBp->Pub.fFlags = fFlags & ~DBGF_BP_F_ENABLED; /* The enabled flag is handled in the respective APIs. */
769 pBp->pvUserR3 = pvUser;
770
771 /** @todo Owner handling (reference and call ring-0 if it has a ring-0 callback). */
772
773 *phBp = DBGF_BP_HND_CREATE(idChunk, iClr);
774 *ppBp = pBp;
775 return VINF_SUCCESS;
776 }
777 /* else Retry with another spot. */
778 }
779 else /* no free entry in bitmap, go to the next chunk */
780 break;
781 }
782 else /* !cBpsFree, go to the next chunk */
783 break;
784 }
785 }
786
787 rc = dbgfR3BpOwnerRelease(pUVM, hOwner); AssertRC(rc);
788 return VERR_DBGF_NO_MORE_BP_SLOTS;
789}
790
791
792/**
793 * Frees the given breakpoint handle.
794 *
795 * @param pUVM The user mode VM handle.
796 * @param hBp The breakpoint handle to free.
797 * @param pBp The internal breakpoint state pointer.
798 */
799static void dbgfR3BpFree(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
800{
801 uint32_t idChunk = DBGF_BP_HND_GET_CHUNK_ID(hBp);
802 uint32_t idxEntry = DBGF_BP_HND_GET_ENTRY(hBp);
803
804 AssertReturnVoid(idChunk < DBGF_BP_CHUNK_COUNT);
805 AssertReturnVoid(idxEntry < DBGF_BP_COUNT_PER_CHUNK);
806
807 PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[idChunk];
808 AssertPtrReturnVoid(pBpChunk->pbmAlloc);
809 AssertReturnVoid(ASMBitTest(pBpChunk->pbmAlloc, idxEntry));
810
811 /** @todo Need a trip to Ring-0 if an owner is assigned with a Ring-0 part to clear the breakpoint. */
812 int rc = dbgfR3BpOwnerRelease(pUVM, pBp->Pub.hOwner); AssertRC(rc); RT_NOREF(rc);
813 memset(pBp, 0, sizeof(*pBp));
814
815 ASMAtomicBitClear(pBpChunk->pbmAlloc, idxEntry);
816 ASMAtomicIncU32(&pBpChunk->cBpsFree);
817}
818
819
820/**
821 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
822 */
823static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpL2TblChunkAllocEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
824{
825 uint32_t idChunk = (uint32_t)(uintptr_t)pvUser;
826
827 VMCPU_ASSERT_EMT(pVCpu);
828 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
829
830 AssertReturn(idChunk < DBGF_BP_L2_TBL_CHUNK_COUNT, VERR_DBGF_BP_IPE_1);
831
832 PUVM pUVM = pVM->pUVM;
833 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[idChunk];
834
835 AssertReturn( pL2Chunk->idChunk == DBGF_BP_L2_IDX_CHUNK_ID_INVALID
836 || pL2Chunk->idChunk == idChunk,
837 VERR_DBGF_BP_IPE_2);
838
839 /*
840 * The initialization will be done on EMT(0). It is possible that multiple
841 * allocation attempts are done when multiple racing non EMT threads try to
842 * allocate a breakpoint and a new chunk needs to be allocated.
843 * Ignore the request and succeed if the chunk is allocated meaning that a
844 * previous rendezvous successfully allocated the chunk.
845 */
846 int rc = VINF_SUCCESS;
847 if ( pVCpu->idCpu == 0
848 && pL2Chunk->idChunk == DBGF_BP_L2_IDX_CHUNK_ID_INVALID)
849 {
850 /* Allocate the bitmap first so we can skip calling into VMMR0 if it fails. */
851 AssertCompile(!(DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK % 64));
852 void *pbmAlloc = RTMemAllocZ(DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK / 8);
853 if (RT_LIKELY(pbmAlloc))
854 {
855#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
856 if (!SUPR3IsDriverless())
857 {
858 DBGFBPL2TBLCHUNKALLOCREQ Req;
859 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
860 Req.Hdr.cbReq = sizeof(Req);
861 Req.idChunk = idChunk;
862 Req.pChunkBaseR3 = NULL;
863 rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC, 0 /*u64Arg*/, &Req.Hdr);
864 if (RT_SUCCESS(rc))
865 pL2Chunk->pL2BaseR3 = (PDBGFBPL2ENTRY)Req.pChunkBaseR3;
866 else
867 AssertLogRelMsgRC(rc, ("VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC failed: %Rrc\n", rc));
868 }
869 else
870#endif
871 {
872 /* Driverless: Do dbgfR0BpL2TblChunkAllocWorker here, ring-3 style. */
873 uint32_t const cbTotal = RT_ALIGN_32(DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK * sizeof(DBGFBPL2ENTRY), HOST_PAGE_SIZE);
874 pL2Chunk->pL2BaseR3 = (PDBGFBPL2ENTRY)RTMemPageAllocZ(cbTotal);
875 AssertLogRelMsgStmt(pL2Chunk->pL2BaseR3, ("cbTotal=%#x\n", cbTotal), rc = VERR_NO_PAGE_MEMORY);
876 }
877 if (RT_SUCCESS(rc))
878 {
879 pL2Chunk->pbmAlloc = (void volatile *)pbmAlloc;
880 pL2Chunk->cFree = DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK;
881 pL2Chunk->idChunk = idChunk;
882 return VINF_SUCCESS;
883 }
884
885 RTMemFree(pbmAlloc);
886 }
887 else
888 rc = VERR_NO_MEMORY;
889 }
890
891 return rc;
892}
893
894
895/**
896 * Tries to allocate the given L2 table chunk which requires an EMT rendezvous.
897 *
898 * @returns VBox status code.
899 * @param pUVM The user mode VM handle.
900 * @param idChunk The chunk to allocate.
901 *
902 * @thread Any thread.
903 */
904DECLINLINE(int) dbgfR3BpL2TblChunkAlloc(PUVM pUVM, uint32_t idChunk)
905{
906 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpL2TblChunkAllocEmtWorker, (void *)(uintptr_t)idChunk);
907}
908
909
910/**
911 * Tries to allocate a new L2 table entry.
912 *
913 * @returns VBox status code.
914 * @param pUVM The user mode VM handle.
915 * @param pidxL2Tbl Where to return the L2 table entry index on success.
916 * @param ppL2TblEntry Where to return the pointer to the L2 table entry on success.
917 *
918 * @thread Any thread.
919 */
920static int dbgfR3BpL2TblEntryAlloc(PUVM pUVM, uint32_t *pidxL2Tbl, PDBGFBPL2ENTRY *ppL2TblEntry)
921{
922 /*
923 * Search for a chunk having a free entry, allocating new chunks
924 * if the encountered ones are full.
925 *
926 * This can be called from multiple threads at the same time so special care
927 * has to be taken to not require any locking here.
928 */
929 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpL2TblChunks); i++)
930 {
931 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[i];
932
933 uint32_t idChunk = ASMAtomicReadU32(&pL2Chunk->idChunk);
934 if (idChunk == DBGF_BP_L2_IDX_CHUNK_ID_INVALID)
935 {
936 int rc = dbgfR3BpL2TblChunkAlloc(pUVM, i);
937 if (RT_FAILURE(rc))
938 {
939 LogRel(("DBGF/Bp: Allocating new breakpoint L2 lookup table chunk failed with %Rrc\n", rc));
940 break;
941 }
942
943 idChunk = ASMAtomicReadU32(&pL2Chunk->idChunk);
944 Assert(idChunk == i);
945 }
946
947 /** @todo Optimize with some hinting if this turns out to be too slow. */
948 for (;;)
949 {
950 uint32_t cFree = ASMAtomicReadU32(&pL2Chunk->cFree);
951 if (cFree)
952 {
953 /*
954 * Scan the associated bitmap for a free entry, if none can be found another thread
955 * raced us and we go to the next chunk.
956 */
957 int32_t iClr = ASMBitFirstClear(pL2Chunk->pbmAlloc, DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK);
958 if (iClr != -1)
959 {
960 /*
961 * Try to allocate, we could get raced here as well. In that case
962 * we try again.
963 */
964 if (!ASMAtomicBitTestAndSet(pL2Chunk->pbmAlloc, iClr))
965 {
966 /* Success, immediately mark as allocated, initialize the breakpoint state and return. */
967 ASMAtomicDecU32(&pL2Chunk->cFree);
968
969 PDBGFBPL2ENTRY pL2Entry = &pL2Chunk->pL2BaseR3[iClr];
970
971 *pidxL2Tbl = DBGF_BP_L2_IDX_CREATE(idChunk, iClr);
972 *ppL2TblEntry = pL2Entry;
973 return VINF_SUCCESS;
974 }
975 /* else Retry with another spot. */
976 }
977 else /* no free entry in bitmap, go to the next chunk */
978 break;
979 }
980 else /* !cFree, go to the next chunk */
981 break;
982 }
983 }
984
985 return VERR_DBGF_NO_MORE_BP_SLOTS;
986}
987
988
989/**
990 * Frees the given L2 table entry.
991 *
992 * @param pUVM The user mode VM handle.
993 * @param idxL2Tbl The L2 table index to free.
994 * @param pL2TblEntry The L2 table entry pointer to free.
995 */
996static void dbgfR3BpL2TblEntryFree(PUVM pUVM, uint32_t idxL2Tbl, PDBGFBPL2ENTRY pL2TblEntry)
997{
998 uint32_t idChunk = DBGF_BP_L2_IDX_GET_CHUNK_ID(idxL2Tbl);
999 uint32_t idxEntry = DBGF_BP_L2_IDX_GET_ENTRY(idxL2Tbl);
1000
1001 AssertReturnVoid(idChunk < DBGF_BP_L2_TBL_CHUNK_COUNT);
1002 AssertReturnVoid(idxEntry < DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK);
1003
1004 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[idChunk];
1005 AssertPtrReturnVoid(pL2Chunk->pbmAlloc);
1006 AssertReturnVoid(ASMBitTest(pL2Chunk->pbmAlloc, idxEntry));
1007
1008 memset(pL2TblEntry, 0, sizeof(*pL2TblEntry));
1009
1010 ASMAtomicBitClear(pL2Chunk->pbmAlloc, idxEntry);
1011 ASMAtomicIncU32(&pL2Chunk->cFree);
1012}
1013
1014
1015/**
1016 * Sets the enabled flag of the given breakpoint to the given value.
1017 *
1018 * @param pBp The breakpoint to set the state for.
1019 * @param fEnabled Enabled status.
1020 */
1021DECLINLINE(void) dbgfR3BpSetEnabled(PDBGFBPINT pBp, bool fEnabled)
1022{
1023 if (fEnabled)
1024 pBp->Pub.fFlags |= DBGF_BP_F_ENABLED;
1025 else
1026 pBp->Pub.fFlags &= ~DBGF_BP_F_ENABLED;
1027}
1028
1029
1030/**
1031 * Assigns a hardware breakpoint state to the given register breakpoint.
1032 *
1033 * @returns VBox status code.
1034 * @param pVM The cross-context VM structure pointer.
1035 * @param hBp The breakpoint handle to assign.
1036 * @param pBp The internal breakpoint state.
1037 *
1038 * @thread Any thread.
1039 */
1040static int dbgfR3BpRegAssign(PVM pVM, DBGFBP hBp, PDBGFBPINT pBp)
1041{
1042 AssertReturn(pBp->Pub.u.Reg.iReg == UINT8_MAX, VERR_DBGF_BP_IPE_3);
1043
1044 for (uint8_t i = 0; i < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); i++)
1045 {
1046 PDBGFBPHW pHwBp = &pVM->dbgf.s.aHwBreakpoints[i];
1047
1048 AssertCompileSize(DBGFBP, sizeof(uint32_t));
1049 if (ASMAtomicCmpXchgU32(&pHwBp->hBp, hBp, NIL_DBGFBP))
1050 {
1051 pHwBp->GCPtr = pBp->Pub.u.Reg.GCPtr;
1052 pHwBp->fType = pBp->Pub.u.Reg.fType;
1053 pHwBp->cb = pBp->Pub.u.Reg.cb;
1054 pHwBp->fEnabled = DBGF_BP_PUB_IS_ENABLED(&pBp->Pub);
1055
1056 pBp->Pub.u.Reg.iReg = i;
1057 return VINF_SUCCESS;
1058 }
1059 }
1060
1061 return VERR_DBGF_NO_MORE_BP_SLOTS;
1062}
1063
1064
1065/**
1066 * Removes the assigned hardware breakpoint state from the given register breakpoint.
1067 *
1068 * @returns VBox status code.
1069 * @param pVM The cross-context VM structure pointer.
1070 * @param hBp The breakpoint handle to remove.
1071 * @param pBp The internal breakpoint state.
1072 *
1073 * @thread Any thread.
1074 */
1075static int dbgfR3BpRegRemove(PVM pVM, DBGFBP hBp, PDBGFBPINT pBp)
1076{
1077 AssertReturn(pBp->Pub.u.Reg.iReg < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints), VERR_DBGF_BP_IPE_3);
1078
1079 PDBGFBPHW pHwBp = &pVM->dbgf.s.aHwBreakpoints[pBp->Pub.u.Reg.iReg];
1080 AssertReturn(pHwBp->hBp == hBp, VERR_DBGF_BP_IPE_4);
1081 AssertReturn(!pHwBp->fEnabled, VERR_DBGF_BP_IPE_5);
1082
1083 pHwBp->GCPtr = 0;
1084 pHwBp->fType = 0;
1085 pHwBp->cb = 0;
1086 ASMCompilerBarrier();
1087
1088 ASMAtomicWriteU32(&pHwBp->hBp, NIL_DBGFBP);
1089 return VINF_SUCCESS;
1090}
1091
1092
1093/**
1094 * Returns the pointer to the L2 table entry from the given index.
1095 *
1096 * @returns Current context pointer to the L2 table entry or NULL if the provided index value is invalid.
1097 * @param pUVM The user mode VM handle.
1098 * @param idxL2 The L2 table index to resolve.
1099 *
1100 * @note The content of the resolved L2 table entry is not validated!
1101 */
1102DECLINLINE(PDBGFBPL2ENTRY) dbgfR3BpL2GetByIdx(PUVM pUVM, uint32_t idxL2)
1103{
1104 uint32_t idChunk = DBGF_BP_L2_IDX_GET_CHUNK_ID(idxL2);
1105 uint32_t idxEntry = DBGF_BP_L2_IDX_GET_ENTRY(idxL2);
1106
1107 AssertReturn(idChunk < DBGF_BP_L2_TBL_CHUNK_COUNT, NULL);
1108 AssertReturn(idxEntry < DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK, NULL);
1109
1110 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[idChunk];
1111 AssertPtrReturn(pL2Chunk->pbmAlloc, NULL);
1112 AssertReturn(ASMBitTest(pL2Chunk->pbmAlloc, idxEntry), NULL);
1113
1114 return &pL2Chunk->CTX_SUFF(pL2Base)[idxEntry];
1115}
1116
1117
1118/**
1119 * Creates a binary search tree with the given root and leaf nodes.
1120 *
1121 * @returns VBox status code.
1122 * @param pUVM The user mode VM handle.
1123 * @param idxL1 The index into the L1 table where the created tree should be linked into.
1124 * @param u32EntryOld The old entry in the L1 table used to compare with in the atomic update.
1125 * @param hBpRoot The root node DBGF handle to assign.
1126 * @param GCPtrRoot The root node's GC pointer to use as a key.
1127 * @param hBpLeaf The leaf node's DBGF handle to assign.
1128 * @param GCPtrLeaf The leaf node's GC pointer to use as a key.
1129 */
1130static int dbgfR3BpInt3L2BstCreate(PUVM pUVM, uint32_t idxL1, uint32_t u32EntryOld,
1131 DBGFBP hBpRoot, RTGCUINTPTR GCPtrRoot,
1132 DBGFBP hBpLeaf, RTGCUINTPTR GCPtrLeaf)
1133{
1134 AssertReturn(GCPtrRoot != GCPtrLeaf, VERR_DBGF_BP_IPE_9);
1135 Assert(DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(GCPtrRoot) == DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(GCPtrLeaf));
1136
1137 /* Allocate two nodes. */
1138 uint32_t idxL2Root = 0;
1139 PDBGFBPL2ENTRY pL2Root = NULL;
1140 int rc = dbgfR3BpL2TblEntryAlloc(pUVM, &idxL2Root, &pL2Root);
1141 if (RT_SUCCESS(rc))
1142 {
1143 uint32_t idxL2Leaf = 0;
1144 PDBGFBPL2ENTRY pL2Leaf = NULL;
1145 rc = dbgfR3BpL2TblEntryAlloc(pUVM, &idxL2Leaf, &pL2Leaf);
1146 if (RT_SUCCESS(rc))
1147 {
1148 dbgfBpL2TblEntryInit(pL2Leaf, hBpLeaf, GCPtrLeaf, DBGF_BP_L2_ENTRY_IDX_END, DBGF_BP_L2_ENTRY_IDX_END, 0 /*iDepth*/);
1149 if (GCPtrLeaf < GCPtrRoot)
1150 dbgfBpL2TblEntryInit(pL2Root, hBpRoot, GCPtrRoot, idxL2Leaf, DBGF_BP_L2_ENTRY_IDX_END, 0 /*iDepth*/);
1151 else
1152 dbgfBpL2TblEntryInit(pL2Root, hBpRoot, GCPtrRoot, DBGF_BP_L2_ENTRY_IDX_END, idxL2Leaf, 0 /*iDepth*/);
1153
1154 uint32_t const u32Entry = DBGF_BP_INT3_L1_ENTRY_CREATE_L2_IDX(idxL2Root);
1155 if (ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], u32Entry, u32EntryOld))
1156 return VINF_SUCCESS;
1157
1158 /* The L1 entry has changed due to another thread racing us during insertion, free nodes and try again. */
1159 dbgfR3BpL2TblEntryFree(pUVM, idxL2Leaf, pL2Leaf);
1160 rc = VINF_TRY_AGAIN;
1161 }
1162
1163 dbgfR3BpL2TblEntryFree(pUVM, idxL2Root, pL2Root);
1164 }
1165
1166 return rc;
1167}
1168
1169
1170/**
1171 * Inserts the given breakpoint handle into an existing binary search tree.
1172 *
1173 * @returns VBox status code.
1174 * @param pUVM The user mode VM handle.
1175 * @param idxL2Root The index of the tree root in the L2 table.
1176 * @param hBp The node DBGF handle to insert.
1177 * @param GCPtr The node's GC pointer to use as a key.
1178 */
1179static int dbgfR3BpInt2L2BstNodeInsert(PUVM pUVM, uint32_t idxL2Root, DBGFBP hBp, RTGCUINTPTR GCPtr)
1180{
1181 GCPtr = DBGF_BP_INT3_L2_KEY_EXTRACT_FROM_ADDR(GCPtr);
1182
1183 /* Allocate a new node first. */
1184 uint32_t idxL2Nd = 0;
1185 PDBGFBPL2ENTRY pL2Nd = NULL;
1186 int rc = dbgfR3BpL2TblEntryAlloc(pUVM, &idxL2Nd, &pL2Nd);
1187 if (RT_SUCCESS(rc))
1188 {
1189 /* Walk the tree and find the correct node to insert to. */
1190 PDBGFBPL2ENTRY pL2Entry = dbgfR3BpL2GetByIdx(pUVM, idxL2Root);
1191 while (RT_LIKELY(pL2Entry))
1192 {
1193 /* Make a copy of the entry. */
1194 DBGFBPL2ENTRY L2Entry;
1195 L2Entry.u64GCPtrKeyAndBpHnd1 = ASMAtomicReadU64(&pL2Entry->u64GCPtrKeyAndBpHnd1);
1196 L2Entry.u64LeftRightIdxDepthBpHnd2 = ASMAtomicReadU64(&pL2Entry->u64LeftRightIdxDepthBpHnd2);
1197
1198 RTGCUINTPTR GCPtrL2Entry = DBGF_BP_L2_ENTRY_GET_GCPTR(L2Entry.u64GCPtrKeyAndBpHnd1);
1199 AssertBreak(GCPtr != GCPtrL2Entry);
1200
1201 /* Not found, get to the next level. */
1202 uint32_t idxL2Next = GCPtr < GCPtrL2Entry
1203 ? DBGF_BP_L2_ENTRY_GET_IDX_LEFT(L2Entry.u64LeftRightIdxDepthBpHnd2)
1204 : DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(L2Entry.u64LeftRightIdxDepthBpHnd2);
1205 if (idxL2Next == DBGF_BP_L2_ENTRY_IDX_END)
1206 {
1207 /* Insert the new node here. */
1208 dbgfBpL2TblEntryInit(pL2Nd, hBp, GCPtr, DBGF_BP_L2_ENTRY_IDX_END, DBGF_BP_L2_ENTRY_IDX_END, 0 /*iDepth*/);
1209 if (GCPtr < GCPtrL2Entry)
1210 dbgfBpL2TblEntryUpdateLeft(pL2Entry, idxL2Nd, 0 /*iDepth*/);
1211 else
1212 dbgfBpL2TblEntryUpdateRight(pL2Entry, idxL2Nd, 0 /*iDepth*/);
1213 return VINF_SUCCESS;
1214 }
1215
1216 pL2Entry = dbgfR3BpL2GetByIdx(pUVM, idxL2Next);
1217 }
1218
1219 dbgfR3BpL2TblEntryFree(pUVM, idxL2Nd, pL2Nd);
1220 rc = VERR_DBGF_BP_L2_LOOKUP_FAILED;
1221 }
1222
1223 return rc;
1224}
1225
1226
1227/**
1228 * Adds the given breakpoint handle keyed with the GC pointer to the proper L2 binary search tree
1229 * possibly creating a new tree.
1230 *
1231 * @returns VBox status code.
1232 * @param pUVM The user mode VM handle.
1233 * @param idxL1 The index into the L1 table the breakpoint uses.
1234 * @param hBp The breakpoint handle which is to be added.
1235 * @param GCPtr The GC pointer the breakpoint is keyed with.
1236 */
1237static int dbgfR3BpInt3L2BstNodeAdd(PUVM pUVM, uint32_t idxL1, DBGFBP hBp, RTGCUINTPTR GCPtr)
1238{
1239 int rc = RTSemFastMutexRequest(pUVM->dbgf.s.hMtxBpL2Wr); AssertRC(rc);
1240
1241 uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1]); /* Re-read, could get raced by a remove operation. */
1242 uint8_t u8Type = DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32Entry);
1243 if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND)
1244 {
1245 /* Create a new search tree, gather the necessary information first. */
1246 DBGFBP hBp2 = DBGF_BP_INT3_L1_ENTRY_GET_BP_HND(u32Entry);
1247 PDBGFBPINT pBp2 = dbgfR3BpGetByHnd(pUVM, hBp2);
1248 AssertStmt(RT_VALID_PTR(pBp2), rc = VERR_DBGF_BP_IPE_7);
1249 if (RT_SUCCESS(rc))
1250 rc = dbgfR3BpInt3L2BstCreate(pUVM, idxL1, u32Entry, hBp, GCPtr, hBp2, pBp2->Pub.u.Sw.GCPtr);
1251 }
1252 else if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX)
1253 rc = dbgfR3BpInt2L2BstNodeInsert(pUVM, DBGF_BP_INT3_L1_ENTRY_GET_L2_IDX(u32Entry), hBp, GCPtr);
1254
1255 int rc2 = RTSemFastMutexRelease(pUVM->dbgf.s.hMtxBpL2Wr); AssertRC(rc2);
1256 return rc;
1257}
1258
1259
1260/**
1261 * Gets the leftmost entry from the given tree node start index.
1262 *
1263 * @returns VBox status code.
1264 * @param pUVM The user mode VM handle.
1265 * @param idxL2Start The start index to walk from.
1266 * @param pidxL2Leftmost Where to store the L2 table index of the leftmost entry.
1267 * @param ppL2NdLeftmost Where to store the pointer to the leftmost L2 table entry.
1268 * @param pidxL2NdLeftParent Where to store the L2 table index of the leftmost entry's parent.
1269 * @param ppL2NdLeftParent Where to store the pointer to the leftmost L2 table entry's parent.
1270 */
1271static int dbgfR33BpInt3BstGetLeftmostEntryFromNode(PUVM pUVM, uint32_t idxL2Start,
1272 uint32_t *pidxL2Leftmost, PDBGFBPL2ENTRY *ppL2NdLeftmost,
1273 uint32_t *pidxL2NdLeftParent, PDBGFBPL2ENTRY *ppL2NdLeftParent)
1274{
1275 uint32_t idxL2Parent = DBGF_BP_L2_ENTRY_IDX_END;
1276 PDBGFBPL2ENTRY pL2NdParent = NULL;
1277
1278 for (;;)
1279 {
1280 PDBGFBPL2ENTRY pL2Entry = dbgfR3BpL2GetByIdx(pUVM, idxL2Start);
1281 AssertPtr(pL2Entry);
1282
1283 uint32_t idxL2Left = DBGF_BP_L2_ENTRY_GET_IDX_LEFT(pL2Entry->u64LeftRightIdxDepthBpHnd2);
1284 if (idxL2Left == DBGF_BP_L2_ENTRY_IDX_END)
1285 {
1286 *pidxL2Leftmost = idxL2Start;
1287 *ppL2NdLeftmost = pL2Entry;
1288 *pidxL2NdLeftParent = idxL2Parent;
1289 *ppL2NdLeftParent = pL2NdParent;
1290 break;
1291 }
1292
1293 idxL2Parent = idxL2Start;
1294 idxL2Start = idxL2Left;
1295 pL2NdParent = pL2Entry;
1296 }
1297
1298 return VINF_SUCCESS;
1299}
1300
1301
1302/**
1303 * Removes the given node rearranging the tree.
1304 *
1305 * @returns VBox status code.
1306 * @param pUVM The user mode VM handle.
1307 * @param idxL1 The index into the L1 table pointing to the binary search tree containing the node.
1308 * @param idxL2Root The L2 table index where the tree root is located.
1309 * @param idxL2Nd The node index to remove.
1310 * @param pL2Nd The L2 table entry to remove.
1311 * @param idxL2NdParent The parent's index, can be DBGF_BP_L2_ENTRY_IDX_END if the root is about to be removed.
1312 * @param pL2NdParent The parent's L2 table entry, can be NULL if the root is about to be removed.
1313 * @param fLeftChild Flag whether the node is the left child of the parent or the right one.
1314 */
1315static int dbgfR3BpInt3BstNodeRemove(PUVM pUVM, uint32_t idxL1, uint32_t idxL2Root,
1316 uint32_t idxL2Nd, PDBGFBPL2ENTRY pL2Nd,
1317 uint32_t idxL2NdParent, PDBGFBPL2ENTRY pL2NdParent,
1318 bool fLeftChild)
1319{
1320 /*
1321 * If there are only two nodes remaining the tree will get destroyed and the
1322 * L1 entry will be converted to the direct handle type.
1323 */
1324 uint32_t idxL2Left = DBGF_BP_L2_ENTRY_GET_IDX_LEFT(pL2Nd->u64LeftRightIdxDepthBpHnd2);
1325 uint32_t idxL2Right = DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(pL2Nd->u64LeftRightIdxDepthBpHnd2);
1326
1327 Assert(idxL2NdParent != DBGF_BP_L2_ENTRY_IDX_END || !pL2NdParent); RT_NOREF(idxL2NdParent);
1328 uint32_t idxL2ParentNew = DBGF_BP_L2_ENTRY_IDX_END;
1329 if (idxL2Right == DBGF_BP_L2_ENTRY_IDX_END)
1330 idxL2ParentNew = idxL2Left;
1331 else
1332 {
1333 /* Find the leftmost entry of the right subtree and move it to the location of the node being removed. */
1334 PDBGFBPL2ENTRY pL2NdLeftmostParent = NULL;
1335 PDBGFBPL2ENTRY pL2NdLeftmost = NULL;
1336 uint32_t idxL2NdLeftmostParent = DBGF_BP_L2_ENTRY_IDX_END;
1337 uint32_t idxL2Leftmost = DBGF_BP_L2_ENTRY_IDX_END;
1338 int rc = dbgfR33BpInt3BstGetLeftmostEntryFromNode(pUVM, idxL2Right, &idxL2Leftmost ,&pL2NdLeftmost,
1339 &idxL2NdLeftmostParent, &pL2NdLeftmostParent);
1340 AssertRCReturn(rc, rc);
1341
1342 if (pL2NdLeftmostParent)
1343 {
1344 /* Rearrange the leftmost entry's parent pointer. */
1345 dbgfBpL2TblEntryUpdateLeft(pL2NdLeftmostParent, DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(pL2NdLeftmost->u64LeftRightIdxDepthBpHnd2), 0 /*iDepth*/);
1346 dbgfBpL2TblEntryUpdateRight(pL2NdLeftmost, idxL2Right, 0 /*iDepth*/);
1347 }
1348
1349 dbgfBpL2TblEntryUpdateLeft(pL2NdLeftmost, idxL2Left, 0 /*iDepth*/);
1350
1351 /* Update the removed node's parent to point to the new node. */
1352 idxL2ParentNew = idxL2Leftmost;
1353 }
1354
1355 if (pL2NdParent)
1356 {
1357 /* Assign the new L2 index to the proper parent's left or right pointer. */
1358 if (fLeftChild)
1359 dbgfBpL2TblEntryUpdateLeft(pL2NdParent, idxL2ParentNew, 0 /*iDepth*/);
1360 else
1361 dbgfBpL2TblEntryUpdateRight(pL2NdParent, idxL2ParentNew, 0 /*iDepth*/);
1362 }
1363 else
1364 {
1365 /* The root node is removed, set the new root in the L1 table. */
1366 Assert(idxL2ParentNew != DBGF_BP_L2_ENTRY_IDX_END);
1367 idxL2Root = idxL2ParentNew;
1368 ASMAtomicXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], DBGF_BP_INT3_L1_ENTRY_CREATE_L2_IDX(idxL2ParentNew));
1369 }
1370
1371 /* Free the node. */
1372 dbgfR3BpL2TblEntryFree(pUVM, idxL2Nd, pL2Nd);
1373
1374 /*
1375 * Check whether the old/new root is the only node remaining and convert the L1
1376 * table entry to a direct breakpoint handle one in that case.
1377 */
1378 pL2Nd = dbgfR3BpL2GetByIdx(pUVM, idxL2Root);
1379 AssertPtr(pL2Nd);
1380 if ( DBGF_BP_L2_ENTRY_GET_IDX_LEFT(pL2Nd->u64LeftRightIdxDepthBpHnd2) == DBGF_BP_L2_ENTRY_IDX_END
1381 && DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(pL2Nd->u64LeftRightIdxDepthBpHnd2) == DBGF_BP_L2_ENTRY_IDX_END)
1382 {
1383 DBGFBP hBp = DBGF_BP_L2_ENTRY_GET_BP_HND(pL2Nd->u64GCPtrKeyAndBpHnd1, pL2Nd->u64LeftRightIdxDepthBpHnd2);
1384 dbgfR3BpL2TblEntryFree(pUVM, idxL2Root, pL2Nd);
1385 ASMAtomicXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], DBGF_BP_INT3_L1_ENTRY_CREATE_BP_HND(hBp));
1386 }
1387
1388 return VINF_SUCCESS;
1389}
1390
1391
1392/**
1393 * Removes the given breakpoint handle keyed with the GC pointer from the L2 binary search tree
1394 * pointed to by the given L2 root index.
1395 *
1396 * @returns VBox status code.
1397 * @param pUVM The user mode VM handle.
1398 * @param idxL1 The index into the L1 table pointing to the binary search tree.
1399 * @param idxL2Root The L2 table index where the tree root is located.
1400 * @param hBp The breakpoint handle which is to be removed.
1401 * @param GCPtr The GC pointer the breakpoint is keyed with.
1402 */
1403static int dbgfR3BpInt3L2BstRemove(PUVM pUVM, uint32_t idxL1, uint32_t idxL2Root, DBGFBP hBp, RTGCUINTPTR GCPtr)
1404{
1405 GCPtr = DBGF_BP_INT3_L2_KEY_EXTRACT_FROM_ADDR(GCPtr);
1406
1407 int rc = RTSemFastMutexRequest(pUVM->dbgf.s.hMtxBpL2Wr); AssertRC(rc);
1408
1409 uint32_t idxL2Cur = idxL2Root;
1410 uint32_t idxL2Parent = DBGF_BP_L2_ENTRY_IDX_END;
1411 bool fLeftChild = false;
1412 PDBGFBPL2ENTRY pL2EntryParent = NULL;
1413 for (;;)
1414 {
1415 PDBGFBPL2ENTRY pL2Entry = dbgfR3BpL2GetByIdx(pUVM, idxL2Cur);
1416 AssertPtr(pL2Entry);
1417
1418 /* Check whether this node is to be removed. */
1419 RTGCUINTPTR GCPtrL2Entry = DBGF_BP_L2_ENTRY_GET_GCPTR(pL2Entry->u64GCPtrKeyAndBpHnd1);
1420 if (GCPtrL2Entry == GCPtr)
1421 {
1422 Assert(DBGF_BP_L2_ENTRY_GET_BP_HND(pL2Entry->u64GCPtrKeyAndBpHnd1, pL2Entry->u64LeftRightIdxDepthBpHnd2) == hBp); RT_NOREF(hBp);
1423
1424 rc = dbgfR3BpInt3BstNodeRemove(pUVM, idxL1, idxL2Root, idxL2Cur, pL2Entry, idxL2Parent, pL2EntryParent, fLeftChild);
1425 break;
1426 }
1427
1428 pL2EntryParent = pL2Entry;
1429 idxL2Parent = idxL2Cur;
1430
1431 if (GCPtr < GCPtrL2Entry)
1432 {
1433 fLeftChild = true;
1434 idxL2Cur = DBGF_BP_L2_ENTRY_GET_IDX_LEFT(pL2Entry->u64LeftRightIdxDepthBpHnd2);
1435 }
1436 else
1437 {
1438 fLeftChild = false;
1439 idxL2Cur = DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(pL2Entry->u64LeftRightIdxDepthBpHnd2);
1440 }
1441
1442 AssertBreakStmt(idxL2Cur != DBGF_BP_L2_ENTRY_IDX_END, rc = VERR_DBGF_BP_L2_LOOKUP_FAILED);
1443 }
1444
1445 int rc2 = RTSemFastMutexRelease(pUVM->dbgf.s.hMtxBpL2Wr); AssertRC(rc2);
1446
1447 return rc;
1448}
1449
1450
1451/**
1452 * Adds the given int3 breakpoint to the appropriate lookup tables.
1453 *
1454 * @returns VBox status code.
1455 * @param pUVM The user mode VM handle.
1456 * @param hBp The breakpoint handle to add.
1457 * @param pBp The internal breakpoint state.
1458 */
1459static int dbgfR3BpInt3Add(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
1460{
1461 AssertReturn(DBGF_BP_PUB_GET_TYPE(&pBp->Pub) == DBGFBPTYPE_SOFTWARE, VERR_DBGF_BP_IPE_3);
1462
1463 int rc = VINF_SUCCESS;
1464 uint16_t idxL1 = DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(pBp->Pub.u.Sw.GCPtr);
1465 uint8_t cTries = 16;
1466
1467 while (cTries--)
1468 {
1469 uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1]);
1470 if (u32Entry == DBGF_BP_INT3_L1_ENTRY_TYPE_NULL)
1471 {
1472 /*
1473 * No breakpoint assigned so far for this entry, create an entry containing
1474 * the direct breakpoint handle and try to exchange it atomically.
1475 */
1476 u32Entry = DBGF_BP_INT3_L1_ENTRY_CREATE_BP_HND(hBp);
1477 if (ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], u32Entry, DBGF_BP_INT3_L1_ENTRY_TYPE_NULL))
1478 break;
1479 }
1480 else
1481 {
1482 rc = dbgfR3BpInt3L2BstNodeAdd(pUVM, idxL1, hBp, pBp->Pub.u.Sw.GCPtr);
1483 if (rc != VINF_TRY_AGAIN)
1484 break;
1485 }
1486 }
1487
1488 if ( RT_SUCCESS(rc)
1489 && !cTries) /* Too much contention, abort with an error. */
1490 rc = VERR_DBGF_BP_INT3_ADD_TRIES_REACHED;
1491
1492 return rc;
1493}
1494
1495
1496/**
1497 * Adds the given port I/O breakpoint to the appropriate lookup tables.
1498 *
1499 * @returns VBox status code.
1500 * @param pUVM The user mode VM handle.
1501 * @param hBp The breakpoint handle to add.
1502 * @param pBp The internal breakpoint state.
1503 */
1504static int dbgfR3BpPortIoAdd(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
1505{
1506 AssertReturn(DBGF_BP_PUB_GET_TYPE(&pBp->Pub) == DBGFBPTYPE_PORT_IO, VERR_DBGF_BP_IPE_3);
1507
1508 uint16_t uPortExcl = pBp->Pub.u.PortIo.uPort + pBp->Pub.u.PortIo.cPorts;
1509 uint32_t u32Entry = DBGF_BP_INT3_L1_ENTRY_CREATE_BP_HND(hBp);
1510 for (uint16_t idxPort = pBp->Pub.u.PortIo.uPort; idxPort < uPortExcl; idxPort++)
1511 {
1512 bool fXchg = ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocPortIoR3[idxPort], u32Entry, DBGF_BP_INT3_L1_ENTRY_TYPE_NULL);
1513 if (!fXchg)
1514 {
1515 /* Something raced us, so roll back the registrations done so far, walking backwards. */
1516 while (idxPort > pBp->Pub.u.PortIo.uPort)
1517 {
1518 fXchg = ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocPortIoR3[--idxPort], DBGF_BP_INT3_L1_ENTRY_TYPE_NULL, u32Entry);
1519 Assert(fXchg); RT_NOREF(fXchg);
1520 }
1521
1522 return VERR_DBGF_BP_INT3_ADD_TRIES_REACHED; /** @todo New status code */
1523 }
1524 }
1525
1526 return VINF_SUCCESS;
1527}
1528
1529
1530/**
1531 * Get a breakpoint given by address.
1532 *
1533 * @returns The breakpoint handle on success or NIL_DBGFBP if not found.
1534 * @param pUVM The user mode VM handle.
1535 * @param enmType The breakpoint type.
1536 * @param GCPtr The breakpoint address.
1537 * @param ppBp Where to store the pointer to the internal breakpoint state on success, optional.
1538 */
1539static DBGFBP dbgfR3BpGetByAddr(PUVM pUVM, DBGFBPTYPE enmType, RTGCUINTPTR GCPtr, PDBGFBPINT *ppBp)
1540{
1541 DBGFBP hBp = NIL_DBGFBP;
1542
1543 switch (enmType)
1544 {
1545 case DBGFBPTYPE_REG:
1546 {
1547 PVM pVM = pUVM->pVM;
1548 VM_ASSERT_VALID_EXT_RETURN(pVM, NIL_DBGFBP);
1549
1550 for (uint32_t i = 0; i < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); i++)
1551 {
1552 PDBGFBPHW pHwBp = &pVM->dbgf.s.aHwBreakpoints[i];
1553
1554 AssertCompileSize(DBGFBP, sizeof(uint32_t));
1555 DBGFBP hBpTmp = ASMAtomicReadU32(&pHwBp->hBp);
1556 if ( pHwBp->GCPtr == GCPtr
1557 && hBpTmp != NIL_DBGFBP)
1558 {
1559 hBp = hBpTmp;
1560 break;
1561 }
1562 }
1563 break;
1564 }
1565
1566 case DBGFBPTYPE_SOFTWARE:
1567 {
1568 const uint16_t idxL1 = DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(GCPtr);
1569 const uint32_t u32L1Entry = ASMAtomicReadU32(&pUVM->dbgf.s.CTX_SUFF(paBpLocL1)[idxL1]);
1570
1571 if (u32L1Entry != DBGF_BP_INT3_L1_ENTRY_TYPE_NULL)
1572 {
1573 uint8_t u8Type = DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32L1Entry);
1574 if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND)
1575 hBp = DBGF_BP_INT3_L1_ENTRY_GET_BP_HND(u32L1Entry);
1576 else if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX)
1577 {
1578 RTGCUINTPTR GCPtrKey = DBGF_BP_INT3_L2_KEY_EXTRACT_FROM_ADDR(GCPtr);
1579 PDBGFBPL2ENTRY pL2Nd = dbgfR3BpL2GetByIdx(pUVM, DBGF_BP_INT3_L1_ENTRY_GET_L2_IDX(u32L1Entry));
1580
1581 for (;;)
1582 {
1583 AssertPtr(pL2Nd);
1584
1585 RTGCUINTPTR GCPtrL2Entry = DBGF_BP_L2_ENTRY_GET_GCPTR(pL2Nd->u64GCPtrKeyAndBpHnd1);
1586 if (GCPtrKey == GCPtrL2Entry)
1587 {
1588 hBp = DBGF_BP_L2_ENTRY_GET_BP_HND(pL2Nd->u64GCPtrKeyAndBpHnd1, pL2Nd->u64LeftRightIdxDepthBpHnd2);
1589 break;
1590 }
1591
1592 /* Not found, get to the next level. */
1593 uint32_t idxL2Next = GCPtrKey < GCPtrL2Entry
1594 ? DBGF_BP_L2_ENTRY_GET_IDX_LEFT(pL2Nd->u64LeftRightIdxDepthBpHnd2)
1595 : DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(pL2Nd->u64LeftRightIdxDepthBpHnd2);
1596 /* Address not found if the entry denotes the end. */
1597 if (idxL2Next == DBGF_BP_L2_ENTRY_IDX_END)
1598 break;
1599
1600 pL2Nd = dbgfR3BpL2GetByIdx(pUVM, idxL2Next);
1601 }
1602 }
1603 }
1604 break;
1605 }
1606
1607 default:
1608 AssertMsgFailed(("enmType=%d\n", enmType));
1609 break;
1610 }
1611
1612 if ( hBp != NIL_DBGFBP
1613 && ppBp)
1614 *ppBp = dbgfR3BpGetByHnd(pUVM, hBp);
1615 return hBp;
1616}
1617
1618
1619/**
1620 * Get a port I/O breakpoint given by the range.
1621 *
1622 * @returns The breakpoint handle on success or NIL_DBGFBP if not found.
1623 * @param pUVM The user mode VM handle.
1624 * @param uPort First port in the range.
1625 * @param cPorts Number of ports in the range.
1626 * @param ppBp Where to store the pointer to the internal breakpoint state on success, optional.
1627 */
1628static DBGFBP dbgfR3BpPortIoGetByRange(PUVM pUVM, RTIOPORT uPort, RTIOPORT cPorts, PDBGFBPINT *ppBp)
1629{
1630 DBGFBP hBp = NIL_DBGFBP;
1631
1632 for (RTIOPORT idxPort = uPort; idxPort < uPort + cPorts; idxPort++)
1633 {
1634 const uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.CTX_SUFF(paBpLocPortIo)[idxPort]);
1635 if (u32Entry != DBGF_BP_INT3_L1_ENTRY_TYPE_NULL)
1636 {
1637 hBp = DBGF_BP_INT3_L1_ENTRY_GET_BP_HND(u32Entry);
1638 break;
1639 }
1640 }
1641
1642 if ( hBp != NIL_DBGFBP
1643 && ppBp)
1644 *ppBp = dbgfR3BpGetByHnd(pUVM, hBp);
1645 return hBp;
1646}
1647
1648
1649/**
1650 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
1651 */
1652static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpInt3RemoveEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
1653{
1654 DBGFBP hBp = (DBGFBP)(uintptr_t)pvUser;
1655
1656 VMCPU_ASSERT_EMT(pVCpu);
1657 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1658
1659 PUVM pUVM = pVM->pUVM;
1660 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
1661 AssertPtrReturn(pBp, VERR_DBGF_BP_IPE_8);
1662
1663 int rc = VINF_SUCCESS;
1664 if (pVCpu->idCpu == 0)
1665 {
1666 uint16_t idxL1 = DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(pBp->Pub.u.Sw.GCPtr);
1667 uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1]);
1668 AssertReturn(u32Entry != DBGF_BP_INT3_L1_ENTRY_TYPE_NULL, VERR_DBGF_BP_IPE_6);
1669
1670 uint8_t u8Type = DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32Entry);
1671 if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND)
1672 {
1673 /* Single breakpoint, just exchange atomically with the null value. */
1674 if (!ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], DBGF_BP_INT3_L1_ENTRY_TYPE_NULL, u32Entry))
1675 {
1676 /*
1677 * A breakpoint addition must have raced us converting the L1 entry to an L2 index type, re-read
1678 * and remove the node from the created binary search tree.
1679 *
1680 * This works because after the entry was converted to an L2 index it can only be converted back
1681 * to a direct handle by removing one or more nodes which always goes through the fast mutex
1682 * protecting the L2 table. Likewise adding a new breakpoint requires grabbing the mutex as well
1683 * so there is serialization here and the node can be removed safely without having to worry about
1684 * concurrent tree modifications.
1685 */
1686 u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1]);
1687 AssertReturn(DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32Entry) == DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX, VERR_DBGF_BP_IPE_9);
1688
1689 rc = dbgfR3BpInt3L2BstRemove(pUVM, idxL1, DBGF_BP_INT3_L1_ENTRY_GET_L2_IDX(u32Entry),
1690 hBp, pBp->Pub.u.Sw.GCPtr);
1691 }
1692 }
1693 else if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX)
1694 rc = dbgfR3BpInt3L2BstRemove(pUVM, idxL1, DBGF_BP_INT3_L1_ENTRY_GET_L2_IDX(u32Entry),
1695 hBp, pBp->Pub.u.Sw.GCPtr);
1696 }
1697
1698 return rc;
1699}
1700
1701
1702/**
1703 * Removes the given int3 breakpoint from all lookup tables.
1704 *
1705 * @returns VBox status code.
1706 * @param pUVM The user mode VM handle.
1707 * @param hBp The breakpoint handle to remove.
1708 * @param pBp The internal breakpoint state.
1709 */
1710static int dbgfR3BpInt3Remove(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
1711{
1712 AssertReturn(DBGF_BP_PUB_GET_TYPE(&pBp->Pub) == DBGFBPTYPE_SOFTWARE, VERR_DBGF_BP_IPE_3);
1713
1714 /*
1715 * This has to be done by an EMT rendezvous in order to not have an EMT traversing
1716 * any L2 trees while it is being removed.
1717 */
1718 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpInt3RemoveEmtWorker, (void *)(uintptr_t)hBp);
1719}
1720
1721
1722/**
1723 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
1724 */
1725static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpPortIoRemoveEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
1726{
1727 DBGFBP hBp = (DBGFBP)(uintptr_t)pvUser;
1728
1729 VMCPU_ASSERT_EMT(pVCpu);
1730 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1731
1732 PUVM pUVM = pVM->pUVM;
1733 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
1734 AssertPtrReturn(pBp, VERR_DBGF_BP_IPE_8);
1735
1736 int rc = VINF_SUCCESS;
1737 if (pVCpu->idCpu == 0)
1738 {
1739 /*
1740 * Remove the whole range, there shouldn't be any other breakpoint configured for this range as this is not
1741 * allowed right now.
1742 */
1743 uint16_t uPortExcl = pBp->Pub.u.PortIo.uPort + pBp->Pub.u.PortIo.cPorts;
1744 for (uint16_t idxPort = pBp->Pub.u.PortIo.uPort; idxPort < uPortExcl; idxPort++)
1745 {
1746 uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocPortIoR3[idxPort]);
1747 AssertReturn(u32Entry != DBGF_BP_INT3_L1_ENTRY_TYPE_NULL, VERR_DBGF_BP_IPE_6);
1748
1749 uint8_t u8Type = DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32Entry);
1750 AssertReturn(u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND, VERR_DBGF_BP_IPE_7);
1751
1752 bool fXchg = ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocPortIoR3[idxPort], DBGF_BP_INT3_L1_ENTRY_TYPE_NULL, u32Entry);
1753 Assert(fXchg); RT_NOREF(fXchg);
1754 }
1755 }
1756
1757 return rc;
1758}
1759
1760
1761/**
1762 * Removes the given port I/O breakpoint from all lookup tables.
1763 *
1764 * @returns VBox status code.
1765 * @param pUVM The user mode VM handle.
1766 * @param hBp The breakpoint handle to remove.
1767 * @param pBp The internal breakpoint state.
1768 */
1769static int dbgfR3BpPortIoRemove(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
1770{
1771 AssertReturn(DBGF_BP_PUB_GET_TYPE(&pBp->Pub) == DBGFBPTYPE_PORT_IO, VERR_DBGF_BP_IPE_3);
1772
1773 /*
1774 * This has to be done by an EMT rendezvous in order to not have an EMT accessing
1775 * the breakpoint while it is removed.
1776 */
1777 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpPortIoRemoveEmtWorker, (void *)(uintptr_t)hBp);
1778}
1779
1780
1781/**
1782 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
1783 */
1784static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpRegRecalcOnCpu(PVM pVM, PVMCPU pVCpu, void *pvUser)
1785{
1786 RT_NOREF(pvUser);
1787
1788#if defined(VBOX_VMM_TARGET_ARMV8)
1789 RT_NOREF(pVM, pVCpu);
1790 AssertReleaseFailed();
1791 return VERR_NOT_IMPLEMENTED;
1792#else
1793 /*
1794 * CPU 0 updates the enabled hardware breakpoint counts.
1795 */
1796 if (pVCpu->idCpu == 0)
1797 {
1798 pVM->dbgf.s.cEnabledHwBreakpoints = 0;
1799 pVM->dbgf.s.cEnabledHwIoBreakpoints = 0;
1800
1801 for (uint32_t iBp = 0; iBp < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); iBp++)
1802 {
1803 if (pVM->dbgf.s.aHwBreakpoints[iBp].fEnabled)
1804 {
1805 pVM->dbgf.s.cEnabledHwBreakpoints += 1;
1806 pVM->dbgf.s.cEnabledHwIoBreakpoints += pVM->dbgf.s.aHwBreakpoints[iBp].fType == X86_DR7_RW_IO;
1807 }
1808 }
1809 }
1810
1811 return CPUMRecalcHyperDRx(pVCpu, UINT8_MAX);
1812#endif
1813}
1814
1815
1816/**
1817 * Arms the given breakpoint.
1818 *
1819 * @returns VBox status code.
1820 * @param pUVM The user mode VM handle.
1821 * @param hBp The breakpoint handle to arm.
1822 * @param pBp The internal breakpoint state pointer for the handle.
1823 *
1824 * @thread Any thread.
1825 */
1826static int dbgfR3BpArm(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
1827{
1828 int rc;
1829 PVM pVM = pUVM->pVM;
1830
1831 Assert(!DBGF_BP_PUB_IS_ENABLED(&pBp->Pub));
1832 switch (DBGF_BP_PUB_GET_TYPE(&pBp->Pub))
1833 {
1834 case DBGFBPTYPE_REG:
1835 {
1836 Assert(pBp->Pub.u.Reg.iReg < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints));
1837 PDBGFBPHW pBpHw = &pVM->dbgf.s.aHwBreakpoints[pBp->Pub.u.Reg.iReg];
1838 Assert(pBpHw->hBp == hBp); RT_NOREF(hBp);
1839
1840 dbgfR3BpSetEnabled(pBp, true /*fEnabled*/);
1841 ASMAtomicWriteBool(&pBpHw->fEnabled, true);
1842 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpRegRecalcOnCpu, NULL);
1843 if (RT_FAILURE(rc))
1844 {
1845 ASMAtomicWriteBool(&pBpHw->fEnabled, false);
1846 dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
1847 }
1848 break;
1849 }
1850 case DBGFBPTYPE_SOFTWARE:
1851 {
1852 dbgfR3BpSetEnabled(pBp, true /*fEnabled*/);
1853
1854 /** @todo When we enable the first software breakpoint we should do this in an EMT rendezvous
1855 * as the VMX code intercepts #BP only when at least one int3 breakpoint is enabled.
1856 * A racing vCPU might trigger it and forward it to the guest causing panics/crashes/havoc. */
1857#ifdef VBOX_VMM_TARGET_ARMV8
1858 /*
1859 * Save original instruction and replace with brk
1860 */
1861 rc = PGMPhysSimpleReadGCPhys(pVM, &pBp->Pub.u.Sw.Arch.armv8.u32Org, pBp->Pub.u.Sw.PhysAddr, sizeof(pBp->Pub.u.Sw.Arch.armv8.u32Org));
1862 if (RT_SUCCESS(rc))
1863 {
1864 static const uint32_t s_u32Brk = Armv8A64MkInstrBrk(0xc0de);
1865 rc = PGMPhysSimpleWriteGCPhys(pVM, pBp->Pub.u.Sw.PhysAddr, &s_u32Brk, sizeof(s_u32Brk));
1866 }
1867#else
1868 /*
1869 * Save current byte and write the int3 instruction byte.
1870 */
1871 rc = PGMPhysSimpleReadGCPhys(pVM, &pBp->Pub.u.Sw.Arch.x86.bOrg, pBp->Pub.u.Sw.PhysAddr, sizeof(pBp->Pub.u.Sw.Arch.x86.bOrg));
1872 if (RT_SUCCESS(rc))
1873 {
1874 static const uint8_t s_bInt3 = 0xcc;
1875 rc = PGMPhysSimpleWriteGCPhys(pVM, pBp->Pub.u.Sw.PhysAddr, &s_bInt3, sizeof(s_bInt3));
1876 }
1877#endif
1878 if (RT_SUCCESS(rc))
1879 {
1880 ASMAtomicIncU32(&pVM->dbgf.s.cEnabledSwBreakpoints);
1881 Log(("DBGF: Set breakpoint at %RGv (Phys %RGp)\n", pBp->Pub.u.Sw.GCPtr, pBp->Pub.u.Sw.PhysAddr));
1882 }
1883
1884 if (RT_FAILURE(rc))
1885 dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
1886
1887 break;
1888 }
1889 case DBGFBPTYPE_PORT_IO:
1890 {
1891 dbgfR3BpSetEnabled(pBp, true /*fEnabled*/);
1892 ASMAtomicIncU32(&pUVM->dbgf.s.cPortIoBps);
1893 IOMR3NotifyBreakpointCountChange(pVM, true /*fPortIo*/, false /*fMmio*/);
1894 rc = VINF_SUCCESS;
1895 break;
1896 }
1897 case DBGFBPTYPE_MMIO:
1898 rc = VERR_NOT_IMPLEMENTED;
1899 break;
1900 default:
1901 AssertMsgFailedReturn(("Invalid breakpoint type %d\n", DBGF_BP_PUB_GET_TYPE(&pBp->Pub)),
1902 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1903 }
1904
1905 return rc;
1906}
1907
1908
1909/**
1910 * Disarms the given breakpoint.
1911 *
1912 * @returns VBox status code.
1913 * @param pUVM The user mode VM handle.
1914 * @param hBp The breakpoint handle to disarm.
1915 * @param pBp The internal breakpoint state pointer for the handle.
1916 *
1917 * @thread Any thread.
1918 */
1919static int dbgfR3BpDisarm(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
1920{
1921 int rc;
1922 PVM pVM = pUVM->pVM;
1923
1924 Assert(DBGF_BP_PUB_IS_ENABLED(&pBp->Pub));
1925 switch (DBGF_BP_PUB_GET_TYPE(&pBp->Pub))
1926 {
1927 case DBGFBPTYPE_REG:
1928 {
1929 Assert(pBp->Pub.u.Reg.iReg < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints));
1930 PDBGFBPHW pBpHw = &pVM->dbgf.s.aHwBreakpoints[pBp->Pub.u.Reg.iReg];
1931 Assert(pBpHw->hBp == hBp); RT_NOREF(hBp);
1932
1933 dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
1934 ASMAtomicWriteBool(&pBpHw->fEnabled, false);
1935 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpRegRecalcOnCpu, NULL);
1936 if (RT_FAILURE(rc))
1937 {
1938 ASMAtomicWriteBool(&pBpHw->fEnabled, true);
1939 dbgfR3BpSetEnabled(pBp, true /*fEnabled*/);
1940 }
1941 break;
1942 }
1943 case DBGFBPTYPE_SOFTWARE:
1944 {
1945 /*
1946 * Check that the current byte is the int3 instruction, and restore the original one.
1947 * We currently ignore invalid bytes.
1948 */
1949#ifdef VBOX_VMM_TARGET_ARMV8
1950 uint32_t u32Current = 0;
1951 rc = PGMPhysSimpleReadGCPhys(pVM, &u32Current, pBp->Pub.u.Sw.PhysAddr, sizeof(u32Current));
1952 if ( RT_SUCCESS(rc)
1953 && u32Current == Armv8A64MkInstrBrk(0xc0de))
1954 rc = PGMPhysSimpleWriteGCPhys(pVM, pBp->Pub.u.Sw.PhysAddr, &pBp->Pub.u.Sw.Arch.armv8.u32Org, sizeof(pBp->Pub.u.Sw.Arch.armv8.u32Org));
1955#else
1956 uint8_t bCurrent = 0;
1957 rc = PGMPhysSimpleReadGCPhys(pVM, &bCurrent, pBp->Pub.u.Sw.PhysAddr, sizeof(bCurrent));
1958 if ( RT_SUCCESS(rc)
1959 && bCurrent == 0xcc)
1960 rc = PGMPhysSimpleWriteGCPhys(pVM, pBp->Pub.u.Sw.PhysAddr, &pBp->Pub.u.Sw.Arch.x86.bOrg, sizeof(pBp->Pub.u.Sw.Arch.x86.bOrg));
1961#endif
1962
1963 if (RT_SUCCESS(rc))
1964 {
1965 ASMAtomicDecU32(&pVM->dbgf.s.cEnabledSwBreakpoints);
1966 dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
1967 Log(("DBGF: Removed breakpoint at %RGv (Phys %RGp)\n", pBp->Pub.u.Sw.GCPtr, pBp->Pub.u.Sw.PhysAddr));
1968 }
1969 break;
1970 }
1971 case DBGFBPTYPE_PORT_IO:
1972 {
1973 dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
1974 uint32_t cPortIoBps = ASMAtomicDecU32(&pUVM->dbgf.s.cPortIoBps);
1975 if (!cPortIoBps) /** @todo Need to gather all EMTs to not have a stray EMT accessing BP data when it might go away. */
1976 IOMR3NotifyBreakpointCountChange(pVM, false /*fPortIo*/, false /*fMmio*/);
1977 rc = VINF_SUCCESS;
1978 break;
1979 }
1980 case DBGFBPTYPE_MMIO:
1981 rc = VERR_NOT_IMPLEMENTED;
1982 break;
1983 default:
1984 AssertMsgFailedReturn(("Invalid breakpoint type %d\n", DBGF_BP_PUB_GET_TYPE(&pBp->Pub)),
1985 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1986 }
1987
1988 return rc;
1989}
1990
1991
1992/**
1993 * Worker for DBGFR3BpHit() differentiating on the breakpoint type.
1994 *
1995 * @returns Strict VBox status code.
1996 * @param pVM The cross context VM structure.
1997 * @param pVCpu The vCPU the breakpoint event happened on.
1998 * @param hBp The breakpoint handle.
1999 * @param pBp The breakpoint data.
2000 * @param pBpOwner The breakpoint owner data.
2001 *
2002 * @thread EMT
2003 */
2004static VBOXSTRICTRC dbgfR3BpHit(PVM pVM, PVMCPU pVCpu, DBGFBP hBp, PDBGFBPINT pBp, PCDBGFBPOWNERINT pBpOwner)
2005{
2006 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2007
2008 switch (DBGF_BP_PUB_GET_TYPE(&pBp->Pub))
2009 {
2010 case DBGFBPTYPE_REG:
2011 case DBGFBPTYPE_SOFTWARE:
2012 {
2013 if (DBGF_BP_PUB_IS_EXEC_BEFORE(&pBp->Pub))
2014 rcStrict = pBpOwner->pfnBpHitR3(pVM, pVCpu->idCpu, pBp->pvUserR3, hBp, &pBp->Pub, DBGF_BP_F_HIT_EXEC_BEFORE);
2015 if (rcStrict == VINF_SUCCESS)
2016 {
2017 uint8_t abInstr[DBGF_BP_INSN_MAX];
2018 RTGCPTR const GCPtrInstr = CPUMGetGuestFlatPC(pVCpu);
2019 rcStrict = PGMPhysSimpleReadGCPtr(pVCpu, &abInstr[0], GCPtrInstr, sizeof(abInstr));
2020 if (rcStrict == VINF_SUCCESS)
2021 {
2022#ifdef VBOX_VMM_TARGET_ARMV8
2023 AssertFailed();
2024 rcStrict = VERR_NOT_IMPLEMENTED;
2025#else
2026 /* Replace the int3 with the original instruction byte. */
2027 abInstr[0] = pBp->Pub.u.Sw.Arch.x86.bOrg;
2028 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, GCPtrInstr, &abInstr[0], sizeof(abInstr));
2029#endif
2030 if ( rcStrict == VINF_SUCCESS
2031 && DBGF_BP_PUB_IS_EXEC_AFTER(&pBp->Pub))
2032 {
2033 VBOXSTRICTRC rcStrict2 = pBpOwner->pfnBpHitR3(pVM, pVCpu->idCpu, pBp->pvUserR3, hBp, &pBp->Pub,
2034 DBGF_BP_F_HIT_EXEC_AFTER);
2035 if (rcStrict2 == VINF_SUCCESS)
2036 return rcStrict;
2037 if (rcStrict2 != VINF_DBGF_BP_HALT)
2038 return VERR_DBGF_BP_OWNER_CALLBACK_WRONG_STATUS;
2039 }
2040 else
2041 return rcStrict;
2042 }
2043 }
2044 break;
2045 }
2046 case DBGFBPTYPE_PORT_IO:
2047 case DBGFBPTYPE_MMIO:
2048 {
2049 pVCpu->dbgf.s.fBpIoActive = false;
2050 rcStrict = pBpOwner->pfnBpIoHitR3(pVM, pVCpu->idCpu, pBp->pvUserR3, hBp, &pBp->Pub,
2051 pVCpu->dbgf.s.fBpIoBefore
2052 ? DBGF_BP_F_HIT_EXEC_BEFORE
2053 : DBGF_BP_F_HIT_EXEC_AFTER,
2054 pVCpu->dbgf.s.fBpIoAccess, pVCpu->dbgf.s.uBpIoAddress,
2055 pVCpu->dbgf.s.uBpIoValue);
2056
2057 break;
2058 }
2059 default:
2060 AssertMsgFailedReturn(("Invalid breakpoint type %d\n", DBGF_BP_PUB_GET_TYPE(&pBp->Pub)),
2061 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
2062 }
2063
2064 return rcStrict;
2065}
2066
2067
2068/**
2069 * Creates a new breakpoint owner returning a handle which can be used when setting breakpoints.
2070 *
2071 * @returns VBox status code.
2072 * @retval VERR_DBGF_BP_OWNER_NO_MORE_HANDLES if there are no more free owner handles available.
2073 * @param pUVM The user mode VM handle.
2074 * @param pfnBpHit The R3 callback which is called when a breakpoint with the owner handle is hit.
2075 * @param pfnBpIoHit The R3 callback which is called when an I/O breakpoint with the owner handle is hit.
2076 * @param phBpOwner Where to store the owner handle on success.
2077 *
2078 * @thread Any thread but might defer work to EMT on the first call.
2079 */
2080VMMR3DECL(int) DBGFR3BpOwnerCreate(PUVM pUVM, PFNDBGFBPHIT pfnBpHit, PFNDBGFBPIOHIT pfnBpIoHit, PDBGFBPOWNER phBpOwner)
2081{
2082 /*
2083 * Validate the input.
2084 */
2085 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2086 AssertReturn(pfnBpHit || pfnBpIoHit, VERR_INVALID_PARAMETER);
2087 AssertPtrReturn(phBpOwner, VERR_INVALID_POINTER);
2088
2089 int rc = dbgfR3BpOwnerEnsureInit(pUVM);
2090 AssertRCReturn(rc, rc);
2091
2092 /* Try to find a free entry in the owner table. */
2093 for (;;)
2094 {
2095 /* Scan the associated bitmap for a free entry. */
2096 int32_t iClr = ASMBitFirstClear(pUVM->dbgf.s.pbmBpOwnersAllocR3, DBGF_BP_OWNER_COUNT_MAX);
2097 if (iClr != -1)
2098 {
2099 /*
2100 * Try to allocate, we could get raced here as well. In that case
2101 * we try again.
2102 */
2103 if (!ASMAtomicBitTestAndSet(pUVM->dbgf.s.pbmBpOwnersAllocR3, iClr))
2104 {
2105 PDBGFBPOWNERINT pBpOwner = &pUVM->dbgf.s.paBpOwnersR3[iClr];
2106 pBpOwner->cRefs = 1;
2107 pBpOwner->pfnBpHitR3 = pfnBpHit;
2108 pBpOwner->pfnBpIoHitR3 = pfnBpIoHit;
2109
2110 *phBpOwner = (DBGFBPOWNER)iClr;
2111 return VINF_SUCCESS;
2112 }
2113 /* else Retry with another spot. */
2114 }
2115 else /* no free entry in bitmap, out of entries. */
2116 {
2117 rc = VERR_DBGF_BP_OWNER_NO_MORE_HANDLES;
2118 break;
2119 }
2120 }
2121
2122 return rc;
2123}
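
/*
 * Minimal usage sketch. The callback parameter list below is inferred from the
 * pfnBpHitR3 invocations in dbgfR3BpHit() above; the authoritative FNDBGFBPHIT /
 * FNDBGFBPIOHIT typedefs live in VBox/vmm/dbgf.h and should be checked before
 * use. The callback name and the log message are made up for the example.
 *
 *     static DECLCALLBACK(VBOXSTRICTRC) myBpHit(PVM pVM, VMCPUID idCpu, void *pvUserBp,
 *                                               DBGFBP hBp, PCDBGFBPPUB pBpPub, uint16_t fFlags)
 *     {
 *         RT_NOREF(pVM, pvUserBp, pBpPub, fFlags);
 *         LogRel(("Breakpoint %#x hit on vCPU %u\n", hBp, idCpu));
 *         return VINF_SUCCESS;            // continue execution; VINF_DBGF_BP_HALT drops into the debugger instead
 *     }
 *
 *     DBGFBPOWNER hBpOwner = NIL_DBGFBPOWNER;
 *     int rc = DBGFR3BpOwnerCreate(pUVM, myBpHit, NULL, &hBpOwner);    // no I/O callback in this example
 */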
2124
2125
2126/**
2127 * Destroys the owner identified by the given handle.
2128 *
2129 * @returns VBox status code.
2130 * @retval VERR_INVALID_HANDLE if the given owner handle is invalid.
2131 * @retval VERR_DBGF_OWNER_BUSY if there are still breakpoints set with the given owner handle.
2132 * @param pUVM The user mode VM handle.
2133 * @param hBpOwner The breakpoint owner handle to destroy.
2134 */
2135VMMR3DECL(int) DBGFR3BpOwnerDestroy(PUVM pUVM, DBGFBPOWNER hBpOwner)
2136{
2137 /*
2138 * Validate the input.
2139 */
2140 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2141 AssertReturn(hBpOwner != NIL_DBGFBPOWNER, VERR_INVALID_HANDLE);
2142
2143 int rc = dbgfR3BpOwnerEnsureInit(pUVM);
2144 AssertRCReturn(rc, rc);
2145
2146 PDBGFBPOWNERINT pBpOwner = dbgfR3BpOwnerGetByHnd(pUVM, hBpOwner);
2147 if (RT_LIKELY(pBpOwner))
2148 {
2149 if (ASMAtomicReadU32(&pBpOwner->cRefs) == 1)
2150 {
2151 pBpOwner->pfnBpHitR3 = NULL;
2152 ASMAtomicDecU32(&pBpOwner->cRefs);
2153 ASMAtomicBitClear(pUVM->dbgf.s.pbmBpOwnersAllocR3, hBpOwner);
2154 }
2155 else
2156 rc = VERR_DBGF_OWNER_BUSY;
2157 }
2158 else
2159 rc = VERR_INVALID_HANDLE;
2160
2161 return rc;
2162}
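
/*
 * Tear-down sketch matching the example above; the ordering is the important
 * part, as the owner can only go away after its breakpoints are gone:
 *
 *     rc = DBGFR3BpClear(pUVM, hBp);                   // clear every breakpoint using hBpOwner first
 *     rc = DBGFR3BpOwnerDestroy(pUVM, hBpOwner);       // otherwise this fails with VERR_DBGF_OWNER_BUSY
 */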
2163
2164
2165/**
2166 * Sets a breakpoint (int 3 based).
2167 *
2168 * @returns VBox status code.
2169 * @param pUVM The user mode VM handle.
2170 * @param idSrcCpu The ID of the virtual CPU used for the
2171 * breakpoint address resolution.
2172 * @param pAddress The address of the breakpoint.
2173 * @param iHitTrigger The hit count at which the breakpoint starts triggering.
2174 * Use 0 (or 1) if it's gonna trigger at once.
2175 * @param iHitDisable The hit count which disables the breakpoint.
2176 * Use ~(uint64_t)0 if it's never gonna be disabled.
2177 * @param phBp Where to store the breakpoint handle on success.
2178 *
2179 * @thread Any thread.
2180 */
2181VMMR3DECL(int) DBGFR3BpSetInt3(PUVM pUVM, VMCPUID idSrcCpu, PCDBGFADDRESS pAddress,
2182 uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
2183{
2184 return DBGFR3BpSetInt3Ex(pUVM, NIL_DBGFBPOWNER, NULL /*pvUser*/, idSrcCpu, pAddress,
2185 DBGF_BP_F_DEFAULT, iHitTrigger, iHitDisable, phBp);
2186}
2187
2188
2189/**
2190 * Sets a breakpoint (int 3 based) - extended version.
2191 *
2192 * @returns VBox status code.
2193 * @param pUVM The user mode VM handle.
2194 * @param hOwner The owner handle, use NIL_DBGFBPOWNER if no special owner attached.
2195 * @param pvUser Opaque user data to pass in the owner callback.
2196 * @param idSrcCpu The ID of the virtual CPU used for the
2197 * breakpoint address resolution.
2198 * @param pAddress The address of the breakpoint.
2199 * @param fFlags Combination of DBGF_BP_F_XXX.
2200 * @param iHitTrigger The hit count at which the breakpoint starts triggering.
2201 * Use 0 (or 1) if it's gonna trigger at once.
2202 * @param iHitDisable The hit count which disables the breakpoint.
2203 * Use ~(uint64_t)0 if it's never gonna be disabled.
2204 * @param phBp Where to store the breakpoint handle on success.
2205 *
2206 * @thread Any thread.
2207 */
2208VMMR3DECL(int) DBGFR3BpSetInt3Ex(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser,
2209 VMCPUID idSrcCpu, PCDBGFADDRESS pAddress, uint16_t fFlags,
2210 uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
2211{
2212 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2213 AssertReturn(hOwner != NIL_DBGFBPOWNER || pvUser == NULL, VERR_INVALID_PARAMETER);
2214 AssertReturn(DBGFR3AddrIsValid(pUVM, pAddress), VERR_INVALID_PARAMETER);
2215 AssertReturn(iHitTrigger <= iHitDisable, VERR_INVALID_PARAMETER);
2216 AssertPtrReturn(phBp, VERR_INVALID_POINTER);
2217
2218 int rc = dbgfR3BpEnsureInit(pUVM);
2219 AssertRCReturn(rc, rc);
2220
2221 /*
2222 * Translate & save the breakpoint address into a guest-physical address.
2223 */
2224 RTGCPHYS GCPhysBpAddr = NIL_RTGCPHYS;
2225 rc = DBGFR3AddrToPhys(pUVM, idSrcCpu, pAddress, &GCPhysBpAddr);
2226 if (RT_SUCCESS(rc))
2227 {
2228 /*
2229 * The physical address from DBGFR3AddrToPhys() is the start of the page,
2230 * we need the exact byte offset into the page while writing to it in dbgfR3BpArm().
2231 */
2232 GCPhysBpAddr |= (pAddress->FlatPtr & X86_PAGE_OFFSET_MASK);
2233
2234 PDBGFBPINT pBp = NULL;
2235 DBGFBP hBp = dbgfR3BpGetByAddr(pUVM, DBGFBPTYPE_SOFTWARE, pAddress->FlatPtr, &pBp);
2236 if ( hBp != NIL_DBGFBP
2237 && pBp->Pub.u.Sw.PhysAddr == GCPhysBpAddr)
2238 {
2239 rc = VINF_SUCCESS;
2240 if ( !DBGF_BP_PUB_IS_ENABLED(&pBp->Pub)
2241 && (fFlags & DBGF_BP_F_ENABLED))
2242 rc = dbgfR3BpArm(pUVM, hBp, pBp);
2243 if (RT_SUCCESS(rc))
2244 {
2245 rc = VINF_DBGF_BP_ALREADY_EXIST;
2246 *phBp = hBp;
2247 }
2248 return rc;
2249 }
2250
2251 rc = dbgfR3BpAlloc(pUVM, hOwner, pvUser, DBGFBPTYPE_SOFTWARE, fFlags, iHitTrigger, iHitDisable, &hBp, &pBp);
2252 if (RT_SUCCESS(rc))
2253 {
2254 pBp->Pub.u.Sw.PhysAddr = GCPhysBpAddr;
2255 pBp->Pub.u.Sw.GCPtr = pAddress->FlatPtr;
2256
2257 /* Add the breakpoint to the lookup tables. */
2258 rc = dbgfR3BpInt3Add(pUVM, hBp, pBp);
2259 if (RT_SUCCESS(rc))
2260 {
2261 /* Enable the breakpoint if requested. */
2262 if (fFlags & DBGF_BP_F_ENABLED)
2263 rc = dbgfR3BpArm(pUVM, hBp, pBp);
2264 if (RT_SUCCESS(rc))
2265 {
2266 *phBp = hBp;
2267 return VINF_SUCCESS;
2268 }
2269
2270 int rc2 = dbgfR3BpInt3Remove(pUVM, hBp, pBp); AssertRC(rc2);
2271 }
2272
2273 dbgfR3BpFree(pUVM, hBp, pBp);
2274 }
2275 }
2276
2277 return rc;
2278}
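
/*
 * Usage sketch, assuming pUVM and the hBpOwner from the owner example are in
 * scope. The flat address is made up, DBGFR3AddrFromFlat() is assumed to be the
 * usual DBGFADDRESS helper (see DBGFR3Addr.cpp), and the flag combination is kept
 * minimal -- consult the DBGF_BP_F_XXX defines in VBox/vmm/dbgf.h for the rest.
 *
 *     DBGFADDRESS Addr;
 *     DBGFR3AddrFromFlat(pUVM, &Addr, 0xffffffff81000000);     // hypothetical guest code address
 *
 *     DBGFBP hBp = NIL_DBGFBP;
 *     int rc = DBGFR3BpSetInt3Ex(pUVM, hBpOwner, NULL, 0, &Addr,
 *                                DBGF_BP_F_DEFAULT | DBGF_BP_F_ENABLED,
 *                                0, UINT64_MAX, &hBp);          // pvUser=NULL, idSrcCpu=0, trigger at once, never auto-disable
 *     if (rc == VINF_DBGF_BP_ALREADY_EXIST)
 *         LogRel(("Reusing existing breakpoint %#x\n", hBp));
 */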
2279
2280
2281/**
2282 * Sets a register breakpoint.
2283 *
2284 * @returns VBox status code.
2285 * @param pUVM The user mode VM handle.
2286 * @param pAddress The address of the breakpoint.
2287 * @param iHitTrigger The hit count at which the breakpoint starts triggering.
2288 * Use 0 (or 1) if it's gonna trigger at once.
2289 * @param iHitDisable The hit count which disables the breakpoint.
2290 * Use ~(uint64_t)0 if it's never gonna be disabled.
2291 * @param fType The access type (one of the X86_DR7_RW_* defines).
2292 * @param cb The access size - 1,2,4 or 8 (the latter is AMD64 long mode only).
2293 * Must be 1 if fType is X86_DR7_RW_EO.
2294 * @param phBp Where to store the breakpoint handle.
2295 *
2296 * @thread Any thread.
2297 */
2298VMMR3DECL(int) DBGFR3BpSetReg(PUVM pUVM, PCDBGFADDRESS pAddress, uint64_t iHitTrigger,
2299 uint64_t iHitDisable, uint8_t fType, uint8_t cb, PDBGFBP phBp)
2300{
2301 return DBGFR3BpSetRegEx(pUVM, NIL_DBGFBPOWNER, NULL /*pvUser*/, pAddress,
2302 DBGF_BP_F_DEFAULT, iHitTrigger, iHitDisable, fType, cb, phBp);
2303}
2304
2305
2306/**
2307 * Sets a register breakpoint - extended version.
2308 *
2309 * @returns VBox status code.
2310 * @param pUVM The user mode VM handle.
2311 * @param hOwner The owner handle, use NIL_DBGFBPOWNER if no special owner attached.
2312 * @param pvUser Opaque user data to pass in the owner callback.
2313 * @param pAddress The address of the breakpoint.
2314 * @param fFlags Combination of DBGF_BP_F_XXX.
2315 * @param iHitTrigger The hit count at which the breakpoint starts triggering.
2316 * Use 0 (or 1) if it's gonna trigger at once.
2317 * @param iHitDisable The hit count which disables the breakpoint.
2318 * Use ~(uint64_t)0 if it's never gonna be disabled.
2319 * @param fType The access type (one of the X86_DR7_RW_* defines).
2320 * @param cb The access size - 1,2,4 or 8 (the latter is AMD64 long mode only).
2321 * Must be 1 if fType is X86_DR7_RW_EO.
2322 * @param phBp Where to store the breakpoint handle.
2323 *
2324 * @thread Any thread.
2325 */
2326VMMR3DECL(int) DBGFR3BpSetRegEx(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser,
2327 PCDBGFADDRESS pAddress, uint16_t fFlags,
2328 uint64_t iHitTrigger, uint64_t iHitDisable,
2329 uint8_t fType, uint8_t cb, PDBGFBP phBp)
2330{
2331 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2332 AssertReturn(hOwner != NIL_DBGFBPOWNER || pvUser == NULL, VERR_INVALID_PARAMETER);
2333 AssertReturn(DBGFR3AddrIsValid(pUVM, pAddress), VERR_INVALID_PARAMETER);
2334 AssertReturn(iHitTrigger <= iHitDisable, VERR_INVALID_PARAMETER);
2335 AssertReturn(cb > 0 && cb <= 8 && RT_IS_POWER_OF_TWO(cb), VERR_INVALID_PARAMETER);
2336 AssertPtrReturn(phBp, VERR_INVALID_POINTER);
2337 switch (fType)
2338 {
2339 case X86_DR7_RW_EO:
2340 AssertMsgReturn(cb == 1, ("fType=%#x cb=%d != 1\n", fType, cb), VERR_INVALID_PARAMETER);
2341 break;
2342 case X86_DR7_RW_IO:
2343 case X86_DR7_RW_RW:
2344 case X86_DR7_RW_WO:
2345 break;
2346 default:
2347 AssertMsgFailedReturn(("fType=%#x\n", fType), VERR_INVALID_PARAMETER);
2348 }
2349
2350 int rc = dbgfR3BpEnsureInit(pUVM);
2351 AssertRCReturn(rc, rc);
2352
2353 /*
2354 * Check if we've already got a matching breakpoint for that address.
2355 */
2356 PDBGFBPINT pBp = NULL;
2357 DBGFBP hBp = dbgfR3BpGetByAddr(pUVM, DBGFBPTYPE_REG, pAddress->FlatPtr, &pBp);
2358 if ( hBp != NIL_DBGFBP
2359 && pBp->Pub.u.Reg.cb == cb
2360 && pBp->Pub.u.Reg.fType == fType)
2361 {
2362 rc = VINF_SUCCESS;
2363 if (!DBGF_BP_PUB_IS_ENABLED(&pBp->Pub) && (fFlags & DBGF_BP_F_ENABLED))
2364 rc = dbgfR3BpArm(pUVM, hBp, pBp);
2365 /* else: We don't disable it when DBGF_BP_F_ENABLED isn't given. */
2366 if (RT_SUCCESS(rc))
2367 {
2368 rc = VINF_DBGF_BP_ALREADY_EXIST;
2369 *phBp = hBp;
2370 }
2371 return rc;
2372 }
2373
2374 /*
2375 * Allocate new breakpoint.
2376 */
2377 rc = dbgfR3BpAlloc(pUVM, hOwner, pvUser, DBGFBPTYPE_REG, fFlags, iHitTrigger, iHitDisable, &hBp, &pBp);
2378 if (RT_SUCCESS(rc))
2379 {
2380 pBp->Pub.u.Reg.GCPtr = pAddress->FlatPtr;
2381 pBp->Pub.u.Reg.fType = fType;
2382 pBp->Pub.u.Reg.cb = cb;
2383 pBp->Pub.u.Reg.iReg = UINT8_MAX;
2384 ASMCompilerBarrier();
2385
2386 /* Assign the proper hardware breakpoint. */
2387 rc = dbgfR3BpRegAssign(pUVM->pVM, hBp, pBp);
2388 if (RT_SUCCESS(rc))
2389 {
2390 /* Arm the breakpoint. */
2391 if (fFlags & DBGF_BP_F_ENABLED)
2392 rc = dbgfR3BpArm(pUVM, hBp, pBp);
2393 if (RT_SUCCESS(rc))
2394 {
2395 *phBp = hBp;
2396 return VINF_SUCCESS;
2397 }
2398
2399 int rc2 = dbgfR3BpRegRemove(pUVM->pVM, hBp, pBp);
2400 AssertRC(rc2); RT_NOREF(rc2);
2401 }
2402
2403 dbgfR3BpFree(pUVM, hBp, pBp);
2404 }
2405
2406 return rc;
2407}
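
/*
 * Sketch: watch 4-byte writes to a guest virtual address via the plain
 * DBGFR3BpSetReg() wrapper above, building the address with DBGFR3AddrFromFlat()
 * as in the int3 example (the address itself is made up):
 *
 *     DBGFADDRESS Addr;
 *     DBGFR3AddrFromFlat(pUVM, &Addr, 0x00007ffffffde000);     // hypothetical guest data address
 *
 *     DBGFBP hBp = NIL_DBGFBP;
 *     int rc = DBGFR3BpSetReg(pUVM, &Addr, 0, UINT64_MAX,      // trigger at once, never auto-disable
 *                             X86_DR7_RW_WO, 4, &hBp);         // break on 4 byte writes
 */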
2408
2409
2410/**
2411 * This is only kept for now to not mess with the debugger implementation at this point;
2412 * recompiler breakpoints are not supported anymore (IEM has some API but it isn't implemented
2413 * and should probably be merged with the DBGF breakpoints).
2414 */
2415VMMR3DECL(int) DBGFR3BpSetREM(PUVM pUVM, PCDBGFADDRESS pAddress, uint64_t iHitTrigger,
2416 uint64_t iHitDisable, PDBGFBP phBp)
2417{
2418 RT_NOREF(pUVM, pAddress, iHitTrigger, iHitDisable, phBp);
2419 return VERR_NOT_SUPPORTED;
2420}
2421
2422
2423/**
2424 * Sets an I/O port breakpoint.
2425 *
2426 * @returns VBox status code.
2427 * @param pUVM The user mode VM handle.
2428 * @param uPort The first I/O port.
2429 * @param cPorts The number of I/O ports to cover.
2430 * @param fAccess The access we want to break on, a combination of DBGFBPIOACCESS_XXX.
2431 * @param iHitTrigger The hit count at which the breakpoint starts
2432 * triggering. Use 0 (or 1) if it's gonna trigger at
2433 * once.
2434 * @param iHitDisable The hit count which disables the breakpoint.
2435 * Use ~(uint64_t)0 if it's never gonna be disabled.
2436 * @param phBp Where to store the breakpoint handle.
2437 *
2438 * @thread Any thread.
2439 */
2440VMMR3DECL(int) DBGFR3BpSetPortIo(PUVM pUVM, RTIOPORT uPort, RTIOPORT cPorts, uint32_t fAccess,
2441 uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
2442{
2443 return DBGFR3BpSetPortIoEx(pUVM, NIL_DBGFBPOWNER, NULL /*pvUser*/, uPort, cPorts, fAccess,
2444 DBGF_BP_F_DEFAULT, iHitTrigger, iHitDisable, phBp);
2445}
2446
2447
2448/**
2449 * Sets an I/O port breakpoint - extended version.
2450 *
2451 * @returns VBox status code.
2452 * @param pUVM The user mode VM handle.
2453 * @param hOwner The owner handle, use NIL_DBGFBPOWNER if no special owner attached.
2454 * @param pvUser Opaque user data to pass in the owner callback.
2455 * @param uPort The first I/O port.
2456 * @param cPorts The number of I/O ports to cover.
2457 * @param fAccess The access we want to break on, a combination of DBGFBPIOACCESS_XXX.
2458 * @param fFlags Combination of DBGF_BP_F_XXX.
2459 * @param iHitTrigger The hit count at which the breakpoint starts
2460 * triggering. Use 0 (or 1) if it's gonna trigger at
2461 * once.
2462 * @param iHitDisable The hit count which disables the breakpoint.
2463 * Use ~(uint64_t)0 if it's never gonna be disabled.
2464 * @param phBp Where to store the breakpoint handle.
2465 *
2466 * @thread Any thread.
2467 */
2468VMMR3DECL(int) DBGFR3BpSetPortIoEx(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser,
2469 RTIOPORT uPort, RTIOPORT cPorts, uint32_t fAccess,
2470 uint32_t fFlags, uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
2471{
2472 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2473 AssertReturn(hOwner != NIL_DBGFBPOWNER || pvUser == NULL, VERR_INVALID_PARAMETER);
2474 AssertReturn(!(fAccess & ~DBGFBPIOACCESS_VALID_MASK_PORT_IO), VERR_INVALID_FLAGS);
2475 AssertReturn(fAccess, VERR_INVALID_FLAGS);
2476 AssertReturn(!(fFlags & ~DBGF_BP_F_VALID_MASK), VERR_INVALID_FLAGS);
2477 AssertReturn(fFlags, VERR_INVALID_FLAGS);
2478 AssertReturn(iHitTrigger <= iHitDisable, VERR_INVALID_PARAMETER);
2479 AssertPtrReturn(phBp, VERR_INVALID_POINTER);
2480 AssertReturn(cPorts > 0, VERR_OUT_OF_RANGE);
2481 AssertReturn((RTIOPORT)(uPort + (cPorts - 1)) >= uPort, VERR_OUT_OF_RANGE);
2482
2483 int rc = dbgfR3BpPortIoEnsureInit(pUVM);
2484 AssertRCReturn(rc, rc);
2485
2486 PDBGFBPINT pBp = NULL;
2487 DBGFBP hBp = dbgfR3BpPortIoGetByRange(pUVM, uPort, cPorts, &pBp);
2488 if ( hBp != NIL_DBGFBP
2489 && pBp->Pub.u.PortIo.uPort == uPort
2490 && pBp->Pub.u.PortIo.cPorts == cPorts
2491 && pBp->Pub.u.PortIo.fAccess == fAccess)
2492 {
2493 rc = VINF_SUCCESS;
2494 if (!DBGF_BP_PUB_IS_ENABLED(&pBp->Pub))
2495 rc = dbgfR3BpArm(pUVM, hBp, pBp);
2496 if (RT_SUCCESS(rc))
2497 {
2498 rc = VINF_DBGF_BP_ALREADY_EXIST;
2499 *phBp = hBp;
2500 }
2501 return rc;
2502 }
2503
2504 rc = dbgfR3BpAlloc(pUVM, hOwner, pvUser, DBGFBPTYPE_PORT_IO, fFlags, iHitTrigger, iHitDisable, &hBp, &pBp);
2505 if (RT_SUCCESS(rc))
2506 {
2507 pBp->Pub.u.PortIo.uPort = uPort;
2508 pBp->Pub.u.PortIo.cPorts = cPorts;
2509 pBp->Pub.u.PortIo.fAccess = fAccess;
2510
2511 /* Add the breakpoint to the lookup tables. */
2512 rc = dbgfR3BpPortIoAdd(pUVM, hBp, pBp);
2513 if (RT_SUCCESS(rc))
2514 {
2515 /* Enable the breakpoint if requested. */
2516 if (fFlags & DBGF_BP_F_ENABLED)
2517 rc = dbgfR3BpArm(pUVM, hBp, pBp);
2518 if (RT_SUCCESS(rc))
2519 {
2520 *phBp = hBp;
2521 return VINF_SUCCESS;
2522 }
2523
2524 int rc2 = dbgfR3BpPortIoRemove(pUVM, hBp, pBp); AssertRC(rc2);
2525 }
2526
2527 dbgfR3BpFree(pUVM, hBp, pBp);
2528 }
2529
2530 return rc;
2531}
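
/*
 * Sketch: break on guest accesses to the keyboard controller ports using the
 * plain DBGFR3BpSetPortIo() wrapper above. The access flag names follow the
 * DBGFBPIOACCESS_XXX set referenced by the validation code; check
 * VBox/vmm/dbgf.h for the exact definitions.
 *
 *     DBGFBP hBp = NIL_DBGFBP;
 *     int rc = DBGFR3BpSetPortIo(pUVM, 0x60, 5,                            // ports 0x60..0x64
 *                                DBGFBPIOACCESS_READ | DBGFBPIOACCESS_WRITE,
 *                                0, UINT64_MAX, &hBp);                     // trigger at once, never auto-disable
 */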
2532
2533
2534/**
2535 * Sets a memory mapped I/O breakpoint.
2536 *
2537 * @returns VBox status code.
2538 * @param pUVM The user mode VM handle.
2539 * @param GCPhys The first MMIO address.
2540 * @param cb The size of the MMIO range to break on.
2541 * @param fAccess The access we want to break on.
2542 * @param iHitTrigger The hit count at which the breakpoint starts
2543 * triggering. Use 0 (or 1) if it's gonna trigger at
2544 * once.
2545 * @param iHitDisable The hit count which disables the breakpoint.
2546 * Use ~(uint64_t)0 if it's never gonna be disabled.
2547 * @param phBp Where to store the breakpoint handle.
2548 *
2549 * @thread Any thread.
2550 */
2551VMMR3DECL(int) DBGFR3BpSetMmio(PUVM pUVM, RTGCPHYS GCPhys, uint32_t cb, uint32_t fAccess,
2552 uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
2553{
2554 return DBGFR3BpSetMmioEx(pUVM, NIL_DBGFBPOWNER, NULL /*pvUser*/, GCPhys, cb, fAccess,
2555 DBGF_BP_F_DEFAULT, iHitTrigger, iHitDisable, phBp);
2556}
2557
2558
2559/**
2560 * Sets a memory mapped I/O breakpoint - extended version.
2561 *
2562 * @returns VBox status code.
2563 * @param pUVM The user mode VM handle.
2564 * @param hOwner The owner handle, use NIL_DBGFBPOWNER if no special owner attached.
2565 * @param pvUser Opaque user data to pass in the owner callback.
2566 * @param GCPhys The first MMIO address.
2567 * @param cb The size of the MMIO range to break on.
2568 * @param fAccess The access we want to break on.
2569 * @param fFlags Combination of DBGF_BP_F_XXX.
2570 * @param iHitTrigger The hit count at which the breakpoint starts
2571 * triggering. Use 0 (or 1) if it's gonna trigger at
2572 * once.
2573 * @param iHitDisable The hit count which disables the breakpoint.
2574 * Use ~(uint64_t)0 if it's never gonna be disabled.
2575 * @param phBp Where to store the breakpoint handle.
2576 *
2577 * @thread Any thread.
2578 */
2579VMMR3DECL(int) DBGFR3BpSetMmioEx(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser,
2580 RTGCPHYS GCPhys, uint32_t cb, uint32_t fAccess,
2581 uint32_t fFlags, uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
2582{
2583 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2584 AssertReturn(hOwner != NIL_DBGFBPOWNER || pvUser == NULL, VERR_INVALID_PARAMETER);
2585 AssertReturn(!(fAccess & ~DBGFBPIOACCESS_VALID_MASK_MMIO), VERR_INVALID_FLAGS);
2586 AssertReturn(fAccess, VERR_INVALID_FLAGS);
2587 AssertReturn(!(fFlags & ~DBGF_BP_F_VALID_MASK), VERR_INVALID_FLAGS);
2588 AssertReturn(fFlags, VERR_INVALID_FLAGS);
2589 AssertReturn(iHitTrigger <= iHitDisable, VERR_INVALID_PARAMETER);
2590 AssertPtrReturn(phBp, VERR_INVALID_POINTER);
2591 AssertReturn(cb, VERR_OUT_OF_RANGE);
2592 AssertReturn(GCPhys + cb > GCPhys, VERR_OUT_OF_RANGE); /* No address space wrap-around. */
2593
2594 int rc = dbgfR3BpEnsureInit(pUVM);
2595 AssertRCReturn(rc, rc);
2596
2597 return VERR_NOT_IMPLEMENTED;
2598}
2599
2600
2601/**
2602 * Clears a breakpoint.
2603 *
2604 * @returns VBox status code.
2605 * @param pUVM The user mode VM handle.
2606 * @param hBp The handle of the breakpoint which should be removed (cleared).
2607 *
2608 * @thread Any thread.
2609 */
2610VMMR3DECL(int) DBGFR3BpClear(PUVM pUVM, DBGFBP hBp)
2611{
2612 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2613 AssertReturn(hBp != NIL_DBGFBP, VERR_INVALID_HANDLE);
2614
2615 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
2616 AssertPtrReturn(pBp, VERR_DBGF_BP_NOT_FOUND);
2617
2618 /* Disarm the breakpoint when it is enabled. */
2619 if (DBGF_BP_PUB_IS_ENABLED(&pBp->Pub))
2620 {
2621 int rc = dbgfR3BpDisarm(pUVM, hBp, pBp);
2622 AssertRC(rc);
2623 }
2624
2625 switch (DBGF_BP_PUB_GET_TYPE(&pBp->Pub))
2626 {
2627 case DBGFBPTYPE_REG:
2628 {
2629 int rc = dbgfR3BpRegRemove(pUVM->pVM, hBp, pBp);
2630 AssertRC(rc);
2631 break;
2632 }
2633 case DBGFBPTYPE_SOFTWARE:
2634 {
2635 int rc = dbgfR3BpInt3Remove(pUVM, hBp, pBp);
2636 AssertRC(rc);
2637 break;
2638 }
2639 case DBGFBPTYPE_PORT_IO:
2640 {
2641 int rc = dbgfR3BpPortIoRemove(pUVM, hBp, pBp);
2642 AssertRC(rc);
2643 break;
2644 }
2645 default:
2646 break;
2647 }
2648
2649 dbgfR3BpFree(pUVM, hBp, pBp);
2650 return VINF_SUCCESS;
2651}
2652
2653
2654/**
2655 * Enables a breakpoint.
2656 *
2657 * @returns VBox status code.
2658 * @param pUVM The user mode VM handle.
2659 * @param hBp The handle of the breakpoint which should be enabled.
2660 *
2661 * @thread Any thread.
2662 */
2663VMMR3DECL(int) DBGFR3BpEnable(PUVM pUVM, DBGFBP hBp)
2664{
2665 /*
2666 * Validate the input.
2667 */
2668 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2669 AssertReturn(hBp != NIL_DBGFBP, VERR_INVALID_HANDLE);
2670
2671 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
2672 AssertPtrReturn(pBp, VERR_DBGF_BP_NOT_FOUND);
2673
2674 int rc;
2675 if (!DBGF_BP_PUB_IS_ENABLED(&pBp->Pub))
2676 rc = dbgfR3BpArm(pUVM, hBp, pBp);
2677 else
2678 rc = VINF_DBGF_BP_ALREADY_ENABLED;
2679
2680 return rc;
2681}
2682
2683
2684/**
2685 * Disables a breakpoint.
2686 *
2687 * @returns VBox status code.
2688 * @param pUVM The user mode VM handle.
2689 * @param hBp The handle of the breakpoint which should be disabled.
2690 *
2691 * @thread Any thread.
2692 */
2693VMMR3DECL(int) DBGFR3BpDisable(PUVM pUVM, DBGFBP hBp)
2694{
2695 /*
2696 * Validate the input.
2697 */
2698 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2699 AssertReturn(hBp != NIL_DBGFBP, VERR_INVALID_HANDLE);
2700
2701 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
2702 AssertPtrReturn(pBp, VERR_DBGF_BP_NOT_FOUND);
2703
2704 int rc;
2705 if (DBGF_BP_PUB_IS_ENABLED(&pBp->Pub))
2706 rc = dbgfR3BpDisarm(pUVM, hBp, pBp);
2707 else
2708 rc = VINF_DBGF_BP_ALREADY_DISABLED;
2709
2710 return rc;
2711}
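
/*
 * Typical lifecycle once a handle has been obtained from one of the setters
 * above (sketch):
 *
 *     rc = DBGFR3BpDisable(pUVM, hBp);     // disarm, keeping the handle and its settings
 *     ...
 *     rc = DBGFR3BpEnable(pUVM, hBp);      // rearm it again
 *     ...
 *     rc = DBGFR3BpClear(pUVM, hBp);       // disarm if necessary, drop it from the lookup tables and free the handle
 */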
2712
2713
2714/**
2715 * Enumerate the breakpoints.
2716 *
2717 * @returns VBox status code.
2718 * @param pUVM The user mode VM handle.
2719 * @param pfnCallback The callback function.
2720 * @param pvUser The user argument to pass to the callback.
2721 *
2722 * @thread Any thread.
2723 */
2724VMMR3DECL(int) DBGFR3BpEnum(PUVM pUVM, PFNDBGFBPENUM pfnCallback, void *pvUser)
2725{
2726 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2727
2728 for (uint32_t idChunk = 0; idChunk < RT_ELEMENTS(pUVM->dbgf.s.aBpChunks); idChunk++)
2729 {
2730 PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[idChunk];
2731
2732 if (pBpChunk->idChunk == DBGF_BP_CHUNK_ID_INVALID)
2733 break; /* Stop here as the first non-allocated chunk means there are none allocated afterwards either. */
2734
2735 if (pBpChunk->cBpsFree < DBGF_BP_COUNT_PER_CHUNK)
2736 {
2737 /* Scan the bitmap for allocated entries. */
2738 int32_t iAlloc = ASMBitFirstSet(pBpChunk->pbmAlloc, DBGF_BP_COUNT_PER_CHUNK);
2739 if (iAlloc != -1)
2740 {
2741 do
2742 {
2743 DBGFBP hBp = DBGF_BP_HND_CREATE(idChunk, (uint32_t)iAlloc);
2744 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
2745
2746 /* Make a copy of the breakpoint's public data to have a consistent view. */
2747 DBGFBPPUB BpPub;
2748 BpPub.cHits = ASMAtomicReadU64((volatile uint64_t *)&pBp->Pub.cHits);
2749 BpPub.iHitTrigger = ASMAtomicReadU64((volatile uint64_t *)&pBp->Pub.iHitTrigger);
2750 BpPub.iHitDisable = ASMAtomicReadU64((volatile uint64_t *)&pBp->Pub.iHitDisable);
2751 BpPub.hOwner = ASMAtomicReadU32((volatile uint32_t *)&pBp->Pub.hOwner);
2752 BpPub.u16Type = ASMAtomicReadU16((volatile uint16_t *)&pBp->Pub.u16Type); /* Actually constant. */
2753 BpPub.fFlags = ASMAtomicReadU16((volatile uint16_t *)&pBp->Pub.fFlags);
2754 memcpy(&BpPub.u, &pBp->Pub.u, sizeof(pBp->Pub.u)); /* Is constant after allocation. */
2755
2756 /* Check if a removal raced us. */
2757 if (ASMBitTest(pBpChunk->pbmAlloc, iAlloc))
2758 {
2759 int rc = pfnCallback(pUVM, pvUser, hBp, &BpPub);
2760 if (RT_FAILURE(rc) || rc == VINF_CALLBACK_RETURN)
2761 return rc;
2762 }
2763
2764 iAlloc = ASMBitNextSet(pBpChunk->pbmAlloc, DBGF_BP_COUNT_PER_CHUNK, iAlloc);
2765 } while (iAlloc != -1);
2766 }
2767 }
2768 }
2769
2770 return VINF_SUCCESS;
2771}
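
/*
 * Enumeration sketch. The callback parameter list is inferred from the
 * pfnCallback invocation above; the authoritative FNDBGFBPENUM typedef is in
 * VBox/vmm/dbgf.h. Callback name and output are made up for the example.
 *
 *     static DECLCALLBACK(int) myBpEnumCallback(PUVM pUVM, void *pvUser, DBGFBP hBp, PCDBGFBPPUB pBpPub)
 *     {
 *         RT_NOREF(pUVM, pvUser);
 *         LogRel(("bp %#x: type=%u hits=%RU64\n", hBp, DBGF_BP_PUB_GET_TYPE(pBpPub), pBpPub->cHits));
 *         return VINF_SUCCESS;             // VINF_CALLBACK_RETURN stops the enumeration early
 *     }
 *
 *     int rc = DBGFR3BpEnum(pUVM, myBpEnumCallback, NULL);
 */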
2772
2773
2774/**
2775 * Called whenever a breakpoint event needs to be serviced in ring-3 to decide what to do.
2776 *
2777 * @returns VBox status code.
2778 * @param pVM The cross context VM structure.
2779 * @param pVCpu The vCPU the breakpoint event happened on.
2780 *
2781 * @thread EMT
2782 */
2783VMMR3_INT_DECL(int) DBGFR3BpHit(PVM pVM, PVMCPU pVCpu)
2784{
2785 /* Send it straight into the debugger? */
2786 if (pVCpu->dbgf.s.fBpInvokeOwnerCallback)
2787 {
2788 DBGFBP hBp = pVCpu->dbgf.s.hBpActive;
2789 pVCpu->dbgf.s.fBpInvokeOwnerCallback = false;
2790
2791 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pVM->pUVM, hBp);
2792 AssertReturn(pBp, VERR_DBGF_BP_IPE_9);
2793
2794 /* Resolve owner (can be NIL_DBGFBPOWNER) and invoke callback if there is one. */
2795 if (pBp->Pub.hOwner != NIL_DBGFBPOWNER)
2796 {
2797 PCDBGFBPOWNERINT pBpOwner = dbgfR3BpOwnerGetByHnd(pVM->pUVM, pBp->Pub.hOwner);
2798 if (pBpOwner)
2799 {
2800 VBOXSTRICTRC rcStrict = dbgfR3BpHit(pVM, pVCpu, hBp, pBp, pBpOwner);
2801 if (VBOXSTRICTRC_VAL(rcStrict) == VINF_SUCCESS)
2802 {
2803 pVCpu->dbgf.s.hBpActive = NIL_DBGFBP;
2804 return VINF_SUCCESS;
2805 }
2806 if (VBOXSTRICTRC_VAL(rcStrict) != VINF_DBGF_BP_HALT) /* Guru meditation. */
2807 return VERR_DBGF_BP_OWNER_CALLBACK_WRONG_STATUS;
2808 /* else: Halt in the debugger. */
2809 }
2810 }
2811 }
2812
2813 return DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
2814}
2815