VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/DBGFR3Bp.cpp@ 97200

Last change on this file since 97200 was 97200, checked in by vboxsync, 2 years ago

VMM/IEM,EM: More CPUMCTXCORE elimination and trimming of interpret functions.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 106.2 KB
Line 
1/* $Id: DBGFR3Bp.cpp 97200 2022-10-18 11:38:42Z vboxsync $ */
2/** @file
3 * DBGF - Debugger Facility, Breakpoint Management.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_dbgf_bp DBGF - The Debugger Facility, Breakpoint Management
30 *
31 * The debugger facility's breakpoint manager's purpose is to efficiently manage
32 * large amounts of breakpoints for various use cases like dtrace like operations
33 * or execution flow tracing for instance. Especially execution flow tracing can
34 * require thousands of breakpoints which need to be managed efficiently to not slow
35 * down guest operation too much. Before the rewrite starting end of 2020, DBGF could
36 * only handle 32 breakpoints (+ 4 hardware assisted breakpoints). The new
37 * manager is supposed to be able to handle up to one million breakpoints.
38 *
39 * @see grp_dbgf
40 *
41 *
42 * @section sec_dbgf_bp_owner Breakpoint owners
43 *
44 * A single breakpoint owner has a mandatory ring-3 callback and an optional ring-0
45 * callback assigned which is called whenever a breakpoint with the owner assigned is hit.
46 * The common part of the owner is managed by a single table mapped into both ring-0
47 * and ring-3 and the handle being the index into the table. This allows resolving
48 * the handle to the internal structure efficiently. Searching for a free entry is
49 * done using a bitmap indicating free and occupied entries. For the optional
50 * ring-0 owner part there is a separate ring-0 only table for security reasons.
51 *
52 * The callback of the owner can be used to gather and log guest state information
53 * and decide whether to continue guest execution or stop and drop into the debugger.
54 * Breakpoints which don't have an owner assigned will always drop the VM right into
55 * the debugger.
56 *
57 *
58 * @section sec_dbgf_bp_bps Breakpoints
59 *
60 * Breakpoints are referenced by an opaque handle which acts as an index into a global table
61 * mapped into ring-3 and ring-0. Each entry contains the necessary state to manage the breakpoint
62 * like trigger conditions, type, owner, etc. If an owner is given an optional opaque user argument
63 * can be supplied which is passed in the respective owner callback. For owners with ring-0 callbacks
64 * a dedicated ring-0 table is held saving possible ring-0 user arguments.
65 *
66 * To keep memory consumption under control and still support large amounts of
67 * breakpoints the table is split into fixed sized chunks and the chunk index and index
68 * into the chunk can be derived from the handle with only a few logical operations.
69 *
70 *
71 * @section sec_dbgf_bp_resolv Resolving breakpoint addresses
72 *
73 * Whenever a \#BP(0) event is triggered DBGF needs to decide whether the event originated
74 * from within the guest or whether a DBGF breakpoint caused it. This has to happen as fast
75 * as possible. The following scheme is employed to achieve this:
76 *
77 * @verbatim
78 * 7 6 5 4 3 2 1 0
79 * +---+---+---+---+---+---+---+---+
80 * | | | | | | | | | BP address
81 * +---+---+---+---+---+---+---+---+
82 * \_____________________/ \_____/
83 * | |
84 * | +---------------+
85 * | |
86 * BP table | v
87 * +------------+ | +-----------+
88 * | hBp 0 | | X <- | 0 | xxxxx |
89 * | hBp 1 | <----------------+------------------------ | 1 | hBp 1 |
90 * | | | +--- | 2 | idxL2 |
91 * | hBp <m> | <---+ v | |...| ... |
92 * | | | +-----------+ | |...| ... |
93 * | | | | | | |...| ... |
94 * | hBp <n> | <-+ +----- | +> leaf | | | . |
95 * | | | | | | | | . |
96 * | | | | + root + | <------------+ | . |
97 * | | | | | | +-----------+
98 * | | +------- | leaf<+ | L1: 65536
99 * | . | | . |
100 * | . | | . |
101 * | . | | . |
102 * +------------+ +-----------+
103 * L2 idx BST
104 * @endverbatim
105 *
106 * -# Take the lowest 16 bits of the breakpoint address and use it as a direct index
107 * into the L1 table. The L1 table is contiguous and consists of 4 byte entries
108 * resulting in 256KiB of memory used. The topmost 4 bits indicate how to proceed
109 * and the meaning of the remaining 28bits depends on the topmost 4 bits:
110 * - A 0 type entry means no breakpoint is registered with the matching lowest 16bits,
111 * so forward the event to the guest.
112 * - A 1 in the topmost 4 bits means that the remaining 28bits directly denote a breakpoint
113 * handle which can be resolved by extracting the chunk index and index into the chunk
114 * of the global breakpoint table. If the address matches the breakpoint is processed
115 * according to the configuration. Otherwise the breakpoint is again forwarded to the guest.
116 * - A 2 in the topmost 4 bits means that there are multiple breakpoints registered
117 * matching the lowest 16bits and the search must continue in the L2 table with the
118 * remaining 28bits acting as an index into the L2 table indicating the search root.
119 * -# The L2 table consists of multiple index based binary search trees, there is one for each reference
120 * from the L1 table. The key for the table are the upper 6 bytes of the breakpoint address
121 * used for searching. This tree is traversed until either a matching address is found and
122 * the breakpoint is being processed or again forwarded to the guest if it isn't successful.
123 * Each entry in the L2 table is 16 bytes big and densely packed to avoid excessive memory usage.
124 *
125 * @section sec_dbgf_bp_ioport Handling I/O port breakpoints
126 *
127 * Because of the limited amount of I/O ports being available (65536) a single table with 65536 entries,
128 * each 4 byte big will be allocated. This amounts to 256KiB of memory being used additionally as soon as
129 * an I/O breakpoint is enabled. The entries contain the breakpoint handle directly allowing only one breakpoint
130 * per port right now, which is something we accept as a limitation right now to keep things relatively simple.
131 * When there is at least one I/O breakpoint active IOM will be notified and it will afterwards call the DBGF API
132 * whenever the guest does an I/O port access to decide whether a breakpoint was hit. This keeps the overhead small
133 * when there is no I/O port breakpoint enabled.
134 *
135 * @section sec_dbgf_bp_note Random thoughts and notes for the implementation
136 *
137 * - The assumption for this approach is that the lowest 16bits of the breakpoint address are
138 * hopefully the ones being the most varying ones across breakpoints so the traversal
139 * can skip the L2 table in most of the cases. Even if the L2 table must be taken the
140 * individual trees should be quite shallow resulting in low overhead when walking it
141 * (though only real world testing can assert this assumption).
142 * - Index based tables and trees are used instead of pointers because the tables
143 * are always mapped into ring-0 and ring-3 with different base addresses.
144 * - Efficient breakpoint allocation is done by having a global bitmap indicating free
145 * and occupied breakpoint entries. Same applies for the L2 BST table.
146 * - Special care must be taken when modifying the L1 and L2 tables as other EMTs
147 * might still access it (want to try a lockless approach first using
148 * atomic updates, have to resort to locking if that turns out to be too difficult).
149 * - Each BP entry is supposed to be 64 byte big and each chunk should contain 65536
150 * breakpoints which results in 4MiB for each chunk plus the allocation bitmap.
151 * - ring-0 has to take special care when traversing the L2 BST to not run into cycles
152 * and do strict bounds checking before accessing anything. The L1 and L2 table
153 * are written to from ring-3 only. Same goes for the breakpoint table with the
154 * exception being the opaque user argument for ring-0 which is stored in ring-0 only
155 * memory.
156 */
157
158
159/*********************************************************************************************************************************
160* Header Files *
161*********************************************************************************************************************************/
162#define LOG_GROUP LOG_GROUP_DBGF
163#define VMCPU_INCL_CPUM_GST_CTX
164#include <VBox/vmm/dbgf.h>
165#include <VBox/vmm/selm.h>
166#include <VBox/vmm/iem.h>
167#include <VBox/vmm/mm.h>
168#include <VBox/vmm/iom.h>
169#include <VBox/vmm/hm.h>
170#include "DBGFInternal.h"
171#include <VBox/vmm/vm.h>
172#include <VBox/vmm/uvm.h>
173
174#include <VBox/err.h>
175#include <VBox/log.h>
176#include <iprt/assert.h>
177#include <iprt/mem.h>
178
179#include "DBGFInline.h"
180
181
182/*********************************************************************************************************************************
183* Structures and Typedefs *
184*********************************************************************************************************************************/
185
186
187/*********************************************************************************************************************************
188* Internal Functions *
189*********************************************************************************************************************************/
190RT_C_DECLS_BEGIN
191RT_C_DECLS_END
192
193
194/**
195 * Initialize the breakpoint mangement.
196 *
197 * @returns VBox status code.
198 * @param pUVM The user mode VM handle.
199 */
200DECLHIDDEN(int) dbgfR3BpInit(PUVM pUVM)
201{
202 PVM pVM = pUVM->pVM;
203
204 //pUVM->dbgf.s.paBpOwnersR3 = NULL;
205 //pUVM->dbgf.s.pbmBpOwnersAllocR3 = NULL;
206
207 /* Init hardware breakpoint states. */
208 for (uint32_t i = 0; i < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); i++)
209 {
210 PDBGFBPHW pHwBp = &pVM->dbgf.s.aHwBreakpoints[i];
211
212 AssertCompileSize(DBGFBP, sizeof(uint32_t));
213 pHwBp->hBp = NIL_DBGFBP;
214 //pHwBp->fEnabled = false;
215 }
216
217 /* Now the global breakpoint table chunks. */
218 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpChunks); i++)
219 {
220 PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[i];
221
222 //pBpChunk->pBpBaseR3 = NULL;
223 //pBpChunk->pbmAlloc = NULL;
224 //pBpChunk->cBpsFree = 0;
225 pBpChunk->idChunk = DBGF_BP_CHUNK_ID_INVALID; /* Not allocated. */
226 }
227
228 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpL2TblChunks); i++)
229 {
230 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[i];
231
232 //pL2Chunk->pL2BaseR3 = NULL;
233 //pL2Chunk->pbmAlloc = NULL;
234 //pL2Chunk->cFree = 0;
235 pL2Chunk->idChunk = DBGF_BP_CHUNK_ID_INVALID; /* Not allocated. */
236 }
237
238 //pUVM->dbgf.s.paBpLocL1R3 = NULL;
239 //pUVM->dbgf.s.paBpLocPortIoR3 = NULL;
240 pUVM->dbgf.s.hMtxBpL2Wr = NIL_RTSEMFASTMUTEX;
241 return RTSemFastMutexCreate(&pUVM->dbgf.s.hMtxBpL2Wr);
242}
243
244
245/**
246 * Terminates the breakpoint mangement.
247 *
248 * @returns VBox status code.
249 * @param pUVM The user mode VM handle.
250 */
251DECLHIDDEN(int) dbgfR3BpTerm(PUVM pUVM)
252{
253 if (pUVM->dbgf.s.pbmBpOwnersAllocR3)
254 {
255 RTMemFree((void *)pUVM->dbgf.s.pbmBpOwnersAllocR3);
256 pUVM->dbgf.s.pbmBpOwnersAllocR3 = NULL;
257 }
258
259 /* Free all allocated chunk bitmaps (the chunks itself are destroyed during ring-0 VM destruction). */
260 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpChunks); i++)
261 {
262 PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[i];
263
264 if (pBpChunk->idChunk != DBGF_BP_CHUNK_ID_INVALID)
265 {
266 AssertPtr(pBpChunk->pbmAlloc);
267 RTMemFree((void *)pBpChunk->pbmAlloc);
268 pBpChunk->pbmAlloc = NULL;
269 pBpChunk->idChunk = DBGF_BP_CHUNK_ID_INVALID;
270 }
271 }
272
273 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpL2TblChunks); i++)
274 {
275 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[i];
276
277 if (pL2Chunk->idChunk != DBGF_BP_CHUNK_ID_INVALID)
278 {
279 AssertPtr(pL2Chunk->pbmAlloc);
280 RTMemFree((void *)pL2Chunk->pbmAlloc);
281 pL2Chunk->pbmAlloc = NULL;
282 pL2Chunk->idChunk = DBGF_BP_CHUNK_ID_INVALID;
283 }
284 }
285
286 if (pUVM->dbgf.s.hMtxBpL2Wr != NIL_RTSEMFASTMUTEX)
287 {
288 RTSemFastMutexDestroy(pUVM->dbgf.s.hMtxBpL2Wr);
289 pUVM->dbgf.s.hMtxBpL2Wr = NIL_RTSEMFASTMUTEX;
290 }
291
292 return VINF_SUCCESS;
293}
294
295
/**
 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
 *
 * Worker for dbgfR3BpEnsureInit(): allocates the L1 breakpoint location lookup
 * table (see the @page docs above) on EMT(0).
 */
static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpInitEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    RT_NOREF(pvUser);

    VMCPU_ASSERT_EMT(pVCpu);
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    /*
     * The initialization will be done on EMT(0). It is possible that multiple
     * initialization attempts are done because dbgfR3BpEnsureInit() can be called
     * from racing non EMT threads when trying to set a breakpoint for the first time.
     * Just fake success if the L1 is already present which means that a previous rendezvous
     * successfully initialized the breakpoint manager.
     */
    PUVM pUVM = pVM->pUVM;
    if (   pVCpu->idCpu == 0
        && !pUVM->dbgf.s.paBpLocL1R3)
    {
        if (!SUPR3IsDriverless())
        {
            /* Ring-0 allocates the table and hands back the ring-3 mapping in the request. */
            DBGFBPINITREQ Req;
            Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
            Req.Hdr.cbReq    = sizeof(Req);
            Req.paBpLocL1R3  = NULL;
            int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_INIT, 0 /*u64Arg*/, &Req.Hdr);
            AssertLogRelMsgRCReturn(rc, ("VMMR0_DO_DBGF_BP_INIT failed: %Rrc\n", rc), rc);
            pUVM->dbgf.s.paBpLocL1R3 = Req.paBpLocL1R3;
        }
        else
        {
            /* Driverless: Do dbgfR0BpInitWorker here, ring-3 style.
               One uint32_t entry per possible low 16-bit address value, page aligned. */
            uint32_t const cbL1Loc = RT_ALIGN_32(UINT16_MAX * sizeof(uint32_t), HOST_PAGE_SIZE);
            pUVM->dbgf.s.paBpLocL1R3 = (uint32_t *)RTMemPageAllocZ(cbL1Loc);
            AssertLogRelMsgReturn(pUVM->dbgf.s.paBpLocL1R3, ("cbL1Loc=%#x\n", cbL1Loc), VERR_NO_PAGE_MEMORY);
        }
    }

    return VINF_SUCCESS;
}
338
339
340/**
341 * Ensures that the breakpoint manager is fully initialized.
342 *
343 * @returns VBox status code.
344 * @param pUVM The user mode VM handle.
345 *
346 * @thread Any thread.
347 */
348static int dbgfR3BpEnsureInit(PUVM pUVM)
349{
350 /* If the L1 lookup table is allocated initialization succeeded before. */
351 if (RT_LIKELY(pUVM->dbgf.s.paBpLocL1R3))
352 return VINF_SUCCESS;
353
354 /* Gather all EMTs and call into ring-0 to initialize the breakpoint manager. */
355 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpInitEmtWorker, NULL /*pvUser*/);
356}
357
358
/**
 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
 *
 * Worker for dbgfR3BpPortIoEnsureInit(): allocates the I/O port breakpoint
 * lookup table (one uint32_t entry per port) on EMT(0).
 */
static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpPortIoInitEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    RT_NOREF(pvUser);

    VMCPU_ASSERT_EMT(pVCpu);
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    /*
     * The initialization will be done on EMT(0). It is possible that multiple
     * initialization attempts are done because dbgfR3BpPortIoEnsureInit() can be called
     * from racing non EMT threads when trying to set a breakpoint for the first time.
     * Just fake success if the L1 is already present which means that a previous rendezvous
     * successfully initialized the breakpoint manager.
     */
    PUVM pUVM = pVM->pUVM;
    if (   pVCpu->idCpu == 0
        && !pUVM->dbgf.s.paBpLocPortIoR3)
    {
        if (!SUPR3IsDriverless())
        {
            /* The DBGFBPINITREQ structure is shared with the L1 init; its paBpLocL1R3
               member carries the port I/O table mapping for this request. */
            DBGFBPINITREQ Req;
            Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
            Req.Hdr.cbReq    = sizeof(Req);
            Req.paBpLocL1R3  = NULL;
            int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_PORTIO_INIT, 0 /*u64Arg*/, &Req.Hdr);
            AssertLogRelMsgRCReturn(rc, ("VMMR0_DO_DBGF_BP_PORTIO_INIT failed: %Rrc\n", rc), rc);
            pUVM->dbgf.s.paBpLocPortIoR3 = Req.paBpLocL1R3;
        }
        else
        {
            /* Driverless: Do dbgfR0BpPortIoInitWorker here, ring-3 style. */
            uint32_t const cbPortIoLoc = RT_ALIGN_32(UINT16_MAX * sizeof(uint32_t), HOST_PAGE_SIZE);
            pUVM->dbgf.s.paBpLocPortIoR3 = (uint32_t *)RTMemPageAllocZ(cbPortIoLoc);
            AssertLogRelMsgReturn(pUVM->dbgf.s.paBpLocPortIoR3, ("cbPortIoLoc=%#x\n", cbPortIoLoc), VERR_NO_PAGE_MEMORY);
        }
    }

    return VINF_SUCCESS;
}
401
402
403/**
404 * Ensures that the breakpoint manager is initialized to handle I/O port breakpoint.
405 *
406 * @returns VBox status code.
407 * @param pUVM The user mode VM handle.
408 *
409 * @thread Any thread.
410 */
411static int dbgfR3BpPortIoEnsureInit(PUVM pUVM)
412{
413 /* If the L1 lookup table is allocated initialization succeeded before. */
414 if (RT_LIKELY(pUVM->dbgf.s.paBpLocPortIoR3))
415 return VINF_SUCCESS;
416
417 /* Ensure that the breakpoint manager is initialized. */
418 int rc = dbgfR3BpEnsureInit(pUVM);
419 if (RT_FAILURE(rc))
420 return rc;
421
422 /* Gather all EMTs and call into ring-0 to initialize the breakpoint manager. */
423 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpPortIoInitEmtWorker, NULL /*pvUser*/);
424}
425
426
427/**
428 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
429 */
430static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpOwnerInitEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
431{
432 RT_NOREF(pvUser);
433
434 VMCPU_ASSERT_EMT(pVCpu);
435 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
436
437 /*
438 * The initialization will be done on EMT(0). It is possible that multiple
439 * initialization attempts are done because dbgfR3BpOwnerEnsureInit() can be called
440 * from racing non EMT threads when trying to create a breakpoint owner for the first time.
441 * Just fake success if the pointers are initialized already, meaning that a previous rendezvous
442 * successfully initialized the breakpoint owner table.
443 */
444 int rc = VINF_SUCCESS;
445 PUVM pUVM = pVM->pUVM;
446 if ( pVCpu->idCpu == 0
447 && !pUVM->dbgf.s.pbmBpOwnersAllocR3)
448 {
449 AssertCompile(!(DBGF_BP_OWNER_COUNT_MAX % 64));
450 pUVM->dbgf.s.pbmBpOwnersAllocR3 = RTMemAllocZ(DBGF_BP_OWNER_COUNT_MAX / 8);
451 if (pUVM->dbgf.s.pbmBpOwnersAllocR3)
452 {
453 if (!SUPR3IsDriverless())
454 {
455 DBGFBPOWNERINITREQ Req;
456 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
457 Req.Hdr.cbReq = sizeof(Req);
458 Req.paBpOwnerR3 = NULL;
459 rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_OWNER_INIT, 0 /*u64Arg*/, &Req.Hdr);
460 if (RT_SUCCESS(rc))
461 {
462 pUVM->dbgf.s.paBpOwnersR3 = (PDBGFBPOWNERINT)Req.paBpOwnerR3;
463 return VINF_SUCCESS;
464 }
465 AssertLogRelMsgRC(rc, ("VMMR0_DO_DBGF_BP_OWNER_INIT failed: %Rrc\n", rc));
466 }
467 else
468 {
469 /* Driverless: Do dbgfR0BpOwnerInitWorker here, ring-3 style. */
470 uint32_t const cbBpOwnerR3 = RT_ALIGN_32(DBGF_BP_OWNER_COUNT_MAX * sizeof(DBGFBPOWNERINT), HOST_PAGE_SIZE);
471 pUVM->dbgf.s.paBpLocPortIoR3 = (uint32_t *)RTMemPageAllocZ(cbBpOwnerR3);
472 if (pUVM->dbgf.s.paBpLocPortIoR3)
473 return VINF_SUCCESS;
474 AssertLogRelMsgFailed(("cbBpOwnerR3=%#x\n", cbBpOwnerR3));
475 rc = VERR_NO_PAGE_MEMORY;
476 }
477
478 RTMemFree((void *)pUVM->dbgf.s.pbmBpOwnersAllocR3);
479 pUVM->dbgf.s.pbmBpOwnersAllocR3 = NULL;
480 }
481 else
482 rc = VERR_NO_MEMORY;
483 }
484
485 return rc;
486}
487
488
489/**
490 * Ensures that the breakpoint manager is fully initialized.
491 *
492 * @returns VBox status code.
493 * @param pUVM The user mode VM handle.
494 *
495 * @thread Any thread.
496 */
497static int dbgfR3BpOwnerEnsureInit(PUVM pUVM)
498{
499 /* If the allocation bitmap is allocated initialization succeeded before. */
500 if (RT_LIKELY(pUVM->dbgf.s.pbmBpOwnersAllocR3))
501 return VINF_SUCCESS;
502
503 /* Gather all EMTs and call into ring-0 to initialize the breakpoint manager. */
504 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpOwnerInitEmtWorker, NULL /*pvUser*/);
505}
506
507
508/**
509 * Retains the given breakpoint owner handle for use.
510 *
511 * @returns VBox status code.
512 * @retval VERR_INVALID_HANDLE if the given breakpoint owner handle is invalid.
513 * @param pUVM The user mode VM handle.
514 * @param hBpOwner The breakpoint owner handle to retain, NIL_DBGFOWNER is accepted without doing anything.
515 * @param fIo Flag whether the owner must have the I/O handler set because it used by an I/O breakpoint.
516 */
517DECLINLINE(int) dbgfR3BpOwnerRetain(PUVM pUVM, DBGFBPOWNER hBpOwner, bool fIo)
518{
519 if (hBpOwner == NIL_DBGFBPOWNER)
520 return VINF_SUCCESS;
521
522 PDBGFBPOWNERINT pBpOwner = dbgfR3BpOwnerGetByHnd(pUVM, hBpOwner);
523 if (pBpOwner)
524 {
525 AssertReturn ( ( fIo
526 && pBpOwner->pfnBpIoHitR3)
527 || ( !fIo
528 && pBpOwner->pfnBpHitR3),
529 VERR_INVALID_HANDLE);
530 ASMAtomicIncU32(&pBpOwner->cRefs);
531 return VINF_SUCCESS;
532 }
533
534 return VERR_INVALID_HANDLE;
535}
536
537
538/**
539 * Releases the given breakpoint owner handle.
540 *
541 * @returns VBox status code.
542 * @retval VERR_INVALID_HANDLE if the given breakpoint owner handle is invalid.
543 * @param pUVM The user mode VM handle.
544 * @param hBpOwner The breakpoint owner handle to retain, NIL_DBGFOWNER is accepted without doing anything.
545 */
546DECLINLINE(int) dbgfR3BpOwnerRelease(PUVM pUVM, DBGFBPOWNER hBpOwner)
547{
548 if (hBpOwner == NIL_DBGFBPOWNER)
549 return VINF_SUCCESS;
550
551 PDBGFBPOWNERINT pBpOwner = dbgfR3BpOwnerGetByHnd(pUVM, hBpOwner);
552 if (pBpOwner)
553 {
554 Assert(pBpOwner->cRefs > 1);
555 ASMAtomicDecU32(&pBpOwner->cRefs);
556 return VINF_SUCCESS;
557 }
558
559 return VERR_INVALID_HANDLE;
560}
561
562
/**
 * Returns the internal breakpoint state for the given handle.
 *
 * @returns Pointer to the internal breakpoint state or NULL if the handle is invalid.
 * @param   pUVM    The user mode VM handle.
 * @param   hBp     The breakpoint handle to resolve.
 */
DECLINLINE(PDBGFBPINT) dbgfR3BpGetByHnd(PUVM pUVM, DBGFBP hBp)
{
    /* The handle encodes the chunk ID and the entry index inside that chunk. */
    uint32_t idChunk  = DBGF_BP_HND_GET_CHUNK_ID(hBp);
    uint32_t idxEntry = DBGF_BP_HND_GET_ENTRY(hBp);

    /* Strict bounds checking before touching any table. */
    AssertReturn(idChunk < DBGF_BP_CHUNK_COUNT, NULL);
    AssertReturn(idxEntry < DBGF_BP_COUNT_PER_CHUNK, NULL);

    /* The chunk must be allocated and the entry marked as in-use in its bitmap. */
    PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[idChunk];
    AssertReturn(pBpChunk->idChunk == idChunk, NULL);
    AssertPtrReturn(pBpChunk->pbmAlloc, NULL);
    AssertReturn(ASMBitTest(pBpChunk->pbmAlloc, idxEntry), NULL);

    return &pBpChunk->pBpBaseR3[idxEntry];
}
585
586
/**
 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
 *
 * Worker for dbgfR3BpChunkAlloc(): allocates the given global breakpoint table
 * chunk and its allocation bitmap on EMT(0).
 */
static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpChunkAllocEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    /* The chunk index is passed through the opaque user argument. */
    uint32_t idChunk = (uint32_t)(uintptr_t)pvUser;

    VMCPU_ASSERT_EMT(pVCpu);
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    AssertReturn(idChunk < DBGF_BP_CHUNK_COUNT, VERR_DBGF_BP_IPE_1);

    PUVM pUVM = pVM->pUVM;
    PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[idChunk];

    /* Either still unallocated or already set up by an earlier rendezvous. */
    AssertReturn(   pBpChunk->idChunk == DBGF_BP_CHUNK_ID_INVALID
                 || pBpChunk->idChunk == idChunk,
                 VERR_DBGF_BP_IPE_2);

    /*
     * The initialization will be done on EMT(0). It is possible that multiple
     * allocation attempts are done when multiple racing non EMT threads try to
     * allocate a breakpoint and a new chunk needs to be allocated.
     * Ignore the request and succeed if the chunk is allocated meaning that a
     * previous rendezvous successfully allocated the chunk.
     */
    int rc = VINF_SUCCESS;
    if (   pVCpu->idCpu == 0
        && pBpChunk->idChunk == DBGF_BP_CHUNK_ID_INVALID)
    {
        /* Allocate the bitmap first so we can skip calling into VMMR0 if it fails. */
        AssertCompile(!(DBGF_BP_COUNT_PER_CHUNK % 64));
        void *pbmAlloc = RTMemAllocZ(DBGF_BP_COUNT_PER_CHUNK / 8);
        if (RT_LIKELY(pbmAlloc))
        {
            if (!SUPR3IsDriverless())
            {
                /* Let ring-0 allocate the chunk and return the ring-3 mapping. */
                DBGFBPCHUNKALLOCREQ Req;
                Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
                Req.Hdr.cbReq    = sizeof(Req);
                Req.idChunk      = idChunk;
                Req.pChunkBaseR3 = NULL;
                rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_CHUNK_ALLOC, 0 /*u64Arg*/, &Req.Hdr);
                if (RT_SUCCESS(rc))
                    pBpChunk->pBpBaseR3 = (PDBGFBPINT)Req.pChunkBaseR3;
                else
                    AssertLogRelMsgRC(rc, ("VMMR0_DO_DBGF_BP_CHUNK_ALLOC failed: %Rrc\n", rc));
            }
            else
            {
                /* Driverless: Do dbgfR0BpChunkAllocWorker here, ring-3 style. */
                uint32_t const cbShared = RT_ALIGN_32(DBGF_BP_COUNT_PER_CHUNK * sizeof(DBGFBPINT), HOST_PAGE_SIZE);
                pBpChunk->pBpBaseR3 = (PDBGFBPINT)RTMemPageAllocZ(cbShared);
                AssertLogRelMsgStmt(pBpChunk->pBpBaseR3, ("cbShared=%#x\n", cbShared), rc = VERR_NO_PAGE_MEMORY);
            }
            if (RT_SUCCESS(rc))
            {
                /* Publish the chunk; callers (dbgfR3BpAlloc) check idChunk to see it is usable. */
                pBpChunk->pbmAlloc = (void volatile *)pbmAlloc;
                pBpChunk->cBpsFree = DBGF_BP_COUNT_PER_CHUNK;
                pBpChunk->idChunk  = idChunk;
                return VINF_SUCCESS;
            }

            /* Back out the bitmap on failure so a retry starts clean. */
            RTMemFree(pbmAlloc);
        }
        else
            rc = VERR_NO_MEMORY;
    }

    return rc;
}
658
659
660/**
661 * Tries to allocate the given chunk which requires an EMT rendezvous.
662 *
663 * @returns VBox status code.
664 * @param pUVM The user mode VM handle.
665 * @param idChunk The chunk to allocate.
666 *
667 * @thread Any thread.
668 */
669DECLINLINE(int) dbgfR3BpChunkAlloc(PUVM pUVM, uint32_t idChunk)
670{
671 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpChunkAllocEmtWorker, (void *)(uintptr_t)idChunk);
672}
673
674
/**
 * Tries to allocate a new breakpoint of the given type.
 *
 * @returns VBox status code.
 * @retval  VERR_DBGF_NO_MORE_BP_SLOTS if no free entry could be claimed in any chunk.
 * @param   pUVM        The user mode VM handle.
 * @param   hOwner      The owner handle, NIL_DBGFBPOWNER if none assigned.
 * @param   pvUser      Opaque user data passed in the owner callback.
 * @param   enmType     Breakpoint type to allocate.
 * @param   fFlags      Flags associated with the allocated breakpoint.
 * @param   iHitTrigger The hit count at which the breakpoint start triggering.
 *                      Use 0 (or 1) if it's gonna trigger at once.
 * @param   iHitDisable The hit count which disables the breakpoint.
 *                      Use ~(uint64_t)0 if it's never gonna be disabled.
 * @param   phBp        Where to return the opaque breakpoint handle on success.
 * @param   ppBp        Where to return the pointer to the internal breakpoint state on success.
 *
 * @thread Any thread.
 */
static int dbgfR3BpAlloc(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser, DBGFBPTYPE enmType,
                         uint16_t fFlags, uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp,
                         PDBGFBPINT *ppBp)
{
    /* I/O flavored breakpoints require the owner to have the I/O hit callback set. */
    bool fIo =    enmType == DBGFBPTYPE_PORT_IO
               || enmType == DBGFBPTYPE_MMIO;
    int rc = dbgfR3BpOwnerRetain(pUVM, hOwner, fIo);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Search for a chunk having a free entry, allocating new chunks
     * if the encountered ones are full.
     *
     * This can be called from multiple threads at the same time so special care
     * has to be taken to not require any locking here.
     */
    for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpChunks); i++)
    {
        PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[i];

        uint32_t idChunk = ASMAtomicReadU32(&pBpChunk->idChunk);
        if (idChunk == DBGF_BP_CHUNK_ID_INVALID)
        {
            /* Chunk not allocated yet; gather all EMTs and allocate it now. */
            rc = dbgfR3BpChunkAlloc(pUVM, i);
            if (RT_FAILURE(rc))
            {
                LogRel(("DBGF/Bp: Allocating new breakpoint table chunk failed with %Rrc\n", rc));
                break;
            }

            idChunk = ASMAtomicReadU32(&pBpChunk->idChunk);
            Assert(idChunk == i);
        }

        /** @todo Optimize with some hinting if this turns out to be too slow. */
        for (;;)
        {
            uint32_t cBpsFree = ASMAtomicReadU32(&pBpChunk->cBpsFree);
            if (cBpsFree)
            {
                /*
                 * Scan the associated bitmap for a free entry, if none can be found another thread
                 * raced us and we go to the next chunk.
                 */
                int32_t iClr = ASMBitFirstClear(pBpChunk->pbmAlloc, DBGF_BP_COUNT_PER_CHUNK);
                if (iClr != -1)
                {
                    /*
                     * Try to allocate, we could get raced here as well. In that case
                     * we try again.
                     */
                    if (!ASMAtomicBitTestAndSet(pBpChunk->pbmAlloc, iClr))
                    {
                        /* Success, immediately mark as allocated, initialize the breakpoint state and return. */
                        ASMAtomicDecU32(&pBpChunk->cBpsFree);

                        PDBGFBPINT pBp = &pBpChunk->pBpBaseR3[iClr];
                        pBp->Pub.cHits       = 0;
                        pBp->Pub.iHitTrigger = iHitTrigger;
                        pBp->Pub.iHitDisable = iHitDisable;
                        pBp->Pub.hOwner      = hOwner;
                        pBp->Pub.u16Type     = DBGF_BP_PUB_MAKE_TYPE(enmType);
                        pBp->Pub.fFlags      = fFlags & ~DBGF_BP_F_ENABLED; /* The enabled flag is handled in the respective APIs. */
                        pBp->pvUserR3        = pvUser;

                        /** @todo Owner handling (reference and call ring-0 if it has an ring-0 callback). */

                        *phBp = DBGF_BP_HND_CREATE(idChunk, iClr);
                        *ppBp = pBp;
                        return VINF_SUCCESS;
                    }
                    /* else Retry with another spot. */
                }
                else /* no free entry in bitmap, go to the next chunk */
                    break;
            }
            else /* !cBpsFree, go to the next chunk */
                break;
        }
    }

    /* Everything exhausted (or chunk allocation failed): drop the owner reference again. */
    rc = dbgfR3BpOwnerRelease(pUVM, hOwner); AssertRC(rc);
    return VERR_DBGF_NO_MORE_BP_SLOTS;
}
778
779
/**
 * Frees the given breakpoint handle.
 *
 * @returns nothing.
 * @param   pUVM    The user mode VM handle.
 * @param   hBp     The breakpoint handle to free.
 * @param   pBp     The internal breakpoint state pointer.
 */
static void dbgfR3BpFree(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
{
    /* Decode chunk ID and entry index from the handle and validate both. */
    uint32_t idChunk  = DBGF_BP_HND_GET_CHUNK_ID(hBp);
    uint32_t idxEntry = DBGF_BP_HND_GET_ENTRY(hBp);

    AssertReturnVoid(idChunk < DBGF_BP_CHUNK_COUNT);
    AssertReturnVoid(idxEntry < DBGF_BP_COUNT_PER_CHUNK);

    PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[idChunk];
    AssertPtrReturnVoid(pBpChunk->pbmAlloc);
    AssertReturnVoid(ASMBitTest(pBpChunk->pbmAlloc, idxEntry));

    /** @todo Need a trip to Ring-0 if an owner is assigned with a Ring-0 part to clear the breakpoint. */
    int rc = dbgfR3BpOwnerRelease(pUVM, pBp->Pub.hOwner); AssertRC(rc); RT_NOREF(rc);
    /* Scrub the state before the entry can be handed out again. */
    memset(pBp, 0, sizeof(*pBp));

    /* Clear the allocation bit last; dbgfR3BpAlloc() may grab the entry immediately. */
    ASMAtomicBitClear(pBpChunk->pbmAlloc, idxEntry);
    ASMAtomicIncU32(&pBpChunk->cBpsFree);
}
807
808
/**
 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
 *
 * Worker for dbgfR3BpL2TblChunkAlloc(): allocates the given L2 lookup table
 * chunk and its allocation bitmap on EMT(0).
 */
static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpL2TblChunkAllocEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    /* The chunk index is passed through the opaque user argument. */
    uint32_t idChunk = (uint32_t)(uintptr_t)pvUser;

    VMCPU_ASSERT_EMT(pVCpu);
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    AssertReturn(idChunk < DBGF_BP_L2_TBL_CHUNK_COUNT, VERR_DBGF_BP_IPE_1);

    PUVM pUVM = pVM->pUVM;
    PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[idChunk];

    /* Either still unallocated or already set up by an earlier rendezvous. */
    AssertReturn(   pL2Chunk->idChunk == DBGF_BP_L2_IDX_CHUNK_ID_INVALID
                 || pL2Chunk->idChunk == idChunk,
                 VERR_DBGF_BP_IPE_2);

    /*
     * The initialization will be done on EMT(0). It is possible that multiple
     * allocation attempts are done when multiple racing non EMT threads try to
     * allocate a breakpoint and a new chunk needs to be allocated.
     * Ignore the request and succeed if the chunk is allocated meaning that a
     * previous rendezvous successfully allocated the chunk.
     */
    int rc = VINF_SUCCESS;
    if (   pVCpu->idCpu == 0
        && pL2Chunk->idChunk == DBGF_BP_L2_IDX_CHUNK_ID_INVALID)
    {
        /* Allocate the bitmap first so we can skip calling into VMMR0 if it fails. */
        AssertCompile(!(DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK % 64));
        void *pbmAlloc = RTMemAllocZ(DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK / 8);
        if (RT_LIKELY(pbmAlloc))
        {
            if (!SUPR3IsDriverless())
            {
                /* Let ring-0 allocate the chunk and return the ring-3 mapping. */
                DBGFBPL2TBLCHUNKALLOCREQ Req;
                Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
                Req.Hdr.cbReq    = sizeof(Req);
                Req.idChunk      = idChunk;
                Req.pChunkBaseR3 = NULL;
                rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC, 0 /*u64Arg*/, &Req.Hdr);
                if (RT_SUCCESS(rc))
                    pL2Chunk->pL2BaseR3 = (PDBGFBPL2ENTRY)Req.pChunkBaseR3;
                else
                    AssertLogRelMsgRC(rc, ("VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC failed: %Rrc\n", rc));
            }
            else
            {
                /* Driverless: Do dbgfR0BpL2TblChunkAllocWorker here, ring-3 style. */
                uint32_t const cbTotal = RT_ALIGN_32(DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK * sizeof(DBGFBPL2ENTRY), HOST_PAGE_SIZE);
                pL2Chunk->pL2BaseR3 = (PDBGFBPL2ENTRY)RTMemPageAllocZ(cbTotal);
                AssertLogRelMsgStmt(pL2Chunk->pL2BaseR3, ("cbTotal=%#x\n", cbTotal), rc = VERR_NO_PAGE_MEMORY);
            }
            if (RT_SUCCESS(rc))
            {
                /* Publish the chunk; callers check idChunk to see it is usable. */
                pL2Chunk->pbmAlloc = (void volatile *)pbmAlloc;
                pL2Chunk->cFree    = DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK;
                pL2Chunk->idChunk  = idChunk;
                return VINF_SUCCESS;
            }

            /* Back out the bitmap on failure so a retry starts clean. */
            RTMemFree(pbmAlloc);
        }
        else
            rc = VERR_NO_MEMORY;
    }

    return rc;
}
880
881
882/**
883 * Tries to allocate the given L2 table chunk which requires an EMT rendezvous.
884 *
885 * @returns VBox status code.
886 * @param pUVM The user mode VM handle.
887 * @param idChunk The chunk to allocate.
888 *
889 * @thread Any thread.
890 */
891DECLINLINE(int) dbgfR3BpL2TblChunkAlloc(PUVM pUVM, uint32_t idChunk)
892{
893 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpL2TblChunkAllocEmtWorker, (void *)(uintptr_t)idChunk);
894}
895
896
897/**
898 * Tries to allocate a new breakpoint of the given type.
899 *
900 * @returns VBox status code.
901 * @param pUVM The user mode VM handle.
902 * @param pidxL2Tbl Where to return the L2 table entry index on success.
903 * @param ppL2TblEntry Where to return the pointer to the L2 table entry on success.
904 *
905 * @thread Any thread.
906 */
907static int dbgfR3BpL2TblEntryAlloc(PUVM pUVM, uint32_t *pidxL2Tbl, PDBGFBPL2ENTRY *ppL2TblEntry)
908{
909 /*
910 * Search for a chunk having a free entry, allocating new chunks
911 * if the encountered ones are full.
912 *
913 * This can be called from multiple threads at the same time so special care
914 * has to be taken to not require any locking here.
915 */
916 for (uint32_t i = 0; i < RT_ELEMENTS(pUVM->dbgf.s.aBpL2TblChunks); i++)
917 {
918 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[i];
919
920 uint32_t idChunk = ASMAtomicReadU32(&pL2Chunk->idChunk);
921 if (idChunk == DBGF_BP_L2_IDX_CHUNK_ID_INVALID)
922 {
923 int rc = dbgfR3BpL2TblChunkAlloc(pUVM, i);
924 if (RT_FAILURE(rc))
925 {
926 LogRel(("DBGF/Bp: Allocating new breakpoint L2 lookup table chunk failed with %Rrc\n", rc));
927 break;
928 }
929
930 idChunk = ASMAtomicReadU32(&pL2Chunk->idChunk);
931 Assert(idChunk == i);
932 }
933
934 /** @todo Optimize with some hinting if this turns out to be too slow. */
935 for (;;)
936 {
937 uint32_t cFree = ASMAtomicReadU32(&pL2Chunk->cFree);
938 if (cFree)
939 {
940 /*
941 * Scan the associated bitmap for a free entry, if none can be found another thread
942 * raced us and we go to the next chunk.
943 */
944 int32_t iClr = ASMBitFirstClear(pL2Chunk->pbmAlloc, DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK);
945 if (iClr != -1)
946 {
947 /*
948 * Try to allocate, we could get raced here as well. In that case
949 * we try again.
950 */
951 if (!ASMAtomicBitTestAndSet(pL2Chunk->pbmAlloc, iClr))
952 {
953 /* Success, immediately mark as allocated, initialize the breakpoint state and return. */
954 ASMAtomicDecU32(&pL2Chunk->cFree);
955
956 PDBGFBPL2ENTRY pL2Entry = &pL2Chunk->pL2BaseR3[iClr];
957
958 *pidxL2Tbl = DBGF_BP_L2_IDX_CREATE(idChunk, iClr);
959 *ppL2TblEntry = pL2Entry;
960 return VINF_SUCCESS;
961 }
962 /* else Retry with another spot. */
963 }
964 else /* no free entry in bitmap, go to the next chunk */
965 break;
966 }
967 else /* !cFree, go to the next chunk */
968 break;
969 }
970 }
971
972 return VERR_DBGF_NO_MORE_BP_SLOTS;
973}
974
975
976/**
977 * Frees the given breakpoint handle.
978 *
979 * @returns nothing.
980 * @param pUVM The user mode VM handle.
981 * @param idxL2Tbl The L2 table index to free.
982 * @param pL2TblEntry The L2 table entry pointer to free.
983 */
984static void dbgfR3BpL2TblEntryFree(PUVM pUVM, uint32_t idxL2Tbl, PDBGFBPL2ENTRY pL2TblEntry)
985{
986 uint32_t idChunk = DBGF_BP_L2_IDX_GET_CHUNK_ID(idxL2Tbl);
987 uint32_t idxEntry = DBGF_BP_L2_IDX_GET_ENTRY(idxL2Tbl);
988
989 AssertReturnVoid(idChunk < DBGF_BP_L2_TBL_CHUNK_COUNT);
990 AssertReturnVoid(idxEntry < DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK);
991
992 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[idChunk];
993 AssertPtrReturnVoid(pL2Chunk->pbmAlloc);
994 AssertReturnVoid(ASMBitTest(pL2Chunk->pbmAlloc, idxEntry));
995
996 memset(pL2TblEntry, 0, sizeof(*pL2TblEntry));
997
998 ASMAtomicBitClear(pL2Chunk->pbmAlloc, idxEntry);
999 ASMAtomicIncU32(&pL2Chunk->cFree);
1000}
1001
1002
1003/**
1004 * Sets the enabled flag of the given breakpoint to the given value.
1005 *
1006 * @returns nothing.
1007 * @param pBp The breakpoint to set the state.
1008 * @param fEnabled Enabled status.
1009 */
1010DECLINLINE(void) dbgfR3BpSetEnabled(PDBGFBPINT pBp, bool fEnabled)
1011{
1012 if (fEnabled)
1013 pBp->Pub.fFlags |= DBGF_BP_F_ENABLED;
1014 else
1015 pBp->Pub.fFlags &= ~DBGF_BP_F_ENABLED;
1016}
1017
1018
1019/**
1020 * Assigns a hardware breakpoint state to the given register breakpoint.
1021 *
1022 * @returns VBox status code.
1023 * @param pVM The cross-context VM structure pointer.
1024 * @param hBp The breakpoint handle to assign.
1025 * @param pBp The internal breakpoint state.
1026 *
1027 * @thread Any thread.
1028 */
1029static int dbgfR3BpRegAssign(PVM pVM, DBGFBP hBp, PDBGFBPINT pBp)
1030{
1031 AssertReturn(pBp->Pub.u.Reg.iReg == UINT8_MAX, VERR_DBGF_BP_IPE_3);
1032
1033 for (uint8_t i = 0; i < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); i++)
1034 {
1035 PDBGFBPHW pHwBp = &pVM->dbgf.s.aHwBreakpoints[i];
1036
1037 AssertCompileSize(DBGFBP, sizeof(uint32_t));
1038 if (ASMAtomicCmpXchgU32(&pHwBp->hBp, hBp, NIL_DBGFBP))
1039 {
1040 pHwBp->GCPtr = pBp->Pub.u.Reg.GCPtr;
1041 pHwBp->fType = pBp->Pub.u.Reg.fType;
1042 pHwBp->cb = pBp->Pub.u.Reg.cb;
1043 pHwBp->fEnabled = DBGF_BP_PUB_IS_ENABLED(&pBp->Pub);
1044
1045 pBp->Pub.u.Reg.iReg = i;
1046 return VINF_SUCCESS;
1047 }
1048 }
1049
1050 return VERR_DBGF_NO_MORE_BP_SLOTS;
1051}
1052
1053
1054/**
1055 * Removes the assigned hardware breakpoint state from the given register breakpoint.
1056 *
1057 * @returns VBox status code.
1058 * @param pVM The cross-context VM structure pointer.
1059 * @param hBp The breakpoint handle to remove.
1060 * @param pBp The internal breakpoint state.
1061 *
1062 * @thread Any thread.
1063 */
1064static int dbgfR3BpRegRemove(PVM pVM, DBGFBP hBp, PDBGFBPINT pBp)
1065{
1066 AssertReturn(pBp->Pub.u.Reg.iReg < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints), VERR_DBGF_BP_IPE_3);
1067
1068 PDBGFBPHW pHwBp = &pVM->dbgf.s.aHwBreakpoints[pBp->Pub.u.Reg.iReg];
1069 AssertReturn(pHwBp->hBp == hBp, VERR_DBGF_BP_IPE_4);
1070 AssertReturn(!pHwBp->fEnabled, VERR_DBGF_BP_IPE_5);
1071
1072 pHwBp->GCPtr = 0;
1073 pHwBp->fType = 0;
1074 pHwBp->cb = 0;
1075 ASMCompilerBarrier();
1076
1077 ASMAtomicWriteU32(&pHwBp->hBp, NIL_DBGFBP);
1078 return VINF_SUCCESS;
1079}
1080
1081
1082/**
1083 * Returns the pointer to the L2 table entry from the given index.
1084 *
1085 * @returns Current context pointer to the L2 table entry or NULL if the provided index value is invalid.
1086 * @param pUVM The user mode VM handle.
1087 * @param idxL2 The L2 table index to resolve.
1088 *
1089 * @note The content of the resolved L2 table entry is not validated!.
1090 */
1091DECLINLINE(PDBGFBPL2ENTRY) dbgfR3BpL2GetByIdx(PUVM pUVM, uint32_t idxL2)
1092{
1093 uint32_t idChunk = DBGF_BP_L2_IDX_GET_CHUNK_ID(idxL2);
1094 uint32_t idxEntry = DBGF_BP_L2_IDX_GET_ENTRY(idxL2);
1095
1096 AssertReturn(idChunk < DBGF_BP_L2_TBL_CHUNK_COUNT, NULL);
1097 AssertReturn(idxEntry < DBGF_BP_L2_TBL_ENTRIES_PER_CHUNK, NULL);
1098
1099 PDBGFBPL2TBLCHUNKR3 pL2Chunk = &pUVM->dbgf.s.aBpL2TblChunks[idChunk];
1100 AssertPtrReturn(pL2Chunk->pbmAlloc, NULL);
1101 AssertReturn(ASMBitTest(pL2Chunk->pbmAlloc, idxEntry), NULL);
1102
1103 return &pL2Chunk->CTX_SUFF(pL2Base)[idxEntry];
1104}
1105
1106
1107/**
1108 * Creates a binary search tree with the given root and leaf nodes.
1109 *
1110 * @returns VBox status code.
1111 * @param pUVM The user mode VM handle.
1112 * @param idxL1 The index into the L1 table where the created tree should be linked into.
1113 * @param u32EntryOld The old entry in the L1 table used to compare with in the atomic update.
1114 * @param hBpRoot The root node DBGF handle to assign.
1115 * @param GCPtrRoot The root nodes GC pointer to use as a key.
1116 * @param hBpLeaf The leafs node DBGF handle to assign.
1117 * @param GCPtrLeaf The leafs node GC pointer to use as a key.
1118 */
1119static int dbgfR3BpInt3L2BstCreate(PUVM pUVM, uint32_t idxL1, uint32_t u32EntryOld,
1120 DBGFBP hBpRoot, RTGCUINTPTR GCPtrRoot,
1121 DBGFBP hBpLeaf, RTGCUINTPTR GCPtrLeaf)
1122{
1123 AssertReturn(GCPtrRoot != GCPtrLeaf, VERR_DBGF_BP_IPE_9);
1124 Assert(DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(GCPtrRoot) == DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(GCPtrLeaf));
1125
1126 /* Allocate two nodes. */
1127 uint32_t idxL2Root = 0;
1128 PDBGFBPL2ENTRY pL2Root = NULL;
1129 int rc = dbgfR3BpL2TblEntryAlloc(pUVM, &idxL2Root, &pL2Root);
1130 if (RT_SUCCESS(rc))
1131 {
1132 uint32_t idxL2Leaf = 0;
1133 PDBGFBPL2ENTRY pL2Leaf = NULL;
1134 rc = dbgfR3BpL2TblEntryAlloc(pUVM, &idxL2Leaf, &pL2Leaf);
1135 if (RT_SUCCESS(rc))
1136 {
1137 dbgfBpL2TblEntryInit(pL2Leaf, hBpLeaf, GCPtrLeaf, DBGF_BP_L2_ENTRY_IDX_END, DBGF_BP_L2_ENTRY_IDX_END, 0 /*iDepth*/);
1138 if (GCPtrLeaf < GCPtrRoot)
1139 dbgfBpL2TblEntryInit(pL2Root, hBpRoot, GCPtrRoot, idxL2Leaf, DBGF_BP_L2_ENTRY_IDX_END, 0 /*iDepth*/);
1140 else
1141 dbgfBpL2TblEntryInit(pL2Root, hBpRoot, GCPtrRoot, DBGF_BP_L2_ENTRY_IDX_END, idxL2Leaf, 0 /*iDepth*/);
1142
1143 uint32_t const u32Entry = DBGF_BP_INT3_L1_ENTRY_CREATE_L2_IDX(idxL2Root);
1144 if (ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], u32Entry, u32EntryOld))
1145 return VINF_SUCCESS;
1146
1147 /* The L1 entry has changed due to another thread racing us during insertion, free nodes and try again. */
1148 dbgfR3BpL2TblEntryFree(pUVM, idxL2Leaf, pL2Leaf);
1149 rc = VINF_TRY_AGAIN;
1150 }
1151
1152 dbgfR3BpL2TblEntryFree(pUVM, idxL2Root, pL2Root);
1153 }
1154
1155 return rc;
1156}
1157
1158
1159/**
1160 * Inserts the given breakpoint handle into an existing binary search tree.
1161 *
1162 * @returns VBox status code.
1163 * @param pUVM The user mode VM handle.
1164 * @param idxL2Root The index of the tree root in the L2 table.
1165 * @param hBp The node DBGF handle to insert.
1166 * @param GCPtr The nodes GC pointer to use as a key.
1167 */
1168static int dbgfR3BpInt2L2BstNodeInsert(PUVM pUVM, uint32_t idxL2Root, DBGFBP hBp, RTGCUINTPTR GCPtr)
1169{
1170 GCPtr = DBGF_BP_INT3_L2_KEY_EXTRACT_FROM_ADDR(GCPtr);
1171
1172 /* Allocate a new node first. */
1173 uint32_t idxL2Nd = 0;
1174 PDBGFBPL2ENTRY pL2Nd = NULL;
1175 int rc = dbgfR3BpL2TblEntryAlloc(pUVM, &idxL2Nd, &pL2Nd);
1176 if (RT_SUCCESS(rc))
1177 {
1178 /* Walk the tree and find the correct node to insert to. */
1179 PDBGFBPL2ENTRY pL2Entry = dbgfR3BpL2GetByIdx(pUVM, idxL2Root);
1180 while (RT_LIKELY(pL2Entry))
1181 {
1182 /* Make a copy of the entry. */
1183 DBGFBPL2ENTRY L2Entry;
1184 L2Entry.u64GCPtrKeyAndBpHnd1 = ASMAtomicReadU64(&pL2Entry->u64GCPtrKeyAndBpHnd1);
1185 L2Entry.u64LeftRightIdxDepthBpHnd2 = ASMAtomicReadU64(&pL2Entry->u64LeftRightIdxDepthBpHnd2);
1186
1187 RTGCUINTPTR GCPtrL2Entry = DBGF_BP_L2_ENTRY_GET_GCPTR(L2Entry.u64GCPtrKeyAndBpHnd1);
1188 AssertBreak(GCPtr != GCPtrL2Entry);
1189
1190 /* Not found, get to the next level. */
1191 uint32_t idxL2Next = GCPtr < GCPtrL2Entry
1192 ? DBGF_BP_L2_ENTRY_GET_IDX_LEFT(L2Entry.u64LeftRightIdxDepthBpHnd2)
1193 : DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(L2Entry.u64LeftRightIdxDepthBpHnd2);
1194 if (idxL2Next == DBGF_BP_L2_ENTRY_IDX_END)
1195 {
1196 /* Insert the new node here. */
1197 dbgfBpL2TblEntryInit(pL2Nd, hBp, GCPtr, DBGF_BP_L2_ENTRY_IDX_END, DBGF_BP_L2_ENTRY_IDX_END, 0 /*iDepth*/);
1198 if (GCPtr < GCPtrL2Entry)
1199 dbgfBpL2TblEntryUpdateLeft(pL2Entry, idxL2Next, 0 /*iDepth*/);
1200 else
1201 dbgfBpL2TblEntryUpdateRight(pL2Entry, idxL2Next, 0 /*iDepth*/);
1202 return VINF_SUCCESS;
1203 }
1204
1205 pL2Entry = dbgfR3BpL2GetByIdx(pUVM, idxL2Next);
1206 }
1207
1208 dbgfR3BpL2TblEntryFree(pUVM, idxL2Nd, pL2Nd);
1209 rc = VERR_DBGF_BP_L2_LOOKUP_FAILED;
1210 }
1211
1212 return rc;
1213}
1214
1215
1216/**
1217 * Adds the given breakpoint handle keyed with the GC pointer to the proper L2 binary search tree
1218 * possibly creating a new tree.
1219 *
1220 * @returns VBox status code.
1221 * @param pUVM The user mode VM handle.
1222 * @param idxL1 The index into the L1 table the breakpoint uses.
1223 * @param hBp The breakpoint handle which is to be added.
1224 * @param GCPtr The GC pointer the breakpoint is keyed with.
1225 */
1226static int dbgfR3BpInt3L2BstNodeAdd(PUVM pUVM, uint32_t idxL1, DBGFBP hBp, RTGCUINTPTR GCPtr)
1227{
1228 int rc = RTSemFastMutexRequest(pUVM->dbgf.s.hMtxBpL2Wr); AssertRC(rc);
1229
1230 uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1]); /* Re-read, could get raced by a remove operation. */
1231 uint8_t u8Type = DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32Entry);
1232 if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND)
1233 {
1234 /* Create a new search tree, gather the necessary information first. */
1235 DBGFBP hBp2 = DBGF_BP_INT3_L1_ENTRY_GET_BP_HND(u32Entry);
1236 PDBGFBPINT pBp2 = dbgfR3BpGetByHnd(pUVM, hBp2);
1237 AssertStmt(RT_VALID_PTR(pBp2), rc = VERR_DBGF_BP_IPE_7);
1238 if (RT_SUCCESS(rc))
1239 rc = dbgfR3BpInt3L2BstCreate(pUVM, idxL1, u32Entry, hBp, GCPtr, hBp2, pBp2->Pub.u.Int3.GCPtr);
1240 }
1241 else if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX)
1242 rc = dbgfR3BpInt2L2BstNodeInsert(pUVM, DBGF_BP_INT3_L1_ENTRY_GET_L2_IDX(u32Entry), hBp, GCPtr);
1243
1244 int rc2 = RTSemFastMutexRelease(pUVM->dbgf.s.hMtxBpL2Wr); AssertRC(rc2);
1245 return rc;
1246}
1247
1248
1249/**
1250 * Gets the leftmost from the given tree node start index.
1251 *
1252 * @returns VBox status code.
1253 * @param pUVM The user mode VM handle.
1254 * @param idxL2Start The start index to walk from.
1255 * @param pidxL2Leftmost Where to store the L2 table index of the leftmost entry.
1256 * @param ppL2NdLeftmost Where to store the pointer to the leftmost L2 table entry.
1257 * @param pidxL2NdLeftParent Where to store the L2 table index of the leftmost entries parent.
1258 * @param ppL2NdLeftParent Where to store the pointer to the leftmost L2 table entries parent.
1259 */
1260static int dbgfR33BpInt3BstGetLeftmostEntryFromNode(PUVM pUVM, uint32_t idxL2Start,
1261 uint32_t *pidxL2Leftmost, PDBGFBPL2ENTRY *ppL2NdLeftmost,
1262 uint32_t *pidxL2NdLeftParent, PDBGFBPL2ENTRY *ppL2NdLeftParent)
1263{
1264 uint32_t idxL2Parent = DBGF_BP_L2_ENTRY_IDX_END;
1265 PDBGFBPL2ENTRY pL2NdParent = NULL;
1266
1267 for (;;)
1268 {
1269 PDBGFBPL2ENTRY pL2Entry = dbgfR3BpL2GetByIdx(pUVM, idxL2Start);
1270 AssertPtr(pL2Entry);
1271
1272 uint32_t idxL2Left = DBGF_BP_L2_ENTRY_GET_IDX_LEFT(pL2Entry->u64LeftRightIdxDepthBpHnd2);
1273 if (idxL2Start == DBGF_BP_L2_ENTRY_IDX_END)
1274 {
1275 *pidxL2Leftmost = idxL2Start;
1276 *ppL2NdLeftmost = pL2Entry;
1277 *pidxL2NdLeftParent = idxL2Parent;
1278 *ppL2NdLeftParent = pL2NdParent;
1279 break;
1280 }
1281
1282 idxL2Parent = idxL2Start;
1283 idxL2Start = idxL2Left;
1284 pL2NdParent = pL2Entry;
1285 }
1286
1287 return VINF_SUCCESS;
1288}
1289
1290
1291/**
1292 * Removes the given node rearranging the tree.
1293 *
1294 * @returns VBox status code.
1295 * @param pUVM The user mode VM handle.
1296 * @param idxL1 The index into the L1 table pointing to the binary search tree containing the node.
1297 * @param idxL2Root The L2 table index where the tree root is located.
1298 * @param idxL2Nd The node index to remove.
1299 * @param pL2Nd The L2 table entry to remove.
1300 * @param idxL2NdParent The parents index, can be DBGF_BP_L2_ENTRY_IDX_END if the root is about to be removed.
1301 * @param pL2NdParent The parents L2 table entry, can be NULL if the root is about to be removed.
1302 * @param fLeftChild Flag whether the node is the left child of the parent or the right one.
1303 */
1304static int dbgfR3BpInt3BstNodeRemove(PUVM pUVM, uint32_t idxL1, uint32_t idxL2Root,
1305 uint32_t idxL2Nd, PDBGFBPL2ENTRY pL2Nd,
1306 uint32_t idxL2NdParent, PDBGFBPL2ENTRY pL2NdParent,
1307 bool fLeftChild)
1308{
1309 /*
1310 * If there are only two nodes remaining the tree will get destroyed and the
1311 * L1 entry will be converted to the direct handle type.
1312 */
1313 uint32_t idxL2Left = DBGF_BP_L2_ENTRY_GET_IDX_LEFT(pL2Nd->u64LeftRightIdxDepthBpHnd2);
1314 uint32_t idxL2Right = DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(pL2Nd->u64LeftRightIdxDepthBpHnd2);
1315
1316 Assert(idxL2NdParent != DBGF_BP_L2_ENTRY_IDX_END || !pL2NdParent); RT_NOREF(idxL2NdParent);
1317 uint32_t idxL2ParentNew = DBGF_BP_L2_ENTRY_IDX_END;
1318 if (idxL2Right == DBGF_BP_L2_ENTRY_IDX_END)
1319 idxL2ParentNew = idxL2Left;
1320 else
1321 {
1322 /* Find the leftmost entry of the right subtree and move it to the to be removed nodes location in the tree. */
1323 PDBGFBPL2ENTRY pL2NdLeftmostParent = NULL;
1324 PDBGFBPL2ENTRY pL2NdLeftmost = NULL;
1325 uint32_t idxL2NdLeftmostParent = DBGF_BP_L2_ENTRY_IDX_END;
1326 uint32_t idxL2Leftmost = DBGF_BP_L2_ENTRY_IDX_END;
1327 int rc = dbgfR33BpInt3BstGetLeftmostEntryFromNode(pUVM, idxL2Right, &idxL2Leftmost ,&pL2NdLeftmost,
1328 &idxL2NdLeftmostParent, &pL2NdLeftmostParent);
1329 AssertRCReturn(rc, rc);
1330
1331 if (pL2NdLeftmostParent)
1332 {
1333 /* Rearrange the leftmost entries parents pointer. */
1334 dbgfBpL2TblEntryUpdateLeft(pL2NdLeftmostParent, DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(pL2NdLeftmost->u64LeftRightIdxDepthBpHnd2), 0 /*iDepth*/);
1335 dbgfBpL2TblEntryUpdateRight(pL2NdLeftmost, idxL2Right, 0 /*iDepth*/);
1336 }
1337
1338 dbgfBpL2TblEntryUpdateLeft(pL2NdLeftmost, idxL2Left, 0 /*iDepth*/);
1339
1340 /* Update the remove nodes parent to point to the new node. */
1341 idxL2ParentNew = idxL2Leftmost;
1342 }
1343
1344 if (pL2NdParent)
1345 {
1346 /* Asssign the new L2 index to proper parents left or right pointer. */
1347 if (fLeftChild)
1348 dbgfBpL2TblEntryUpdateLeft(pL2NdParent, idxL2ParentNew, 0 /*iDepth*/);
1349 else
1350 dbgfBpL2TblEntryUpdateRight(pL2NdParent, idxL2ParentNew, 0 /*iDepth*/);
1351 }
1352 else
1353 {
1354 /* The root node is removed, set the new root in the L1 table. */
1355 Assert(idxL2ParentNew != DBGF_BP_L2_ENTRY_IDX_END);
1356 idxL2Root = idxL2ParentNew;
1357 ASMAtomicXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], DBGF_BP_INT3_L1_ENTRY_CREATE_L2_IDX(idxL2Left));
1358 }
1359
1360 /* Free the node. */
1361 dbgfR3BpL2TblEntryFree(pUVM, idxL2Nd, pL2Nd);
1362
1363 /*
1364 * Check whether the old/new root is the only node remaining and convert the L1
1365 * table entry to a direct breakpoint handle one in that case.
1366 */
1367 pL2Nd = dbgfR3BpL2GetByIdx(pUVM, idxL2Root);
1368 AssertPtr(pL2Nd);
1369 if ( DBGF_BP_L2_ENTRY_GET_IDX_LEFT(pL2Nd->u64LeftRightIdxDepthBpHnd2) == DBGF_BP_L2_ENTRY_IDX_END
1370 && DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(pL2Nd->u64LeftRightIdxDepthBpHnd2) == DBGF_BP_L2_ENTRY_IDX_END)
1371 {
1372 DBGFBP hBp = DBGF_BP_L2_ENTRY_GET_BP_HND(pL2Nd->u64GCPtrKeyAndBpHnd1, pL2Nd->u64LeftRightIdxDepthBpHnd2);
1373 dbgfR3BpL2TblEntryFree(pUVM, idxL2Root, pL2Nd);
1374 ASMAtomicXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], DBGF_BP_INT3_L1_ENTRY_CREATE_BP_HND(hBp));
1375 }
1376
1377 return VINF_SUCCESS;
1378}
1379
1380
1381/**
1382 * Removes the given breakpoint handle keyed with the GC pointer from the L2 binary search tree
1383 * pointed to by the given L2 root index.
1384 *
1385 * @returns VBox status code.
1386 * @param pUVM The user mode VM handle.
1387 * @param idxL1 The index into the L1 table pointing to the binary search tree.
1388 * @param idxL2Root The L2 table index where the tree root is located.
1389 * @param hBp The breakpoint handle which is to be removed.
1390 * @param GCPtr The GC pointer the breakpoint is keyed with.
1391 */
1392static int dbgfR3BpInt3L2BstRemove(PUVM pUVM, uint32_t idxL1, uint32_t idxL2Root, DBGFBP hBp, RTGCUINTPTR GCPtr)
1393{
1394 GCPtr = DBGF_BP_INT3_L2_KEY_EXTRACT_FROM_ADDR(GCPtr);
1395
1396 int rc = RTSemFastMutexRequest(pUVM->dbgf.s.hMtxBpL2Wr); AssertRC(rc);
1397
1398 uint32_t idxL2Cur = idxL2Root;
1399 uint32_t idxL2Parent = DBGF_BP_L2_ENTRY_IDX_END;
1400 bool fLeftChild = false;
1401 PDBGFBPL2ENTRY pL2EntryParent = NULL;
1402 for (;;)
1403 {
1404 PDBGFBPL2ENTRY pL2Entry = dbgfR3BpL2GetByIdx(pUVM, idxL2Cur);
1405 AssertPtr(pL2Entry);
1406
1407 /* Check whether this node is to be removed.. */
1408 RTGCUINTPTR GCPtrL2Entry = DBGF_BP_L2_ENTRY_GET_GCPTR(pL2Entry->u64GCPtrKeyAndBpHnd1);
1409 if (GCPtrL2Entry == GCPtr)
1410 {
1411 Assert(DBGF_BP_L2_ENTRY_GET_BP_HND(pL2Entry->u64GCPtrKeyAndBpHnd1, pL2Entry->u64LeftRightIdxDepthBpHnd2) == hBp); RT_NOREF(hBp);
1412
1413 rc = dbgfR3BpInt3BstNodeRemove(pUVM, idxL1, idxL2Root, idxL2Cur, pL2Entry, idxL2Parent, pL2EntryParent, fLeftChild);
1414 break;
1415 }
1416
1417 pL2EntryParent = pL2Entry;
1418 idxL2Parent = idxL2Cur;
1419
1420 if (GCPtrL2Entry < GCPtr)
1421 {
1422 fLeftChild = true;
1423 idxL2Cur = DBGF_BP_L2_ENTRY_GET_IDX_LEFT(pL2Entry->u64LeftRightIdxDepthBpHnd2);
1424 }
1425 else
1426 {
1427 fLeftChild = false;
1428 idxL2Cur = DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(pL2Entry->u64LeftRightIdxDepthBpHnd2);
1429 }
1430
1431 AssertBreakStmt(idxL2Cur != DBGF_BP_L2_ENTRY_IDX_END, rc = VERR_DBGF_BP_L2_LOOKUP_FAILED);
1432 }
1433
1434 int rc2 = RTSemFastMutexRelease(pUVM->dbgf.s.hMtxBpL2Wr); AssertRC(rc2);
1435
1436 return rc;
1437}
1438
1439
1440/**
1441 * Adds the given int3 breakpoint to the appropriate lookup tables.
1442 *
1443 * @returns VBox status code.
1444 * @param pUVM The user mode VM handle.
1445 * @param hBp The breakpoint handle to add.
1446 * @param pBp The internal breakpoint state.
1447 */
1448static int dbgfR3BpInt3Add(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
1449{
1450 AssertReturn(DBGF_BP_PUB_GET_TYPE(&pBp->Pub) == DBGFBPTYPE_INT3, VERR_DBGF_BP_IPE_3);
1451
1452 int rc = VINF_SUCCESS;
1453 uint16_t idxL1 = DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(pBp->Pub.u.Int3.GCPtr);
1454 uint8_t cTries = 16;
1455
1456 while (cTries--)
1457 {
1458 uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1]);
1459 if (u32Entry == DBGF_BP_INT3_L1_ENTRY_TYPE_NULL)
1460 {
1461 /*
1462 * No breakpoint assigned so far for this entry, create an entry containing
1463 * the direct breakpoint handle and try to exchange it atomically.
1464 */
1465 u32Entry = DBGF_BP_INT3_L1_ENTRY_CREATE_BP_HND(hBp);
1466 if (ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], u32Entry, DBGF_BP_INT3_L1_ENTRY_TYPE_NULL))
1467 break;
1468 }
1469 else
1470 {
1471 rc = dbgfR3BpInt3L2BstNodeAdd(pUVM, idxL1, hBp, pBp->Pub.u.Int3.GCPtr);
1472 if (rc != VINF_TRY_AGAIN)
1473 break;
1474 }
1475 }
1476
1477 if ( RT_SUCCESS(rc)
1478 && !cTries) /* Too much contention, abort with an error. */
1479 rc = VERR_DBGF_BP_INT3_ADD_TRIES_REACHED;
1480
1481 return rc;
1482}
1483
1484
1485/**
1486 * Adds the given port I/O breakpoint to the appropriate lookup tables.
1487 *
1488 * @returns VBox status code.
1489 * @param pUVM The user mode VM handle.
1490 * @param hBp The breakpoint handle to add.
1491 * @param pBp The internal breakpoint state.
1492 */
1493static int dbgfR3BpPortIoAdd(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
1494{
1495 AssertReturn(DBGF_BP_PUB_GET_TYPE(&pBp->Pub) == DBGFBPTYPE_PORT_IO, VERR_DBGF_BP_IPE_3);
1496
1497 uint16_t uPortExcl = pBp->Pub.u.PortIo.uPort + pBp->Pub.u.PortIo.cPorts;
1498 uint32_t u32Entry = DBGF_BP_INT3_L1_ENTRY_CREATE_BP_HND(hBp);
1499 for (uint16_t idxPort = pBp->Pub.u.PortIo.uPort; idxPort < uPortExcl; idxPort++)
1500 {
1501 bool fXchg = ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocPortIoR3[idxPort], u32Entry, DBGF_BP_INT3_L1_ENTRY_TYPE_NULL);
1502 if (!fXchg)
1503 {
1504 /* Something raced us, so roll back the other registrations. */
1505 while (idxPort > pBp->Pub.u.PortIo.uPort)
1506 {
1507 fXchg = ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocPortIoR3[idxPort], DBGF_BP_INT3_L1_ENTRY_TYPE_NULL, u32Entry);
1508 Assert(fXchg); RT_NOREF(fXchg);
1509 }
1510
1511 return VERR_DBGF_BP_INT3_ADD_TRIES_REACHED; /** @todo New status code */
1512 }
1513 }
1514
1515 return VINF_SUCCESS;
1516}
1517
1518
1519/**
1520 * Get a breakpoint give by address.
1521 *
1522 * @returns The breakpoint handle on success or NIL_DBGF if not found.
1523 * @param pUVM The user mode VM handle.
1524 * @param enmType The breakpoint type.
1525 * @param GCPtr The breakpoint address.
1526 * @param ppBp Where to store the pointer to the internal breakpoint state on success, optional.
1527 */
1528static DBGFBP dbgfR3BpGetByAddr(PUVM pUVM, DBGFBPTYPE enmType, RTGCUINTPTR GCPtr, PDBGFBPINT *ppBp)
1529{
1530 DBGFBP hBp = NIL_DBGFBP;
1531
1532 switch (enmType)
1533 {
1534 case DBGFBPTYPE_REG:
1535 {
1536 PVM pVM = pUVM->pVM;
1537 VM_ASSERT_VALID_EXT_RETURN(pVM, NIL_DBGFBP);
1538
1539 for (uint32_t i = 0; i < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); i++)
1540 {
1541 PDBGFBPHW pHwBp = &pVM->dbgf.s.aHwBreakpoints[i];
1542
1543 AssertCompileSize(DBGFBP, sizeof(uint32_t));
1544 DBGFBP hBpTmp = ASMAtomicReadU32(&pHwBp->hBp);
1545 if ( pHwBp->GCPtr == GCPtr
1546 && hBpTmp != NIL_DBGFBP)
1547 {
1548 hBp = hBpTmp;
1549 break;
1550 }
1551 }
1552 break;
1553 }
1554
1555 case DBGFBPTYPE_INT3:
1556 {
1557 const uint16_t idxL1 = DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(GCPtr);
1558 const uint32_t u32L1Entry = ASMAtomicReadU32(&pUVM->dbgf.s.CTX_SUFF(paBpLocL1)[idxL1]);
1559
1560 if (u32L1Entry != DBGF_BP_INT3_L1_ENTRY_TYPE_NULL)
1561 {
1562 uint8_t u8Type = DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32L1Entry);
1563 if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND)
1564 hBp = DBGF_BP_INT3_L1_ENTRY_GET_BP_HND(u32L1Entry);
1565 else if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX)
1566 {
1567 RTGCUINTPTR GCPtrKey = DBGF_BP_INT3_L2_KEY_EXTRACT_FROM_ADDR(GCPtr);
1568 PDBGFBPL2ENTRY pL2Nd = dbgfR3BpL2GetByIdx(pUVM, DBGF_BP_INT3_L1_ENTRY_GET_L2_IDX(u32L1Entry));
1569
1570 for (;;)
1571 {
1572 AssertPtr(pL2Nd);
1573
1574 RTGCUINTPTR GCPtrL2Entry = DBGF_BP_L2_ENTRY_GET_GCPTR(pL2Nd->u64GCPtrKeyAndBpHnd1);
1575 if (GCPtrKey == GCPtrL2Entry)
1576 {
1577 hBp = DBGF_BP_L2_ENTRY_GET_BP_HND(pL2Nd->u64GCPtrKeyAndBpHnd1, pL2Nd->u64LeftRightIdxDepthBpHnd2);
1578 break;
1579 }
1580
1581 /* Not found, get to the next level. */
1582 uint32_t idxL2Next = GCPtrKey < GCPtrL2Entry
1583 ? DBGF_BP_L2_ENTRY_GET_IDX_LEFT(pL2Nd->u64LeftRightIdxDepthBpHnd2)
1584 : DBGF_BP_L2_ENTRY_GET_IDX_RIGHT(pL2Nd->u64LeftRightIdxDepthBpHnd2);
1585 /* Address not found if the entry denotes the end. */
1586 if (idxL2Next == DBGF_BP_L2_ENTRY_IDX_END)
1587 break;
1588
1589 pL2Nd = dbgfR3BpL2GetByIdx(pUVM, idxL2Next);
1590 }
1591 }
1592 }
1593 break;
1594 }
1595
1596 default:
1597 AssertMsgFailed(("enmType=%d\n", enmType));
1598 break;
1599 }
1600
1601 if ( hBp != NIL_DBGFBP
1602 && ppBp)
1603 *ppBp = dbgfR3BpGetByHnd(pUVM, hBp);
1604 return hBp;
1605}
1606
1607
1608/**
1609 * Get a port I/O breakpoint given by the range.
1610 *
1611 * @returns The breakpoint handle on success or NIL_DBGF if not found.
1612 * @param pUVM The user mode VM handle.
1613 * @param uPort First port in the range.
1614 * @param cPorts Number of ports in the range.
1615 * @param ppBp Where to store the pointer to the internal breakpoint state on success, optional.
1616 */
1617static DBGFBP dbgfR3BpPortIoGetByRange(PUVM pUVM, RTIOPORT uPort, RTIOPORT cPorts, PDBGFBPINT *ppBp)
1618{
1619 DBGFBP hBp = NIL_DBGFBP;
1620
1621 for (RTIOPORT idxPort = uPort; idxPort < uPort + cPorts; idxPort++)
1622 {
1623 const uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.CTX_SUFF(paBpLocPortIo)[idxPort]);
1624 if (u32Entry != DBGF_BP_INT3_L1_ENTRY_TYPE_NULL)
1625 {
1626 hBp = DBGF_BP_INT3_L1_ENTRY_GET_BP_HND(u32Entry);
1627 break;
1628 }
1629 }
1630
1631 if ( hBp != NIL_DBGFBP
1632 && ppBp)
1633 *ppBp = dbgfR3BpGetByHnd(pUVM, hBp);
1634 return hBp;
1635}
1636
1637
1638/**
1639 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
1640 */
1641static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpInt3RemoveEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
1642{
1643 DBGFBP hBp = (DBGFBP)(uintptr_t)pvUser;
1644
1645 VMCPU_ASSERT_EMT(pVCpu);
1646 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1647
1648 PUVM pUVM = pVM->pUVM;
1649 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
1650 AssertPtrReturn(pBp, VERR_DBGF_BP_IPE_8);
1651
1652 int rc = VINF_SUCCESS;
1653 if (pVCpu->idCpu == 0)
1654 {
1655 uint16_t idxL1 = DBGF_BP_INT3_L1_IDX_EXTRACT_FROM_ADDR(pBp->Pub.u.Int3.GCPtr);
1656 uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1]);
1657 AssertReturn(u32Entry != DBGF_BP_INT3_L1_ENTRY_TYPE_NULL, VERR_DBGF_BP_IPE_6);
1658
1659 uint8_t u8Type = DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32Entry);
1660 if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND)
1661 {
1662 /* Single breakpoint, just exchange atomically with the null value. */
1663 if (!ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1], DBGF_BP_INT3_L1_ENTRY_TYPE_NULL, u32Entry))
1664 {
1665 /*
1666 * A breakpoint addition must have raced us converting the L1 entry to an L2 index type, re-read
1667 * and remove the node from the created binary search tree.
1668 *
1669 * This works because after the entry was converted to an L2 index it can only be converted back
1670 * to a direct handle by removing one or more nodes which always goes through the fast mutex
1671 * protecting the L2 table. Likewise adding a new breakpoint requires grabbing the mutex as well
1672 * so there is serialization here and the node can be removed safely without having to worry about
1673 * concurrent tree modifications.
1674 */
1675 u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocL1R3[idxL1]);
1676 AssertReturn(DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32Entry) == DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX, VERR_DBGF_BP_IPE_9);
1677
1678 rc = dbgfR3BpInt3L2BstRemove(pUVM, idxL1, DBGF_BP_INT3_L1_ENTRY_GET_L2_IDX(u32Entry),
1679 hBp, pBp->Pub.u.Int3.GCPtr);
1680 }
1681 }
1682 else if (u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_L2_IDX)
1683 rc = dbgfR3BpInt3L2BstRemove(pUVM, idxL1, DBGF_BP_INT3_L1_ENTRY_GET_L2_IDX(u32Entry),
1684 hBp, pBp->Pub.u.Int3.GCPtr);
1685 }
1686
1687 return rc;
1688}
1689
1690
1691/**
1692 * Removes the given int3 breakpoint from all lookup tables.
1693 *
1694 * @returns VBox status code.
1695 * @param pUVM The user mode VM handle.
1696 * @param hBp The breakpoint handle to remove.
1697 * @param pBp The internal breakpoint state.
1698 */
1699static int dbgfR3BpInt3Remove(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
1700{
1701 AssertReturn(DBGF_BP_PUB_GET_TYPE(&pBp->Pub) == DBGFBPTYPE_INT3, VERR_DBGF_BP_IPE_3);
1702
1703 /*
1704 * This has to be done by an EMT rendezvous in order to not have an EMT traversing
1705 * any L2 trees while it is being removed.
1706 */
1707 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpInt3RemoveEmtWorker, (void *)(uintptr_t)hBp);
1708}
1709
1710
1711/**
1712 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
1713 */
1714static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpPortIoRemoveEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
1715{
1716 DBGFBP hBp = (DBGFBP)(uintptr_t)pvUser;
1717
1718 VMCPU_ASSERT_EMT(pVCpu);
1719 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1720
1721 PUVM pUVM = pVM->pUVM;
1722 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
1723 AssertPtrReturn(pBp, VERR_DBGF_BP_IPE_8);
1724
1725 int rc = VINF_SUCCESS;
1726 if (pVCpu->idCpu == 0)
1727 {
1728 /*
1729 * Remove the whole range, there shouldn't be any other breakpoint configured for this range as this is not
1730 * allowed right now.
1731 */
1732 uint16_t uPortExcl = pBp->Pub.u.PortIo.uPort + pBp->Pub.u.PortIo.cPorts;
1733 for (uint16_t idxPort = pBp->Pub.u.PortIo.uPort; idxPort < uPortExcl; idxPort++)
1734 {
1735 uint32_t u32Entry = ASMAtomicReadU32(&pUVM->dbgf.s.paBpLocPortIoR3[idxPort]);
1736 AssertReturn(u32Entry != DBGF_BP_INT3_L1_ENTRY_TYPE_NULL, VERR_DBGF_BP_IPE_6);
1737
1738 uint8_t u8Type = DBGF_BP_INT3_L1_ENTRY_GET_TYPE(u32Entry);
1739 AssertReturn(u8Type == DBGF_BP_INT3_L1_ENTRY_TYPE_BP_HND, VERR_DBGF_BP_IPE_7);
1740
1741 bool fXchg = ASMAtomicCmpXchgU32(&pUVM->dbgf.s.paBpLocPortIoR3[idxPort], DBGF_BP_INT3_L1_ENTRY_TYPE_NULL, u32Entry);
1742 Assert(fXchg); RT_NOREF(fXchg);
1743 }
1744 }
1745
1746 return rc;
1747}
1748
1749
1750/**
1751 * Removes the given port I/O breakpoint from all lookup tables.
1752 *
1753 * @returns VBox status code.
1754 * @param pUVM The user mode VM handle.
1755 * @param hBp The breakpoint handle to remove.
1756 * @param pBp The internal breakpoint state.
1757 */
1758static int dbgfR3BpPortIoRemove(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
1759{
1760 AssertReturn(DBGF_BP_PUB_GET_TYPE(&pBp->Pub) == DBGFBPTYPE_PORT_IO, VERR_DBGF_BP_IPE_3);
1761
1762 /*
1763 * This has to be done by an EMT rendezvous in order to not have an EMT accessing
1764 * the breakpoint while it is removed.
1765 */
1766 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpPortIoRemoveEmtWorker, (void *)(uintptr_t)hBp);
1767}
1768
1769
/**
 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
 *      Recalculates the hardware breakpoint bookkeeping and per-vCPU debug
 *      register state after a register breakpoint was armed or disarmed.}
 */
static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpRegRecalcOnCpu(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    RT_NOREF(pvUser);

    /*
     * CPU 0 updates the enabled hardware breakpoint counts.
     */
    if (pVCpu->idCpu == 0)
    {
        pVM->dbgf.s.cEnabledHwBreakpoints   = 0;
        pVM->dbgf.s.cEnabledHwIoBreakpoints = 0;

        for (uint32_t iBp = 0; iBp < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); iBp++)
        {
            if (pVM->dbgf.s.aHwBreakpoints[iBp].fEnabled)
            {
                pVM->dbgf.s.cEnabledHwBreakpoints += 1;
                /* Adds 1 exactly when the DR7 access type says I/O read/write. */
                pVM->dbgf.s.cEnabledHwIoBreakpoints += pVM->dbgf.s.aHwBreakpoints[iBp].fType == X86_DR7_RW_IO;
            }
        }
    }

    /* Every vCPU reloads its hypervisor DRx state (UINT8_MAX = all registers). */
    return CPUMRecalcHyperDRx(pVCpu, UINT8_MAX);
}
1797
1798
/**
 * Arms the given breakpoint.
 *
 * Arming is what makes an already registered breakpoint actually fire:
 * hardware breakpoints get the DRx state recalculated on every vCPU, int3
 * breakpoints get the 0xcc opcode byte written into guest memory, and port
 * I/O breakpoints bump the global counter so IOM starts consulting DBGF.
 *
 * @returns VBox status code.
 * @param   pUVM    The user mode VM handle.
 * @param   hBp     The breakpoint handle to arm.
 * @param   pBp     The internal breakpoint state pointer for the handle.
 *
 * @thread Any thread.
 */
static int dbgfR3BpArm(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
{
    int rc;
    PVM pVM = pUVM->pVM;

    Assert(!DBGF_BP_PUB_IS_ENABLED(&pBp->Pub));
    switch (DBGF_BP_PUB_GET_TYPE(&pBp->Pub))
    {
        case DBGFBPTYPE_REG:
        {
            Assert(pBp->Pub.u.Reg.iReg < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints));
            PDBGFBPHW pBpHw = &pVM->dbgf.s.aHwBreakpoints[pBp->Pub.u.Reg.iReg];
            Assert(pBpHw->hBp == hBp); RT_NOREF(hBp);

            /* Flag enabled first, then have all EMTs reload their debug registers. */
            dbgfR3BpSetEnabled(pBp, true /*fEnabled*/);
            ASMAtomicWriteBool(&pBpHw->fEnabled, true);
            rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpRegRecalcOnCpu, NULL);
            if (RT_FAILURE(rc))
            {
                /* Roll the enable back if the rendezvous failed. */
                ASMAtomicWriteBool(&pBpHw->fEnabled, false);
                dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
            }
            break;
        }
        case DBGFBPTYPE_INT3:
        {
            dbgfR3BpSetEnabled(pBp, true /*fEnabled*/);

            /** @todo When we enable the first int3 breakpoint we should do this in an EMT rendezvous
             * as the VMX code intercepts #BP only when at least one int3 breakpoint is enabled.
             * A racing vCPU might trigger it and forward it to the guest causing panics/crashes/havoc. */
            /*
             * Save current byte and write the int3 instruction byte.
             */
            rc = PGMPhysSimpleReadGCPhys(pVM, &pBp->Pub.u.Int3.bOrg, pBp->Pub.u.Int3.PhysAddr, sizeof(pBp->Pub.u.Int3.bOrg));
            if (RT_SUCCESS(rc))
            {
                static const uint8_t s_bInt3 = 0xcc;
                rc = PGMPhysSimpleWriteGCPhys(pVM, pBp->Pub.u.Int3.PhysAddr, &s_bInt3, sizeof(s_bInt3));
                if (RT_SUCCESS(rc))
                {
                    ASMAtomicIncU32(&pVM->dbgf.s.cEnabledInt3Breakpoints);
                    Log(("DBGF: Set breakpoint at %RGv (Phys %RGp)\n", pBp->Pub.u.Int3.GCPtr, pBp->Pub.u.Int3.PhysAddr));
                }
            }

            /* Undo the enabled flag if either the read or the write failed. */
            if (RT_FAILURE(rc))
                dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);

            break;
        }
        case DBGFBPTYPE_PORT_IO:
        {
            /* A non-zero counter makes IOM call into DBGF on port accesses. */
            dbgfR3BpSetEnabled(pBp, true /*fEnabled*/);
            ASMAtomicIncU32(&pUVM->dbgf.s.cPortIoBps);
            IOMR3NotifyBreakpointCountChange(pVM, true /*fPortIo*/, false /*fMmio*/);
            rc = VINF_SUCCESS;
            break;
        }
        case DBGFBPTYPE_MMIO:
            /* MMIO breakpoints cannot be armed yet. */
            rc = VERR_NOT_IMPLEMENTED;
            break;
        default:
            AssertMsgFailedReturn(("Invalid breakpoint type %d\n", DBGF_BP_PUB_GET_TYPE(&pBp->Pub)),
                                  VERR_IPE_NOT_REACHED_DEFAULT_CASE);
    }

    return rc;
}
1878
1879
/**
 * Disarms the given breakpoint.
 *
 * Inverse of dbgfR3BpArm(): the breakpoint stays registered but stops
 * triggering - DRx state is recalculated, the original opcode byte is
 * restored, or the port I/O counter is dropped, depending on the type.
 *
 * @returns VBox status code.
 * @param   pUVM    The user mode VM handle.
 * @param   hBp     The breakpoint handle to disarm.
 * @param   pBp     The internal breakpoint state pointer for the handle.
 *
 * @thread Any thread.
 */
static int dbgfR3BpDisarm(PUVM pUVM, DBGFBP hBp, PDBGFBPINT pBp)
{
    int rc;
    PVM pVM = pUVM->pVM;

    Assert(DBGF_BP_PUB_IS_ENABLED(&pBp->Pub));
    switch (DBGF_BP_PUB_GET_TYPE(&pBp->Pub))
    {
        case DBGFBPTYPE_REG:
        {
            Assert(pBp->Pub.u.Reg.iReg < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints));
            PDBGFBPHW pBpHw = &pVM->dbgf.s.aHwBreakpoints[pBp->Pub.u.Reg.iReg];
            Assert(pBpHw->hBp == hBp); RT_NOREF(hBp);

            /* Flag disabled first, then have all EMTs reload their debug registers. */
            dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
            ASMAtomicWriteBool(&pBpHw->fEnabled, false);
            rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3BpRegRecalcOnCpu, NULL);
            if (RT_FAILURE(rc))
            {
                /* Re-enable on rendezvous failure so the state stays consistent. */
                ASMAtomicWriteBool(&pBpHw->fEnabled, true);
                dbgfR3BpSetEnabled(pBp, true /*fEnabled*/);
            }
            break;
        }
        case DBGFBPTYPE_INT3:
        {
            /*
             * Check that the current byte is the int3 instruction, and restore the original one.
             * We currently ignore invalid bytes.
             */
            /* NOTE(review): when the byte is no longer 0xcc (e.g. the guest rewrote the
               page) rc stays VINF_SUCCESS but the breakpoint remains flagged enabled and
               cEnabledInt3Breakpoints is not decremented - verify this is intentional. */
            uint8_t bCurrent = 0;
            rc = PGMPhysSimpleReadGCPhys(pVM, &bCurrent, pBp->Pub.u.Int3.PhysAddr, sizeof(bCurrent));
            if (   RT_SUCCESS(rc)
                && bCurrent == 0xcc)
            {
                rc = PGMPhysSimpleWriteGCPhys(pVM, pBp->Pub.u.Int3.PhysAddr, &pBp->Pub.u.Int3.bOrg, sizeof(pBp->Pub.u.Int3.bOrg));
                if (RT_SUCCESS(rc))
                {
                    ASMAtomicDecU32(&pVM->dbgf.s.cEnabledInt3Breakpoints);
                    dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
                    Log(("DBGF: Removed breakpoint at %RGv (Phys %RGp)\n", pBp->Pub.u.Int3.GCPtr, pBp->Pub.u.Int3.PhysAddr));
                }
            }
            break;
        }
        case DBGFBPTYPE_PORT_IO:
        {
            dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
            uint32_t cPortIoBps = ASMAtomicDecU32(&pUVM->dbgf.s.cPortIoBps);
            if (!cPortIoBps) /** @todo Need to gather all EMTs to not have a stray EMT accessing BP data when it might go away. */
                IOMR3NotifyBreakpointCountChange(pVM, false /*fPortIo*/, false /*fMmio*/);
            rc = VINF_SUCCESS;
            break;
        }
        case DBGFBPTYPE_MMIO:
            /* MMIO breakpoints cannot be armed yet, so nothing to disarm either. */
            rc = VERR_NOT_IMPLEMENTED;
            break;
        default:
            AssertMsgFailedReturn(("Invalid breakpoint type %d\n", DBGF_BP_PUB_GET_TYPE(&pBp->Pub)),
                                  VERR_IPE_NOT_REACHED_DEFAULT_CASE);
    }

    return rc;
}
1954
1955
/**
 * Worker for DBGFR3BpHit() differentiating on the breakpoint type.
 *
 * For code breakpoints the exec-before owner callback is invoked, the
 * breakpointed instruction is emulated with the original opcode byte patched
 * in (so the guest makes progress past the 0xcc), and then the exec-after
 * callback is invoked.  For I/O breakpoints the access details stashed in the
 * per-vCPU DBGF state are forwarded to the I/O owner callback.
 *
 * @returns Strict VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The vCPU the breakpoint event happened on.
 * @param   hBp         The breakpoint handle.
 * @param   pBp         The breakpoint data.
 * @param   pBpOwner    The breakpoint owner data.
 *
 * @thread EMT
 */
static VBOXSTRICTRC dbgfR3BpHit(PVM pVM, PVMCPU pVCpu, DBGFBP hBp, PDBGFBPINT pBp, PCDBGFBPOWNERINT pBpOwner)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;

    switch (DBGF_BP_PUB_GET_TYPE(&pBp->Pub))
    {
        case DBGFBPTYPE_REG:
        case DBGFBPTYPE_INT3:
        {
            /* Let the owner see the hit before the instruction is executed. */
            if (DBGF_BP_PUB_IS_EXEC_BEFORE(&pBp->Pub))
                rcStrict = pBpOwner->pfnBpHitR3(pVM, pVCpu->idCpu, pBp->pvUserR3, hBp, &pBp->Pub, DBGF_BP_F_HIT_EXEC_BEFORE);
            if (rcStrict == VINF_SUCCESS)
            {
                uint8_t abInstr[DBGF_BP_INSN_MAX];
                RTGCPTR const GCPtrInstr = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base;
                int rc = PGMPhysSimpleReadGCPtr(pVCpu, &abInstr[0], GCPtrInstr, sizeof(abInstr));
                AssertRC(rc);
                if (RT_SUCCESS(rc))
                {
                    /* Replace the int3 with the original instruction byte. */
                    abInstr[0] = pBp->Pub.u.Int3.bOrg;
                    /* Emulate the patched instruction so the guest steps past the breakpoint. */
                    rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, GCPtrInstr, &abInstr[0], sizeof(abInstr));
                    if (   rcStrict == VINF_SUCCESS
                        && DBGF_BP_PUB_IS_EXEC_AFTER(&pBp->Pub))
                    {
                        VBOXSTRICTRC rcStrict2 = pBpOwner->pfnBpHitR3(pVM, pVCpu->idCpu, pBp->pvUserR3, hBp, &pBp->Pub,
                                                                      DBGF_BP_F_HIT_EXEC_AFTER);
                        if (rcStrict2 == VINF_SUCCESS)
                            return VBOXSTRICTRC_VAL(rcStrict);
                        if (rcStrict2 != VINF_DBGF_BP_HALT)
                            return VERR_DBGF_BP_OWNER_CALLBACK_WRONG_STATUS;
                        /* NOTE(review): on VINF_DBGF_BP_HALT we fall through and return rcStrict,
                           which is VINF_SUCCESS at this point - confirm the caller still halts. */
                    }
                    else
                        return VBOXSTRICTRC_VAL(rcStrict);
                }
            }
            break;
        }
        case DBGFBPTYPE_PORT_IO:
        case DBGFBPTYPE_MMIO:
        {
            /* The access details were recorded in the per-vCPU DBGF state by the interception code. */
            pVCpu->dbgf.s.fBpIoActive = false;
            rcStrict = pBpOwner->pfnBpIoHitR3(pVM, pVCpu->idCpu, pBp->pvUserR3, hBp, &pBp->Pub,
                                              pVCpu->dbgf.s.fBpIoBefore
                                              ? DBGF_BP_F_HIT_EXEC_BEFORE
                                              : DBGF_BP_F_HIT_EXEC_AFTER,
                                              pVCpu->dbgf.s.fBpIoAccess, pVCpu->dbgf.s.uBpIoAddress,
                                              pVCpu->dbgf.s.uBpIoValue);

            break;
        }
        default:
            AssertMsgFailedReturn(("Invalid breakpoint type %d\n", DBGF_BP_PUB_GET_TYPE(&pBp->Pub)),
                                  VERR_IPE_NOT_REACHED_DEFAULT_CASE);
    }

    return rcStrict;
}
2026
2027
2028/**
2029 * Creates a new breakpoint owner returning a handle which can be used when setting breakpoints.
2030 *
2031 * @returns VBox status code.
2032 * @retval VERR_DBGF_BP_OWNER_NO_MORE_HANDLES if there are no more free owner handles available.
2033 * @param pUVM The user mode VM handle.
2034 * @param pfnBpHit The R3 callback which is called when a breakpoint with the owner handle is hit.
2035 * @param pfnBpIoHit The R3 callback which is called when a I/O breakpoint with the owner handle is hit.
2036 * @param phBpOwner Where to store the owner handle on success.
2037 *
2038 * @thread Any thread but might defer work to EMT on the first call.
2039 */
2040VMMR3DECL(int) DBGFR3BpOwnerCreate(PUVM pUVM, PFNDBGFBPHIT pfnBpHit, PFNDBGFBPIOHIT pfnBpIoHit, PDBGFBPOWNER phBpOwner)
2041{
2042 /*
2043 * Validate the input.
2044 */
2045 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2046 AssertReturn(pfnBpHit || pfnBpIoHit, VERR_INVALID_PARAMETER);
2047 AssertPtrReturn(phBpOwner, VERR_INVALID_POINTER);
2048
2049 int rc = dbgfR3BpOwnerEnsureInit(pUVM);
2050 AssertRCReturn(rc ,rc);
2051
2052 /* Try to find a free entry in the owner table. */
2053 for (;;)
2054 {
2055 /* Scan the associated bitmap for a free entry. */
2056 int32_t iClr = ASMBitFirstClear(pUVM->dbgf.s.pbmBpOwnersAllocR3, DBGF_BP_OWNER_COUNT_MAX);
2057 if (iClr != -1)
2058 {
2059 /*
2060 * Try to allocate, we could get raced here as well. In that case
2061 * we try again.
2062 */
2063 if (!ASMAtomicBitTestAndSet(pUVM->dbgf.s.pbmBpOwnersAllocR3, iClr))
2064 {
2065 PDBGFBPOWNERINT pBpOwner = &pUVM->dbgf.s.paBpOwnersR3[iClr];
2066 pBpOwner->cRefs = 1;
2067 pBpOwner->pfnBpHitR3 = pfnBpHit;
2068 pBpOwner->pfnBpIoHitR3 = pfnBpIoHit;
2069
2070 *phBpOwner = (DBGFBPOWNER)iClr;
2071 return VINF_SUCCESS;
2072 }
2073 /* else Retry with another spot. */
2074 }
2075 else /* no free entry in bitmap, out of entries. */
2076 {
2077 rc = VERR_DBGF_BP_OWNER_NO_MORE_HANDLES;
2078 break;
2079 }
2080 }
2081
2082 return rc;
2083}
2084
2085
2086/**
2087 * Destroys the owner identified by the given handle.
2088 *
2089 * @returns VBox status code.
2090 * @retval VERR_INVALID_HANDLE if the given owner handle is invalid.
2091 * @retval VERR_DBGF_OWNER_BUSY if there are still breakpoints set with the given owner handle.
2092 * @param pUVM The user mode VM handle.
2093 * @param hBpOwner The breakpoint owner handle to destroy.
2094 */
2095VMMR3DECL(int) DBGFR3BpOwnerDestroy(PUVM pUVM, DBGFBPOWNER hBpOwner)
2096{
2097 /*
2098 * Validate the input.
2099 */
2100 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2101 AssertReturn(hBpOwner != NIL_DBGFBPOWNER, VERR_INVALID_HANDLE);
2102
2103 int rc = dbgfR3BpOwnerEnsureInit(pUVM);
2104 AssertRCReturn(rc ,rc);
2105
2106 PDBGFBPOWNERINT pBpOwner = dbgfR3BpOwnerGetByHnd(pUVM, hBpOwner);
2107 if (RT_LIKELY(pBpOwner))
2108 {
2109 if (ASMAtomicReadU32(&pBpOwner->cRefs) == 1)
2110 {
2111 pBpOwner->pfnBpHitR3 = NULL;
2112 ASMAtomicDecU32(&pBpOwner->cRefs);
2113 ASMAtomicBitClear(pUVM->dbgf.s.pbmBpOwnersAllocR3, hBpOwner);
2114 }
2115 else
2116 rc = VERR_DBGF_OWNER_BUSY;
2117 }
2118 else
2119 rc = VERR_INVALID_HANDLE;
2120
2121 return rc;
2122}
2123
2124
2125/**
2126 * Sets a breakpoint (int 3 based).
2127 *
2128 * @returns VBox status code.
2129 * @param pUVM The user mode VM handle.
2130 * @param idSrcCpu The ID of the virtual CPU used for the
2131 * breakpoint address resolution.
2132 * @param pAddress The address of the breakpoint.
2133 * @param iHitTrigger The hit count at which the breakpoint start triggering.
2134 * Use 0 (or 1) if it's gonna trigger at once.
2135 * @param iHitDisable The hit count which disables the breakpoint.
2136 * Use ~(uint64_t) if it's never gonna be disabled.
2137 * @param phBp Where to store the breakpoint handle on success.
2138 *
2139 * @thread Any thread.
2140 */
2141VMMR3DECL(int) DBGFR3BpSetInt3(PUVM pUVM, VMCPUID idSrcCpu, PCDBGFADDRESS pAddress,
2142 uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
2143{
2144 return DBGFR3BpSetInt3Ex(pUVM, NIL_DBGFBPOWNER, NULL /*pvUser*/, idSrcCpu, pAddress,
2145 DBGF_BP_F_DEFAULT, iHitTrigger, iHitDisable, phBp);
2146}
2147
2148
/**
 * Sets a breakpoint (int 3 based) - extended version.
 *
 * @returns VBox status code.
 * @param   pUVM        The user mode VM handle.
 * @param   hOwner      The owner handle, use NIL_DBGFBPOWNER if no special owner attached.
 * @param   pvUser      Opaque user data to pass in the owner callback.
 * @param   idSrcCpu    The ID of the virtual CPU used for the
 *                      breakpoint address resolution.
 * @param   pAddress    The address of the breakpoint.
 * @param   fFlags      Combination of DBGF_BP_F_XXX.
 * @param   iHitTrigger The hit count at which the breakpoint start triggering.
 *                      Use 0 (or 1) if it's gonna trigger at once.
 * @param   iHitDisable The hit count which disables the breakpoint.
 *                      Use ~(uint64_t) if it's never gonna be disabled.
 * @param   phBp        Where to store the breakpoint handle on success.
 *
 * @thread Any thread.
 */
VMMR3DECL(int) DBGFR3BpSetInt3Ex(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser,
                                 VMCPUID idSrcCpu, PCDBGFADDRESS pAddress, uint16_t fFlags,
                                 uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(hOwner != NIL_DBGFBPOWNER || pvUser == NULL, VERR_INVALID_PARAMETER);
    AssertReturn(DBGFR3AddrIsValid(pUVM, pAddress), VERR_INVALID_PARAMETER);
    AssertReturn(iHitTrigger <= iHitDisable, VERR_INVALID_PARAMETER);
    AssertPtrReturn(phBp, VERR_INVALID_POINTER);

    int rc = dbgfR3BpEnsureInit(pUVM);
    AssertRCReturn(rc, rc);

    /*
     * Translate & save the breakpoint address into a guest-physical address.
     */
    RTGCPHYS GCPhysBpAddr = NIL_RTGCPHYS;
    rc = DBGFR3AddrToPhys(pUVM, idSrcCpu, pAddress, &GCPhysBpAddr);
    if (RT_SUCCESS(rc))
    {
        /*
         * The physical address from DBGFR3AddrToPhys() is the start of the page,
         * we need the exact byte offset into the page while writing to it in dbgfR3BpInt3Arm().
         */
        GCPhysBpAddr |= (pAddress->FlatPtr & X86_PAGE_OFFSET_MASK);

        /*
         * Reuse an existing breakpoint at the same address if there is one;
         * it gets armed (regardless of DBGF_BP_F_ENABLED) and reported via
         * VINF_DBGF_BP_ALREADY_EXIST instead of allocating a duplicate.
         */
        PDBGFBPINT pBp = NULL;
        DBGFBP hBp = dbgfR3BpGetByAddr(pUVM, DBGFBPTYPE_INT3, pAddress->FlatPtr, &pBp);
        if (   hBp != NIL_DBGFBP
            && pBp->Pub.u.Int3.PhysAddr == GCPhysBpAddr)
        {
            rc = VINF_SUCCESS;
            if (!DBGF_BP_PUB_IS_ENABLED(&pBp->Pub))
                rc = dbgfR3BpArm(pUVM, hBp, pBp);
            if (RT_SUCCESS(rc))
            {
                rc = VINF_DBGF_BP_ALREADY_EXIST;
                /* (phBp was already asserted non-NULL above; this re-check is belt and braces.) */
                if (phBp)
                    *phBp = hBp;
            }
            return rc;
        }

        rc = dbgfR3BpAlloc(pUVM, hOwner, pvUser, DBGFBPTYPE_INT3, fFlags, iHitTrigger, iHitDisable, &hBp, &pBp);
        if (RT_SUCCESS(rc))
        {
            pBp->Pub.u.Int3.PhysAddr = GCPhysBpAddr;
            pBp->Pub.u.Int3.GCPtr    = pAddress->FlatPtr;

            /* Add the breakpoint to the lookup tables. */
            rc = dbgfR3BpInt3Add(pUVM, hBp, pBp);
            if (RT_SUCCESS(rc))
            {
                /* Enable the breakpoint if requested. */
                if (fFlags & DBGF_BP_F_ENABLED)
                    rc = dbgfR3BpArm(pUVM, hBp, pBp);
                if (RT_SUCCESS(rc))
                {
                    *phBp = hBp;
                    return VINF_SUCCESS;
                }

                /* Arming failed - take the breakpoint out of the lookup tables again. */
                int rc2 = dbgfR3BpInt3Remove(pUVM, hBp, pBp); AssertRC(rc2);
            }

            /* Roll back the allocation on any failure. */
            dbgfR3BpFree(pUVM, hBp, pBp);
        }
    }

    return rc;
}
2239
2240
2241/**
2242 * Sets a register breakpoint.
2243 *
2244 * @returns VBox status code.
2245 * @param pUVM The user mode VM handle.
2246 * @param pAddress The address of the breakpoint.
2247 * @param iHitTrigger The hit count at which the breakpoint start triggering.
2248 * Use 0 (or 1) if it's gonna trigger at once.
2249 * @param iHitDisable The hit count which disables the breakpoint.
2250 * Use ~(uint64_t) if it's never gonna be disabled.
2251 * @param fType The access type (one of the X86_DR7_RW_* defines).
2252 * @param cb The access size - 1,2,4 or 8 (the latter is AMD64 long mode only.
2253 * Must be 1 if fType is X86_DR7_RW_EO.
2254 * @param phBp Where to store the breakpoint handle.
2255 *
2256 * @thread Any thread.
2257 */
2258VMMR3DECL(int) DBGFR3BpSetReg(PUVM pUVM, PCDBGFADDRESS pAddress, uint64_t iHitTrigger,
2259 uint64_t iHitDisable, uint8_t fType, uint8_t cb, PDBGFBP phBp)
2260{
2261 return DBGFR3BpSetRegEx(pUVM, NIL_DBGFBPOWNER, NULL /*pvUser*/, pAddress,
2262 DBGF_BP_F_DEFAULT, iHitTrigger, iHitDisable, fType, cb, phBp);
2263}
2264
2265
/**
 * Sets a register breakpoint - extended version.
 *
 * @returns VBox status code.
 * @param   pUVM        The user mode VM handle.
 * @param   hOwner      The owner handle, use NIL_DBGFBPOWNER if no special owner attached.
 * @param   pvUser      Opaque user data to pass in the owner callback.
 * @param   pAddress    The address of the breakpoint.
 * @param   fFlags      Combination of DBGF_BP_F_XXX.
 * @param   iHitTrigger The hit count at which the breakpoint start triggering.
 *                      Use 0 (or 1) if it's gonna trigger at once.
 * @param   iHitDisable The hit count which disables the breakpoint.
 *                      Use ~(uint64_t) if it's never gonna be disabled.
 * @param   fType       The access type (one of the X86_DR7_RW_* defines).
 * @param   cb          The access size - 1,2,4 or 8 (the latter is AMD64 long mode only.
 *                      Must be 1 if fType is X86_DR7_RW_EO.
 * @param   phBp        Where to store the breakpoint handle.
 *
 * @thread Any thread.
 */
VMMR3DECL(int) DBGFR3BpSetRegEx(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser,
                                PCDBGFADDRESS pAddress, uint16_t fFlags,
                                uint64_t iHitTrigger, uint64_t iHitDisable,
                                uint8_t fType, uint8_t cb, PDBGFBP phBp)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(hOwner != NIL_DBGFBPOWNER || pvUser == NULL, VERR_INVALID_PARAMETER);
    AssertReturn(DBGFR3AddrIsValid(pUVM, pAddress), VERR_INVALID_PARAMETER);
    AssertReturn(iHitTrigger <= iHitDisable, VERR_INVALID_PARAMETER);
    AssertReturn(cb > 0 && cb <= 8 && RT_IS_POWER_OF_TWO(cb), VERR_INVALID_PARAMETER);
    AssertPtrReturn(phBp, VERR_INVALID_POINTER);
    switch (fType)
    {
        case X86_DR7_RW_EO:
            /* Execute-only breakpoints must have a length of 1 byte. */
            if (cb == 1)
                break;
            /* AssertMsgFailedReturn always returns, so there is no fallthrough here. */
            AssertMsgFailedReturn(("fType=%#x cb=%d != 1\n", fType, cb), VERR_INVALID_PARAMETER);
        case X86_DR7_RW_IO:
        case X86_DR7_RW_RW:
        case X86_DR7_RW_WO:
            break;
        default:
            AssertMsgFailedReturn(("fType=%#x\n", fType), VERR_INVALID_PARAMETER);
    }

    int rc = dbgfR3BpEnsureInit(pUVM);
    AssertRCReturn(rc, rc);

    /*
     * Reuse an existing breakpoint with identical address/size/type if there
     * is one; it gets armed and reported via VINF_DBGF_BP_ALREADY_EXIST.
     */
    PDBGFBPINT pBp = NULL;
    DBGFBP hBp = dbgfR3BpGetByAddr(pUVM, DBGFBPTYPE_REG, pAddress->FlatPtr, &pBp);
    if (    hBp != NIL_DBGFBP
        &&  pBp->Pub.u.Reg.cb == cb
        &&  pBp->Pub.u.Reg.fType == fType)
    {
        rc = VINF_SUCCESS;
        if (!DBGF_BP_PUB_IS_ENABLED(&pBp->Pub))
            rc = dbgfR3BpArm(pUVM, hBp, pBp);
        if (RT_SUCCESS(rc))
        {
            rc = VINF_DBGF_BP_ALREADY_EXIST;
            if (phBp)
                *phBp = hBp;
        }
        return rc;
    }

    /* Allocate new breakpoint. */
    rc = dbgfR3BpAlloc(pUVM, hOwner, pvUser, DBGFBPTYPE_REG, fFlags,
                       iHitTrigger, iHitDisable, &hBp, &pBp);
    if (RT_SUCCESS(rc))
    {
        pBp->Pub.u.Reg.GCPtr = pAddress->FlatPtr;
        pBp->Pub.u.Reg.fType = fType;
        pBp->Pub.u.Reg.cb    = cb;
        /* iReg is filled in by dbgfR3BpRegAssign() below; UINT8_MAX marks "unassigned". */
        pBp->Pub.u.Reg.iReg  = UINT8_MAX;
        ASMCompilerBarrier();

        /* Assign the proper hardware breakpoint. */
        rc = dbgfR3BpRegAssign(pUVM->pVM, hBp, pBp);
        if (RT_SUCCESS(rc))
        {
            /* Arm the breakpoint. */
            if (fFlags & DBGF_BP_F_ENABLED)
                rc = dbgfR3BpArm(pUVM, hBp, pBp);
            if (RT_SUCCESS(rc))
            {
                if (phBp)
                    *phBp = hBp;
                return VINF_SUCCESS;
            }

            /* Arming failed - release the hardware register again. */
            int rc2 = dbgfR3BpRegRemove(pUVM->pVM, hBp, pBp);
            AssertRC(rc2); RT_NOREF(rc2);
        }

        /* Roll back the allocation on any failure. */
        dbgfR3BpFree(pUVM, hBp, pBp);
    }

    return rc;
}
2366
2367
2368/**
2369 * This is only kept for now to not mess with the debugger implementation at this point,
2370 * recompiler breakpoints are not supported anymore (IEM has some API but it isn't implemented
2371 * and should probably be merged with the DBGF breakpoints).
2372 */
2373VMMR3DECL(int) DBGFR3BpSetREM(PUVM pUVM, PCDBGFADDRESS pAddress, uint64_t iHitTrigger,
2374 uint64_t iHitDisable, PDBGFBP phBp)
2375{
2376 RT_NOREF(pUVM, pAddress, iHitTrigger, iHitDisable, phBp);
2377 return VERR_NOT_SUPPORTED;
2378}
2379
2380
2381/**
2382 * Sets an I/O port breakpoint.
2383 *
2384 * @returns VBox status code.
2385 * @param pUVM The user mode VM handle.
2386 * @param uPort The first I/O port.
2387 * @param cPorts The number of I/O ports, see DBGFBPIOACCESS_XXX.
2388 * @param fAccess The access we want to break on.
2389 * @param iHitTrigger The hit count at which the breakpoint start
2390 * triggering. Use 0 (or 1) if it's gonna trigger at
2391 * once.
2392 * @param iHitDisable The hit count which disables the breakpoint.
2393 * Use ~(uint64_t) if it's never gonna be disabled.
2394 * @param phBp Where to store the breakpoint handle.
2395 *
2396 * @thread Any thread.
2397 */
2398VMMR3DECL(int) DBGFR3BpSetPortIo(PUVM pUVM, RTIOPORT uPort, RTIOPORT cPorts, uint32_t fAccess,
2399 uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
2400{
2401 return DBGFR3BpSetPortIoEx(pUVM, NIL_DBGFBPOWNER, NULL /*pvUser*/, uPort, cPorts, fAccess,
2402 DBGF_BP_F_DEFAULT, iHitTrigger, iHitDisable, phBp);
2403}
2404
2405
/**
 * Sets an I/O port breakpoint - extended version.
 *
 * @returns VBox status code.
 * @param   pUVM        The user mode VM handle.
 * @param   hOwner      The owner handle, use NIL_DBGFBPOWNER if no special owner attached.
 * @param   pvUser      Opaque user data to pass in the owner callback.
 * @param   uPort       The first I/O port.
 * @param   cPorts      The number of I/O ports, see DBGFBPIOACCESS_XXX.
 * @param   fAccess     The access we want to break on.
 * @param   fFlags      Combination of DBGF_BP_F_XXX.
 * @param   iHitTrigger The hit count at which the breakpoint start
 *                      triggering. Use 0 (or 1) if it's gonna trigger at
 *                      once.
 * @param   iHitDisable The hit count which disables the breakpoint.
 *                      Use ~(uint64_t) if it's never gonna be disabled.
 * @param   phBp        Where to store the breakpoint handle.
 *
 * @thread Any thread.
 */
VMMR3DECL(int) DBGFR3BpSetPortIoEx(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser,
                                   RTIOPORT uPort, RTIOPORT cPorts, uint32_t fAccess,
                                   uint32_t fFlags, uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(hOwner != NIL_DBGFBPOWNER || pvUser == NULL, VERR_INVALID_PARAMETER);
    AssertReturn(!(fAccess & ~DBGFBPIOACCESS_VALID_MASK_PORT_IO), VERR_INVALID_FLAGS);
    AssertReturn(fAccess, VERR_INVALID_FLAGS);
    AssertReturn(!(fFlags & ~DBGF_BP_F_VALID_MASK), VERR_INVALID_FLAGS);
    AssertReturn(fFlags, VERR_INVALID_FLAGS);
    AssertReturn(iHitTrigger <= iHitDisable, VERR_INVALID_PARAMETER);
    AssertPtrReturn(phBp, VERR_INVALID_POINTER);
    AssertReturn(cPorts > 0, VERR_OUT_OF_RANGE);
    /* The port range must not wrap around the end of the 16-bit port space. */
    AssertReturn((RTIOPORT)(uPort + (cPorts - 1)) >= uPort, VERR_OUT_OF_RANGE);

    int rc = dbgfR3BpPortIoEnsureInit(pUVM);
    AssertRCReturn(rc, rc);

    /*
     * Reuse an existing breakpoint with the identical range and access mask
     * if there is one; it gets armed and reported via VINF_DBGF_BP_ALREADY_EXIST.
     */
    PDBGFBPINT pBp = NULL;
    DBGFBP hBp = dbgfR3BpPortIoGetByRange(pUVM, uPort, cPorts, &pBp);
    if (   hBp != NIL_DBGFBP
        && pBp->Pub.u.PortIo.uPort == uPort
        && pBp->Pub.u.PortIo.cPorts == cPorts
        && pBp->Pub.u.PortIo.fAccess == fAccess)
    {
        rc = VINF_SUCCESS;
        if (!DBGF_BP_PUB_IS_ENABLED(&pBp->Pub))
            rc = dbgfR3BpArm(pUVM, hBp, pBp);
        if (RT_SUCCESS(rc))
        {
            rc = VINF_DBGF_BP_ALREADY_EXIST;
            if (phBp)
                *phBp = hBp;
        }
        return rc;
    }

    rc = dbgfR3BpAlloc(pUVM, hOwner, pvUser, DBGFBPTYPE_PORT_IO, fFlags, iHitTrigger, iHitDisable, &hBp, &pBp);
    if (RT_SUCCESS(rc))
    {
        pBp->Pub.u.PortIo.uPort   = uPort;
        pBp->Pub.u.PortIo.cPorts  = cPorts;
        pBp->Pub.u.PortIo.fAccess = fAccess;

        /* Add the breakpoint to the lookup tables. */
        rc = dbgfR3BpPortIoAdd(pUVM, hBp, pBp);
        if (RT_SUCCESS(rc))
        {
            /* Enable the breakpoint if requested. */
            if (fFlags & DBGF_BP_F_ENABLED)
                rc = dbgfR3BpArm(pUVM, hBp, pBp);
            if (RT_SUCCESS(rc))
            {
                *phBp = hBp;
                return VINF_SUCCESS;
            }

            /* Arming failed - take the breakpoint out of the lookup tables again. */
            int rc2 = dbgfR3BpPortIoRemove(pUVM, hBp, pBp); AssertRC(rc2);
        }

        /* Roll back the allocation on any failure. */
        dbgfR3BpFree(pUVM, hBp, pBp);
    }

    return rc;
}
2491
2492
2493/**
2494 * Sets a memory mapped I/O breakpoint.
2495 *
2496 * @returns VBox status code.
2497 * @param pUVM The user mode VM handle.
2498 * @param GCPhys The first MMIO address.
2499 * @param cb The size of the MMIO range to break on.
2500 * @param fAccess The access we want to break on.
2501 * @param iHitTrigger The hit count at which the breakpoint start
2502 * triggering. Use 0 (or 1) if it's gonna trigger at
2503 * once.
2504 * @param iHitDisable The hit count which disables the breakpoint.
2505 * Use ~(uint64_t) if it's never gonna be disabled.
2506 * @param phBp Where to store the breakpoint handle.
2507 *
2508 * @thread Any thread.
2509 */
2510VMMR3DECL(int) DBGFR3BpSetMmio(PUVM pUVM, RTGCPHYS GCPhys, uint32_t cb, uint32_t fAccess,
2511 uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
2512{
2513 return DBGFR3BpSetMmioEx(pUVM, NIL_DBGFBPOWNER, NULL /*pvUser*/, GCPhys, cb, fAccess,
2514 DBGF_BP_F_DEFAULT, iHitTrigger, iHitDisable, phBp);
2515}
2516
2517
2518/**
2519 * Sets a memory mapped I/O breakpoint - extended version.
2520 *
2521 * @returns VBox status code.
2522 * @param pUVM The user mode VM handle.
2523 * @param hOwner The owner handle, use NIL_DBGFBPOWNER if no special owner attached.
2524 * @param pvUser Opaque user data to pass in the owner callback.
2525 * @param GCPhys The first MMIO address.
2526 * @param cb The size of the MMIO range to break on.
2527 * @param fAccess The access we want to break on.
2528 * @param fFlags Combination of DBGF_BP_F_XXX.
2529 * @param iHitTrigger The hit count at which the breakpoint start
2530 * triggering. Use 0 (or 1) if it's gonna trigger at
2531 * once.
2532 * @param iHitDisable The hit count which disables the breakpoint.
2533 * Use ~(uint64_t) if it's never gonna be disabled.
2534 * @param phBp Where to store the breakpoint handle.
2535 *
2536 * @thread Any thread.
2537 */
2538VMMR3DECL(int) DBGFR3BpSetMmioEx(PUVM pUVM, DBGFBPOWNER hOwner, void *pvUser,
2539 RTGCPHYS GCPhys, uint32_t cb, uint32_t fAccess,
2540 uint32_t fFlags, uint64_t iHitTrigger, uint64_t iHitDisable, PDBGFBP phBp)
2541{
2542 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2543 AssertReturn(hOwner != NIL_DBGFBPOWNER || pvUser == NULL, VERR_INVALID_PARAMETER);
2544 AssertReturn(!(fAccess & ~DBGFBPIOACCESS_VALID_MASK_MMIO), VERR_INVALID_FLAGS);
2545 AssertReturn(fAccess, VERR_INVALID_FLAGS);
2546 AssertReturn(!(fFlags & ~DBGF_BP_F_VALID_MASK), VERR_INVALID_FLAGS);
2547 AssertReturn(fFlags, VERR_INVALID_FLAGS);
2548 AssertReturn(iHitTrigger <= iHitDisable, VERR_INVALID_PARAMETER);
2549 AssertPtrReturn(phBp, VERR_INVALID_POINTER);
2550 AssertReturn(cb, VERR_OUT_OF_RANGE);
2551 AssertReturn(GCPhys + cb < GCPhys, VERR_OUT_OF_RANGE);
2552
2553 int rc = dbgfR3BpEnsureInit(pUVM);
2554 AssertRCReturn(rc, rc);
2555
2556 return VERR_NOT_IMPLEMENTED;
2557}
2558
2559
2560/**
2561 * Clears a breakpoint.
2562 *
2563 * @returns VBox status code.
2564 * @param pUVM The user mode VM handle.
2565 * @param hBp The handle of the breakpoint which should be removed (cleared).
2566 *
2567 * @thread Any thread.
2568 */
2569VMMR3DECL(int) DBGFR3BpClear(PUVM pUVM, DBGFBP hBp)
2570{
2571 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2572 AssertReturn(hBp != NIL_DBGFBPOWNER, VERR_INVALID_HANDLE);
2573
2574 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
2575 AssertPtrReturn(pBp, VERR_DBGF_BP_NOT_FOUND);
2576
2577 /* Disarm the breakpoint when it is enabled. */
2578 if (DBGF_BP_PUB_IS_ENABLED(&pBp->Pub))
2579 {
2580 int rc = dbgfR3BpDisarm(pUVM, hBp, pBp);
2581 AssertRC(rc);
2582 }
2583
2584 switch (DBGF_BP_PUB_GET_TYPE(&pBp->Pub))
2585 {
2586 case DBGFBPTYPE_REG:
2587 {
2588 int rc = dbgfR3BpRegRemove(pUVM->pVM, hBp, pBp);
2589 AssertRC(rc);
2590 break;
2591 }
2592 case DBGFBPTYPE_INT3:
2593 {
2594 int rc = dbgfR3BpInt3Remove(pUVM, hBp, pBp);
2595 AssertRC(rc);
2596 break;
2597 }
2598 case DBGFBPTYPE_PORT_IO:
2599 {
2600 int rc = dbgfR3BpPortIoRemove(pUVM, hBp, pBp);
2601 AssertRC(rc);
2602 break;
2603 }
2604 default:
2605 break;
2606 }
2607
2608 dbgfR3BpFree(pUVM, hBp, pBp);
2609 return VINF_SUCCESS;
2610}
2611
2612
2613/**
2614 * Enables a breakpoint.
2615 *
2616 * @returns VBox status code.
2617 * @param pUVM The user mode VM handle.
2618 * @param hBp The handle of the breakpoint which should be enabled.
2619 *
2620 * @thread Any thread.
2621 */
2622VMMR3DECL(int) DBGFR3BpEnable(PUVM pUVM, DBGFBP hBp)
2623{
2624 /*
2625 * Validate the input.
2626 */
2627 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2628 AssertReturn(hBp != NIL_DBGFBPOWNER, VERR_INVALID_HANDLE);
2629
2630 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
2631 AssertPtrReturn(pBp, VERR_DBGF_BP_NOT_FOUND);
2632
2633 int rc;
2634 if (!DBGF_BP_PUB_IS_ENABLED(&pBp->Pub))
2635 rc = dbgfR3BpArm(pUVM, hBp, pBp);
2636 else
2637 rc = VINF_DBGF_BP_ALREADY_ENABLED;
2638
2639 return rc;
2640}
2641
2642
2643/**
2644 * Disables a breakpoint.
2645 *
2646 * @returns VBox status code.
2647 * @param pUVM The user mode VM handle.
2648 * @param hBp The handle of the breakpoint which should be disabled.
2649 *
2650 * @thread Any thread.
2651 */
2652VMMR3DECL(int) DBGFR3BpDisable(PUVM pUVM, DBGFBP hBp)
2653{
2654 /*
2655 * Validate the input.
2656 */
2657 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2658 AssertReturn(hBp != NIL_DBGFBPOWNER, VERR_INVALID_HANDLE);
2659
2660 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);
2661 AssertPtrReturn(pBp, VERR_DBGF_BP_NOT_FOUND);
2662
2663 int rc;
2664 if (DBGF_BP_PUB_IS_ENABLED(&pBp->Pub))
2665 rc = dbgfR3BpDisarm(pUVM, hBp, pBp);
2666 else
2667 rc = VINF_DBGF_BP_ALREADY_DISABLED;
2668
2669 return rc;
2670}
2671
2672
/**
 * Enumerate the breakpoints.
 *
 * Walks every allocated breakpoint chunk and invokes the callback once for
 * each allocated breakpoint, handing it a snapshot of the breakpoint's public
 * data.  Enumeration is lock-free and best-effort: a breakpoint freed
 * concurrently may be skipped, and hit counts may be slightly stale.
 *
 * @returns VBox status code.  VINF_SUCCESS when the whole set was enumerated,
 *          otherwise the first failure status or VINF_CALLBACK_RETURN from the
 *          callback (which stops the enumeration early).
 * @param   pUVM            The user mode VM handle.
 * @param   pfnCallback     The callback function.
 * @param   pvUser          The user argument to pass to the callback.
 *
 * @thread Any thread.
 */
VMMR3DECL(int) DBGFR3BpEnum(PUVM pUVM, PFNDBGFBPENUM pfnCallback, void *pvUser)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);

    for (uint32_t idChunk = 0; idChunk < RT_ELEMENTS(pUVM->dbgf.s.aBpChunks); idChunk++)
    {
        PDBGFBPCHUNKR3 pBpChunk = &pUVM->dbgf.s.aBpChunks[idChunk];

        if (pBpChunk->idChunk == DBGF_BP_CHUNK_ID_INVALID)
            break; /* Stop here as the first non allocated chunk means there is no one allocated afterwards as well. */

        /* A chunk with all slots free has nothing to enumerate. */
        if (pBpChunk->cBpsFree < DBGF_BP_COUNT_PER_CHUNK)
        {
            /* Scan the bitmap for allocated entries. */
            int32_t iAlloc = ASMBitFirstSet(pBpChunk->pbmAlloc, DBGF_BP_COUNT_PER_CHUNK);
            if (iAlloc != -1)
            {
                do
                {
                    /* Rebuild the public handle from the chunk id and slot index. */
                    DBGFBP hBp = DBGF_BP_HND_CREATE(idChunk, (uint32_t)iAlloc);
                    PDBGFBPINT pBp = dbgfR3BpGetByHnd(pUVM, hBp);

                    /* Make a copy of the breakpoints public data to have a consistent view. */
                    /* Each field is read atomically; the snapshot as a whole is not atomic,
                       so individual fields may come from slightly different points in time. */
                    DBGFBPPUB BpPub;
                    BpPub.cHits       = ASMAtomicReadU64((volatile uint64_t *)&pBp->Pub.cHits);
                    BpPub.iHitTrigger = ASMAtomicReadU64((volatile uint64_t *)&pBp->Pub.iHitTrigger);
                    BpPub.iHitDisable = ASMAtomicReadU64((volatile uint64_t *)&pBp->Pub.iHitDisable);
                    BpPub.hOwner      = ASMAtomicReadU32((volatile uint32_t *)&pBp->Pub.hOwner);
                    BpPub.u16Type     = ASMAtomicReadU16((volatile uint16_t *)&pBp->Pub.u16Type); /* Actually constant. */
                    BpPub.fFlags      = ASMAtomicReadU16((volatile uint16_t *)&pBp->Pub.fFlags);
                    memcpy(&BpPub.u, &pBp->Pub.u, sizeof(pBp->Pub.u)); /* Is constant after allocation. */

                    /* Check if a removal raced us. */
                    /* Re-testing the allocation bit AFTER copying keeps a freed slot's
                       stale snapshot from reaching the callback (best-effort only;
                       NOTE(review): a free immediately after this test is still visible). */
                    if (ASMBitTest(pBpChunk->pbmAlloc, iAlloc))
                    {
                        int rc = pfnCallback(pUVM, pvUser, hBp, &BpPub);
                        if (RT_FAILURE(rc) || rc == VINF_CALLBACK_RETURN)
                            return rc; /* Callback aborts the enumeration. */
                    }

                    iAlloc = ASMBitNextSet(pBpChunk->pbmAlloc, DBGF_BP_COUNT_PER_CHUNK, iAlloc);
                } while (iAlloc != -1);
            }
        }
    }

    return VINF_SUCCESS;
}
2731
2732
2733/**
2734 * Called whenever a breakpoint event needs to be serviced in ring-3 to decide what to do.
2735 *
2736 * @returns VBox status code.
2737 * @param pVM The cross context VM structure.
2738 * @param pVCpu The vCPU the breakpoint event happened on.
2739 *
2740 * @thread EMT
2741 */
2742VMMR3_INT_DECL(int) DBGFR3BpHit(PVM pVM, PVMCPU pVCpu)
2743{
2744 /* Send it straight into the debugger?. */
2745 if (pVCpu->dbgf.s.fBpInvokeOwnerCallback)
2746 {
2747 DBGFBP hBp = pVCpu->dbgf.s.hBpActive;
2748 pVCpu->dbgf.s.fBpInvokeOwnerCallback = false;
2749
2750 PDBGFBPINT pBp = dbgfR3BpGetByHnd(pVM->pUVM, hBp);
2751 AssertReturn(pBp, VERR_DBGF_BP_IPE_9);
2752
2753 /* Resolve owner (can be NIL_DBGFBPOWNER) and invoke callback if there is one. */
2754 if (pBp->Pub.hOwner != NIL_DBGFBPOWNER)
2755 {
2756 PCDBGFBPOWNERINT pBpOwner = dbgfR3BpOwnerGetByHnd(pVM->pUVM, pBp->Pub.hOwner);
2757 if (pBpOwner)
2758 {
2759 VBOXSTRICTRC rcStrict = dbgfR3BpHit(pVM, pVCpu, hBp, pBp, pBpOwner);
2760 if (VBOXSTRICTRC_VAL(rcStrict) == VINF_SUCCESS)
2761 {
2762 pVCpu->dbgf.s.hBpActive = NIL_DBGFBP;
2763 return VINF_SUCCESS;
2764 }
2765 if (VBOXSTRICTRC_VAL(rcStrict) != VINF_DBGF_BP_HALT) /* Guru meditation. */
2766 return VERR_DBGF_BP_OWNER_CALLBACK_WRONG_STATUS;
2767 /* else: Halt in the debugger. */
2768 }
2769 }
2770 }
2771
2772 return DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
2773}
2774
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette