VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/DBGF.cpp@ 86361

Last change on this file since 86361 was 86361, checked in by vboxsync, 4 years ago

VMM,DBGC: Prevent leaks detection triggering in tstCFGM. bugref:9841

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 75.7 KB
Line 
1/* $Id: DBGF.cpp 86361 2020-09-30 18:59:31Z vboxsync $ */
2/** @file
3 * DBGF - Debugger Facility.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_dbgf DBGF - The Debugger Facility
20 *
21 * The purpose of the DBGF is to provide an interface for debuggers to
22 * manipulate the VMM without having to mess up the source code for each of
23 * them. The DBGF is always built in and will always work when a debugger
24 * attaches to the VM. The DBGF provides the basic debugger features, such as
25 * halting execution, handling breakpoints, single step execution, instruction
26 * disassembly, info querying, OS specific diggers, symbol and module
27 * management.
28 *
29 * The interface is working in a manner similar to the win32, linux and os2
30 * debugger interfaces. The interface has an asynchronous nature. This comes
31 * from the fact that the VMM and the Debugger are running in different threads.
32 * They are referred to as the "emulation thread" and the "debugger thread", or
 * as the "ping thread" and the "pong thread", respectively. (The last set of
34 * names comes from the use of the Ping-Pong synchronization construct from the
35 * RTSem API.)
36 *
37 * @see grp_dbgf
38 *
39 *
40 * @section sec_dbgf_scenario Usage Scenario
41 *
42 * The debugger starts by attaching to the VM. For practical reasons we limit the
43 * number of concurrently attached debuggers to 1 per VM. The action of
44 * attaching to the VM causes the VM to check and generate debug events.
45 *
46 * The debugger then will wait/poll for debug events and issue commands.
47 *
48 * The waiting and polling is done by the DBGFEventWait() function. It will wait
49 * for the emulation thread to send a ping, thus indicating that there is an
50 * event waiting to be processed.
51 *
52 * An event can be a response to a command issued previously, the hitting of a
53 * breakpoint, or running into a bad/fatal VMM condition. The debugger now has
54 * the ping and must respond to the event at hand - the VMM is waiting. This
55 * usually means that the user of the debugger must do something, but it doesn't
56 * have to. The debugger is free to call any DBGF function (nearly at least)
57 * while processing the event.
58 *
59 * Typically the user will issue a request for the execution to be resumed, so
60 * the debugger calls DBGFResume() and goes back to waiting/polling for events.
61 *
62 * When the user eventually terminates the debugging session or selects another
63 * VM, the debugger detaches from the VM. This means that breakpoints are
64 * disabled and that the emulation thread no longer polls for debugger commands.
65 *
66 */
67
68
69/*********************************************************************************************************************************
70* Header Files *
71*********************************************************************************************************************************/
72#define LOG_GROUP LOG_GROUP_DBGF
73#include <VBox/vmm/dbgf.h>
74#include <VBox/vmm/selm.h>
75#include <VBox/vmm/em.h>
76#include <VBox/vmm/hm.h>
77#include <VBox/vmm/mm.h>
78#include "DBGFInternal.h"
79#include <VBox/vmm/vm.h>
80#include <VBox/vmm/uvm.h>
81#include <VBox/err.h>
82
83#include <VBox/log.h>
84#include <iprt/semaphore.h>
85#include <iprt/thread.h>
86#include <iprt/asm.h>
87#include <iprt/time.h>
88#include <iprt/assert.h>
89#include <iprt/stream.h>
90#include <iprt/env.h>
91
92
93/*********************************************************************************************************************************
94* Structures and Typedefs *
95*********************************************************************************************************************************/
/**
 * Instruction type returned by dbgfStepGetCurInstrType.
 *
 * Classifies the instruction so the stepping filter can decide whether a
 * step-over / step-out has completed.
 */
typedef enum DBGFSTEPINSTRTYPE
{
    DBGFSTEPINSTRTYPE_INVALID = 0,              /**< Customary invalid zero value. */
    DBGFSTEPINSTRTYPE_OTHER,                    /**< Any instruction not otherwise classified. */
    DBGFSTEPINSTRTYPE_RET,                      /**< A return instruction. */
    DBGFSTEPINSTRTYPE_CALL,                     /**< A call instruction. */
    DBGFSTEPINSTRTYPE_END,                      /**< End of valid values. */
    DBGFSTEPINSTRTYPE_32BIT_HACK = 0x7fffffff   /**< Forces the type to 32 bits. */
} DBGFSTEPINSTRTYPE;
108
109
110/*********************************************************************************************************************************
111* Internal Functions *
112*********************************************************************************************************************************/
113DECLINLINE(int) dbgfR3SendEventWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx);
114DECLINLINE(DBGFCMD) dbgfR3CpuGetCmd(PUVMCPU pUVCpu);
115static int dbgfR3CpuWait(PVMCPU pVCpu);
116static int dbgfR3CpuCmd(PVMCPU pVCpu, DBGFCMD enmCmd, PDBGFCMDDATA pCmdData, bool *pfResumeExecution);
117static DBGFSTEPINSTRTYPE dbgfStepGetCurInstrType(PVM pVM, PVMCPU pVCpu);
118static bool dbgfStepAreWeThereYet(PVM pVM, PVMCPU pVCpu);
119
120
121
/**
 * Initializes the DBGF.
 *
 * Sub-components are initialized one after another; on failure everything
 * that was already initialized is torn down again in reverse order before
 * the failure status is returned.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(int) DBGFR3Init(PVM pVM)
{
    PUVM pUVM = pVM->pUVM;
    /* The DBGF substructures must fit in the padding reserved for them. */
    AssertCompile(sizeof(pUVM->dbgf.s) <= sizeof(pUVM->dbgf.padding));
    AssertCompile(sizeof(pUVM->aCpus[0].dbgf.s) <= sizeof(pUVM->aCpus[0].dbgf.padding));

    /* No stepping filter is active until single stepping is requested. */
    pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;

    /*
     * The usual sideways mountain climbing style of init:
     */
    int rc = dbgfR3InfoInit(pUVM); /* (First, initializes the shared critical section.) */
    if (RT_SUCCESS(rc))
    {
        rc = dbgfR3TraceInit(pVM);
        if (RT_SUCCESS(rc))
        {
            rc = dbgfR3RegInit(pUVM);
            if (RT_SUCCESS(rc))
            {
                rc = dbgfR3AsInit(pUVM);
                if (RT_SUCCESS(rc))
                {
                    rc = dbgfR3BpInit(pVM);
                    if (RT_SUCCESS(rc))
                    {
                        rc = dbgfR3OSInit(pUVM);
                        if (RT_SUCCESS(rc))
                        {
                            rc = dbgfR3PlugInInit(pUVM);
                            if (RT_SUCCESS(rc))
                            {
                                rc = dbgfR3BugCheckInit(pVM);
                                if (RT_SUCCESS(rc))
                                {
#ifdef VBOX_WITH_DBGF_TRACING
                                    rc = dbgfR3TracerInit(pVM);
#endif
                                    if (RT_SUCCESS(rc))
                                    {
                                        return VINF_SUCCESS;
                                    }
                                }
                                dbgfR3PlugInTerm(pUVM);
                            }
                            dbgfR3OSTermPart1(pUVM);
                            dbgfR3OSTermPart2(pUVM);
                        }
                    }
                    dbgfR3AsTerm(pUVM);
                }
                dbgfR3RegTerm(pUVM);
            }
            dbgfR3TraceTerm(pVM);
        }
        dbgfR3InfoTerm(pUVM);
    }
    return rc;
}
187
188
/**
 * Terminates and cleans up resources allocated by the DBGF.
 *
 * Sub-components are torn down in the reverse order of DBGFR3Init.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(int) DBGFR3Term(PVM pVM)
{
    PUVM pUVM = pVM->pUVM;

#ifdef VBOX_WITH_DBGF_TRACING
    dbgfR3TracerTerm(pVM);
#endif
    /* OS digger termination is split in two, with plug-in termination
       sandwiched in between (mirrors the init order). */
    dbgfR3OSTermPart1(pUVM);
    dbgfR3PlugInTerm(pUVM);
    dbgfR3OSTermPart2(pUVM);
    dbgfR3AsTerm(pUVM);
    dbgfR3RegTerm(pUVM);
    dbgfR3TraceTerm(pVM);
    dbgfR3InfoTerm(pUVM);

    return VINF_SUCCESS;
}
212
213
/**
 * This is for tstCFGM and others to avoid trigger leak detection.
 *
 * Only the info handler sub-component is terminated here (the part
 * DBGFR3Init sets up first).
 *
 * @param   pUVM    The user mode VM structure.
 */
VMMR3DECL(void) DBGFR3TermUVM(PUVM pUVM)
{
    dbgfR3InfoTerm(pUVM);
}
224
225
/**
 * Called when the VM is powered off to detach debuggers.
 *
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(void) DBGFR3PowerOff(PVM pVM)
{
    /*
     * Send a termination event to any attached debugger.
     */
    if (pVM->dbgf.s.fAttached)
    {
        PVMCPU pVCpu = VMMGetCpu(pVM);
        /* Waits for the debugger to acknowledge the power-off event. */
        int rc = dbgfR3SendEventWait(pVM, pVCpu, DBGFEVENT_POWERING_OFF, DBGFEVENTCTX_OTHER);
        AssertLogRelRC(rc);

        /*
         * Clear the FF so we won't get confused later on.
         */
        VM_FF_CLEAR(pVM, VM_FF_DBGF);
    }
}
248
249
/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM needs to relocate itself inside the GC.
 *
 * @param   pVM         The cross context VM structure.
 * @param   offDelta    Relocation delta relative to old location.
 */
VMMR3_INT_DECL(void) DBGFR3Relocate(PVM pVM, RTGCINTPTR offDelta)
{
    /* Forward to the sub-components that keep addresses around. */
    dbgfR3TraceRelocate(pVM);
    dbgfR3AsRelocate(pVM->pUVM, offDelta);
}
263
264
/**
 * Waits a little while for a debugger to attach.
 *
 * Called when an event needs delivering and no debugger is attached, giving
 * one a short window to attach before the VM is stopped.
 *
 * @returns true if a debugger has attached, false otherwise (stop the VM).
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context per CPU structure.
 * @param   enmEvent    Event.
 *
 * @thread EMT(pVCpu)
 */
bool dbgfR3WaitForAttach(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmEvent)
{
    /*
     * First a message.
     */
    /* cWait is in units of 100ms ticks (see the RTThreadSleep below). */
#if !defined(DEBUG)
    int cWait = 10;
#else
    int cWait = !VM_IS_RAW_MODE_ENABLED(pVM)
             && (   enmEvent == DBGFEVENT_ASSERTION_HYPER
                 || enmEvent == DBGFEVENT_FATAL_ERROR)
             && !RTEnvExist("VBOX_DBGF_WAIT_FOR_ATTACH")
              ? 10
              : 150;
#endif
    RTStrmPrintf(g_pStdErr, "DBGF: No debugger attached, waiting %d second%s for one to attach (event=%d)\n",
                 cWait / 10, cWait != 10 ? "s" : "", enmEvent);
    RTStrmFlush(g_pStdErr);
    while (cWait > 0)
    {
        RTThreadSleep(100);
        if (pVM->dbgf.s.fAttached)
        {
            RTStrmPrintf(g_pStdErr, "Attached!\n");
            RTStrmFlush(g_pStdErr);
            return true;
        }

        /* Process rendezvous (debugger attaching involves such). */
        if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
        {
            int rc = VMMR3EmtRendezvousFF(pVM, pVCpu); AssertRC(rc);
            if (rc != VINF_SUCCESS)
            {
                /** @todo Ignoring these could be bad. */
                RTStrmPrintf(g_pStdErr, "[rcRendezvous=%Rrc, ignored!]", rc);
                RTStrmFlush(g_pStdErr);
            }
        }

        /* Process priority stuff. */
        if (   VM_FF_IS_SET(pVM, VM_FF_REQUEST)
            || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
        {
            int rc = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, true /*fPriorityOnly*/);
            if (rc == VINF_SUCCESS)
                rc = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, true /*fPriorityOnly*/);
            if (rc != VINF_SUCCESS)
            {
                /** @todo Ignoring these could be bad. */
                RTStrmPrintf(g_pStdErr, "[rcReq=%Rrc, ignored!]", rc);
                RTStrmFlush(g_pStdErr);
            }
        }

        /* next */
        /* Print a countdown tick once per second. */
        if (!(cWait % 10))
        {
            RTStrmPrintf(g_pStdErr, "%d.", cWait / 10);
            RTStrmFlush(g_pStdErr);
        }
        cWait--;
    }

    RTStrmPrintf(g_pStdErr, "Stopping the VM!\n");
    RTStrmFlush(g_pStdErr);
    return false;
}
343
344
/**
 * Forced action callback.
 *
 * The VMM will call this from its main loop when either VM_FF_DBGF or
 * VMCPU_FF_DBGF are set.
 *
 * The function checks for and executes pending commands from the debugger.
 * Then it checks for pending debug events and serves these.
 *
 * @returns VINF_SUCCESS normally.
 * @returns VERR_DBGF_RAISE_FATAL_ERROR to pretend a fatal error happened.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context per CPU structure.
 */
VMMR3_INT_DECL(int) DBGFR3VMMForcedAction(PVM pVM, PVMCPU pVCpu)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;

    /*
     * Dispatch pending events.
     */
    if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_DBGF))
    {
        /* Only the most recent entry in the per-CPU event array can be in
           the "current" (undelivered) state. */
        if (   pVCpu->dbgf.s.cEvents > 0
            && pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState == DBGFEVENTSTATE_CURRENT)
        {
            rcStrict = DBGFR3EventHandlePending(pVM, pVCpu);
            /** @todo may end up with VERR_DBGF_NOT_ATTACHED here, which will prove fatal... */
        }

        /*
         * Command pending? Process it.
         */
        PUVMCPU pUVCpu = pVCpu->pUVCpu;
        if (pUVCpu->dbgf.s.enmDbgfCmd != DBGFCMD_NO_COMMAND)
        {
            bool fResumeExecution;
            /* Copy the command data before dbgfR3CpuGetCmd clears the command slot. */
            DBGFCMDDATA CmdData = pUVCpu->dbgf.s.DbgfCmdData;
            DBGFCMD enmCmd = dbgfR3CpuGetCmd(pUVCpu);
            VBOXSTRICTRC rcStrict2 = dbgfR3CpuCmd(pVCpu, enmCmd, &CmdData, &fResumeExecution);
            if (!fResumeExecution)
                rcStrict2 = dbgfR3CpuWait(pVCpu);
            /* Merge the two status codes, keeping the more significant one. */
            if (   rcStrict2 != VINF_SUCCESS
                && (   rcStrict == VINF_SUCCESS
                    || RT_FAILURE(rcStrict2)
                    || rcStrict2 < rcStrict) ) /** @todo oversimplified? */
                rcStrict = rcStrict2;
        }
    }

    return VBOXSTRICTRC_TODO(rcStrict);
}
397
398
399/**
400 * Try to determine the event context.
401 *
402 * @returns debug event context.
403 * @param pVCpu The cross context vCPU structure.
404 */
405static DBGFEVENTCTX dbgfR3FigureEventCtx(PVMCPU pVCpu)
406{
407 switch (EMGetState(pVCpu))
408 {
409 case EMSTATE_HM:
410 case EMSTATE_NEM:
411 case EMSTATE_DEBUG_GUEST_HM:
412 case EMSTATE_DEBUG_GUEST_NEM:
413 return DBGFEVENTCTX_HM;
414
415 case EMSTATE_IEM:
416 case EMSTATE_RAW:
417 case EMSTATE_IEM_THEN_REM:
418 case EMSTATE_DEBUG_GUEST_IEM:
419 case EMSTATE_DEBUG_GUEST_RAW:
420 return DBGFEVENTCTX_RAW;
421
422
423 case EMSTATE_REM:
424 case EMSTATE_DEBUG_GUEST_REM:
425 return DBGFEVENTCTX_REM;
426
427 case EMSTATE_DEBUG_HYPER:
428 case EMSTATE_GURU_MEDITATION:
429 return DBGFEVENTCTX_HYPER;
430
431 default:
432 return DBGFEVENTCTX_OTHER;
433 }
434}
435
436
/**
 * Sends the event to the debugger (i.e. adds it to the event ring buffer).
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The CPU sending the event.
 * @param   enmType     The event type to send.
 * @param   enmCtx      The event context, DBGFEVENTCTX_INVALID will be resolved.
 * @param   pvPayload   Event payload (DBGFEVENT::u data), optional.
 * @param   cbPayload   The size of the event payload, optional.
 */
static int dbgfR3SendEventWorker(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx,
                                 void const *pvPayload, size_t cbPayload)
{
    PUVM pUVM = pVM->pUVM;
    pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID; /** @todo per vCPU stepping filter. */

    /*
     * Massage the input a little.
     */
    /* Clamp the payload size to what DBGFEVENT::u can hold. */
    AssertStmt(cbPayload <= RT_SIZEOFMEMB(DBGFEVENT, u), cbPayload = RT_SIZEOFMEMB(DBGFEVENT, u));
    if (enmCtx == DBGFEVENTCTX_INVALID)
        enmCtx = dbgfR3FigureEventCtx(pVCpu);

    /*
     * Put the event into the ring buffer.
     */
    RTSemFastMutexRequest(pUVM->dbgf.s.hMtxDbgEvtWr); /* serializes writers */

    uint32_t const cDbgEvtMax     = RT_MAX(1, pUVM->dbgf.s.cDbgEvtMax); /* avoid modulo-by-zero below */
    uint32_t const idxDbgEvtWrite = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite);
    uint32_t const idxDbgEvtRead  = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtRead);
    /** @todo Handle full buffer. */ RT_NOREF(idxDbgEvtRead);

    PDBGFEVENT pEvent = &pUVM->dbgf.s.paDbgEvts[idxDbgEvtWrite % cDbgEvtMax];

#ifdef DEBUG
    /* Poison the slot so stale fields are easy to spot in the debugger. */
    ASMMemFill32(pEvent, sizeof(*pEvent), UINT32_C(0xdeadbeef));
#endif
    pEvent->enmType   = enmType;
    pEvent->enmCtx    = enmCtx;
    pEvent->idCpu     = pVCpu->idCpu;
    pEvent->uReserved = 0;
    if (cbPayload)
        memcpy(&pEvent->u, pvPayload, cbPayload);

    /* Publish the new write index only after the event data is in place. */
    ASMAtomicWriteU32(&pUVM->dbgf.s.idxDbgEvtWrite, (idxDbgEvtWrite + 1) % cDbgEvtMax);

    RTSemFastMutexRelease(pUVM->dbgf.s.hMtxDbgEvtWr);

    /*
     * Signal the debugger.
     */
    return RTSemEventSignal(pUVM->dbgf.s.hEvtWait);
}
492
493
494/**
495 * Send event and wait for the debugger to respond.
496 *
497 * @returns Strict VBox status code.
498 * @param pVM The cross context VM structure.
499 * @param pVCpu The CPU sending the event.
500 * @param enmType The event type to send.
501 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
502 */
503DECLINLINE(int) dbgfR3SendEventWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx)
504{
505 int rc = dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, NULL, 0);
506 if (RT_SUCCESS(rc))
507 rc = dbgfR3CpuWait(pVCpu);
508 return rc;
509}
510
511
512/**
513 * Send event and wait for the debugger to respond, extended version.
514 *
515 * @returns Strict VBox status code.
516 * @param pVM The cross context VM structure.
517 * @param pVCpu The CPU sending the event.
518 * @param enmType The event type to send.
519 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
520 * @param pvPayload Event payload (DBGFEVENT::u data), optional.
521 * @param cbPayload The size of the event payload, optional.
522 */
523DECLINLINE(int) dbgfR3SendEventWaitEx(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx,
524 void const *pvPayload, size_t cbPayload)
525{
526 int rc = dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, pvPayload, cbPayload);
527 if (RT_SUCCESS(rc))
528 rc = dbgfR3CpuWait(pVCpu);
529 return rc;
530}
531
532
533/**
534 * Send event but do NOT wait for the debugger.
535 *
536 * Currently only used by dbgfR3CpuCmd().
537 *
538 * @param pVM The cross context VM structure.
539 * @param pVCpu The CPU sending the event.
540 * @param enmType The event type to send.
541 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
542 */
543DECLINLINE(int) dbgfR3SendEventNoWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx)
544{
545 return dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, NULL, 0);
546}
547
548
549/**
550 * The common event prologue code.
551 *
552 * It will make sure someone is attached, and perhaps process any high priority
553 * pending actions (none yet).
554 *
555 * @returns VBox status code.
556 * @param pVM The cross context VM structure.
557 * @param pVCpu The vCPU cross context structure.
558 * @param enmEvent The event to be sent.
559 */
560static int dbgfR3EventPrologue(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmEvent)
561{
562 /*
563 * Check if a debugger is attached.
564 */
565 if ( !pVM->dbgf.s.fAttached
566 && !dbgfR3WaitForAttach(pVM, pVCpu, enmEvent))
567 {
568 Log(("dbgfR3EventPrologue: enmEvent=%d - debugger not attached\n", enmEvent));
569 return VERR_DBGF_NOT_ATTACHED;
570 }
571
572 /*
573 * Look thru pending commands and finish those which make sense now.
574 */
575 /** @todo Process/purge pending commands. */
576 //int rc = DBGFR3VMMForcedAction(pVM);
577 return VINF_SUCCESS;
578}
579
580
/**
 * Processes a pending event on the current CPU.
 *
 * This is called by EM in response to VINF_EM_DBG_EVENT.
 *
 * @returns Strict VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context per CPU structure.
 *
 * @thread EMT(pVCpu)
 */
VMMR3_INT_DECL(VBOXSTRICTRC) DBGFR3EventHandlePending(PVM pVM, PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_DBGF);

    /*
     * Check that we've got an event first.
     */
    /* (Both are benign no-event situations, hence VINF_SUCCESS.) */
    AssertReturn(pVCpu->dbgf.s.cEvents > 0, VINF_SUCCESS);
    AssertReturn(pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState == DBGFEVENTSTATE_CURRENT, VINF_SUCCESS);
    PDBGFEVENT pEvent = &pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].Event;

    /*
     * Make sure we've got a debugger and are allowed to speak to it.
     */
    int rc = dbgfR3EventPrologue(pVM, pVCpu, pEvent->enmType);
    if (RT_FAILURE(rc))
    {
        /** @todo drop them events?   */
        return rc; /** @todo this will cause trouble if we're here via an FF! */
    }

    /*
     * Send the event and mark it as ignore.
     * ASSUMES no new events get generated while dbgfR3CpuWait is executing!
     */
    VBOXSTRICTRC rcStrict = dbgfR3SendEventWaitEx(pVM, pVCpu, pEvent->enmType, pEvent->enmCtx, &pEvent->u, sizeof(pEvent->u));
    pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState = DBGFEVENTSTATE_IGNORE;
    return rcStrict;
}
622
623
624/**
625 * Send a generic debugger event which takes no data.
626 *
627 * @returns VBox status code.
628 * @param pVM The cross context VM structure.
629 * @param enmEvent The event to send.
630 * @internal
631 */
632VMMR3DECL(int) DBGFR3Event(PVM pVM, DBGFEVENTTYPE enmEvent)
633{
634 PVMCPU pVCpu = VMMGetCpu(pVM);
635 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
636
637 /*
638 * Do stepping filtering.
639 */
640 /** @todo Would be better if we did some of this inside the execution
641 * engines. */
642 if ( enmEvent == DBGFEVENT_STEPPED
643 || enmEvent == DBGFEVENT_STEPPED_HYPER)
644 {
645 if (!dbgfStepAreWeThereYet(pVM, pVCpu))
646 return VINF_EM_DBG_STEP;
647 }
648
649 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
650 if (RT_FAILURE(rc))
651 return rc;
652
653 /*
654 * Send the event and process the reply communication.
655 */
656 return dbgfR3SendEventWait(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID);
657}
658
659
660/**
661 * Send a debugger event which takes the full source file location.
662 *
663 * @returns VBox status code.
664 * @param pVM The cross context VM structure.
665 * @param enmEvent The event to send.
666 * @param pszFile Source file.
667 * @param uLine Line number in source file.
668 * @param pszFunction Function name.
669 * @param pszFormat Message which accompanies the event.
670 * @param ... Message arguments.
671 * @internal
672 */
673VMMR3DECL(int) DBGFR3EventSrc(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszFile, unsigned uLine, const char *pszFunction, const char *pszFormat, ...)
674{
675 va_list args;
676 va_start(args, pszFormat);
677 int rc = DBGFR3EventSrcV(pVM, enmEvent, pszFile, uLine, pszFunction, pszFormat, args);
678 va_end(args);
679 return rc;
680}
681
682
/**
 * Send a debugger event which takes the full source file location.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   enmEvent    The event to send.
 * @param   pszFile     Source file.
 * @param   uLine       Line number in source file.
 * @param   pszFunction Function name.
 * @param   pszFormat   Message which accompanies the event.
 * @param   args        Message arguments.
 * @internal
 */
VMMR3DECL(int) DBGFR3EventSrcV(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszFile, unsigned uLine, const char *pszFunction, const char *pszFormat, va_list args)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);
    AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT); /* must be called on an EMT */

    int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Format the message.
     */
    char *pszMessage = NULL;    /* stays NULL when no format string is given */
    char szMessage[8192];
    if (pszFormat && *pszFormat)
    {
        pszMessage = &szMessage[0];
        RTStrPrintfV(szMessage, sizeof(szMessage), pszFormat, args);
    }

    /*
     * Send the event and process the reply communication.
     */
    /* Note: the string pointers (stack buffer included) stay valid while this
       EMT waits in dbgfR3CpuWait for the debugger to respond. */
    DBGFEVENT DbgEvent; /** @todo split up DBGFEVENT so we can skip the dead wait on the stack? */
    DbgEvent.u.Src.pszFile = pszFile;
    DbgEvent.u.Src.uLine = uLine;
    DbgEvent.u.Src.pszFunction = pszFunction;
    DbgEvent.u.Src.pszMessage = pszMessage;
    return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID, &DbgEvent.u, sizeof(DbgEvent.u.Src));
}
726
727
728/**
729 * Send a debugger event which takes the two assertion messages.
730 *
731 * @returns VBox status code.
732 * @param pVM The cross context VM structure.
733 * @param enmEvent The event to send.
734 * @param pszMsg1 First assertion message.
735 * @param pszMsg2 Second assertion message.
736 */
737VMMR3_INT_DECL(int) DBGFR3EventAssertion(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszMsg1, const char *pszMsg2)
738{
739 PVMCPU pVCpu = VMMGetCpu(pVM);
740 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
741
742 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
743 if (RT_FAILURE(rc))
744 return rc;
745
746 /*
747 * Send the event and process the reply communication.
748 */
749 DBGFEVENT DbgEvent;
750 DbgEvent.u.Assert.pszMsg1 = pszMsg1;
751 DbgEvent.u.Assert.pszMsg2 = pszMsg2;
752 return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID, &DbgEvent.u, sizeof(DbgEvent.u.Assert));
753}
754
755
/**
 * Breakpoint was hit somewhere.
 * Figure out which breakpoint it is and notify the debugger.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   enmEvent    DBGFEVENT_BREAKPOINT_HYPER or DBGFEVENT_BREAKPOINT.
 */
VMMR3_INT_DECL(int) DBGFR3EventBreakpoint(PVM pVM, DBGFEVENTTYPE enmEvent)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);
    AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);

    int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Send the event and process the reply communication.
     */
    /* Consume the active breakpoint ID (resetting it to ~0U) and report it
       directly when valid. */
    DBGFEVENT DbgEvent;
    RTUINT iBp = DbgEvent.u.Bp.iBp = pVCpu->dbgf.s.iActiveBp;
    pVCpu->dbgf.s.iActiveBp = ~0U;
    if (iBp != ~0U)
    {
        DbgEvent.enmCtx = DBGFEVENTCTX_RAW;
        return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_RAW, &DbgEvent.u, sizeof(DbgEvent.u.Bp));
    }

    AssertFailed(); /** @todo this should be obsolete now... */

    /* REM breakpoints have to be searched for. */
#if 0 /** @todo get flat PC api! */
    uint32_t eip = CPUMGetGuestEIP(pVM);
#else
    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    RTGCPTR eip = pCtx->rip + pCtx->cs.u64Base;
#endif
    /* Fallback: match the flat guest IP against the registered REM breakpoints. */
    for (size_t i = 0; i < RT_ELEMENTS(pVM->dbgf.s.aBreakpoints); i++)
        if (   pVM->dbgf.s.aBreakpoints[i].enmType == DBGFBPTYPE_REM
            && pVM->dbgf.s.aBreakpoints[i].u.Rem.GCPtr == eip)
        {
            DbgEvent.u.Bp.iBp = pVM->dbgf.s.aBreakpoints[i].iBp;
            break;
        }
    AssertMsg(DbgEvent.u.Bp.iBp != ~0U, ("eip=%08x\n", eip));
    return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_REM, &DbgEvent.u, sizeof(DbgEvent.u.Bp));
}
804
805
806/**
807 * Returns whether the given vCPU is waiting for the debugger.
808 *
809 * @returns Flags whether the vCPU is currently waiting for the debugger.
810 * @param pUVCpu The user mode vCPU structure.
811 */
812DECLINLINE(bool) dbgfR3CpuIsHalted(PUVMCPU pUVCpu)
813{
814 return ASMAtomicReadBool(&pUVCpu->dbgf.s.fStopped);
815}
816
817
818/**
819 * Checks whether the given vCPU is waiting in the debugger.
820 *
821 * @returns Flag whether the indicated vCPU is halted, when VMCPUID_ALL
822 * is given true is returned when at least one vCPU is halted.
823 * @param pUVM The user mode VM structure.
824 * @param idCpu The CPU ID to check, VMCPUID_ALL to check all vCPUs.
825 */
826DECLINLINE(bool) dbgfR3CpuAreAnyHaltedByCpuId(PUVM pUVM, VMCPUID idCpu)
827{
828 AssertReturn(idCpu < pUVM->cCpus || idCpu == VMCPUID_ALL, false);
829
830 /* Check that either the given vCPU or all are actually halted. */
831 if (idCpu != VMCPUID_ALL)
832 return dbgfR3CpuIsHalted(&pUVM->aCpus[idCpu]);
833
834 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
835 if (dbgfR3CpuIsHalted(&pUVM->aCpus[i]))
836 return true;
837 return false;
838}
839
840
/**
 * Gets the pending debug command for this EMT/CPU, replacing it with
 * DBGFCMD_NO_COMMAND.
 *
 * @returns Pending command.
 * @param   pUVCpu  The user mode virtual CPU structure.
 * @thread EMT(pUVCpu)
 */
DECLINLINE(DBGFCMD) dbgfR3CpuGetCmd(PUVMCPU pUVCpu)
{
    /* Atomically swap in NO_COMMAND so each command is consumed exactly once. */
    DBGFCMD enmCmd = (DBGFCMD)ASMAtomicXchgU32((uint32_t volatile *)(void *)&pUVCpu->dbgf.s.enmDbgfCmd, DBGFCMD_NO_COMMAND);
    Log2(("DBGF: Getting command: %d\n", enmCmd));
    return enmCmd;
}
855
856
/**
 * Send a debug command to a CPU, making sure to notify it.
 *
 * @returns VBox status code.
 * @param   pUVCpu  The user mode virtual CPU structure.
 * @param   enmCmd  The command to submit to the CPU.
 */
DECLINLINE(int) dbgfR3CpuSetCmdAndNotify(PUVMCPU pUVCpu, DBGFCMD enmCmd)
{
    Log2(("DBGF: Setting command to %d\n", enmCmd));
    Assert(enmCmd != DBGFCMD_NO_COMMAND);
    /* The previous command must already have been consumed (dbgfR3CpuGetCmd). */
    AssertMsg(pUVCpu->dbgf.s.enmDbgfCmd == DBGFCMD_NO_COMMAND, ("enmCmd=%d enmDbgfCmd=%d\n", enmCmd, pUVCpu->dbgf.s.enmDbgfCmd));

    /* Publish the command before raising the force flag that the EMT polls. */
    ASMAtomicWriteU32((uint32_t volatile *)(void *)&pUVCpu->dbgf.s.enmDbgfCmd, enmCmd);
    VMCPU_FF_SET(pUVCpu->pVCpu, VMCPU_FF_DBGF);

    /* Poke the EMT so it notices the new force flag. */
    VMR3NotifyCpuFFU(pUVCpu, 0 /*fFlags*/);
    return VINF_SUCCESS;
}
876
877
878/**
879 * Waits for the debugger to respond.
880 *
881 * @returns VBox status code. (clearify)
882 * @param pVCpu The cross context vCPU structure.
883 */
884static int dbgfR3CpuWait(PVMCPU pVCpu)
885{
886 PVM pVM = pVCpu->CTX_SUFF(pVM);
887 PUVMCPU pUVCpu = pVCpu->pUVCpu;
888
889 LogFlow(("dbgfR3CpuWait:\n"));
890 int rcRet = VINF_SUCCESS;
891
892 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, true);
893
894 /*
895 * Waits for the debugger to reply (i.e. issue an command).
896 */
897 for (;;)
898 {
899 /*
900 * Wait.
901 */
902 for (;;)
903 {
904 /*
905 * Process forced flags before we go sleep.
906 */
907 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_DBGF | VMCPU_FF_REQUEST)
908 || VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VMCPU_FF_REQUEST | VM_FF_CHECK_VM_STATE))
909 {
910 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF))
911 break;
912
913 int rc;
914 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
915 rc = VMMR3EmtRendezvousFF(pVM, pVCpu);
916 else if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
917 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
918 {
919 LogFlow(("dbgfR3CpuWait: Processes requests...\n"));
920 rc = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
921 if (rc == VINF_SUCCESS)
922 rc = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
923 LogFlow(("dbgfR3CpuWait: VMR3ReqProcess -> %Rrc rcRet=%Rrc\n", rc, rcRet));
924 }
925 else if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
926 {
927 VMSTATE enmState = VMR3GetState(pVM);
928 switch (enmState)
929 {
930 case VMSTATE_FATAL_ERROR:
931 case VMSTATE_FATAL_ERROR_LS:
932 case VMSTATE_GURU_MEDITATION:
933 case VMSTATE_GURU_MEDITATION_LS:
934 rc = VINF_EM_SUSPEND;
935 break;
936 case VMSTATE_DESTROYING:
937 rc = VINF_EM_TERMINATE;
938 break;
939 default:
940 rc = VERR_DBGF_IPE_1;
941 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
942 }
943 }
944 else
945 rc = VINF_SUCCESS;
946 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
947 {
948 switch (rc)
949 {
950 case VINF_EM_DBG_BREAKPOINT:
951 case VINF_EM_DBG_STEPPED:
952 case VINF_EM_DBG_STEP:
953 case VINF_EM_DBG_STOP:
954 case VINF_EM_DBG_EVENT:
955 AssertMsgFailed(("rc=%Rrc\n", rc));
956 break;
957
958 /* return straight away */
959 case VINF_EM_TERMINATE:
960 case VINF_EM_OFF:
961 LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rc));
962 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
963 return rc;
964
965 /* remember return code. */
966 default:
967 AssertReleaseMsgFailed(("rc=%Rrc is not in the switch!\n", rc));
968 RT_FALL_THRU();
969 case VINF_EM_RESET:
970 case VINF_EM_SUSPEND:
971 case VINF_EM_HALT:
972 case VINF_EM_RESUME:
973 case VINF_EM_RESCHEDULE:
974 case VINF_EM_RESCHEDULE_REM:
975 case VINF_EM_RESCHEDULE_RAW:
976 if (rc < rcRet || rcRet == VINF_SUCCESS)
977 rcRet = rc;
978 break;
979 }
980 }
981 else if (RT_FAILURE(rc))
982 {
983 LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rc));
984 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
985 return rc;
986 }
987 }
988 else if (pVM->dbgf.s.fAttached)
989 {
990 int rc = VMR3WaitU(pUVCpu);
991 if (RT_FAILURE(rc))
992 {
993 LogFlow(("dbgfR3CpuWait: returns %Rrc (VMR3WaitU)\n", rc));
994 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
995 return rc;
996 }
997 }
998 else
999 {
1000 LogFlow(("dbgfR3CpuWait: Debugger detached, continuing normal execution (%Rrc)\n", rcRet));
1001 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
1002 return rcRet;
1003 }
1004 }
1005
1006 /*
1007 * Process the command.
1008 */
1009 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_DBGF);
1010 bool fResumeExecution;
1011 DBGFCMDDATA CmdData = pUVCpu->dbgf.s.DbgfCmdData;
1012 DBGFCMD enmCmd = dbgfR3CpuGetCmd(pUVCpu);
1013 int rc = dbgfR3CpuCmd(pVCpu, enmCmd, &CmdData, &fResumeExecution);
1014 if (fResumeExecution)
1015 {
1016 if (RT_FAILURE(rc))
1017 rcRet = rc;
1018 else if ( rc >= VINF_EM_FIRST
1019 && rc <= VINF_EM_LAST
1020 && (rc < rcRet || rcRet == VINF_SUCCESS))
1021 rcRet = rc;
1022 LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rcRet));
1023 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
1024 return rcRet;
1025 }
1026 }
1027}
1028
1029
/**
 * Executes a command from the debugger on the calling EMT.
 *
 * The caller is responsible for waiting or resuming execution based on the
 * value returned in the *pfResumeExecution indicator.
 *
 * @returns VBox status code.  For commands that raise a debug event this is
 *          the status of sending that event; commands that need no event
 *          (GO, STEP with steps left) return VINF_SUCCESS / VINF_EM_DBG_STEP
 *          directly.
 * @param   pVCpu               The cross context vCPU structure.
 * @param   enmCmd              The command in question.
 * @param   pCmdData            Pointer to the command data (currently unused).
 * @param   pfResumeExecution   Where to store the resume execution / continue
 *                              waiting indicator.
 */
static int dbgfR3CpuCmd(PVMCPU pVCpu, DBGFCMD enmCmd, PDBGFCMDDATA pCmdData, bool *pfResumeExecution)
{
    RT_NOREF(pCmdData); /* for later */

    /*
     * The cases in this switch returns directly if no event to send.
     */
    DBGFEVENTTYPE enmEvent;
    DBGFEVENTCTX enmCtx = DBGFEVENTCTX_INVALID;
    switch (enmCmd)
    {
        /*
         * Halt is answered by an event saying that we've halted.
         */
        case DBGFCMD_HALT:
        {
            *pfResumeExecution = false;
            enmEvent = DBGFEVENT_HALT_DONE;
            break;
        }


        /*
         * Resume is not answered, we just resume execution.
         */
        case DBGFCMD_GO:
        {
            pVCpu->dbgf.s.fSingleSteppingRaw = false;
            *pfResumeExecution = true;
            return VINF_SUCCESS;
        }

        /** @todo implement (and define) the rest of the commands. */

        /*
         * Single step, with trace into.
         */
        case DBGFCMD_SINGLE_STEP:
        {
            Log2(("Single step\n"));
            PVM pVM = pVCpu->CTX_SUFF(pVM);
            if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_OVER)
            {
                /* Step-over: stepping onto a call-type instruction deepens the
                   tracked call depth so the filter keeps stepping until the
                   matching return. */
                if (dbgfStepGetCurInstrType(pVM, pVCpu) == DBGFSTEPINSTRTYPE_CALL)
                    pVM->dbgf.s.SteppingFilter.uCallDepth++;
            }
            if (pVM->dbgf.s.SteppingFilter.cMaxSteps > 0)
            {
                pVCpu->dbgf.s.fSingleSteppingRaw = true;
                *pfResumeExecution = true;
                return VINF_EM_DBG_STEP;
            }
            /* Stop after zero steps. Nonsense, but whatever. */
            pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;
            *pfResumeExecution = false;
            enmCtx = dbgfR3FigureEventCtx(pVCpu);
            enmEvent = enmCtx != DBGFEVENTCTX_HYPER ? DBGFEVENT_STEPPED : DBGFEVENT_STEPPED_HYPER;
            break;
        }

        /*
         * Default is to send an invalid command event.
         */
        default:
        {
            *pfResumeExecution = false;
            enmEvent = DBGFEVENT_INVALID_COMMAND;
            break;
        }
    }

    /*
     * Send the pending event.
     */
    Log2(("DBGF: Emulation thread: sending event %d\n", enmEvent));
    int rc = dbgfR3SendEventNoWait(pVCpu->CTX_SUFF(pVM), pVCpu, enmEvent, enmCtx);
    /* If sending failed, resume so the EMT doesn't wait for a debugger that
       will never see the event. */
    AssertRCStmt(rc, *pfResumeExecution = true);
    return rc;
}
1121
1122
1123/**
1124 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
1125 * EMT rendezvous worker for DBGFR3Attach - only called on one EMT.}
1126 */
1127static DECLCALLBACK(VBOXSTRICTRC) dbgfR3Attach(PVM pVM, PVMCPU pVCpu, void *pvUser)
1128{
1129 PUVM pUVM = pVM->pUVM;
1130 int *prcAttach = (int *)pvUser;
1131 RT_NOREF(pVCpu);
1132
1133 if (pVM->dbgf.s.fAttached)
1134 {
1135 Log(("dbgfR3Attach: Debugger already attached\n"));
1136 *prcAttach = VERR_DBGF_ALREADY_ATTACHED;
1137 return VINF_SUCCESS;
1138 }
1139
1140 /*
1141 * The per-CPU bits.
1142 */
1143 for (uint32_t i = 0; i < pUVM->cCpus; i++)
1144 {
1145 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1146
1147 pUVCpu->dbgf.s.enmDbgfCmd = DBGFCMD_NO_COMMAND;
1148 RT_ZERO(pUVCpu->dbgf.s.DbgfCmdData);
1149 }
1150
1151 /*
1152 * Init of the VM -> Debugger communication part living in the global VM structure.
1153 */
1154 pUVM->dbgf.s.cDbgEvtMax = pVM->cCpus * 5 + 10; /* Initial size of event ring, increased when being full. */
1155 pUVM->dbgf.s.idxDbgEvtWrite = 0;
1156 pUVM->dbgf.s.idxDbgEvtRead = 0;
1157 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1158 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1159 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1160 int rc;
1161 pUVM->dbgf.s.paDbgEvts = (PDBGFEVENT)MMR3HeapAllocU(pUVM, MM_TAG_DBGF, pUVM->dbgf.s.cDbgEvtMax * sizeof(DBGFEVENT));
1162 if (pUVM->dbgf.s.paDbgEvts)
1163 {
1164 rc = RTSemEventCreate(&pUVM->dbgf.s.hEvtWait);
1165 if (RT_SUCCESS(rc))
1166 {
1167 rc = RTSemFastMutexCreate(&pUVM->dbgf.s.hMtxDbgEvtWr);
1168 if (RT_SUCCESS(rc))
1169 {
1170 rc = RTSemEventMultiCreate(&pUVM->dbgf.s.hEvtRingBufFull);
1171 if (RT_SUCCESS(rc))
1172 {
1173 /*
1174 * At last, set the attached flag.
1175 */
1176 ASMAtomicWriteBool(&pVM->dbgf.s.fAttached, true);
1177 *prcAttach = VINF_SUCCESS;
1178 return VINF_SUCCESS;
1179 }
1180
1181 RTSemFastMutexDestroy(pUVM->dbgf.s.hMtxDbgEvtWr);
1182 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1183 }
1184 RTSemEventDestroy(pUVM->dbgf.s.hEvtWait);
1185 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1186 }
1187 }
1188 else
1189 rc = VERR_NO_MEMORY;
1190
1191 *prcAttach = rc;
1192 return VINF_SUCCESS;
1193}
1194
1195
1196/**
1197 * Attaches a debugger to the specified VM.
1198 *
1199 * Only one debugger at a time.
1200 *
1201 * @returns VBox status code.
1202 * @param pUVM The user mode VM handle.
1203 */
1204VMMR3DECL(int) DBGFR3Attach(PUVM pUVM)
1205{
1206 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1207 PVM pVM = pUVM->pVM;
1208 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1209
1210 /*
1211 * Call the VM, use EMT rendezvous for serialization.
1212 */
1213 int rcAttach = VERR_IPE_UNINITIALIZED_STATUS;
1214 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE | VMMEMTRENDEZVOUS_FLAGS_PRIORITY, dbgfR3Attach, &rcAttach);
1215 if (RT_SUCCESS(rc))
1216 rc = rcAttach;
1217
1218 return rc;
1219}
1220
1221
1222/**
1223 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
1224 * EMT rendezvous worker for DBGFR3Detach - called on all EMTs (why?).}
1225 */
1226static DECLCALLBACK(VBOXSTRICTRC) dbgfR3Detach(PVM pVM, PVMCPU pVCpu, void *pvUser)
1227{
1228 if (pVCpu->idCpu == 0)
1229 {
1230 PUVM pUVM = (PUVM)pvUser;
1231
1232 /*
1233 * Per-CPU cleanup.
1234 */
1235 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1236 {
1237 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1238
1239 pUVCpu->dbgf.s.enmDbgfCmd = DBGFCMD_NO_COMMAND;
1240 RT_ZERO(pUVCpu->dbgf.s.DbgfCmdData);
1241 }
1242
1243 /*
1244 * De-init of the VM -> Debugger communication part living in the global VM structure.
1245 */
1246 if (pUVM->dbgf.s.paDbgEvts)
1247 {
1248 MMR3HeapFree(pUVM->dbgf.s.paDbgEvts);
1249 pUVM->dbgf.s.paDbgEvts = NULL;
1250 }
1251
1252 if (pUVM->dbgf.s.hEvtWait != NIL_RTSEMEVENT)
1253 {
1254 RTSemEventDestroy(pUVM->dbgf.s.hEvtWait);
1255 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1256 }
1257
1258 if (pUVM->dbgf.s.hMtxDbgEvtWr != NIL_RTSEMFASTMUTEX)
1259 {
1260 RTSemFastMutexDestroy(pUVM->dbgf.s.hMtxDbgEvtWr);
1261 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1262 }
1263
1264 if (pUVM->dbgf.s.hEvtRingBufFull != NIL_RTSEMEVENTMULTI)
1265 {
1266 RTSemEventMultiDestroy(pUVM->dbgf.s.hEvtRingBufFull);
1267 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1268 }
1269
1270 pUVM->dbgf.s.cDbgEvtMax = 0;
1271 pUVM->dbgf.s.idxDbgEvtWrite = 0;
1272 pUVM->dbgf.s.idxDbgEvtRead = 0;
1273 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1274 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1275 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1276
1277 ASMAtomicWriteBool(&pVM->dbgf.s.fAttached, false);
1278 }
1279
1280 return VINF_SUCCESS;
1281}
1282
1283
1284/**
1285 * Detaches a debugger from the specified VM.
1286 *
1287 * Caller must be attached to the VM.
1288 *
1289 * @returns VBox status code.
1290 * @param pUVM The user mode VM handle.
1291 */
1292VMMR3DECL(int) DBGFR3Detach(PUVM pUVM)
1293{
1294 LogFlow(("DBGFR3Detach:\n"));
1295
1296 /*
1297 * Validate input. The UVM handle shall be valid, the VM handle might be
1298 * in the processes of being destroyed already, so deal quietly with that.
1299 */
1300 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1301 PVM pVM = pUVM->pVM;
1302 if (!VM_IS_VALID_EXT(pVM))
1303 return VERR_INVALID_VM_HANDLE;
1304
1305 /*
1306 * Check if attached.
1307 */
1308 if (!pVM->dbgf.s.fAttached)
1309 return VERR_DBGF_NOT_ATTACHED;
1310
1311 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE | VMMEMTRENDEZVOUS_FLAGS_PRIORITY, dbgfR3Detach, pUVM);
1312}
1313
1314
/**
 * Wait for a debug event.
 *
 * Consumes one event from the VM -> debugger event ring buffer, blocking on
 * the event semaphore while the ring is empty.
 *
 * @returns VBox status code. Will not return VBOX_INTERRUPTED.
 * @param   pUVM        The user mode VM handle.
 * @param   cMillies    Number of millis to wait.
 * @param   pEvent      Where to store the event data.
 */
VMMR3DECL(int) DBGFR3EventWait(PUVM pUVM, RTMSINTERVAL cMillies, PDBGFEVENT pEvent)
{
    /*
     * Check state.
     */
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);

    RT_BZERO(pEvent, sizeof(*pEvent));

    /*
     * Wait for an event to arrive if there are none.
     * (Ring is empty when the read index has caught up with the write index;
     * re-check after every semaphore wake-up to filter spurious wake-ups.)
     */
    int rc = VINF_SUCCESS;
    uint32_t idxDbgEvtRead = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtRead);
    if (idxDbgEvtRead == ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite))
    {
        do
        {
            rc = RTSemEventWait(pUVM->dbgf.s.hEvtWait, cMillies);
        } while (   RT_SUCCESS(rc)
                 && idxDbgEvtRead == ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite));
    }

    if (RT_SUCCESS(rc))
    {
        Assert(idxDbgEvtRead != ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite));

        /* RT_MAX guards the modulo against a zero-sized ring. */
        uint32_t const cDbgEvtMax = RT_MAX(1, pUVM->dbgf.s.cDbgEvtMax);
        memcpy(pEvent, &pUVM->dbgf.s.paDbgEvts[idxDbgEvtRead % cDbgEvtMax], sizeof(*pEvent));
        /* Publish the consumed slot by advancing the read index. */
        ASMAtomicWriteU32(&pUVM->dbgf.s.idxDbgEvtRead, (idxDbgEvtRead + 1) % cDbgEvtMax);
    }

    Log2(("DBGFR3EventWait: rc=%Rrc (event type %d)\n", rc, pEvent->enmType));
    return rc;
}
1361
1362
1363/**
1364 * Halts VM execution.
1365 *
1366 * After calling this the VM isn't actually halted till an DBGFEVENT_HALT_DONE
1367 * arrives. Until that time it's not possible to issue any new commands.
1368 *
1369 * @returns VBox status code.
1370 * @retval VWRN_DBGF_ALREADY_HALTED if @a idCpu is VMCPUID_ALL and all vCPUs
1371 * are halted.
1372 * @param pUVM The user mode VM handle.
1373 * @param idCpu The vCPU to halt, VMCPUID_ALL halts all still running vCPUs.
1374 */
1375VMMR3DECL(int) DBGFR3Halt(PUVM pUVM, VMCPUID idCpu)
1376{
1377 /*
1378 * Check state.
1379 */
1380 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1381 PVM pVM = pUVM->pVM;
1382 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1383 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1384 AssertReturn(idCpu == VMCPUID_ALL || idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
1385
1386 /*
1387 * Halt the requested CPUs as needed.
1388 */
1389 int rc;
1390 if (idCpu != VMCPUID_ALL)
1391 {
1392 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
1393 if (!dbgfR3CpuIsHalted(pUVCpu))
1394 {
1395 dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_HALT);
1396 rc = VINF_SUCCESS;
1397 }
1398 else
1399 rc = VWRN_DBGF_ALREADY_HALTED;
1400 }
1401 else
1402 {
1403 rc = VWRN_DBGF_ALREADY_HALTED;
1404 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1405 {
1406 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1407 if (!dbgfR3CpuIsHalted(pUVCpu))
1408 {
1409 dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_HALT);
1410 rc = VINF_SUCCESS;
1411 }
1412 }
1413 }
1414
1415 return rc;
1416}
1417
1418
1419/**
1420 * Checks if any of the specified vCPUs have been halted by the debugger.
1421 *
1422 * @returns True if at least one halted vCPUs.
1423 * @returns False if no halted vCPUs.
1424 * @param pUVM The user mode VM handle.
1425 * @param idCpu The CPU id to check for, VMCPUID_ALL will return true if
1426 * at least a single vCPU is halted in the debugger.
1427 */
1428VMMR3DECL(bool) DBGFR3IsHalted(PUVM pUVM, VMCPUID idCpu)
1429{
1430 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
1431 PVM pVM = pUVM->pVM;
1432 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
1433 AssertReturn(pVM->dbgf.s.fAttached, false);
1434
1435 return dbgfR3CpuAreAnyHaltedByCpuId(pUVM, idCpu);
1436}
1437
1438
1439/**
1440 * Checks if the debugger can wait for events or not.
1441 *
1442 * This function is only used by lazy, multiplexing debuggers. :-)
1443 *
1444 * @returns VBox status code.
1445 * @retval VINF_SUCCESS if waitable.
1446 * @retval VERR_SEM_OUT_OF_TURN if not waitable.
1447 * @retval VERR_INVALID_VM_HANDLE if the VM is being (/ has been) destroyed
1448 * (not asserted) or if the handle is invalid (asserted).
1449 * @retval VERR_DBGF_NOT_ATTACHED if not attached.
1450 *
1451 * @param pUVM The user mode VM handle.
1452 */
1453VMMR3DECL(int) DBGFR3QueryWaitable(PUVM pUVM)
1454{
1455 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1456
1457 /* Note! There is a slight race here, unfortunately. */
1458 PVM pVM = pUVM->pVM;
1459 if (!RT_VALID_PTR(pVM))
1460 return VERR_INVALID_VM_HANDLE;
1461 if (pVM->enmVMState >= VMSTATE_DESTROYING)
1462 return VERR_INVALID_VM_HANDLE;
1463 if (!pVM->dbgf.s.fAttached)
1464 return VERR_DBGF_NOT_ATTACHED;
1465
1466 /** @todo was: if (!RTSemPongShouldWait(...)) return VERR_SEM_OUT_OF_TURN; */
1467 return VINF_SUCCESS;
1468}
1469
1470
1471/**
1472 * Resumes VM execution.
1473 *
1474 * There is no receipt event on this command.
1475 *
1476 * @returns VBox status code.
1477 * @retval VWRN_DBGF_ALREADY_RUNNING if the specified vCPUs are all running.
1478 * @param pUVM The user mode VM handle.
1479 * @param idCpu The vCPU to resume, VMCPUID_ALL resumes all still halted vCPUs.
1480 */
1481VMMR3DECL(int) DBGFR3Resume(PUVM pUVM, VMCPUID idCpu)
1482{
1483 /*
1484 * Validate input and attachment state.
1485 */
1486 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1487 PVM pVM = pUVM->pVM;
1488 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1489 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1490
1491 /*
1492 * Ping the halted emulation threads, telling them to run.
1493 */
1494 int rc = VWRN_DBGF_ALREADY_RUNNING;
1495 if (idCpu != VMCPUID_ALL)
1496 {
1497 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
1498 if (dbgfR3CpuIsHalted(pUVCpu))
1499 {
1500 rc = dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_GO);
1501 AssertRC(rc);
1502 }
1503 }
1504 else
1505 {
1506 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1507 {
1508 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1509 if (dbgfR3CpuIsHalted(pUVCpu))
1510 {
1511 int rc2 = dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_GO);
1512 AssertRC(rc2);
1513 if (rc == VWRN_DBGF_ALREADY_RUNNING || RT_FAILURE(rc2))
1514 rc = rc2;
1515 }
1516 }
1517 }
1518
1519 return rc;
1520}
1521
1522
/**
 * Classifies the current instruction.
 *
 * Used by the stepping filter to track call depth for step-over/step-out
 * style stepping.  Only a tiny hand-rolled decoder is used: prefixes are
 * skipped and the first real opcode byte decides the classification.
 *
 * @returns Type of instruction (CALL-like, RET-like, OTHER), or
 *          DBGFSTEPINSTRTYPE_INVALID if the instruction bytes could not be
 *          read from guest memory.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The current CPU.
 * @thread  EMT(pVCpu)
 */
static DBGFSTEPINSTRTYPE dbgfStepGetCurInstrType(PVM pVM, PVMCPU pVCpu)
{
    /*
     * Read the instruction.
     */
    size_t cbRead = 0;
    uint8_t abOpcode[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
    int rc = PGMR3DbgReadGCPtr(pVM, abOpcode, CPUMGetGuestFlatPC(pVCpu), sizeof(abOpcode) - 1, 0 /*fFlags*/, &cbRead);
    if (RT_SUCCESS(rc))
    {
        /*
         * Do minimal parsing.  No real need to involve the disassembler here.
         */
        uint8_t *pb = abOpcode;
        for (;;)
        {
            switch (*pb++)
            {
                default:
                    return DBGFSTEPINSTRTYPE_OTHER;

                case 0xe8: /* call rel16/32 */
                case 0x9a: /* call farptr */
                case 0xcc: /* int3 */
                case 0xcd: /* int xx */
                // case 0xce: /* into */
                    return DBGFSTEPINSTRTYPE_CALL;

                case 0xc2: /* ret xx */
                case 0xc3: /* ret */
                case 0xca: /* retf xx */
                case 0xcb: /* retf */
                case 0xcf: /* iret */
                    return DBGFSTEPINSTRTYPE_RET;

                case 0xff:
                    /* Group 5: /2 and /3 are indirect calls; everything else
                       (inc/dec/jmp/push) is not call-related. */
                    if (   ((*pb >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) == 2 /* call indir */
                        || ((*pb >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) == 3) /* call indir-farptr */
                        return DBGFSTEPINSTRTYPE_CALL;
                    return DBGFSTEPINSTRTYPE_OTHER;

                case 0x0f: /* two-byte opcode escape */
                    switch (*pb++)
                    {
                        case 0x05: /* syscall */
                        case 0x34: /* sysenter */
                            return DBGFSTEPINSTRTYPE_CALL;
                        case 0x07: /* sysret */
                        case 0x35: /* sysexit */
                            return DBGFSTEPINSTRTYPE_RET;
                    }
                    break;

                /* Must handle some REX prefixes. So we do all normal prefixes. */
                case 0x40: case 0x41: case 0x42: case 0x43: case 0x44: case 0x45: case 0x46: case 0x47:
                case 0x48: case 0x49: case 0x4a: case 0x4b: case 0x4c: case 0x4d: case 0x4e: case 0x4f:
                    /* 0x40..0x4f are REX prefixes only in 64-bit mode; in
                       legacy mode they are inc/dec instructions. */
                    if (!CPUMIsGuestIn64BitCode(pVCpu))
                        return DBGFSTEPINSTRTYPE_OTHER;
                    break;

                case 0x2e: /* CS */
                case 0x36: /* SS */
                case 0x3e: /* DS */
                case 0x26: /* ES */
                case 0x64: /* FS */
                case 0x65: /* GS */
                case 0x66: /* op size */
                case 0x67: /* addr size */
                case 0xf0: /* lock */
                case 0xf2: /* REPNZ */
                case 0xf3: /* REPZ */
                    break; /* prefix - keep scanning the next byte */
            }
        }
    }

    return DBGFSTEPINSTRTYPE_INVALID;
}
1609
1610
/**
 * Checks if the stepping has reached a stop point.
 *
 * Called when raising a stepped event.
 *
 * @returns true if the event should be raised, false if we should take one more
 *          step first.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context per CPU structure of the calling EMT.
 * @thread  EMT(pVCpu)
 */
static bool dbgfStepAreWeThereYet(PVM pVM, PVMCPU pVCpu)
{
    /*
     * Check valid pVCpu and that it matches the CPU one stepping.
     * (Anything else falls through to the final "raise it" return.)
     */
    if (pVCpu)
    {
        if (pVCpu->idCpu == pVM->dbgf.s.SteppingFilter.idCpu)
        {
            /*
             * Increase the number of steps and see if we've reached the max.
             */
            pVM->dbgf.s.SteppingFilter.cSteps++;
            if (pVM->dbgf.s.SteppingFilter.cSteps < pVM->dbgf.s.SteppingFilter.cMaxSteps)
            {
                /*
                 * Check PC and SP address filtering.
                 */
                if (pVM->dbgf.s.SteppingFilter.fFlags & (DBGF_STEP_F_STOP_ON_ADDRESS | DBGF_STEP_F_STOP_ON_STACK_POP))
                {
                    if (   (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
                        && pVM->dbgf.s.SteppingFilter.AddrPc == CPUMGetGuestFlatPC(pVCpu))
                        return true;
                    /* Unsigned subtraction lets SP - AddrStackPop < cbStackPop
                       act as an "SP within [AddrStackPop, +cbStackPop)" range
                       test that also wraps around address 0. */
                    if (   (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
                        &&   CPUMGetGuestFlatSP(pVCpu) - pVM->dbgf.s.SteppingFilter.AddrStackPop
                           < pVM->dbgf.s.SteppingFilter.cbStackPop)
                        return true;
                }

                /*
                 * Do step-over filtering separate from the step-into one.
                 * Step-over tracks call depth so that nested calls are stepped
                 * through without stopping until we are back at depth zero.
                 */
                if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_OVER)
                {
                    DBGFSTEPINSTRTYPE enmType = dbgfStepGetCurInstrType(pVM, pVCpu);
                    switch (enmType)
                    {
                        default:
                            /* Plain instruction at depth zero with no extra
                               stop filters: that's the step-over stop point. */
                            if (   pVM->dbgf.s.SteppingFilter.uCallDepth != 0
                                || (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_FILTER_MASK))
                                break;
                            return true;
                        case DBGFSTEPINSTRTYPE_CALL:
                            if (   (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_CALL)
                                && pVM->dbgf.s.SteppingFilter.uCallDepth == 0)
                                return true;
                            pVM->dbgf.s.SteppingFilter.uCallDepth++;
                            break;
                        case DBGFSTEPINSTRTYPE_RET:
                            if (pVM->dbgf.s.SteppingFilter.uCallDepth == 0)
                            {
                                if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_RET)
                                    return true;
                                /* If after return, we use the cMaxStep limit to stop the next time. */
                                if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_AFTER_RET)
                                    pVM->dbgf.s.SteppingFilter.cMaxSteps = pVM->dbgf.s.SteppingFilter.cSteps + 1;
                            }
                            else if (pVM->dbgf.s.SteppingFilter.uCallDepth > 0)
                                pVM->dbgf.s.SteppingFilter.uCallDepth--;
                            break;
                    }
                    return false;
                }
                /*
                 * Filtered step-into: keep stepping until a call/ret matching
                 * the requested stop filters shows up.
                 */
                else if (   pVM->dbgf.s.SteppingFilter.fFlags
                         & (DBGF_STEP_F_STOP_ON_CALL | DBGF_STEP_F_STOP_ON_RET | DBGF_STEP_F_STOP_AFTER_RET))
                {
                    DBGFSTEPINSTRTYPE enmType = dbgfStepGetCurInstrType(pVM, pVCpu);
                    switch (enmType)
                    {
                        default:
                            break;
                        case DBGFSTEPINSTRTYPE_CALL:
                            if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_CALL)
                                return true;
                            break;
                        case DBGFSTEPINSTRTYPE_RET:
                            if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_RET)
                                return true;
                            /* If after return, we use the cMaxStep limit to stop the next time. */
                            if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_AFTER_RET)
                                pVM->dbgf.s.SteppingFilter.cMaxSteps = pVM->dbgf.s.SteppingFilter.cSteps + 1;
                            break;
                    }
                    return false;
                }
            }
        }
    }

    return true;
}
1716
1717
1718/**
1719 * Step Into.
1720 *
1721 * A single step event is generated from this command.
1722 * The current implementation is not reliable, so don't rely on the event coming.
1723 *
1724 * @returns VBox status code.
1725 * @param pUVM The user mode VM handle.
1726 * @param idCpu The ID of the CPU to single step on.
1727 */
1728VMMR3DECL(int) DBGFR3Step(PUVM pUVM, VMCPUID idCpu)
1729{
1730 return DBGFR3StepEx(pUVM, idCpu, DBGF_STEP_F_INTO, NULL, NULL, 0, 1);
1731}
1732
1733
/**
 * Full fledged step.
 *
 * This extended stepping API allows for doing multiple steps before raising an
 * event, helping implementing step over, step out and other more advanced
 * features.
 *
 * Like the DBGFR3Step() API, this will normally generate a DBGFEVENT_STEPPED or
 * DBGFEVENT_STEPPED_EVENT.  However the stepping may be interrupted by other
 * events, which will abort the stepping.
 *
 * The stop on pop area feature is for safeguarding step out.
 *
 * Please note though, that it will always use stepping and never breakpoints.
 * While this allows for a much greater flexibility it can at times be rather
 * slow.
 *
 * @returns VBox status code.
 * @param   pUVM            The user mode VM handle.
 * @param   idCpu           The ID of the CPU to single step on.
 * @param   fFlags          Flags controlling the stepping, DBGF_STEP_F_XXX.
 *                          Either DBGF_STEP_F_INTO or DBGF_STEP_F_OVER must
 *                          always be specified.
 * @param   pStopPcAddr     Address to stop executing at.  Completely ignored
 *                          unless DBGF_STEP_F_STOP_ON_ADDRESS is specified.
 * @param   pStopPopAddr    Stack address that SP must be lower than when
 *                          performing DBGF_STEP_F_STOP_ON_STACK_POP filtering.
 * @param   cbStopPop       The range starting at @a pStopPopAddr which is
 *                          considered to be within the same thread stack. Note
 *                          that the API allows @a pStopPopAddr and @a cbStopPop
 *                          to form an area that wraps around and it will
 *                          consider the part starting at 0 as included.
 * @param   cMaxSteps       The maximum number of steps to take.  This is to
 *                          prevent stepping for ever, so passing UINT32_MAX is
 *                          not recommended.
 *
 * @remarks The two address arguments must be guest context virtual addresses,
 *          or HMA. The code doesn't make much of a point of out HMA, though.
 */
VMMR3DECL(int) DBGFR3StepEx(PUVM pUVM, VMCPUID idCpu, uint32_t fFlags, PCDBGFADDRESS pStopPcAddr,
                            PCDBGFADDRESS pStopPopAddr, RTGCUINTPTR cbStopPop, uint32_t cMaxSteps)
{
    /*
     * Check state.
     */
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_PARAMETER);
    AssertReturn(!(fFlags & ~DBGF_STEP_F_VALID_MASK), VERR_INVALID_FLAGS);
    /* Exactly one of INTO / OVER must be given. */
    AssertReturn(RT_BOOL(fFlags & DBGF_STEP_F_INTO) != RT_BOOL(fFlags & DBGF_STEP_F_OVER), VERR_INVALID_FLAGS);
    if (fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
    {
        AssertReturn(RT_VALID_PTR(pStopPcAddr), VERR_INVALID_POINTER);
        AssertReturn(DBGFADDRESS_IS_VALID(pStopPcAddr), VERR_INVALID_PARAMETER);
        AssertReturn(DBGFADDRESS_IS_VIRT_GC(pStopPcAddr), VERR_INVALID_PARAMETER);
    }
    /* (This pointer check is repeated inside the if below - harmless redundancy.) */
    AssertReturn(!(fFlags & DBGF_STEP_F_STOP_ON_STACK_POP) || RT_VALID_PTR(pStopPopAddr), VERR_INVALID_POINTER);
    if (fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
    {
        AssertReturn(RT_VALID_PTR(pStopPopAddr), VERR_INVALID_POINTER);
        AssertReturn(DBGFADDRESS_IS_VALID(pStopPopAddr), VERR_INVALID_PARAMETER);
        AssertReturn(DBGFADDRESS_IS_VIRT_GC(pStopPopAddr), VERR_INVALID_PARAMETER);
        AssertReturn(cbStopPop > 0, VERR_INVALID_PARAMETER);
    }

    AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
    PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
    /* Stepping only makes sense while the target vCPU is halted in the debugger. */
    if (RT_LIKELY(dbgfR3CpuIsHalted(pUVCpu)))
    { /* likely */ }
    else
        return VERR_SEM_OUT_OF_TURN;
    Assert(pVM->dbgf.s.SteppingFilter.idCpu == NIL_VMCPUID);

    /*
     * Send the emulation thread a single-step command.
     * A plain step-into needs no filtering, so idCpu stays NIL to disable the
     * filter; everything else records the full filter setup first.
     */
    if (fFlags == DBGF_STEP_F_INTO)
        pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;
    else
        pVM->dbgf.s.SteppingFilter.idCpu = idCpu;
    pVM->dbgf.s.SteppingFilter.fFlags = fFlags;
    if (fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
        pVM->dbgf.s.SteppingFilter.AddrPc = pStopPcAddr->FlatPtr;
    else
        pVM->dbgf.s.SteppingFilter.AddrPc = 0;
    if (fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
    {
        pVM->dbgf.s.SteppingFilter.AddrStackPop = pStopPopAddr->FlatPtr;
        pVM->dbgf.s.SteppingFilter.cbStackPop = cbStopPop;
    }
    else
    {
        /* Neutral values: the wrap-around range test can never match. */
        pVM->dbgf.s.SteppingFilter.AddrStackPop = 0;
        pVM->dbgf.s.SteppingFilter.cbStackPop = RTGCPTR_MAX;
    }

    pVM->dbgf.s.SteppingFilter.cMaxSteps = cMaxSteps;
    pVM->dbgf.s.SteppingFilter.cSteps = 0;
    pVM->dbgf.s.SteppingFilter.uCallDepth = 0;

    Assert(dbgfR3CpuIsHalted(pUVCpu));
    return dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_SINGLE_STEP);
}
1838
1839
1840
/**
 * dbgfR3EventConfigEx argument packet.
 */
typedef struct DBGFR3EVENTCONFIGEXARGS
{
    /** The event configuration entries to apply. */
    PCDBGFEVENTCONFIG   paConfigs;
    /** Number of entries in paConfigs. */
    size_t              cConfigs;
    /** Status code for the worker to report back to the caller. */
    int                 rc;
} DBGFR3EVENTCONFIGEXARGS;
/** Pointer to a dbgfR3EventConfigEx argument packet. */
typedef DBGFR3EVENTCONFIGEXARGS *PDBGFR3EVENTCONFIGEXARGS;
1852
1853
1854/**
1855 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Worker for DBGFR3EventConfigEx.}
1856 */
1857static DECLCALLBACK(VBOXSTRICTRC) dbgfR3EventConfigEx(PVM pVM, PVMCPU pVCpu, void *pvUser)
1858{
1859 if (pVCpu->idCpu == 0)
1860 {
1861 PDBGFR3EVENTCONFIGEXARGS pArgs = (PDBGFR3EVENTCONFIGEXARGS)pvUser;
1862 DBGFEVENTCONFIG volatile const *paConfigs = pArgs->paConfigs;
1863 size_t cConfigs = pArgs->cConfigs;
1864
1865 /*
1866 * Apply the changes.
1867 */
1868 unsigned cChanges = 0;
1869 for (uint32_t i = 0; i < cConfigs; i++)
1870 {
1871 DBGFEVENTTYPE enmType = paConfigs[i].enmType;
1872 AssertReturn(enmType >= DBGFEVENT_FIRST_SELECTABLE && enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
1873 if (paConfigs[i].fEnabled)
1874 cChanges += ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, enmType) == false;
1875 else
1876 cChanges += ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, enmType) == true;
1877 }
1878
1879 /*
1880 * Inform HM about changes.
1881 */
1882 if (cChanges > 0 && HMIsEnabled(pVM))
1883 {
1884 HMR3NotifyDebugEventChanged(pVM);
1885 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
1886 }
1887 }
1888 else if (HMIsEnabled(pVM))
1889 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
1890
1891 return VINF_SUCCESS;
1892}
1893
1894
1895/**
1896 * Configures (enables/disables) multiple selectable debug events.
1897 *
1898 * @returns VBox status code.
1899 * @param pUVM The user mode VM handle.
1900 * @param paConfigs The event to configure and their new state.
1901 * @param cConfigs Number of entries in @a paConfigs.
1902 */
1903VMMR3DECL(int) DBGFR3EventConfigEx(PUVM pUVM, PCDBGFEVENTCONFIG paConfigs, size_t cConfigs)
1904{
1905 /*
1906 * Validate input.
1907 */
1908 size_t i = cConfigs;
1909 while (i-- > 0)
1910 {
1911 AssertReturn(paConfigs[i].enmType >= DBGFEVENT_FIRST_SELECTABLE, VERR_INVALID_PARAMETER);
1912 AssertReturn(paConfigs[i].enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
1913 }
1914 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1915 PVM pVM = pUVM->pVM;
1916 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1917
1918 /*
1919 * Apply the changes in EMT(0) and rendezvous with the other CPUs so they
1920 * can sync their data and execution with new debug state.
1921 */
1922 DBGFR3EVENTCONFIGEXARGS Args = { paConfigs, cConfigs, VINF_SUCCESS };
1923 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING | VMMEMTRENDEZVOUS_FLAGS_PRIORITY,
1924 dbgfR3EventConfigEx, &Args);
1925 if (RT_SUCCESS(rc))
1926 rc = Args.rc;
1927 return rc;
1928}
1929
1930
1931/**
1932 * Enables or disables a selectable debug event.
1933 *
1934 * @returns VBox status code.
1935 * @param pUVM The user mode VM handle.
1936 * @param enmEvent The selectable debug event.
1937 * @param fEnabled The new state.
1938 */
1939VMMR3DECL(int) DBGFR3EventConfig(PUVM pUVM, DBGFEVENTTYPE enmEvent, bool fEnabled)
1940{
1941 /*
1942 * Convert to an array call.
1943 */
1944 DBGFEVENTCONFIG EvtCfg = { enmEvent, fEnabled };
1945 return DBGFR3EventConfigEx(pUVM, &EvtCfg, 1);
1946}
1947
1948
1949/**
1950 * Checks if the given selectable event is enabled.
1951 *
1952 * @returns true if enabled, false if not or invalid input.
1953 * @param pUVM The user mode VM handle.
1954 * @param enmEvent The selectable debug event.
1955 * @sa DBGFR3EventQuery
1956 */
1957VMMR3DECL(bool) DBGFR3EventIsEnabled(PUVM pUVM, DBGFEVENTTYPE enmEvent)
1958{
1959 /*
1960 * Validate input.
1961 */
1962 AssertReturn( enmEvent >= DBGFEVENT_HALT_DONE
1963 && enmEvent < DBGFEVENT_END, false);
1964 Assert( enmEvent >= DBGFEVENT_FIRST_SELECTABLE
1965 || enmEvent == DBGFEVENT_BREAKPOINT
1966 || enmEvent == DBGFEVENT_BREAKPOINT_IO
1967 || enmEvent == DBGFEVENT_BREAKPOINT_MMIO);
1968
1969 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
1970 PVM pVM = pUVM->pVM;
1971 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
1972
1973 /*
1974 * Check the event status.
1975 */
1976 return ASMBitTest(&pVM->dbgf.s.bmSelectedEvents, enmEvent);
1977}
1978
1979
1980/**
1981 * Queries the status of a set of events.
1982 *
1983 * @returns VBox status code.
1984 * @param pUVM The user mode VM handle.
1985 * @param paConfigs The events to query and where to return the state.
1986 * @param cConfigs The number of elements in @a paConfigs.
1987 * @sa DBGFR3EventIsEnabled, DBGF_IS_EVENT_ENABLED
1988 */
1989VMMR3DECL(int) DBGFR3EventQuery(PUVM pUVM, PDBGFEVENTCONFIG paConfigs, size_t cConfigs)
1990{
1991 /*
1992 * Validate input.
1993 */
1994 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1995 PVM pVM = pUVM->pVM;
1996 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1997
1998 for (size_t i = 0; i < cConfigs; i++)
1999 {
2000 DBGFEVENTTYPE enmType = paConfigs[i].enmType;
2001 AssertReturn( enmType >= DBGFEVENT_HALT_DONE
2002 && enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
2003 Assert( enmType >= DBGFEVENT_FIRST_SELECTABLE
2004 || enmType == DBGFEVENT_BREAKPOINT
2005 || enmType == DBGFEVENT_BREAKPOINT_IO
2006 || enmType == DBGFEVENT_BREAKPOINT_MMIO);
2007 paConfigs[i].fEnabled = ASMBitTest(&pVM->dbgf.s.bmSelectedEvents, paConfigs[i].enmType);
2008 }
2009
2010 return VINF_SUCCESS;
2011}
2012
2013
/**
 * dbgfR3InterruptConfigEx argument packet.
 */
typedef struct DBGFR3INTERRUPTCONFIGEXARGS
{
    /** The interrupt breakpoint configuration entries to apply. */
    PCDBGFINTERRUPTCONFIG   paConfigs;
    /** Number of entries in paConfigs. */
    size_t                  cConfigs;
    /** Status code for the worker to report back to the caller. */
    int                     rc;
} DBGFR3INTERRUPTCONFIGEXARGS;
/** Pointer to a dbgfR3InterruptConfigEx argument packet. */
typedef DBGFR3INTERRUPTCONFIGEXARGS *PDBGFR3INTERRUPTCONFIGEXARGS;
2025
2026/**
2027 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
2028 * Worker for DBGFR3InterruptConfigEx.}
2029 */
2030static DECLCALLBACK(VBOXSTRICTRC) dbgfR3InterruptConfigEx(PVM pVM, PVMCPU pVCpu, void *pvUser)
2031{
2032 if (pVCpu->idCpu == 0)
2033 {
2034 PDBGFR3INTERRUPTCONFIGEXARGS pArgs = (PDBGFR3INTERRUPTCONFIGEXARGS)pvUser;
2035 PCDBGFINTERRUPTCONFIG paConfigs = pArgs->paConfigs;
2036 size_t cConfigs = pArgs->cConfigs;
2037
2038 /*
2039 * Apply the changes.
2040 */
2041 bool fChanged = false;
2042 bool fThis;
2043 for (uint32_t i = 0; i < cConfigs; i++)
2044 {
2045 /*
2046 * Hardware interrupts.
2047 */
2048 if (paConfigs[i].enmHardState == DBGFINTERRUPTSTATE_ENABLED)
2049 {
2050 fChanged |= fThis = ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmHardIntBreakpoints, paConfigs[i].iInterrupt) == false;
2051 if (fThis)
2052 {
2053 Assert(pVM->dbgf.s.cHardIntBreakpoints < 256);
2054 pVM->dbgf.s.cHardIntBreakpoints++;
2055 }
2056 }
2057 else if (paConfigs[i].enmHardState == DBGFINTERRUPTSTATE_DISABLED)
2058 {
2059 fChanged |= fThis = ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmHardIntBreakpoints, paConfigs[i].iInterrupt) == true;
2060 if (fThis)
2061 {
2062 Assert(pVM->dbgf.s.cHardIntBreakpoints > 0);
2063 pVM->dbgf.s.cHardIntBreakpoints--;
2064 }
2065 }
2066
2067 /*
2068 * Software interrupts.
2069 */
2070 if (paConfigs[i].enmHardState == DBGFINTERRUPTSTATE_ENABLED)
2071 {
2072 fChanged |= fThis = ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSoftIntBreakpoints, paConfigs[i].iInterrupt) == false;
2073 if (fThis)
2074 {
2075 Assert(pVM->dbgf.s.cSoftIntBreakpoints < 256);
2076 pVM->dbgf.s.cSoftIntBreakpoints++;
2077 }
2078 }
2079 else if (paConfigs[i].enmSoftState == DBGFINTERRUPTSTATE_DISABLED)
2080 {
2081 fChanged |= fThis = ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSoftIntBreakpoints, paConfigs[i].iInterrupt) == true;
2082 if (fThis)
2083 {
2084 Assert(pVM->dbgf.s.cSoftIntBreakpoints > 0);
2085 pVM->dbgf.s.cSoftIntBreakpoints--;
2086 }
2087 }
2088 }
2089
2090 /*
2091 * Update the event bitmap entries.
2092 */
2093 if (pVM->dbgf.s.cHardIntBreakpoints > 0)
2094 fChanged |= ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_HARDWARE) == false;
2095 else
2096 fChanged |= ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_HARDWARE) == true;
2097
2098 if (pVM->dbgf.s.cSoftIntBreakpoints > 0)
2099 fChanged |= ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_SOFTWARE) == false;
2100 else
2101 fChanged |= ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_SOFTWARE) == true;
2102
2103 /*
2104 * Inform HM about changes.
2105 */
2106 if (fChanged && HMIsEnabled(pVM))
2107 {
2108 HMR3NotifyDebugEventChanged(pVM);
2109 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2110 }
2111 }
2112 else if (HMIsEnabled(pVM))
2113 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2114
2115 return VINF_SUCCESS;
2116}
2117
2118
2119/**
2120 * Changes
2121 *
2122 * @returns VBox status code.
2123 * @param pUVM The user mode VM handle.
2124 * @param paConfigs The events to query and where to return the state.
2125 * @param cConfigs The number of elements in @a paConfigs.
2126 * @sa DBGFR3InterruptConfigHardware, DBGFR3InterruptConfigSoftware
2127 */
2128VMMR3DECL(int) DBGFR3InterruptConfigEx(PUVM pUVM, PCDBGFINTERRUPTCONFIG paConfigs, size_t cConfigs)
2129{
2130 /*
2131 * Validate input.
2132 */
2133 size_t i = cConfigs;
2134 while (i-- > 0)
2135 {
2136 AssertReturn(paConfigs[i].enmHardState <= DBGFINTERRUPTSTATE_DONT_TOUCH, VERR_INVALID_PARAMETER);
2137 AssertReturn(paConfigs[i].enmSoftState <= DBGFINTERRUPTSTATE_DONT_TOUCH, VERR_INVALID_PARAMETER);
2138 }
2139
2140 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2141 PVM pVM = pUVM->pVM;
2142 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2143
2144 /*
2145 * Apply the changes in EMT(0) and rendezvous with the other CPUs so they
2146 * can sync their data and execution with new debug state.
2147 */
2148 DBGFR3INTERRUPTCONFIGEXARGS Args = { paConfigs, cConfigs, VINF_SUCCESS };
2149 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING | VMMEMTRENDEZVOUS_FLAGS_PRIORITY,
2150 dbgfR3InterruptConfigEx, &Args);
2151 if (RT_SUCCESS(rc))
2152 rc = Args.rc;
2153 return rc;
2154}
2155
2156
2157/**
2158 * Configures interception of a hardware interrupt.
2159 *
2160 * @returns VBox status code.
2161 * @param pUVM The user mode VM handle.
2162 * @param iInterrupt The interrupt number.
2163 * @param fEnabled Whether interception is enabled or not.
2164 * @sa DBGFR3InterruptSoftwareConfig, DBGFR3InterruptConfigEx
2165 */
2166VMMR3DECL(int) DBGFR3InterruptHardwareConfig(PUVM pUVM, uint8_t iInterrupt, bool fEnabled)
2167{
2168 /*
2169 * Convert to DBGFR3InterruptConfigEx call.
2170 */
2171 DBGFINTERRUPTCONFIG IntCfg = { iInterrupt, (uint8_t)fEnabled, DBGFINTERRUPTSTATE_DONT_TOUCH };
2172 return DBGFR3InterruptConfigEx(pUVM, &IntCfg, 1);
2173}
2174
2175
2176/**
2177 * Configures interception of a software interrupt.
2178 *
2179 * @returns VBox status code.
2180 * @param pUVM The user mode VM handle.
2181 * @param iInterrupt The interrupt number.
2182 * @param fEnabled Whether interception is enabled or not.
2183 * @sa DBGFR3InterruptHardwareConfig, DBGFR3InterruptConfigEx
2184 */
2185VMMR3DECL(int) DBGFR3InterruptSoftwareConfig(PUVM pUVM, uint8_t iInterrupt, bool fEnabled)
2186{
2187 /*
2188 * Convert to DBGFR3InterruptConfigEx call.
2189 */
2190 DBGFINTERRUPTCONFIG IntCfg = { iInterrupt, DBGFINTERRUPTSTATE_DONT_TOUCH, (uint8_t)fEnabled };
2191 return DBGFR3InterruptConfigEx(pUVM, &IntCfg, 1);
2192}
2193
2194
2195/**
2196 * Checks whether interception is enabled for a hardware interrupt.
2197 *
2198 * @returns true if enabled, false if not or invalid input.
2199 * @param pUVM The user mode VM handle.
2200 * @param iInterrupt The interrupt number.
2201 * @sa DBGFR3InterruptSoftwareIsEnabled, DBGF_IS_HARDWARE_INT_ENABLED,
2202 * DBGF_IS_SOFTWARE_INT_ENABLED
2203 */
2204VMMR3DECL(int) DBGFR3InterruptHardwareIsEnabled(PUVM pUVM, uint8_t iInterrupt)
2205{
2206 /*
2207 * Validate input.
2208 */
2209 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2210 PVM pVM = pUVM->pVM;
2211 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2212
2213 /*
2214 * Check it.
2215 */
2216 return ASMBitTest(&pVM->dbgf.s.bmHardIntBreakpoints, iInterrupt);
2217}
2218
2219
2220/**
2221 * Checks whether interception is enabled for a software interrupt.
2222 *
2223 * @returns true if enabled, false if not or invalid input.
2224 * @param pUVM The user mode VM handle.
2225 * @param iInterrupt The interrupt number.
2226 * @sa DBGFR3InterruptHardwareIsEnabled, DBGF_IS_SOFTWARE_INT_ENABLED,
2227 * DBGF_IS_HARDWARE_INT_ENABLED,
2228 */
2229VMMR3DECL(int) DBGFR3InterruptSoftwareIsEnabled(PUVM pUVM, uint8_t iInterrupt)
2230{
2231 /*
2232 * Validate input.
2233 */
2234 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2235 PVM pVM = pUVM->pVM;
2236 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2237
2238 /*
2239 * Check it.
2240 */
2241 return ASMBitTest(&pVM->dbgf.s.bmSoftIntBreakpoints, iInterrupt);
2242}
2243
2244
2245
2246/**
2247 * Call this to single step programmatically.
2248 *
2249 * You must pass down the return code to the EM loop! That's
2250 * where the actual single stepping take place (at least in the
2251 * current implementation).
2252 *
2253 * @returns VINF_EM_DBG_STEP
2254 *
2255 * @param pVCpu The cross context virtual CPU structure.
2256 *
2257 * @thread VCpu EMT
2258 * @internal
2259 */
2260VMMR3_INT_DECL(int) DBGFR3PrgStep(PVMCPU pVCpu)
2261{
2262 VMCPU_ASSERT_EMT(pVCpu);
2263
2264 pVCpu->dbgf.s.fSingleSteppingRaw = true;
2265 return VINF_EM_DBG_STEP;
2266}
2267
2268
2269/**
2270 * Inject an NMI into a running VM (only VCPU 0!)
2271 *
2272 * @returns VBox status code.
2273 * @param pUVM The user mode VM structure.
2274 * @param idCpu The ID of the CPU to inject the NMI on.
2275 */
2276VMMR3DECL(int) DBGFR3InjectNMI(PUVM pUVM, VMCPUID idCpu)
2277{
2278 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2279 PVM pVM = pUVM->pVM;
2280 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2281 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
2282
2283 /** @todo Implement generic NMI injection. */
2284 /** @todo NEM: NMI injection */
2285 if (!HMIsEnabled(pVM))
2286 return VERR_NOT_SUP_BY_NEM;
2287
2288 VMCPU_FF_SET(pVM->apCpusR3[idCpu], VMCPU_FF_INTERRUPT_NMI);
2289 return VINF_SUCCESS;
2290}
2291
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette