VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/DBGF.cpp@ 93787

Last change on this file since 93787 was 93787, checked in by vboxsync, 3 years ago

VMM/{NEM*,DBGF}: Make NEM respond to debug event changes and implement the logic for the darwin backend, bugref:9044

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 77.1 KB
1/* $Id: DBGF.cpp 93787 2022-02-16 11:07:57Z vboxsync $ */
2/** @file
3 * DBGF - Debugger Facility.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_dbgf DBGF - The Debugger Facility
20 *
21 * The purpose of the DBGF is to provide an interface for debuggers to
22 * manipulate the VMM without having to mess up the source code for each of
23 * them. The DBGF is always built in and will always work when a debugger
24 * attaches to the VM. The DBGF provides the basic debugger features, such as
25 * halting execution, handling breakpoints, single step execution, instruction
26 * disassembly, info querying, OS specific diggers, symbol and module
27 * management.
28 *
29 * The interface works in a manner similar to the Win32, Linux and OS/2
30 * debugger interfaces. The interface is asynchronous in nature. This comes
31 * from the fact that the VMM and the debugger run in different threads.
32 * They are referred to as the "emulation thread" and the "debugger thread",
33 * or as the "ping thread" and the "pong thread", respectively. (The last set
34 * of names comes from the use of the Ping-Pong synchronization construct from
35 * the RTSem API.)
36 *
37 * @see grp_dbgf
38 *
39 *
40 * @section sec_dbgf_scenario Usage Scenario
41 *
42 * The debugger starts by attaching to the VM. For practical reasons we limit the
43 * number of concurrently attached debuggers to 1 per VM. The action of
44 * attaching to the VM causes the VM to check and generate debug events.
45 *
46 * The debugger then will wait/poll for debug events and issue commands.
47 *
48 * The waiting and polling is done by the DBGFR3EventWait() function. It will wait
49 * for the emulation thread to send a ping, thus indicating that there is an
50 * event waiting to be processed.
51 *
52 * An event can be a response to a command issued previously, the hitting of a
53 * breakpoint, or running into a bad/fatal VMM condition. The debugger now has
54 * the ping and must respond to the event at hand - the VMM is waiting. This
55 * usually means that the user of the debugger must do something, but it doesn't
56 * have to. The debugger is free to call any DBGF function (nearly at least)
57 * while processing the event.
58 *
59 * Typically the user will issue a request for the execution to be resumed, so
60 * the debugger calls DBGFR3Resume() and goes back to waiting/polling for events.
61 *
62 * When the user eventually terminates the debugging session or selects another
63 * VM, the debugger detaches from the VM. This means that breakpoints are
64 * disabled and that the emulation thread no longer polls for debugger commands.
65 *
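 *
 * A minimal sketch of this scenario from the debugger thread's point of view,
 * assuming it already holds a valid PUVM handle (the 1000 ms timeout and the
 * resume-everything policy are arbitrary illustration choices, not mandated by
 * the API):
 *
 * @code
 *  int rc = DBGFR3Attach(pUVM);
 *  if (RT_SUCCESS(rc))
 *  {
 *      for (;;)
 *      {
 *          DBGFEVENT Event;
 *          rc = DBGFR3EventWait(pUVM, 1000, &Event); // wait up to a second
 *          if (rc == VERR_TIMEOUT)
 *              continue;
 *          if (RT_FAILURE(rc))
 *              break;
 *          // Inspect registers, memory, etc. here while the vCPU is halted.
 *          if (Event.enmType == DBGFEVENT_POWERING_OFF)
 *              break;
 *          DBGFR3Resume(pUVM, VMCPUID_ALL); // let the VM run again
 *      }
 *      DBGFR3Detach(pUVM);
 *  }
 * @endcode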
66 */
67
68
69/*********************************************************************************************************************************
70* Header Files *
71*********************************************************************************************************************************/
72#define LOG_GROUP LOG_GROUP_DBGF
73#include <VBox/vmm/dbgf.h>
74#include <VBox/vmm/selm.h>
75#include <VBox/vmm/em.h>
76#include <VBox/vmm/hm.h>
77#include <VBox/vmm/mm.h>
78#include <VBox/vmm/nem.h>
79#include "DBGFInternal.h"
80#include <VBox/vmm/vm.h>
81#include <VBox/vmm/uvm.h>
82#include <VBox/err.h>
83
84#include <VBox/log.h>
85#include <iprt/semaphore.h>
86#include <iprt/thread.h>
87#include <iprt/asm.h>
88#include <iprt/time.h>
89#include <iprt/assert.h>
90#include <iprt/stream.h>
91#include <iprt/env.h>
92
93
94/*********************************************************************************************************************************
95* Structures and Typedefs *
96*********************************************************************************************************************************/
97/**
98 * Instruction type returned by dbgfStepGetCurInstrType.
99 */
100typedef enum DBGFSTEPINSTRTYPE
101{
102 DBGFSTEPINSTRTYPE_INVALID = 0,
103 DBGFSTEPINSTRTYPE_OTHER,
104 DBGFSTEPINSTRTYPE_RET,
105 DBGFSTEPINSTRTYPE_CALL,
106 DBGFSTEPINSTRTYPE_END,
107 DBGFSTEPINSTRTYPE_32BIT_HACK = 0x7fffffff
108} DBGFSTEPINSTRTYPE;
109
110
111/*********************************************************************************************************************************
112* Internal Functions *
113*********************************************************************************************************************************/
114DECLINLINE(int) dbgfR3SendEventWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx);
115DECLINLINE(DBGFCMD) dbgfR3CpuGetCmd(PUVMCPU pUVCpu);
116static int dbgfR3CpuWait(PVMCPU pVCpu);
117static int dbgfR3CpuCmd(PVMCPU pVCpu, DBGFCMD enmCmd, PDBGFCMDDATA pCmdData, bool *pfResumeExecution);
118static DBGFSTEPINSTRTYPE dbgfStepGetCurInstrType(PVM pVM, PVMCPU pVCpu);
119static bool dbgfStepAreWeThereYet(PVM pVM, PVMCPU pVCpu);
120static int dbgfR3EventHaltAllVCpus(PVM pVM, PVMCPU pVCpuExclude);
121
122
123
124/**
125 * Initializes the DBGF.
126 *
127 * @returns VBox status code.
128 * @param pVM The cross context VM structure.
129 */
130VMMR3_INT_DECL(int) DBGFR3Init(PVM pVM)
131{
132 PUVM pUVM = pVM->pUVM;
133 AssertCompile(sizeof(pUVM->dbgf.s) <= sizeof(pUVM->dbgf.padding));
134 AssertCompile(sizeof(pUVM->aCpus[0].dbgf.s) <= sizeof(pUVM->aCpus[0].dbgf.padding));
135
136 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;
137
138 /*
139 * The usual sideways mountain climbing style of init:
140 */
141 int rc = dbgfR3InfoInit(pUVM); /* (First, initializes the shared critical section.) */
142 if (RT_SUCCESS(rc))
143 {
144 rc = dbgfR3TraceInit(pVM);
145 if (RT_SUCCESS(rc))
146 {
147 rc = dbgfR3RegInit(pUVM);
148 if (RT_SUCCESS(rc))
149 {
150 rc = dbgfR3AsInit(pUVM);
151 if (RT_SUCCESS(rc))
152 {
153 rc = dbgfR3BpInit(pUVM);
154 if (RT_SUCCESS(rc))
155 {
156 rc = dbgfR3OSInit(pUVM);
157 if (RT_SUCCESS(rc))
158 {
159 rc = dbgfR3PlugInInit(pUVM);
160 if (RT_SUCCESS(rc))
161 {
162 rc = dbgfR3BugCheckInit(pVM);
163 if (RT_SUCCESS(rc))
164 {
165#ifdef VBOX_WITH_DBGF_TRACING
166 rc = dbgfR3TracerInit(pVM);
167#endif
168 if (RT_SUCCESS(rc))
169 {
170 return VINF_SUCCESS;
171 }
172 }
173 dbgfR3PlugInTerm(pUVM);
174 }
175 dbgfR3OSTermPart1(pUVM);
176 dbgfR3OSTermPart2(pUVM);
177 }
178 dbgfR3BpTerm(pUVM);
179 }
180 dbgfR3AsTerm(pUVM);
181 }
182 dbgfR3RegTerm(pUVM);
183 }
184 dbgfR3TraceTerm(pVM);
185 }
186 dbgfR3InfoTerm(pUVM);
187 }
188 return rc;
189}
190
191
192/**
193 * Terminates and cleans up resources allocated by the DBGF.
194 *
195 * @returns VBox status code.
196 * @param pVM The cross context VM structure.
197 */
198VMMR3_INT_DECL(int) DBGFR3Term(PVM pVM)
199{
200 PUVM pUVM = pVM->pUVM;
201
202#ifdef VBOX_WITH_DBGF_TRACING
203 dbgfR3TracerTerm(pVM);
204#endif
205 dbgfR3OSTermPart1(pUVM);
206 dbgfR3PlugInTerm(pUVM);
207 dbgfR3OSTermPart2(pUVM);
208 dbgfR3BpTerm(pUVM);
209 dbgfR3AsTerm(pUVM);
210 dbgfR3RegTerm(pUVM);
211 dbgfR3TraceTerm(pVM);
212 dbgfR3InfoTerm(pUVM);
213
214 return VINF_SUCCESS;
215}
216
217
218/**
219 * This is for tstCFGM and others to avoid triggering leak detection.
220 *
221 * @returns VBox status code.
222 * @param pUVM The user mode VM structure.
223 */
224VMMR3DECL(void) DBGFR3TermUVM(PUVM pUVM)
225{
226 dbgfR3InfoTerm(pUVM);
227}
228
229
230/**
231 * Called when the VM is powered off to detach debuggers.
232 *
233 * @param pVM The cross context VM structure.
234 */
235VMMR3_INT_DECL(void) DBGFR3PowerOff(PVM pVM)
236{
237 /*
238 * Send a termination event to any attached debugger.
239 */
240 if (pVM->dbgf.s.fAttached)
241 {
242 PVMCPU pVCpu = VMMGetCpu(pVM);
243 int rc = dbgfR3SendEventWait(pVM, pVCpu, DBGFEVENT_POWERING_OFF, DBGFEVENTCTX_OTHER);
244 AssertLogRelRC(rc);
245
246 /*
247 * Clear the FF so we won't get confused later on.
248 */
249 VM_FF_CLEAR(pVM, VM_FF_DBGF);
250 }
251}
252
253
254/**
255 * Applies relocations to data and code managed by this
256 * component. This function will be called at init and
257 * whenever the VMM needs to relocate itself inside the GC.
258 *
259 * @param pVM The cross context VM structure.
260 * @param offDelta Relocation delta relative to old location.
261 */
262VMMR3_INT_DECL(void) DBGFR3Relocate(PVM pVM, RTGCINTPTR offDelta)
263{
264 dbgfR3TraceRelocate(pVM);
265 dbgfR3AsRelocate(pVM->pUVM, offDelta);
266}
267
268
269/**
270 * Waits a little while for a debugger to attach.
271 *
272 * @returns True if a debugger has attached.
273 * @param pVM The cross context VM structure.
274 * @param pVCpu The cross context per CPU structure.
275 * @param enmEvent Event.
276 *
277 * @thread EMT(pVCpu)
278 */
279bool dbgfR3WaitForAttach(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmEvent)
280{
281 /*
282 * First a message.
283 */
284#if !defined(DEBUG)
285 int cWait = 10;
286#else
287 int cWait = RTEnvExist("VBOX_DBGF_NO_WAIT_FOR_ATTACH")
288 || ( !VM_IS_RAW_MODE_ENABLED(pVM)
289 && ( enmEvent == DBGFEVENT_ASSERTION_HYPER
290 || enmEvent == DBGFEVENT_FATAL_ERROR)
291 && !RTEnvExist("VBOX_DBGF_WAIT_FOR_ATTACH"))
292 ? 10
293 : 150;
294#endif
295 RTStrmPrintf(g_pStdErr,
296 "DBGF: No debugger attached, waiting %d second%s for one to attach (event=%d)\n"
297#ifdef DEBUG
298 " Set VBOX_DBGF_NO_WAIT_FOR_ATTACH=1 for short wait or VBOX_DBGF_WAIT_FOR_ATTACH=1 longer.\n"
299#endif
300 ,
301 cWait / 10, cWait != 10 ? "s" : "", enmEvent);
302 RTStrmFlush(g_pStdErr);
303 while (cWait > 0)
304 {
305 RTThreadSleep(100);
306 if (pVM->dbgf.s.fAttached)
307 {
308 RTStrmPrintf(g_pStdErr, "Attached!\n");
309 RTStrmFlush(g_pStdErr);
310 return true;
311 }
312
313 /* Process rendezvous (debugger attaching involves such). */
314 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
315 {
316 int rc = VMMR3EmtRendezvousFF(pVM, pVCpu); AssertRC(rc);
317 if (rc != VINF_SUCCESS)
318 {
319 /** @todo Ignoring these could be bad. */
320 RTStrmPrintf(g_pStdErr, "[rcRendezvous=%Rrc, ignored!]", rc);
321 RTStrmFlush(g_pStdErr);
322 }
323 }
324
325 /* Process priority stuff. */
326 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
327 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
328 {
329 int rc = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, true /*fPriorityOnly*/);
330 if (rc == VINF_SUCCESS)
331 rc = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, true /*fPriorityOnly*/);
332 if (rc != VINF_SUCCESS)
333 {
334 /** @todo Ignoring these could be bad. */
335 RTStrmPrintf(g_pStdErr, "[rcReq=%Rrc, ignored!]", rc);
336 RTStrmFlush(g_pStdErr);
337 }
338 }
339
340 /* next */
341 if (!(cWait % 10))
342 {
343 RTStrmPrintf(g_pStdErr, "%d.", cWait / 10);
344 RTStrmFlush(g_pStdErr);
345 }
346 cWait--;
347 }
348
349 RTStrmPrintf(g_pStdErr, "Stopping the VM!\n");
350 RTStrmFlush(g_pStdErr);
351 return false;
352}
353
354
355/**
356 * Forced action callback.
357 *
358 * The VMM will call this from its main loop when either VM_FF_DBGF or
359 * VMCPU_FF_DBGF is set.
360 *
361 * The function checks for and executes pending commands from the debugger.
362 * Then it checks for pending debug events and serves these.
363 *
364 * @returns VINF_SUCCESS normally.
365 * @returns VERR_DBGF_RAISE_FATAL_ERROR to pretend a fatal error happened.
366 * @param pVM The cross context VM structure.
367 * @param pVCpu The cross context per CPU structure.
368 */
369VMMR3_INT_DECL(int) DBGFR3VMMForcedAction(PVM pVM, PVMCPU pVCpu)
370{
371 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
372
373 /*
374 * Dispatch pending events.
375 */
376 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_DBGF))
377 {
378 if ( pVCpu->dbgf.s.cEvents > 0
379 && pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState == DBGFEVENTSTATE_CURRENT)
380 {
381 rcStrict = DBGFR3EventHandlePending(pVM, pVCpu);
382 /** @todo may end up with VERR_DBGF_NOT_ATTACHED here, which will prove fatal... */
383 }
384
385 /*
386 * Command pending? Process it.
387 */
388 PUVMCPU pUVCpu = pVCpu->pUVCpu;
389 if (pUVCpu->dbgf.s.enmDbgfCmd != DBGFCMD_NO_COMMAND)
390 {
391 bool fResumeExecution;
392 DBGFCMDDATA CmdData = pUVCpu->dbgf.s.DbgfCmdData;
393 DBGFCMD enmCmd = dbgfR3CpuGetCmd(pUVCpu);
394 VBOXSTRICTRC rcStrict2 = dbgfR3CpuCmd(pVCpu, enmCmd, &CmdData, &fResumeExecution);
395 if (!fResumeExecution)
396 rcStrict2 = dbgfR3CpuWait(pVCpu);
397 if ( rcStrict2 != VINF_SUCCESS
398 && ( rcStrict == VINF_SUCCESS
399 || RT_FAILURE(rcStrict2)
400 || rcStrict2 < rcStrict) ) /** @todo oversimplified? */
401 rcStrict = rcStrict2;
402 }
403 }
404
405 return VBOXSTRICTRC_TODO(rcStrict);
406}
407
408
409/**
410 * Try to determine the event context.
411 *
412 * @returns debug event context.
413 * @param pVCpu The cross context vCPU structure.
414 */
415static DBGFEVENTCTX dbgfR3FigureEventCtx(PVMCPU pVCpu)
416{
417 switch (EMGetState(pVCpu))
418 {
419 case EMSTATE_HM:
420 case EMSTATE_NEM:
421 case EMSTATE_DEBUG_GUEST_HM:
422 case EMSTATE_DEBUG_GUEST_NEM:
423 return DBGFEVENTCTX_HM;
424
425 case EMSTATE_IEM:
426 case EMSTATE_RAW:
427 case EMSTATE_IEM_THEN_REM:
428 case EMSTATE_DEBUG_GUEST_IEM:
429 case EMSTATE_DEBUG_GUEST_RAW:
430 return DBGFEVENTCTX_RAW;
431
432
433 case EMSTATE_REM:
434 case EMSTATE_DEBUG_GUEST_REM:
435 return DBGFEVENTCTX_REM;
436
437 case EMSTATE_DEBUG_HYPER:
438 case EMSTATE_GURU_MEDITATION:
439 return DBGFEVENTCTX_HYPER;
440
441 default:
442 return DBGFEVENTCTX_OTHER;
443 }
444}
445
446
447/**
448 * Sends the event to the debugger (i.e. adds it to the event ring buffer).
449 *
450 * @returns VBox status code.
451 * @param pVM The cross context VM structure.
452 * @param pVCpu The CPU sending the event.
453 * @param enmType The event type to send.
454 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
455 * @param pvPayload Event payload (DBGFEVENT::u data), optional.
456 * @param cbPayload The size of the event payload, optional.
457 */
458static int dbgfR3SendEventWorker(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx,
459 void const *pvPayload, size_t cbPayload)
460{
461 PUVM pUVM = pVM->pUVM;
462 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID; /** @todo per vCPU stepping filter. */
463
464 /*
465 * Massage the input a little.
466 */
467 AssertStmt(cbPayload <= RT_SIZEOFMEMB(DBGFEVENT, u), cbPayload = RT_SIZEOFMEMB(DBGFEVENT, u));
468 if (enmCtx == DBGFEVENTCTX_INVALID)
469 enmCtx = dbgfR3FigureEventCtx(pVCpu);
470
471 /*
472 * Put the event into the ring buffer.
473 */
474 RTSemFastMutexRequest(pUVM->dbgf.s.hMtxDbgEvtWr);
475
476 uint32_t const cDbgEvtMax = RT_MAX(1, pUVM->dbgf.s.cDbgEvtMax);
477 uint32_t const idxDbgEvtWrite = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite);
478 uint32_t const idxDbgEvtRead = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtRead);
479 /** @todo Handle full buffer. */ RT_NOREF(idxDbgEvtRead);
480
481 PDBGFEVENT pEvent = &pUVM->dbgf.s.paDbgEvts[idxDbgEvtWrite % cDbgEvtMax];
482
483#ifdef DEBUG
484 ASMMemFill32(pEvent, sizeof(*pEvent), UINT32_C(0xdeadbeef));
485#endif
486 pEvent->enmType = enmType;
487 pEvent->enmCtx = enmCtx;
488 pEvent->idCpu = pVCpu->idCpu;
489 pEvent->uReserved = 0;
490 if (cbPayload)
491 memcpy(&pEvent->u, pvPayload, cbPayload);
492
493 ASMAtomicWriteU32(&pUVM->dbgf.s.idxDbgEvtWrite, (idxDbgEvtWrite + 1) % cDbgEvtMax);
494
495 RTSemFastMutexRelease(pUVM->dbgf.s.hMtxDbgEvtWr);
496
497 /*
498 * Signal the debugger.
499 */
500 return RTSemEventSignal(pUVM->dbgf.s.hEvtWait);
501}
502
503
504/**
505 * Send event and wait for the debugger to respond.
506 *
507 * @returns Strict VBox status code.
508 * @param pVM The cross context VM structure.
509 * @param pVCpu The CPU sending the event.
510 * @param enmType The event type to send.
511 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
512 */
513DECLINLINE(int) dbgfR3SendEventWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx)
514{
515 int rc = dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, NULL, 0);
516 if (RT_SUCCESS(rc))
517 rc = dbgfR3CpuWait(pVCpu);
518 return rc;
519}
520
521
522/**
523 * Send event and wait for the debugger to respond, extended version.
524 *
525 * @returns Strict VBox status code.
526 * @param pVM The cross context VM structure.
527 * @param pVCpu The CPU sending the event.
528 * @param enmType The event type to send.
529 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
530 * @param pvPayload Event payload (DBGFEVENT::u data), optional.
531 * @param cbPayload The size of the event payload, optional.
532 */
533DECLINLINE(int) dbgfR3SendEventWaitEx(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx,
534 void const *pvPayload, size_t cbPayload)
535{
536 int rc = dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, pvPayload, cbPayload);
537 if (RT_SUCCESS(rc))
538 rc = dbgfR3CpuWait(pVCpu);
539 return rc;
540}
541
542
543/**
544 * Send event but do NOT wait for the debugger.
545 *
546 * Currently only used by dbgfR3CpuCmd().
547 *
548 * @param pVM The cross context VM structure.
549 * @param pVCpu The CPU sending the event.
550 * @param enmType The event type to send.
551 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
552 */
553DECLINLINE(int) dbgfR3SendEventNoWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx)
554{
555 return dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, NULL, 0);
556}
557
558
559/**
560 * The common event prologue code.
561 *
562 * It will make sure someone is attached, and perhaps process any high priority
563 * pending actions (none yet).
564 *
565 * @returns VBox status code.
566 * @param pVM The cross context VM structure.
567 * @param pVCpu The vCPU cross context structure.
568 * @param enmEvent The event to be sent.
569 */
570static int dbgfR3EventPrologue(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmEvent)
571{
572 /*
573 * Check if a debugger is attached.
574 */
575 if ( !pVM->dbgf.s.fAttached
576 && !dbgfR3WaitForAttach(pVM, pVCpu, enmEvent))
577 {
578 Log(("dbgfR3EventPrologue: enmEvent=%d - debugger not attached\n", enmEvent));
579 return VERR_DBGF_NOT_ATTACHED;
580 }
581
582 /*
583 * Look thru pending commands and finish those which make sense now.
584 */
585 /** @todo Process/purge pending commands. */
586 //int rc = DBGFR3VMMForcedAction(pVM);
587 return VINF_SUCCESS;
588}
589
590
591/**
592 * Processes a pending event on the current CPU.
593 *
594 * This is called by EM in response to VINF_EM_DBG_EVENT.
595 *
596 * @returns Strict VBox status code.
597 * @param pVM The cross context VM structure.
598 * @param pVCpu The cross context per CPU structure.
599 *
600 * @thread EMT(pVCpu)
601 */
602VMMR3_INT_DECL(VBOXSTRICTRC) DBGFR3EventHandlePending(PVM pVM, PVMCPU pVCpu)
603{
604 VMCPU_ASSERT_EMT(pVCpu);
605 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_DBGF);
606
607 /*
608 * Check that we've got an event first.
609 */
610 AssertReturn(pVCpu->dbgf.s.cEvents > 0, VINF_SUCCESS);
611 AssertReturn(pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState == DBGFEVENTSTATE_CURRENT, VINF_SUCCESS);
612 PDBGFEVENT pEvent = &pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].Event;
613
614 /*
615 * Make sure we've got a debugger and are allowed to speak to it.
616 */
617 int rc = dbgfR3EventPrologue(pVM, pVCpu, pEvent->enmType);
618 if (RT_FAILURE(rc))
619 {
620 /** @todo drop them events? */
621 return rc; /** @todo this will cause trouble if we're here via an FF! */
622 }
623
624 /*
625 * Send the event and mark it as ignore.
626 * ASSUMES no new events get generated while dbgfR3CpuWait is executing!
627 */
628 VBOXSTRICTRC rcStrict = dbgfR3SendEventWaitEx(pVM, pVCpu, pEvent->enmType, pEvent->enmCtx, &pEvent->u, sizeof(pEvent->u));
629 pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState = DBGFEVENTSTATE_IGNORE;
630 return rcStrict;
631}
632
633
634/**
635 * Send a generic debugger event which takes no data.
636 *
637 * @returns VBox status code.
638 * @param pVM The cross context VM structure.
639 * @param enmEvent The event to send.
640 * @internal
641 */
642VMMR3DECL(int) DBGFR3Event(PVM pVM, DBGFEVENTTYPE enmEvent)
643{
644 PVMCPU pVCpu = VMMGetCpu(pVM);
645 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
646
647 /*
648 * Do stepping filtering.
649 */
650 /** @todo Would be better if we did some of this inside the execution
651 * engines. */
652 if ( enmEvent == DBGFEVENT_STEPPED
653 || enmEvent == DBGFEVENT_STEPPED_HYPER)
654 {
655 if (!dbgfStepAreWeThereYet(pVM, pVCpu))
656 return VINF_EM_DBG_STEP;
657 }
658
659 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
660 if (RT_FAILURE(rc))
661 return rc;
662
663 /*
664 * Send the event and process the reply communication.
665 */
666 return dbgfR3SendEventWait(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID);
667}
668
669
670/**
671 * Send a debugger event which takes the full source file location.
672 *
673 * @returns VBox status code.
674 * @param pVM The cross context VM structure.
675 * @param enmEvent The event to send.
676 * @param pszFile Source file.
677 * @param uLine Line number in source file.
678 * @param pszFunction Function name.
679 * @param pszFormat Message which accompanies the event.
680 * @param ... Message arguments.
681 * @internal
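 *
 * Purely illustrative usage sketch from an EMT (the event type, message and
 * status variable are arbitrary choices for the example):
 * @code
 *  DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, __FILE__, __LINE__, __FUNCTION__,
 *                 "Unrecoverable state, rc=%Rrc", rc);
 * @endcode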
682 */
683VMMR3DECL(int) DBGFR3EventSrc(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszFile, unsigned uLine, const char *pszFunction, const char *pszFormat, ...)
684{
685 va_list args;
686 va_start(args, pszFormat);
687 int rc = DBGFR3EventSrcV(pVM, enmEvent, pszFile, uLine, pszFunction, pszFormat, args);
688 va_end(args);
689 return rc;
690}
691
692
693/**
694 * Send a debugger event which takes the full source file location.
695 *
696 * @returns VBox status code.
697 * @param pVM The cross context VM structure.
698 * @param enmEvent The event to send.
699 * @param pszFile Source file.
700 * @param uLine Line number in source file.
701 * @param pszFunction Function name.
702 * @param pszFormat Message which accompanies the event.
703 * @param args Message arguments.
704 * @internal
705 */
706VMMR3DECL(int) DBGFR3EventSrcV(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszFile, unsigned uLine, const char *pszFunction, const char *pszFormat, va_list args)
707{
708 PVMCPU pVCpu = VMMGetCpu(pVM);
709 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
710
711 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
712 if (RT_FAILURE(rc))
713 return rc;
714
715 /*
716 * Format the message.
717 */
718 char *pszMessage = NULL;
719 char szMessage[8192];
720 if (pszFormat && *pszFormat)
721 {
722 pszMessage = &szMessage[0];
723 RTStrPrintfV(szMessage, sizeof(szMessage), pszFormat, args);
724 }
725
726 /*
727 * Send the event and process the reply communication.
728 */
729 DBGFEVENT DbgEvent; /** @todo split up DBGFEVENT so we can skip the dead wait on the stack? */
730 DbgEvent.u.Src.pszFile = pszFile;
731 DbgEvent.u.Src.uLine = uLine;
732 DbgEvent.u.Src.pszFunction = pszFunction;
733 DbgEvent.u.Src.pszMessage = pszMessage;
734 return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID, &DbgEvent.u, sizeof(DbgEvent.u.Src));
735}
736
737
738/**
739 * Send a debugger event which takes the two assertion messages.
740 *
741 * @returns VBox status code.
742 * @param pVM The cross context VM structure.
743 * @param enmEvent The event to send.
744 * @param pszMsg1 First assertion message.
745 * @param pszMsg2 Second assertion message.
746 */
747VMMR3_INT_DECL(int) DBGFR3EventAssertion(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszMsg1, const char *pszMsg2)
748{
749 PVMCPU pVCpu = VMMGetCpu(pVM);
750 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
751
752 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
753 if (RT_FAILURE(rc))
754 return rc;
755
756 /*
757 * Send the event and process the reply communication.
758 */
759 DBGFEVENT DbgEvent;
760 DbgEvent.u.Assert.pszMsg1 = pszMsg1;
761 DbgEvent.u.Assert.pszMsg2 = pszMsg2;
762 return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID, &DbgEvent.u, sizeof(DbgEvent.u.Assert));
763}
764
765
766/**
767 * Breakpoint was hit somewhere.
768 * Figure out which breakpoint it is and notify the debugger.
769 *
770 * @returns VBox status code.
771 * @param pVM The cross context VM structure.
772 * @param enmEvent DBGFEVENT_BREAKPOINT_HYPER or DBGFEVENT_BREAKPOINT.
773 */
774VMMR3_INT_DECL(int) DBGFR3EventBreakpoint(PVM pVM, DBGFEVENTTYPE enmEvent)
775{
776 PVMCPU pVCpu = VMMGetCpu(pVM);
777 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
778
779 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
780 if (RT_FAILURE(rc))
781 return rc;
782
783 /*
784 * Halt all other vCPUs as well to give the user the ability to inspect other
785 * vCPU states as well.
786 */
787 rc = dbgfR3EventHaltAllVCpus(pVM, pVCpu);
788 if (RT_FAILURE(rc))
789 return rc;
790
791 /*
792 * Send the event and process the reply communication.
793 */
794 DBGFEVENT DbgEvent;
795 DbgEvent.u.Bp.hBp = pVCpu->dbgf.s.hBpActive;
796 pVCpu->dbgf.s.hBpActive = NIL_DBGFBP;
797 if (DbgEvent.u.Bp.hBp != NIL_DBGFBP)
798 {
799 DbgEvent.enmCtx = DBGFEVENTCTX_RAW;
800 return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_RAW, &DbgEvent.u, sizeof(DbgEvent.u.Bp));
801 }
802
803 return VERR_DBGF_IPE_1;
804}
805
806
807/**
808 * Returns whether the given vCPU is waiting for the debugger.
809 *
810 * @returns Flag indicating whether the vCPU is currently waiting for the debugger.
811 * @param pUVCpu The user mode vCPU structure.
812 */
813DECLINLINE(bool) dbgfR3CpuIsHalted(PUVMCPU pUVCpu)
814{
815 return ASMAtomicReadBool(&pUVCpu->dbgf.s.fStopped);
816}
817
818
819/**
820 * Checks whether the given vCPU is waiting in the debugger.
821 *
822 * @returns Flag indicating whether the indicated vCPU is halted; when VMCPUID_ALL
823 * is given, true is returned when at least one vCPU is halted.
824 * @param pUVM The user mode VM structure.
825 * @param idCpu The CPU ID to check, VMCPUID_ALL to check all vCPUs.
826 */
827DECLINLINE(bool) dbgfR3CpuAreAnyHaltedByCpuId(PUVM pUVM, VMCPUID idCpu)
828{
829 AssertReturn(idCpu < pUVM->cCpus || idCpu == VMCPUID_ALL, false);
830
831 /* Check that either the given vCPU or all are actually halted. */
832 if (idCpu != VMCPUID_ALL)
833 return dbgfR3CpuIsHalted(&pUVM->aCpus[idCpu]);
834
835 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
836 if (dbgfR3CpuIsHalted(&pUVM->aCpus[i]))
837 return true;
838 return false;
839}
840
841
842/**
843 * Gets the pending debug command for this EMT/CPU, replacing it with
844 * DBGFCMD_NO_COMMAND.
845 *
846 * @returns Pending command.
847 * @param pUVCpu The user mode virtual CPU structure.
848 * @thread EMT(pUVCpu)
849 */
850DECLINLINE(DBGFCMD) dbgfR3CpuGetCmd(PUVMCPU pUVCpu)
851{
852 DBGFCMD enmCmd = (DBGFCMD)ASMAtomicXchgU32((uint32_t volatile *)(void *)&pUVCpu->dbgf.s.enmDbgfCmd, DBGFCMD_NO_COMMAND);
853 Log2(("DBGF: Getting command: %d\n", enmCmd));
854 return enmCmd;
855}
856
857
858/**
859 * Send a debug command to a CPU, making sure to notify it.
860 *
861 * @returns VBox status code.
862 * @param pUVCpu The user mode virtual CPU structure.
863 * @param enmCmd The command to submit to the CPU.
864 */
865DECLINLINE(int) dbgfR3CpuSetCmdAndNotify(PUVMCPU pUVCpu, DBGFCMD enmCmd)
866{
867 Log2(("DBGF: Setting command to %d\n", enmCmd));
868 Assert(enmCmd != DBGFCMD_NO_COMMAND);
869 AssertMsg(pUVCpu->dbgf.s.enmDbgfCmd == DBGFCMD_NO_COMMAND, ("enmCmd=%d enmDbgfCmd=%d\n", enmCmd, pUVCpu->dbgf.s.enmDbgfCmd));
870
871 ASMAtomicWriteU32((uint32_t volatile *)(void *)&pUVCpu->dbgf.s.enmDbgfCmd, enmCmd);
872 VMCPU_FF_SET(pUVCpu->pVCpu, VMCPU_FF_DBGF);
873
874 VMR3NotifyCpuFFU(pUVCpu, 0 /*fFlags*/);
875 return VINF_SUCCESS;
876}
877
878
879/**
880 * @callback_method_impl{FNVMMEMTRENDEZVOUS}
881 */
882static DECLCALLBACK(VBOXSTRICTRC) dbgfR3EventHaltEmtWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
883{
884 RT_NOREF(pvUser);
885
886 VMCPU_ASSERT_EMT(pVCpu);
887 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
888
889 PUVMCPU pUVCpu = pVCpu->pUVCpu;
890 if ( pVCpu != (PVMCPU)pvUser
891 && !dbgfR3CpuIsHalted(pUVCpu))
892 dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_HALT);
893
894 return VINF_SUCCESS;
895}
896
897
898/**
899 * Halts all vCPUs of the given VM except for the given one.
900 *
901 * @returns VBox status code.
902 * @param pVM The cross context VM structure.
903 * @param pVCpuExclude The vCPU cross context structure of the vCPU to exclude.
904 */
905static int dbgfR3EventHaltAllVCpus(PVM pVM, PVMCPU pVCpuExclude)
906{
907 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3EventHaltEmtWorker, pVCpuExclude);
908}
909
910
911/**
912 * Waits for the debugger to respond.
913 *
914 * @returns VBox status code. (clarify)
915 * @param pVCpu The cross context vCPU structure.
916 */
917static int dbgfR3CpuWait(PVMCPU pVCpu)
918{
919 PVM pVM = pVCpu->CTX_SUFF(pVM);
920 PUVMCPU pUVCpu = pVCpu->pUVCpu;
921
922 LogFlow(("dbgfR3CpuWait:\n"));
923 int rcRet = VINF_SUCCESS;
924
925 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, true);
926
927 /*
928 * Waits for the debugger to reply (i.e. issue a command).
929 */
930 for (;;)
931 {
932 /*
933 * Wait.
934 */
935 for (;;)
936 {
937 /*
938 * Process forced flags before we go sleep.
939 */
940 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_DBGF | VMCPU_FF_REQUEST)
941 || VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VMCPU_FF_REQUEST | VM_FF_CHECK_VM_STATE))
942 {
943 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF))
944 break;
945
946 int rc;
947 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
948 rc = VMMR3EmtRendezvousFF(pVM, pVCpu);
949 else if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
950 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
951 {
952 LogFlow(("dbgfR3CpuWait: Processes requests...\n"));
953 rc = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
954 if (rc == VINF_SUCCESS)
955 rc = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
956 LogFlow(("dbgfR3CpuWait: VMR3ReqProcess -> %Rrc rcRet=%Rrc\n", rc, rcRet));
957 }
958 else if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
959 {
960 VMSTATE enmState = VMR3GetState(pVM);
961 switch (enmState)
962 {
963 case VMSTATE_FATAL_ERROR:
964 case VMSTATE_FATAL_ERROR_LS:
965 case VMSTATE_GURU_MEDITATION:
966 case VMSTATE_GURU_MEDITATION_LS:
967 rc = VINF_EM_SUSPEND;
968 break;
969 case VMSTATE_DESTROYING:
970 rc = VINF_EM_TERMINATE;
971 break;
972 default:
973 rc = VERR_DBGF_IPE_1;
974 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
975 }
976 }
977 else
978 rc = VINF_SUCCESS;
979 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
980 {
981 switch (rc)
982 {
983 case VINF_EM_DBG_BREAKPOINT:
984 case VINF_EM_DBG_STEPPED:
985 case VINF_EM_DBG_STEP:
986 case VINF_EM_DBG_STOP:
987 case VINF_EM_DBG_EVENT:
988 AssertMsgFailed(("rc=%Rrc\n", rc));
989 break;
990
991 /* return straight away */
992 case VINF_EM_TERMINATE:
993 case VINF_EM_OFF:
994 LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rc));
995 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
996 return rc;
997
998 /* remember return code. */
999 default:
1000 AssertReleaseMsgFailed(("rc=%Rrc is not in the switch!\n", rc));
1001 RT_FALL_THRU();
1002 case VINF_EM_RESET:
1003 case VINF_EM_SUSPEND:
1004 case VINF_EM_HALT:
1005 case VINF_EM_RESUME:
1006 case VINF_EM_RESCHEDULE:
1007 case VINF_EM_RESCHEDULE_REM:
1008 case VINF_EM_RESCHEDULE_RAW:
1009 if (rc < rcRet || rcRet == VINF_SUCCESS)
1010 rcRet = rc;
1011 break;
1012 }
1013 }
1014 else if (RT_FAILURE(rc))
1015 {
1016 LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rc));
1017 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
1018 return rc;
1019 }
1020 }
1021 else if (pVM->dbgf.s.fAttached)
1022 {
1023 int rc = VMR3WaitU(pUVCpu);
1024 if (RT_FAILURE(rc))
1025 {
1026 LogFlow(("dbgfR3CpuWait: returns %Rrc (VMR3WaitU)\n", rc));
1027 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
1028 return rc;
1029 }
1030 }
1031 else
1032 {
1033 LogFlow(("dbgfR3CpuWait: Debugger detached, continuing normal execution (%Rrc)\n", rcRet));
1034 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
1035 return rcRet;
1036 }
1037 }
1038
1039 /*
1040 * Process the command.
1041 */
1042 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_DBGF);
1043 bool fResumeExecution;
1044 DBGFCMDDATA CmdData = pUVCpu->dbgf.s.DbgfCmdData;
1045 DBGFCMD enmCmd = dbgfR3CpuGetCmd(pUVCpu);
1046 int rc = dbgfR3CpuCmd(pVCpu, enmCmd, &CmdData, &fResumeExecution);
1047 if (fResumeExecution)
1048 {
1049 if (RT_FAILURE(rc))
1050 rcRet = rc;
1051 else if ( rc >= VINF_EM_FIRST
1052 && rc <= VINF_EM_LAST
1053 && (rc < rcRet || rcRet == VINF_SUCCESS))
1054 rcRet = rc;
1055 LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rcRet));
1056 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
1057 return rcRet;
1058 }
1059 }
1060}
1061
1062
1063/**
1064 * Executes command from debugger.
1065 *
1066 * The caller is responsible for waiting or resuming execution based on the
1067 * value returned in the *pfResumeExecution indicator.
1068 *
1069 * @returns VBox status code. (clarify!)
1070 * @param pVCpu The cross context vCPU structure.
1071 * @param enmCmd The command in question.
1072 * @param pCmdData Pointer to the command data.
1073 * @param pfResumeExecution Where to store the resume execution / continue waiting indicator.
1074 */
1075static int dbgfR3CpuCmd(PVMCPU pVCpu, DBGFCMD enmCmd, PDBGFCMDDATA pCmdData, bool *pfResumeExecution)
1076{
1077 RT_NOREF(pCmdData); /* for later */
1078
1079 /*
1080 * The cases in this switch return directly if there is no event to send.
1081 */
1082 DBGFEVENTTYPE enmEvent;
1083 DBGFEVENTCTX enmCtx = DBGFEVENTCTX_INVALID;
1084 switch (enmCmd)
1085 {
1086 /*
1087 * Halt is answered by an event saying that we've halted.
1088 */
1089 case DBGFCMD_HALT:
1090 {
1091 *pfResumeExecution = false;
1092 enmEvent = DBGFEVENT_HALT_DONE;
1093 break;
1094 }
1095
1096
1097 /*
1098 * Resume is not answered, we just resume execution.
1099 */
1100 case DBGFCMD_GO:
1101 {
1102 pVCpu->dbgf.s.fSingleSteppingRaw = false;
1103 *pfResumeExecution = true;
1104 return VINF_SUCCESS;
1105 }
1106
1107 /** @todo implement (and define) the rest of the commands. */
1108
1109 /*
1110 * Single step, with trace into.
1111 */
1112 case DBGFCMD_SINGLE_STEP:
1113 {
1114 Log2(("Single step\n"));
1115 PVM pVM = pVCpu->CTX_SUFF(pVM);
1116 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_OVER)
1117 {
1118 if (dbgfStepGetCurInstrType(pVM, pVCpu) == DBGFSTEPINSTRTYPE_CALL)
1119 pVM->dbgf.s.SteppingFilter.uCallDepth++;
1120 }
1121 if (pVM->dbgf.s.SteppingFilter.cMaxSteps > 0)
1122 {
1123 pVCpu->dbgf.s.fSingleSteppingRaw = true;
1124 *pfResumeExecution = true;
1125 return VINF_EM_DBG_STEP;
1126 }
1127 /* Stop after zero steps. Nonsense, but whatever. */
1128 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;
1129 *pfResumeExecution = false;
1130 enmCtx = dbgfR3FigureEventCtx(pVCpu);
1131 enmEvent = enmCtx != DBGFEVENTCTX_HYPER ? DBGFEVENT_STEPPED : DBGFEVENT_STEPPED_HYPER;
1132 break;
1133 }
1134
1135 /*
1136 * Default is to send an invalid command event.
1137 */
1138 default:
1139 {
1140 *pfResumeExecution = false;
1141 enmEvent = DBGFEVENT_INVALID_COMMAND;
1142 break;
1143 }
1144 }
1145
1146 /*
1147 * Send the pending event.
1148 */
1149 Log2(("DBGF: Emulation thread: sending event %d\n", enmEvent));
1150 int rc = dbgfR3SendEventNoWait(pVCpu->CTX_SUFF(pVM), pVCpu, enmEvent, enmCtx);
1151 AssertRCStmt(rc, *pfResumeExecution = true);
1152 return rc;
1153}
1154
1155
1156/**
1157 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
1158 * EMT rendezvous worker for DBGFR3Attach - only called on one EMT.}
1159 */
1160static DECLCALLBACK(VBOXSTRICTRC) dbgfR3Attach(PVM pVM, PVMCPU pVCpu, void *pvUser)
1161{
1162 PUVM pUVM = pVM->pUVM;
1163 int *prcAttach = (int *)pvUser;
1164 RT_NOREF(pVCpu);
1165
1166 if (pVM->dbgf.s.fAttached)
1167 {
1168 Log(("dbgfR3Attach: Debugger already attached\n"));
1169 *prcAttach = VERR_DBGF_ALREADY_ATTACHED;
1170 return VINF_SUCCESS;
1171 }
1172
1173 /*
1174 * The per-CPU bits.
1175 */
1176 for (uint32_t i = 0; i < pUVM->cCpus; i++)
1177 {
1178 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1179
1180 pUVCpu->dbgf.s.enmDbgfCmd = DBGFCMD_NO_COMMAND;
1181 RT_ZERO(pUVCpu->dbgf.s.DbgfCmdData);
1182 }
1183
1184 /*
1185 * Init of the VM -> Debugger communication part living in the global VM structure.
1186 */
1187 pUVM->dbgf.s.cDbgEvtMax = pVM->cCpus * 5 + 10; /* Initial size of the event ring buffer, increased when it gets full. */
1188 pUVM->dbgf.s.idxDbgEvtWrite = 0;
1189 pUVM->dbgf.s.idxDbgEvtRead = 0;
1190 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1191 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1192 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1193 int rc;
1194 pUVM->dbgf.s.paDbgEvts = (PDBGFEVENT)MMR3HeapAllocU(pUVM, MM_TAG_DBGF, pUVM->dbgf.s.cDbgEvtMax * sizeof(DBGFEVENT));
1195 if (pUVM->dbgf.s.paDbgEvts)
1196 {
1197 rc = RTSemEventCreate(&pUVM->dbgf.s.hEvtWait);
1198 if (RT_SUCCESS(rc))
1199 {
1200 rc = RTSemFastMutexCreate(&pUVM->dbgf.s.hMtxDbgEvtWr);
1201 if (RT_SUCCESS(rc))
1202 {
1203 rc = RTSemEventMultiCreate(&pUVM->dbgf.s.hEvtRingBufFull);
1204 if (RT_SUCCESS(rc))
1205 {
1206 /*
1207 * At last, set the attached flag.
1208 */
1209 ASMAtomicWriteBool(&pVM->dbgf.s.fAttached, true);
1210 *prcAttach = VINF_SUCCESS;
1211 return VINF_SUCCESS;
1212 }
1213
1214 RTSemFastMutexDestroy(pUVM->dbgf.s.hMtxDbgEvtWr);
1215 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1216 }
1217 RTSemEventDestroy(pUVM->dbgf.s.hEvtWait);
1218 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1219 }
1220 }
1221 else
1222 rc = VERR_NO_MEMORY;
1223
1224 *prcAttach = rc;
1225 return VINF_SUCCESS;
1226}
1227
1228
1229/**
1230 * Attaches a debugger to the specified VM.
1231 *
1232 * Only one debugger at a time.
1233 *
1234 * @returns VBox status code.
1235 * @param pUVM The user mode VM handle.
1236 */
1237VMMR3DECL(int) DBGFR3Attach(PUVM pUVM)
1238{
1239 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1240 PVM pVM = pUVM->pVM;
1241 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1242
1243 /*
1244 * Call the VM, use EMT rendezvous for serialization.
1245 */
1246 int rcAttach = VERR_IPE_UNINITIALIZED_STATUS;
1247 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE | VMMEMTRENDEZVOUS_FLAGS_PRIORITY, dbgfR3Attach, &rcAttach);
1248 if (RT_SUCCESS(rc))
1249 rc = rcAttach;
1250
1251 return rc;
1252}
1253
1254
1255/**
1256 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
1257 * EMT rendezvous worker for DBGFR3Detach - called on all EMTs (why?).}
1258 */
1259static DECLCALLBACK(VBOXSTRICTRC) dbgfR3Detach(PVM pVM, PVMCPU pVCpu, void *pvUser)
1260{
1261 if (pVCpu->idCpu == 0)
1262 {
1263 PUVM pUVM = (PUVM)pvUser;
1264
1265 /*
1266 * Per-CPU cleanup.
1267 */
1268 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1269 {
1270 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1271
1272 pUVCpu->dbgf.s.enmDbgfCmd = DBGFCMD_NO_COMMAND;
1273 RT_ZERO(pUVCpu->dbgf.s.DbgfCmdData);
1274 }
1275
1276 /*
1277 * De-init of the VM -> Debugger communication part living in the global VM structure.
1278 */
1279 if (pUVM->dbgf.s.paDbgEvts)
1280 {
1281 MMR3HeapFree(pUVM->dbgf.s.paDbgEvts);
1282 pUVM->dbgf.s.paDbgEvts = NULL;
1283 }
1284
1285 if (pUVM->dbgf.s.hEvtWait != NIL_RTSEMEVENT)
1286 {
1287 RTSemEventDestroy(pUVM->dbgf.s.hEvtWait);
1288 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1289 }
1290
1291 if (pUVM->dbgf.s.hMtxDbgEvtWr != NIL_RTSEMFASTMUTEX)
1292 {
1293 RTSemFastMutexDestroy(pUVM->dbgf.s.hMtxDbgEvtWr);
1294 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1295 }
1296
1297 if (pUVM->dbgf.s.hEvtRingBufFull != NIL_RTSEMEVENTMULTI)
1298 {
1299 RTSemEventMultiDestroy(pUVM->dbgf.s.hEvtRingBufFull);
1300 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1301 }
1302
1303 pUVM->dbgf.s.cDbgEvtMax = 0;
1304 pUVM->dbgf.s.idxDbgEvtWrite = 0;
1305 pUVM->dbgf.s.idxDbgEvtRead = 0;
1306 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1307 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1308 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1309
1310 ASMAtomicWriteBool(&pVM->dbgf.s.fAttached, false);
1311 }
1312
1313 return VINF_SUCCESS;
1314}
1315
1316
1317/**
1318 * Detaches a debugger from the specified VM.
1319 *
1320 * Caller must be attached to the VM.
1321 *
1322 * @returns VBox status code.
1323 * @param pUVM The user mode VM handle.
1324 */
1325VMMR3DECL(int) DBGFR3Detach(PUVM pUVM)
1326{
1327 LogFlow(("DBGFR3Detach:\n"));
1328
1329 /*
1330 * Validate input. The UVM handle shall be valid; the VM handle might be
1331 * in the process of being destroyed already, so deal quietly with that.
1332 */
1333 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1334 PVM pVM = pUVM->pVM;
1335 if (!VM_IS_VALID_EXT(pVM))
1336 return VERR_INVALID_VM_HANDLE;
1337
1338 /*
1339 * Check if attached.
1340 */
1341 if (!pVM->dbgf.s.fAttached)
1342 return VERR_DBGF_NOT_ATTACHED;
1343
1344 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE | VMMEMTRENDEZVOUS_FLAGS_PRIORITY, dbgfR3Detach, pUVM);
1345}
1346
1347
1348/**
1349 * Wait for a debug event.
1350 *
1351 * @returns VBox status code. Will not return VBOX_INTERRUPTED.
1352 * @param pUVM The user mode VM handle.
1353 * @param cMillies Number of millis to wait.
1354 * @param pEvent Where to store the event data.
1355 */
1356VMMR3DECL(int) DBGFR3EventWait(PUVM pUVM, RTMSINTERVAL cMillies, PDBGFEVENT pEvent)
1357{
1358 /*
1359 * Check state.
1360 */
1361 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1362 PVM pVM = pUVM->pVM;
1363 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1364 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1365
1366 RT_BZERO(pEvent, sizeof(*pEvent));
1367
1368 /*
1369 * Wait for an event to arrive if there are none.
1370 */
1371 int rc = VINF_SUCCESS;
1372 uint32_t idxDbgEvtRead = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtRead);
1373 if (idxDbgEvtRead == ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite))
1374 {
1375 do
1376 {
1377 rc = RTSemEventWait(pUVM->dbgf.s.hEvtWait, cMillies);
1378 } while ( RT_SUCCESS(rc)
1379 && idxDbgEvtRead == ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite));
1380 }
1381
1382 if (RT_SUCCESS(rc))
1383 {
1384 Assert(idxDbgEvtRead != ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite));
1385
1386 uint32_t const cDbgEvtMax = RT_MAX(1, pUVM->dbgf.s.cDbgEvtMax);
1387 memcpy(pEvent, &pUVM->dbgf.s.paDbgEvts[idxDbgEvtRead % cDbgEvtMax], sizeof(*pEvent));
1388 ASMAtomicWriteU32(&pUVM->dbgf.s.idxDbgEvtRead, (idxDbgEvtRead + 1) % cDbgEvtMax);
1389 }
1390
1391 Log2(("DBGFR3EventWait: rc=%Rrc (event type %d)\n", rc, pEvent->enmType));
1392 return rc;
1393}
1394
1395
1396/**
1397 * Halts VM execution.
1398 *
1399 * After calling this the VM isn't actually halted till a DBGFEVENT_HALT_DONE
1400 * event arrives. Until that time it's not possible to issue any new commands.
1401 *
1402 * @returns VBox status code.
1403 * @retval VWRN_DBGF_ALREADY_HALTED if @a idCpu is VMCPUID_ALL and all vCPUs
1404 * are halted.
1405 * @param pUVM The user mode VM handle.
1406 * @param idCpu The vCPU to halt, VMCPUID_ALL halts all still running vCPUs.
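 *
 * A hedged usage sketch from the debugger thread (the 100 ms poll interval is
 * an arbitrary choice; with VMCPUID_ALL typically one DBGFEVENT_HALT_DONE
 * arrives per vCPU that was still running):
 * @code
 *  int rc = DBGFR3Halt(pUVM, VMCPUID_ALL);
 *  if (rc == VINF_SUCCESS)
 *  {
 *      DBGFEVENT Event;
 *      do
 *          rc = DBGFR3EventWait(pUVM, 100, &Event);
 *      while (RT_SUCCESS(rc) && Event.enmType != DBGFEVENT_HALT_DONE);
 *  }
 * @endcode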
1407 */
1408VMMR3DECL(int) DBGFR3Halt(PUVM pUVM, VMCPUID idCpu)
1409{
1410 /*
1411 * Check state.
1412 */
1413 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1414 PVM pVM = pUVM->pVM;
1415 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1416 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1417 AssertReturn(idCpu == VMCPUID_ALL || idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
1418
1419 /*
1420 * Halt the requested CPUs as needed.
1421 */
1422 int rc;
1423 if (idCpu != VMCPUID_ALL)
1424 {
1425 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
1426 if (!dbgfR3CpuIsHalted(pUVCpu))
1427 {
1428 dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_HALT);
1429 rc = VINF_SUCCESS;
1430 }
1431 else
1432 rc = VWRN_DBGF_ALREADY_HALTED;
1433 }
1434 else
1435 {
1436 rc = VWRN_DBGF_ALREADY_HALTED;
1437 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1438 {
1439 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1440 if (!dbgfR3CpuIsHalted(pUVCpu))
1441 {
1442 dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_HALT);
1443 rc = VINF_SUCCESS;
1444 }
1445 }
1446 }
1447
1448 return rc;
1449}
1450
1451
1452/**
1453 * Checks if any of the specified vCPUs have been halted by the debugger.
1454 *
1455 * @returns True if at least one vCPU is halted.
1456 * @returns False if no vCPU is halted.
1457 * @param pUVM The user mode VM handle.
1458 * @param idCpu The CPU id to check for, VMCPUID_ALL will return true if
1459 * at least a single vCPU is halted in the debugger.
1460 */
1461VMMR3DECL(bool) DBGFR3IsHalted(PUVM pUVM, VMCPUID idCpu)
1462{
1463 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
1464 PVM pVM = pUVM->pVM;
1465 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
1466 AssertReturn(pVM->dbgf.s.fAttached, false);
1467
1468 return dbgfR3CpuAreAnyHaltedByCpuId(pUVM, idCpu);
1469}
1470
1471
1472/**
1473 * Checks if the debugger can wait for events or not.
1474 *
1475 * This function is only used by lazy, multiplexing debuggers. :-)
1476 *
1477 * @returns VBox status code.
1478 * @retval VINF_SUCCESS if waitable.
1479 * @retval VERR_SEM_OUT_OF_TURN if not waitable.
1480 * @retval VERR_INVALID_VM_HANDLE if the VM is being (/ has been) destroyed
1481 * (not asserted) or if the handle is invalid (asserted).
1482 * @retval VERR_DBGF_NOT_ATTACHED if not attached.
1483 *
1484 * @param pUVM The user mode VM handle.
1485 */
1486VMMR3DECL(int) DBGFR3QueryWaitable(PUVM pUVM)
1487{
1488 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1489
1490 /* Note! There is a slight race here, unfortunately. */
1491 PVM pVM = pUVM->pVM;
1492 if (!RT_VALID_PTR(pVM))
1493 return VERR_INVALID_VM_HANDLE;
1494 if (pVM->enmVMState >= VMSTATE_DESTROYING)
1495 return VERR_INVALID_VM_HANDLE;
1496 if (!pVM->dbgf.s.fAttached)
1497 return VERR_DBGF_NOT_ATTACHED;
1498
1499 /** @todo was: if (!RTSemPongShouldWait(...)) return VERR_SEM_OUT_OF_TURN; */
1500 return VINF_SUCCESS;
1501}
1502
1503
1504/**
1505 * Resumes VM execution.
1506 *
1507 * There is no receipt event on this command.
1508 *
1509 * @returns VBox status code.
1510 * @retval VWRN_DBGF_ALREADY_RUNNING if the specified vCPUs are all running.
1511 * @param pUVM The user mode VM handle.
1512 * @param idCpu The vCPU to resume, VMCPUID_ALL resumes all still halted vCPUs.
1513 */
1514VMMR3DECL(int) DBGFR3Resume(PUVM pUVM, VMCPUID idCpu)
1515{
1516 /*
1517 * Validate input and attachment state.
1518 */
1519 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1520 PVM pVM = pUVM->pVM;
1521 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1522 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1523
1524 /*
1525 * Ping the halted emulation threads, telling them to run.
1526 */
1527 int rc = VWRN_DBGF_ALREADY_RUNNING;
1528 if (idCpu != VMCPUID_ALL)
1529 {
1530 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
1531 if (dbgfR3CpuIsHalted(pUVCpu))
1532 {
1533 rc = dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_GO);
1534 AssertRC(rc);
1535 }
1536 }
1537 else
1538 {
1539 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1540 {
1541 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1542 if (dbgfR3CpuIsHalted(pUVCpu))
1543 {
1544 int rc2 = dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_GO);
1545 AssertRC(rc2);
1546 if (rc == VWRN_DBGF_ALREADY_RUNNING || RT_FAILURE(rc2))
1547 rc = rc2;
1548 }
1549 }
1550 }
1551
1552 return rc;
1553}
1554
1555
1556/**
1557 * Classifies the current instruction.
1558 *
1559 * @returns Type of instruction.
1560 * @param pVM The cross context VM structure.
1561 * @param pVCpu The current CPU.
1562 * @thread EMT(pVCpu)
1563 */
1564static DBGFSTEPINSTRTYPE dbgfStepGetCurInstrType(PVM pVM, PVMCPU pVCpu)
1565{
1566 /*
1567 * Read the instruction.
1568 */
1569 size_t cbRead = 0;
1570 uint8_t abOpcode[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1571 int rc = PGMR3DbgReadGCPtr(pVM, abOpcode, CPUMGetGuestFlatPC(pVCpu), sizeof(abOpcode) - 1, 0 /*fFlags*/, &cbRead);
1572 if (RT_SUCCESS(rc))
1573 {
1574 /*
1575 * Do minimal parsing. No real need to involve the disassembler here.
1576 */
1577 uint8_t *pb = abOpcode;
1578 for (;;)
1579 {
1580 switch (*pb++)
1581 {
1582 default:
1583 return DBGFSTEPINSTRTYPE_OTHER;
1584
1585 case 0xe8: /* call rel16/32 */
1586 case 0x9a: /* call farptr */
1587 case 0xcc: /* int3 */
1588 case 0xcd: /* int xx */
1589 // case 0xce: /* into */
1590 return DBGFSTEPINSTRTYPE_CALL;
1591
1592 case 0xc2: /* ret xx */
1593 case 0xc3: /* ret */
1594 case 0xca: /* retf xx */
1595 case 0xcb: /* retf */
1596 case 0xcf: /* iret */
1597 return DBGFSTEPINSTRTYPE_RET;
1598
1599 case 0xff:
1600 if ( ((*pb >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) == 2 /* call indir */
1601 || ((*pb >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) == 3) /* call indir-farptr */
1602 return DBGFSTEPINSTRTYPE_CALL;
1603 return DBGFSTEPINSTRTYPE_OTHER;
1604
1605 case 0x0f:
1606 switch (*pb++)
1607 {
1608 case 0x05: /* syscall */
1609 case 0x34: /* sysenter */
1610 return DBGFSTEPINSTRTYPE_CALL;
1611 case 0x07: /* sysret */
1612 case 0x35: /* sysexit */
1613 return DBGFSTEPINSTRTYPE_RET;
1614 }
1615 break;
1616
1617 /* Must handle some REX prefixes. So we do all normal prefixes. */
1618 case 0x40: case 0x41: case 0x42: case 0x43: case 0x44: case 0x45: case 0x46: case 0x47:
1619 case 0x48: case 0x49: case 0x4a: case 0x4b: case 0x4c: case 0x4d: case 0x4e: case 0x4f:
1620 if (!CPUMIsGuestIn64BitCode(pVCpu))
1621 return DBGFSTEPINSTRTYPE_OTHER;
1622 break;
1623
1624 case 0x2e: /* CS */
1625 case 0x36: /* SS */
1626 case 0x3e: /* DS */
1627 case 0x26: /* ES */
1628 case 0x64: /* FS */
1629 case 0x65: /* GS */
1630 case 0x66: /* op size */
1631 case 0x67: /* addr size */
1632 case 0xf0: /* lock */
1633 case 0xf2: /* REPNZ */
1634 case 0xf3: /* REPZ */
1635 break;
1636 }
1637 }
1638 }
1639
1640 return DBGFSTEPINSTRTYPE_INVALID;
1641}
1642
1643
1644/**
1645 * Checks if the stepping has reached a stop point.
1646 *
1647 * Called when raising a stepped event.
1648 *
1649 * @returns true if the event should be raised, false if we should take one more
1650 * step first.
1651 * @param pVM The cross context VM structure.
1652 * @param pVCpu The cross context per CPU structure of the calling EMT.
1653 * @thread EMT(pVCpu)
1654 */
1655static bool dbgfStepAreWeThereYet(PVM pVM, PVMCPU pVCpu)
1656{
1657 /*
1658 * Check valid pVCpu and that it matches the CPU one stepping.
1659 */
1660 if (pVCpu)
1661 {
1662 if (pVCpu->idCpu == pVM->dbgf.s.SteppingFilter.idCpu)
1663 {
1664 /*
1665 * Increase the number of steps and see if we've reached the max.
1666 */
1667 pVM->dbgf.s.SteppingFilter.cSteps++;
1668 if (pVM->dbgf.s.SteppingFilter.cSteps < pVM->dbgf.s.SteppingFilter.cMaxSteps)
1669 {
1670 /*
1671 * Check PC and SP address filtering.
1672 */
1673 if (pVM->dbgf.s.SteppingFilter.fFlags & (DBGF_STEP_F_STOP_ON_ADDRESS | DBGF_STEP_F_STOP_ON_STACK_POP))
1674 {
1675 if ( (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
1676 && pVM->dbgf.s.SteppingFilter.AddrPc == CPUMGetGuestFlatPC(pVCpu))
1677 return true;
1678 if ( (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
1679 && CPUMGetGuestFlatSP(pVCpu) - pVM->dbgf.s.SteppingFilter.AddrStackPop
1680 < pVM->dbgf.s.SteppingFilter.cbStackPop)
1681 return true;
1682 }
1683
1684 /*
1685 * Do step-over filtering separate from the step-into one.
1686 */
1687 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_OVER)
1688 {
1689 DBGFSTEPINSTRTYPE enmType = dbgfStepGetCurInstrType(pVM, pVCpu);
1690 switch (enmType)
1691 {
1692 default:
1693 if ( pVM->dbgf.s.SteppingFilter.uCallDepth != 0
1694 || (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_FILTER_MASK))
1695 break;
1696 return true;
1697 case DBGFSTEPINSTRTYPE_CALL:
1698 if ( (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_CALL)
1699 && pVM->dbgf.s.SteppingFilter.uCallDepth == 0)
1700 return true;
1701 pVM->dbgf.s.SteppingFilter.uCallDepth++;
1702 break;
1703 case DBGFSTEPINSTRTYPE_RET:
1704 if (pVM->dbgf.s.SteppingFilter.uCallDepth == 0)
1705 {
1706 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_RET)
1707 return true;
1708 /* If after return, we use the cMaxStep limit to stop the next time. */
1709 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_AFTER_RET)
1710 pVM->dbgf.s.SteppingFilter.cMaxSteps = pVM->dbgf.s.SteppingFilter.cSteps + 1;
1711 }
1712 else if (pVM->dbgf.s.SteppingFilter.uCallDepth > 0)
1713 pVM->dbgf.s.SteppingFilter.uCallDepth--;
1714 break;
1715 }
1716 return false;
1717 }
1718 /*
1719 * Filtered step-into.
1720 */
1721 else if ( pVM->dbgf.s.SteppingFilter.fFlags
1722 & (DBGF_STEP_F_STOP_ON_CALL | DBGF_STEP_F_STOP_ON_RET | DBGF_STEP_F_STOP_AFTER_RET))
1723 {
1724 DBGFSTEPINSTRTYPE enmType = dbgfStepGetCurInstrType(pVM, pVCpu);
1725 switch (enmType)
1726 {
1727 default:
1728 break;
1729 case DBGFSTEPINSTRTYPE_CALL:
1730 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_CALL)
1731 return true;
1732 break;
1733 case DBGFSTEPINSTRTYPE_RET:
1734 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_RET)
1735 return true;
1736 /* If after return, we use the cMaxStep limit to stop the next time. */
1737 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_AFTER_RET)
1738 pVM->dbgf.s.SteppingFilter.cMaxSteps = pVM->dbgf.s.SteppingFilter.cSteps + 1;
1739 break;
1740 }
1741 return false;
1742 }
1743 }
1744 }
1745 }
1746
1747 return true;
1748}
1749
1750
1751/**
1752 * Step Into.
1753 *
1754 * A single step event is generated from this command.
1755 * The current implementation is not reliable, so don't rely on the event coming.
1756 *
1757 * @returns VBox status code.
1758 * @param pUVM The user mode VM handle.
1759 * @param idCpu The ID of the CPU to single step on.
1760 */
1761VMMR3DECL(int) DBGFR3Step(PUVM pUVM, VMCPUID idCpu)
1762{
1763 return DBGFR3StepEx(pUVM, idCpu, DBGF_STEP_F_INTO, NULL, NULL, 0, 1);
1764}
1765
1766
1767/**
1768 * Full-fledged step.
1769 *
1770 * This extended stepping API allows for doing multiple steps before raising an
1771 * event, helping implementing step over, step out and other more advanced
1772 * features.
1773 *
1774 * Like the DBGFR3Step() API, this will normally generate a DBGFEVENT_STEPPED or
1775 * DBGFEVENT_STEPPED_HYPER event. However, the stepping may be interrupted by other
1776 * events, which will abort the stepping.
1777 *
1778 * The stop on pop area feature is for safeguarding step out.
1779 *
1780 * Please note, though, that it will always use stepping and never breakpoints.
1781 * While this allows for much greater flexibility, it can at times be rather
1782 * slow.
1783 *
1784 * @returns VBox status code.
1785 * @param pUVM The user mode VM handle.
1786 * @param idCpu The ID of the CPU to single step on.
1787 * @param fFlags Flags controlling the stepping, DBGF_STEP_F_XXX.
1788 * Either DBGF_STEP_F_INTO or DBGF_STEP_F_OVER must
1789 * always be specified.
1790 * @param pStopPcAddr Address to stop executing at. Completely ignored
1791 * unless DBGF_STEP_F_STOP_ON_ADDRESS is specified.
1792 * @param pStopPopAddr Stack address that SP must be lower than when
1793 * performing DBGF_STEP_F_STOP_ON_STACK_POP filtering.
1794 * @param cbStopPop The range starting at @a pStopPopAddr which is
1795 * considered to be within the same thread stack. Note
1796 * that the API allows @a pStopPopAddr and @a cbStopPop
1797 * to form an area that wraps around and it will
1798 * consider the part starting at 0 as included.
1799 * @param cMaxSteps The maximum number of steps to take. This is to
1800 * prevent stepping for ever, so passing UINT32_MAX is
1801 * not recommended.
1802 *
1803 * @remarks The two address arguments must be guest context virtual addresses,
1804 * or HMA. The code doesn't make much of a point out of HMA, though.
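 *
 * Illustrative only: a plain "step over the current instruction" request on
 * vCPU 0, assuming that vCPU is currently halted in the debugger, with a
 * safety cap of 1000 steps (the cap value is an arbitrary choice):
 * @code
 *  // idCpu=0, step over, no PC/stack-pop filtering, at most 1000 steps:
 *  rc = DBGFR3StepEx(pUVM, 0, DBGF_STEP_F_OVER, NULL, NULL, 0, 1000);
 * @endcode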
1805 */
1806VMMR3DECL(int) DBGFR3StepEx(PUVM pUVM, VMCPUID idCpu, uint32_t fFlags, PCDBGFADDRESS pStopPcAddr,
1807 PCDBGFADDRESS pStopPopAddr, RTGCUINTPTR cbStopPop, uint32_t cMaxSteps)
1808{
1809 /*
1810 * Check state.
1811 */
1812 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1813 PVM pVM = pUVM->pVM;
1814 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1815 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_PARAMETER);
1816 AssertReturn(!(fFlags & ~DBGF_STEP_F_VALID_MASK), VERR_INVALID_FLAGS);
1817 AssertReturn(RT_BOOL(fFlags & DBGF_STEP_F_INTO) != RT_BOOL(fFlags & DBGF_STEP_F_OVER), VERR_INVALID_FLAGS);
1818 if (fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
1819 {
1820 AssertReturn(RT_VALID_PTR(pStopPcAddr), VERR_INVALID_POINTER);
1821 AssertReturn(DBGFADDRESS_IS_VALID(pStopPcAddr), VERR_INVALID_PARAMETER);
1822 AssertReturn(DBGFADDRESS_IS_VIRT_GC(pStopPcAddr), VERR_INVALID_PARAMETER);
1823 }
1824 AssertReturn(!(fFlags & DBGF_STEP_F_STOP_ON_STACK_POP) || RT_VALID_PTR(pStopPopAddr), VERR_INVALID_POINTER);
1825 if (fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
1826 {
1827 AssertReturn(RT_VALID_PTR(pStopPopAddr), VERR_INVALID_POINTER);
1828 AssertReturn(DBGFADDRESS_IS_VALID(pStopPopAddr), VERR_INVALID_PARAMETER);
1829 AssertReturn(DBGFADDRESS_IS_VIRT_GC(pStopPopAddr), VERR_INVALID_PARAMETER);
1830 AssertReturn(cbStopPop > 0, VERR_INVALID_PARAMETER);
1831 }
1832
1833 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1834 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
1835 if (RT_LIKELY(dbgfR3CpuIsHalted(pUVCpu)))
1836 { /* likely */ }
1837 else
1838 return VERR_SEM_OUT_OF_TURN;
1839 Assert(pVM->dbgf.s.SteppingFilter.idCpu == NIL_VMCPUID);
1840
1841 /*
1842 * Send the emulation thread a single-step command.
1843 */
1844 if (fFlags == DBGF_STEP_F_INTO)
1845 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;
1846 else
1847 pVM->dbgf.s.SteppingFilter.idCpu = idCpu;
1848 pVM->dbgf.s.SteppingFilter.fFlags = fFlags;
1849 if (fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
1850 pVM->dbgf.s.SteppingFilter.AddrPc = pStopPcAddr->FlatPtr;
1851 else
1852 pVM->dbgf.s.SteppingFilter.AddrPc = 0;
1853 if (fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
1854 {
1855 pVM->dbgf.s.SteppingFilter.AddrStackPop = pStopPopAddr->FlatPtr;
1856 pVM->dbgf.s.SteppingFilter.cbStackPop = cbStopPop;
1857 }
1858 else
1859 {
1860 pVM->dbgf.s.SteppingFilter.AddrStackPop = 0;
1861 pVM->dbgf.s.SteppingFilter.cbStackPop = RTGCPTR_MAX;
1862 }
1863
1864 pVM->dbgf.s.SteppingFilter.cMaxSteps = cMaxSteps;
1865 pVM->dbgf.s.SteppingFilter.cSteps = 0;
1866 pVM->dbgf.s.SteppingFilter.uCallDepth = 0;
1867
1868 Assert(dbgfR3CpuIsHalted(pUVCpu));
1869 return dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_SINGLE_STEP);
1870}
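/*
 * [Editorial sketch, not part of DBGF.cpp] The stop-on-pop filter described
 * above is what makes a breakpoint-free "step out" feasible.  The helper
 * below is a minimal illustration of that idea; it assumes the CPU is halted
 * in the debugger, that uCurrentSp holds the guest SP as a flat guest virtual
 * address, and that DBGFR3AddrFromFlat() is available for building the
 * DBGFADDRESS.  The 64 KiB pop window and the 4096-step cap are arbitrary.
 */
static int exampleStepOut(PUVM pUVM, VMCPUID idCpu, RTGCUINTPTR uCurrentSp)
{
    DBGFADDRESS StopPopAddr;
    DBGFR3AddrFromFlat(pUVM, &StopPopAddr, uCurrentSp);     /* assumed helper from DBGFAddr.cpp */
    return DBGFR3StepEx(pUVM, idCpu,
                        DBGF_STEP_F_OVER | DBGF_STEP_F_STOP_ON_STACK_POP,
                        NULL /*pStopPcAddr*/, &StopPopAddr,
                        0x10000 /*cbStopPop - arbitrary stack window*/,
                        4096 /*cMaxSteps - arbitrary safety cap*/);
}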
1871
1872
1873
1874/**
1875 * dbgfR3EventConfigEx argument packet.
1876 */
1877typedef struct DBGFR3EVENTCONFIGEXARGS
1878{
1879 PCDBGFEVENTCONFIG paConfigs;
1880 size_t cConfigs;
1881 int rc;
1882} DBGFR3EVENTCONFIGEXARGS;
1883/** Pointer to a dbgfR3EventConfigEx argument packet. */
1884typedef DBGFR3EVENTCONFIGEXARGS *PDBGFR3EVENTCONFIGEXARGS;
1885
1886
1887/**
1888 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Worker for DBGFR3EventConfigEx.}
1889 */
1890static DECLCALLBACK(VBOXSTRICTRC) dbgfR3EventConfigEx(PVM pVM, PVMCPU pVCpu, void *pvUser)
1891{
1892 if (pVCpu->idCpu == 0)
1893 {
1894 PDBGFR3EVENTCONFIGEXARGS pArgs = (PDBGFR3EVENTCONFIGEXARGS)pvUser;
1895 DBGFEVENTCONFIG volatile const *paConfigs = pArgs->paConfigs;
1896 size_t cConfigs = pArgs->cConfigs;
1897
1898 /*
1899 * Apply the changes.
1900 */
1901 unsigned cChanges = 0;
1902 for (uint32_t i = 0; i < cConfigs; i++)
1903 {
1904 DBGFEVENTTYPE enmType = paConfigs[i].enmType;
1905 AssertReturn(enmType >= DBGFEVENT_FIRST_SELECTABLE && enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
1906 if (paConfigs[i].fEnabled)
1907 cChanges += ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, enmType) == false;
1908 else
1909 cChanges += ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, enmType) == true;
1910 }
1911
1912 /*
1913 * Inform HM about changes.
1914 */
1915 if (cChanges > 0)
1916 {
1917 if (HMIsEnabled(pVM))
1918 {
1919 HMR3NotifyDebugEventChanged(pVM);
1920 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
1921 }
1922 else if (VM_IS_NEM_ENABLED(pVM))
1923 {
1924 NEMR3NotifyDebugEventChanged(pVM);
1925 NEMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
1926 }
1927 }
1928 }
1929 else if (HMIsEnabled(pVM))
1930 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
1931 else if (VM_IS_NEM_ENABLED(pVM))
1932 NEMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
1933
1934 return VINF_SUCCESS;
1935}
1936
1937
1938/**
1939 * Configures (enables/disables) multiple selectable debug events.
1940 *
1941 * @returns VBox status code.
1942 * @param pUVM The user mode VM handle.
1943 * @param paConfigs The events to configure and their new state.
1944 * @param cConfigs Number of entries in @a paConfigs.
1945 */
1946VMMR3DECL(int) DBGFR3EventConfigEx(PUVM pUVM, PCDBGFEVENTCONFIG paConfigs, size_t cConfigs)
1947{
1948 /*
1949 * Validate input.
1950 */
1951 size_t i = cConfigs;
1952 while (i-- > 0)
1953 {
1954 AssertReturn(paConfigs[i].enmType >= DBGFEVENT_FIRST_SELECTABLE, VERR_INVALID_PARAMETER);
1955 AssertReturn(paConfigs[i].enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
1956 }
1957 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1958 PVM pVM = pUVM->pVM;
1959 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1960
1961 /*
1962 * Apply the changes in EMT(0) and rendezvous with the other CPUs so they
1963 * can sync their data and execution with new debug state.
1964 */
1965 DBGFR3EVENTCONFIGEXARGS Args = { paConfigs, cConfigs, VINF_SUCCESS };
1966 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING | VMMEMTRENDEZVOUS_FLAGS_PRIORITY,
1967 dbgfR3EventConfigEx, &Args);
1968 if (RT_SUCCESS(rc))
1969 rc = Args.rc;
1970 return rc;
1971}
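/*
 * [Editorial sketch, not part of DBGF.cpp] A typical caller batches several
 * selectable events into one DBGFR3EventConfigEx() call so only a single
 * rendezvous is needed.  The DBGFEVENT_XCPT_* types below are assumed to lie
 * within the selectable range.
 */
static int exampleEnableExceptionEvents(PUVM pUVM)
{
    DBGFEVENTCONFIG aCfgs[] =
    {
        { DBGFEVENT_XCPT_DB, true /*fEnabled*/ },
        { DBGFEVENT_XCPT_GP, true /*fEnabled*/ },
    };
    return DBGFR3EventConfigEx(pUVM, aCfgs, RT_ELEMENTS(aCfgs));
}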
1972
1973
1974/**
1975 * Enables or disables a selectable debug event.
1976 *
1977 * @returns VBox status code.
1978 * @param pUVM The user mode VM handle.
1979 * @param enmEvent The selectable debug event.
1980 * @param fEnabled The new state.
1981 */
1982VMMR3DECL(int) DBGFR3EventConfig(PUVM pUVM, DBGFEVENTTYPE enmEvent, bool fEnabled)
1983{
1984 /*
1985 * Convert to an array call.
1986 */
1987 DBGFEVENTCONFIG EvtCfg = { enmEvent, fEnabled };
1988 return DBGFR3EventConfigEx(pUVM, &EvtCfg, 1);
1989}
1990
1991
1992/**
1993 * Checks if the given selectable event is enabled.
1994 *
1995 * @returns true if enabled, false if not or invalid input.
1996 * @param pUVM The user mode VM handle.
1997 * @param enmEvent The selectable debug event.
1998 * @sa DBGFR3EventQuery
1999 */
2000VMMR3DECL(bool) DBGFR3EventIsEnabled(PUVM pUVM, DBGFEVENTTYPE enmEvent)
2001{
2002 /*
2003 * Validate input.
2004 */
2005 AssertReturn( enmEvent >= DBGFEVENT_HALT_DONE
2006 && enmEvent < DBGFEVENT_END, false);
2007 Assert( enmEvent >= DBGFEVENT_FIRST_SELECTABLE
2008 || enmEvent == DBGFEVENT_BREAKPOINT
2009 || enmEvent == DBGFEVENT_BREAKPOINT_IO
2010 || enmEvent == DBGFEVENT_BREAKPOINT_MMIO);
2011
2012 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2013 PVM pVM = pUVM->pVM;
2014 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2015
2016 /*
2017 * Check the event status.
2018 */
2019 return ASMBitTest(&pVM->dbgf.s.bmSelectedEvents, enmEvent);
2020}
2021
2022
2023/**
2024 * Queries the status of a set of events.
2025 *
2026 * @returns VBox status code.
2027 * @param pUVM The user mode VM handle.
2028 * @param paConfigs The events to query and where to return the state.
2029 * @param cConfigs The number of elements in @a paConfigs.
2030 * @sa DBGFR3EventIsEnabled, DBGF_IS_EVENT_ENABLED
2031 */
2032VMMR3DECL(int) DBGFR3EventQuery(PUVM pUVM, PDBGFEVENTCONFIG paConfigs, size_t cConfigs)
2033{
2034 /*
2035 * Validate input.
2036 */
2037 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2038 PVM pVM = pUVM->pVM;
2039 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2040
2041 for (size_t i = 0; i < cConfigs; i++)
2042 {
2043 DBGFEVENTTYPE enmType = paConfigs[i].enmType;
2044 AssertReturn( enmType >= DBGFEVENT_HALT_DONE
2045 && enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
2046 Assert( enmType >= DBGFEVENT_FIRST_SELECTABLE
2047 || enmType == DBGFEVENT_BREAKPOINT
2048 || enmType == DBGFEVENT_BREAKPOINT_IO
2049 || enmType == DBGFEVENT_BREAKPOINT_MMIO);
2050 paConfigs[i].fEnabled = ASMBitTest(&pVM->dbgf.s.bmSelectedEvents, paConfigs[i].enmType);
2051 }
2052
2053 return VINF_SUCCESS;
2054}
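/*
 * [Editorial sketch, not part of DBGF.cpp] Reading the state back uses the
 * same config structure; fEnabled is filled in by DBGFR3EventQuery().
 */
static void exampleQueryExceptionEvents(PUVM pUVM)
{
    DBGFEVENTCONFIG aCfgs[] =
    {
        { DBGFEVENT_XCPT_DB, false },
        { DBGFEVENT_XCPT_GP, false },
    };
    if (RT_SUCCESS(DBGFR3EventQuery(pUVM, aCfgs, RT_ELEMENTS(aCfgs))))
        LogRel(("#DB intercept: %RTbool  #GP intercept: %RTbool\n",
                aCfgs[0].fEnabled, aCfgs[1].fEnabled));
}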
2055
2056
2057/**
2058 * dbgfR3InterruptConfigEx argument packet.
2059 */
2060typedef struct DBGFR3INTERRUPTCONFIGEXARGS
2061{
2062 PCDBGFINTERRUPTCONFIG paConfigs;
2063 size_t cConfigs;
2064 int rc;
2065} DBGFR3INTERRUPTCONFIGEXARGS;
2066/** Pointer to a dbgfR3InterruptConfigEx argument packet. */
2067typedef DBGFR3INTERRUPTCONFIGEXARGS *PDBGFR3INTERRUPTCONFIGEXARGS;
2068
2069/**
2070 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
2071 * Worker for DBGFR3InterruptConfigEx.}
2072 */
2073static DECLCALLBACK(VBOXSTRICTRC) dbgfR3InterruptConfigEx(PVM pVM, PVMCPU pVCpu, void *pvUser)
2074{
2075 if (pVCpu->idCpu == 0)
2076 {
2077 PDBGFR3INTERRUPTCONFIGEXARGS pArgs = (PDBGFR3INTERRUPTCONFIGEXARGS)pvUser;
2078 PCDBGFINTERRUPTCONFIG paConfigs = pArgs->paConfigs;
2079 size_t cConfigs = pArgs->cConfigs;
2080
2081 /*
2082 * Apply the changes.
2083 */
2084 bool fChanged = false;
2085 bool fThis;
2086 for (uint32_t i = 0; i < cConfigs; i++)
2087 {
2088 /*
2089 * Hardware interrupts.
2090 */
2091 if (paConfigs[i].enmHardState == DBGFINTERRUPTSTATE_ENABLED)
2092 {
2093 fChanged |= fThis = ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmHardIntBreakpoints, paConfigs[i].iInterrupt) == false;
2094 if (fThis)
2095 {
2096 Assert(pVM->dbgf.s.cHardIntBreakpoints < 256);
2097 pVM->dbgf.s.cHardIntBreakpoints++;
2098 }
2099 }
2100 else if (paConfigs[i].enmHardState == DBGFINTERRUPTSTATE_DISABLED)
2101 {
2102 fChanged |= fThis = ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmHardIntBreakpoints, paConfigs[i].iInterrupt) == true;
2103 if (fThis)
2104 {
2105 Assert(pVM->dbgf.s.cHardIntBreakpoints > 0);
2106 pVM->dbgf.s.cHardIntBreakpoints--;
2107 }
2108 }
2109
2110 /*
2111 * Software interrupts.
2112 */
2113 if (paConfigs[i].enmSoftState == DBGFINTERRUPTSTATE_ENABLED)
2114 {
2115 fChanged |= fThis = ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSoftIntBreakpoints, paConfigs[i].iInterrupt) == false;
2116 if (fThis)
2117 {
2118 Assert(pVM->dbgf.s.cSoftIntBreakpoints < 256);
2119 pVM->dbgf.s.cSoftIntBreakpoints++;
2120 }
2121 }
2122 else if (paConfigs[i].enmSoftState == DBGFINTERRUPTSTATE_DISABLED)
2123 {
2124 fChanged |= fThis = ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSoftIntBreakpoints, paConfigs[i].iInterrupt) == true;
2125 if (fThis)
2126 {
2127 Assert(pVM->dbgf.s.cSoftIntBreakpoints > 0);
2128 pVM->dbgf.s.cSoftIntBreakpoints--;
2129 }
2130 }
2131 }
2132
2133 /*
2134 * Update the event bitmap entries.
2135 */
2136 if (pVM->dbgf.s.cHardIntBreakpoints > 0)
2137 fChanged |= ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_HARDWARE) == false;
2138 else
2139 fChanged |= ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_HARDWARE) == true;
2140
2141 if (pVM->dbgf.s.cSoftIntBreakpoints > 0)
2142 fChanged |= ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_SOFTWARE) == false;
2143 else
2144 fChanged |= ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_SOFTWARE) == true;
2145
2146 /*
2147 * Inform HM about changes.
2148 */
2149 if (fChanged)
2150 {
2151 if (HMIsEnabled(pVM))
2152 {
2153 HMR3NotifyDebugEventChanged(pVM);
2154 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2155 }
2156 else if (VM_IS_NEM_ENABLED(pVM))
2157 {
2158 NEMR3NotifyDebugEventChanged(pVM);
2159 NEMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2160 }
2161 }
2162 }
2163 else if (HMIsEnabled(pVM))
2164 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2165 else if (VM_IS_NEM_ENABLED(pVM))
2166 NEMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2167
2168 return VINF_SUCCESS;
2169}
2170
2171
2172/**
2173 * Configures (enables/disables) interception of hardware and software interrupts.
2174 *
2175 * @returns VBox status code.
2176 * @param pUVM The user mode VM handle.
2177 * @param paConfigs The interrupts to configure and their new state.
2178 * @param cConfigs The number of elements in @a paConfigs.
2179 * @sa DBGFR3InterruptHardwareConfig, DBGFR3InterruptSoftwareConfig
2180 */
2181VMMR3DECL(int) DBGFR3InterruptConfigEx(PUVM pUVM, PCDBGFINTERRUPTCONFIG paConfigs, size_t cConfigs)
2182{
2183 /*
2184 * Validate input.
2185 */
2186 size_t i = cConfigs;
2187 while (i-- > 0)
2188 {
2189 AssertReturn(paConfigs[i].enmHardState <= DBGFINTERRUPTSTATE_DONT_TOUCH, VERR_INVALID_PARAMETER);
2190 AssertReturn(paConfigs[i].enmSoftState <= DBGFINTERRUPTSTATE_DONT_TOUCH, VERR_INVALID_PARAMETER);
2191 }
2192
2193 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2194 PVM pVM = pUVM->pVM;
2195 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2196
2197 /*
2198 * Apply the changes in EMT(0) and rendezvous with the other CPUs so they
2199 * can sync their data and execution with new debug state.
2200 */
2201 DBGFR3INTERRUPTCONFIGEXARGS Args = { paConfigs, cConfigs, VINF_SUCCESS };
2202 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING | VMMEMTRENDEZVOUS_FLAGS_PRIORITY,
2203 dbgfR3InterruptConfigEx, &Args);
2204 if (RT_SUCCESS(rc))
2205 rc = Args.rc;
2206 return rc;
2207}
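/*
 * [Editorial sketch, not part of DBGF.cpp] Intercepting hardware interrupt
 * vector 0x0e while leaving the software interrupt state untouched.  The
 * field order (iInterrupt, enmHardState, enmSoftState) follows the wrapper
 * functions below.
 */
static int exampleInterceptHardwareInt0e(PUVM pUVM)
{
    DBGFINTERRUPTCONFIG IntCfg = { 0x0e /*iInterrupt*/, DBGFINTERRUPTSTATE_ENABLED, DBGFINTERRUPTSTATE_DONT_TOUCH };
    return DBGFR3InterruptConfigEx(pUVM, &IntCfg, 1);
}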
2208
2209
2210/**
2211 * Configures interception of a hardware interrupt.
2212 *
2213 * @returns VBox status code.
2214 * @param pUVM The user mode VM handle.
2215 * @param iInterrupt The interrupt number.
2216 * @param fEnabled Whether interception is enabled or not.
2217 * @sa DBGFR3InterruptSoftwareConfig, DBGFR3InterruptConfigEx
2218 */
2219VMMR3DECL(int) DBGFR3InterruptHardwareConfig(PUVM pUVM, uint8_t iInterrupt, bool fEnabled)
2220{
2221 /*
2222 * Convert to DBGFR3InterruptConfigEx call.
2223 */
2224 DBGFINTERRUPTCONFIG IntCfg = { iInterrupt, (uint8_t)fEnabled, DBGFINTERRUPTSTATE_DONT_TOUCH };
2225 return DBGFR3InterruptConfigEx(pUVM, &IntCfg, 1);
2226}
2227
2228
2229/**
2230 * Configures interception of a software interrupt.
2231 *
2232 * @returns VBox status code.
2233 * @param pUVM The user mode VM handle.
2234 * @param iInterrupt The interrupt number.
2235 * @param fEnabled Whether interception is enabled or not.
2236 * @sa DBGFR3InterruptHardwareConfig, DBGFR3InterruptConfigEx
2237 */
2238VMMR3DECL(int) DBGFR3InterruptSoftwareConfig(PUVM pUVM, uint8_t iInterrupt, bool fEnabled)
2239{
2240 /*
2241 * Convert to DBGFR3InterruptConfigEx call.
2242 */
2243 DBGFINTERRUPTCONFIG IntCfg = { iInterrupt, DBGFINTERRUPTSTATE_DONT_TOUCH, (uint8_t)fEnabled };
2244 return DBGFR3InterruptConfigEx(pUVM, &IntCfg, 1);
2245}
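/*
 * [Editorial sketch, not part of DBGF.cpp] For one-off changes the wrappers
 * above are simpler, e.g. intercepting software interrupt 80h:
 */
static int exampleInterceptSoftwareInt80(PUVM pUVM)
{
    return DBGFR3InterruptSoftwareConfig(pUVM, 0x80 /*iInterrupt*/, true /*fEnabled*/);
}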
2246
2247
2248/**
2249 * Checks whether interception is enabled for a hardware interrupt.
2250 *
2251 * @returns true if enabled, false if not or invalid input.
2252 * @param pUVM The user mode VM handle.
2253 * @param iInterrupt The interrupt number.
2254 * @sa DBGFR3InterruptSoftwareIsEnabled, DBGF_IS_HARDWARE_INT_ENABLED,
2255 * DBGF_IS_SOFTWARE_INT_ENABLED
2256 */
2257VMMR3DECL(int) DBGFR3InterruptHardwareIsEnabled(PUVM pUVM, uint8_t iInterrupt)
2258{
2259 /*
2260 * Validate input.
2261 */
2262 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2263 PVM pVM = pUVM->pVM;
2264 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2265
2266 /*
2267 * Check it.
2268 */
2269 return ASMBitTest(&pVM->dbgf.s.bmHardIntBreakpoints, iInterrupt);
2270}
2271
2272
2273/**
2274 * Checks whether interception is enabled for a software interrupt.
2275 *
2276 * @returns true if enabled, false if not or invalid input.
2277 * @param pUVM The user mode VM handle.
2278 * @param iInterrupt The interrupt number.
2279 * @sa DBGFR3InterruptHardwareIsEnabled, DBGF_IS_SOFTWARE_INT_ENABLED,
2280 * DBGF_IS_HARDWARE_INT_ENABLED,
2281 */
2282VMMR3DECL(int) DBGFR3InterruptSoftwareIsEnabled(PUVM pUVM, uint8_t iInterrupt)
2283{
2284 /*
2285 * Validate input.
2286 */
2287 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2288 PVM pVM = pUVM->pVM;
2289 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2290
2291 /*
2292 * Check it.
2293 */
2294 return ASMBitTest(&pVM->dbgf.s.bmSoftIntBreakpoints, iInterrupt);
2295}
2296
2297
2298
2299/**
2300 * Call this to single step programmatically.
2301 *
2302 * You must pass down the return code to the EM loop! That's
2303 * where the actual single stepping takes place (at least in the
2304 * current implementation).
2305 *
2306 * @returns VINF_EM_DBG_STEP
2307 *
2308 * @param pVCpu The cross context virtual CPU structure.
2309 *
2310 * @thread VCpu EMT
2311 * @internal
2312 */
2313VMMR3_INT_DECL(int) DBGFR3PrgStep(PVMCPU pVCpu)
2314{
2315 VMCPU_ASSERT_EMT(pVCpu);
2316
2317 pVCpu->dbgf.s.fSingleSteppingRaw = true;
2318 return VINF_EM_DBG_STEP;
2319}
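/*
 * [Editorial sketch, not part of DBGF.cpp] A hypothetical EMT-side caller.
 * The important point from the comment above is that VINF_EM_DBG_STEP must
 * be propagated back to the EM loop unmodified, since that is where the step
 * is actually performed.
 */
static int exampleRequestProgrammaticStep(PVMCPU pVCpu)
{
    int rc = DBGFR3PrgStep(pVCpu);  /* VINF_EM_DBG_STEP */
    return rc;                      /* hand straight back to the EM loop */
}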
2320
2321
2322/**
2323 * Inject an NMI into a running VM (only VCPU 0!)
2324 *
2325 * @returns VBox status code.
2326 * @param pUVM The user mode VM structure.
2327 * @param idCpu The ID of the CPU to inject the NMI on.
2328 */
2329VMMR3DECL(int) DBGFR3InjectNMI(PUVM pUVM, VMCPUID idCpu)
2330{
2331 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2332 PVM pVM = pUVM->pVM;
2333 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2334 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
2335
2336 /** @todo Implement generic NMI injection. */
2337 /** @todo NEM: NMI injection */
2338 if (!HMIsEnabled(pVM))
2339 return VERR_NOT_SUP_BY_NEM;
2340
2341 VMCPU_FF_SET(pVM->apCpusR3[idCpu], VMCPU_FF_INTERRUPT_NMI);
2342 return VINF_SUCCESS;
2343}
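/*
 * [Editorial sketch, not part of DBGF.cpp] Injecting an NMI on VCPU 0 and
 * reporting the case where the backend does not support it yet.
 */
static void exampleInjectNmi(PUVM pUVM)
{
    int rc = DBGFR3InjectNMI(pUVM, 0 /*idCpu*/);
    if (rc == VERR_NOT_SUP_BY_NEM)
        LogRel(("DBGF: NMI injection not supported by the current execution engine\n"));
}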
2344