VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/DBGF.cpp@ 89364

Last change on this file since 89364 was 87594, checked in by vboxsync, 4 years ago

VMM/DBGF,Debugger: Removed the !defined(VBOX_WITH_LOTS_OF_DBGF_BPS) code. bugref:9837

File size: 75.1 KB
1/* $Id: DBGF.cpp 87594 2021-02-03 20:23:46Z vboxsync $ */
2/** @file
3 * DBGF - Debugger Facility.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_dbgf DBGF - The Debugger Facility
20 *
21 * The purpose of the DBGF is to provide an interface for debuggers to
22 * manipulate the VMM without having to mess up the source code for each of
23 * them. The DBGF is always built in and will always work when a debugger
24 * attaches to the VM. The DBGF provides the basic debugger features, such as
25 * halting execution, handling breakpoints, single step execution, instruction
26 * disassembly, info querying, OS specific diggers, symbol and module
27 * management.
28 *
29 * The interface works in a manner similar to the win32, linux and os2
30 * debugger interfaces. The interface is asynchronous in nature. This comes
31 * from the fact that the VMM and the Debugger are running in different threads.
32 * They are referred to as the "emulation thread" and the "debugger thread", or
33 * as the "ping thread" and the "pong thread", respectively. (The last set of
34 * names comes from the use of the Ping-Pong synchronization construct from the
35 * RTSem API.)
36 *
37 * @see grp_dbgf
38 *
39 *
40 * @section sec_dbgf_scenario Usage Scenario
41 *
42 * The debugger starts by attaching to the VM. For practical reasons we limit the
43 * number of concurrently attached debuggers to 1 per VM. The action of
44 * attaching to the VM causes the VM to check and generate debug events.
45 *
46 * The debugger then will wait/poll for debug events and issue commands.
47 *
48 * The waiting and polling is done by the DBGFEventWait() function. It will wait
49 * for the emulation thread to send a ping, thus indicating that there is an
50 * event waiting to be processed.
51 *
52 * An event can be a response to a command issued previously, the hitting of a
53 * breakpoint, or running into a bad/fatal VMM condition. The debugger now has
54 * the ping and must respond to the event at hand - the VMM is waiting. This
55 * usually means that the user of the debugger must do something, but it doesn't
56 * have to. The debugger is free to call nearly any DBGF function
57 * while processing the event.
58 *
59 * Typically the user will issue a request for the execution to be resumed, so
60 * the debugger calls DBGFResume() and goes back to waiting/polling for events.
61 *
62 * When the user eventually terminates the debugging session or selects another
63 * VM, the debugger detaches from the VM. This means that breakpoints are
64 * disabled and that the emulation thread no longer polls for debugger commands.
65 *
66 */
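
/*
 * Illustrative sketch of the debugger-side loop described above, built on the
 * public DBGFR3 APIs implemented in this file (DBGFR3Attach, DBGFR3EventWait,
 * DBGFR3Resume, DBGFR3Detach).  Error handling is simplified and the
 * handleEvent() helper is hypothetical, standing in for whatever the debugger
 * front-end does with an event.
 *
 * @code
 *      int rc = DBGFR3Attach(pUVM);                        // only one debugger per VM
 *      while (RT_SUCCESS(rc))
 *      {
 *          DBGFEVENT Event;
 *          rc = DBGFR3EventWait(pUVM, RT_INDEFINITE_WAIT, &Event);
 *          if (RT_FAILURE(rc))
 *              break;
 *          if (Event.enmType == DBGFEVENT_POWERING_OFF)    // the VM is going away
 *              break;
 *          handleEvent(&Event);                            // front-end specific processing
 *          rc = DBGFR3Resume(pUVM, VMCPUID_ALL);           // let the emulation thread(s) run again
 *      }
 *      DBGFR3Detach(pUVM);
 * @endcode
 */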
67
68
69/*********************************************************************************************************************************
70* Header Files *
71*********************************************************************************************************************************/
72#define LOG_GROUP LOG_GROUP_DBGF
73#include <VBox/vmm/dbgf.h>
74#include <VBox/vmm/selm.h>
75#include <VBox/vmm/em.h>
76#include <VBox/vmm/hm.h>
77#include <VBox/vmm/mm.h>
78#include "DBGFInternal.h"
79#include <VBox/vmm/vm.h>
80#include <VBox/vmm/uvm.h>
81#include <VBox/err.h>
82
83#include <VBox/log.h>
84#include <iprt/semaphore.h>
85#include <iprt/thread.h>
86#include <iprt/asm.h>
87#include <iprt/time.h>
88#include <iprt/assert.h>
89#include <iprt/stream.h>
90#include <iprt/env.h>
91
92
93/*********************************************************************************************************************************
94* Structures and Typedefs *
95*********************************************************************************************************************************/
96/**
97 * Instruction type returned by dbgfStepGetCurInstrType.
98 */
99typedef enum DBGFSTEPINSTRTYPE
100{
101 DBGFSTEPINSTRTYPE_INVALID = 0,
102 DBGFSTEPINSTRTYPE_OTHER,
103 DBGFSTEPINSTRTYPE_RET,
104 DBGFSTEPINSTRTYPE_CALL,
105 DBGFSTEPINSTRTYPE_END,
106 DBGFSTEPINSTRTYPE_32BIT_HACK = 0x7fffffff
107} DBGFSTEPINSTRTYPE;
108
109
110/*********************************************************************************************************************************
111* Internal Functions *
112*********************************************************************************************************************************/
113DECLINLINE(int) dbgfR3SendEventWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx);
114DECLINLINE(DBGFCMD) dbgfR3CpuGetCmd(PUVMCPU pUVCpu);
115static int dbgfR3CpuWait(PVMCPU pVCpu);
116static int dbgfR3CpuCmd(PVMCPU pVCpu, DBGFCMD enmCmd, PDBGFCMDDATA pCmdData, bool *pfResumeExecution);
117static DBGFSTEPINSTRTYPE dbgfStepGetCurInstrType(PVM pVM, PVMCPU pVCpu);
118static bool dbgfStepAreWeThereYet(PVM pVM, PVMCPU pVCpu);
119
120
121
122/**
123 * Initializes the DBGF.
124 *
125 * @returns VBox status code.
126 * @param pVM The cross context VM structure.
127 */
128VMMR3_INT_DECL(int) DBGFR3Init(PVM pVM)
129{
130 PUVM pUVM = pVM->pUVM;
131 AssertCompile(sizeof(pUVM->dbgf.s) <= sizeof(pUVM->dbgf.padding));
132 AssertCompile(sizeof(pUVM->aCpus[0].dbgf.s) <= sizeof(pUVM->aCpus[0].dbgf.padding));
133
134 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;
135
136 /*
137 * The usual sideways mountain climbing style of init:
138 */
139 int rc = dbgfR3InfoInit(pUVM); /* (First, initializes the shared critical section.) */
140 if (RT_SUCCESS(rc))
141 {
142 rc = dbgfR3TraceInit(pVM);
143 if (RT_SUCCESS(rc))
144 {
145 rc = dbgfR3RegInit(pUVM);
146 if (RT_SUCCESS(rc))
147 {
148 rc = dbgfR3AsInit(pUVM);
149 if (RT_SUCCESS(rc))
150 {
151 rc = dbgfR3BpInit(pUVM);
152 if (RT_SUCCESS(rc))
153 {
154 rc = dbgfR3OSInit(pUVM);
155 if (RT_SUCCESS(rc))
156 {
157 rc = dbgfR3PlugInInit(pUVM);
158 if (RT_SUCCESS(rc))
159 {
160 rc = dbgfR3BugCheckInit(pVM);
161 if (RT_SUCCESS(rc))
162 {
163#ifdef VBOX_WITH_DBGF_TRACING
164 rc = dbgfR3TracerInit(pVM);
165#endif
166 if (RT_SUCCESS(rc))
167 {
168 return VINF_SUCCESS;
169 }
170 }
171 dbgfR3PlugInTerm(pUVM);
172 }
173 dbgfR3OSTermPart1(pUVM);
174 dbgfR3OSTermPart2(pUVM);
175 }
176 dbgfR3BpTerm(pUVM);
177 }
178 dbgfR3AsTerm(pUVM);
179 }
180 dbgfR3RegTerm(pUVM);
181 }
182 dbgfR3TraceTerm(pVM);
183 }
184 dbgfR3InfoTerm(pUVM);
185 }
186 return rc;
187}
188
189
190/**
191 * Terminates and cleans up resources allocated by the DBGF.
192 *
193 * @returns VBox status code.
194 * @param pVM The cross context VM structure.
195 */
196VMMR3_INT_DECL(int) DBGFR3Term(PVM pVM)
197{
198 PUVM pUVM = pVM->pUVM;
199
200#ifdef VBOX_WITH_DBGF_TRACING
201 dbgfR3TracerTerm(pVM);
202#endif
203 dbgfR3OSTermPart1(pUVM);
204 dbgfR3PlugInTerm(pUVM);
205 dbgfR3OSTermPart2(pUVM);
206 dbgfR3BpTerm(pUVM);
207 dbgfR3AsTerm(pUVM);
208 dbgfR3RegTerm(pUVM);
209 dbgfR3TraceTerm(pVM);
210 dbgfR3InfoTerm(pUVM);
211
212 return VINF_SUCCESS;
213}
214
215
216/**
217 * This is for tstCFGM and others to avoid triggering leak detection.
218 *
219 * @returns VBox status code.
220 * @param pUVM The user mode VM structure.
221 */
222VMMR3DECL(void) DBGFR3TermUVM(PUVM pUVM)
223{
224 dbgfR3InfoTerm(pUVM);
225}
226
227
228/**
229 * Called when the VM is powered off to detach debuggers.
230 *
231 * @param pVM The cross context VM structure.
232 */
233VMMR3_INT_DECL(void) DBGFR3PowerOff(PVM pVM)
234{
235 /*
236 * Send a termination event to any attached debugger.
237 */
238 if (pVM->dbgf.s.fAttached)
239 {
240 PVMCPU pVCpu = VMMGetCpu(pVM);
241 int rc = dbgfR3SendEventWait(pVM, pVCpu, DBGFEVENT_POWERING_OFF, DBGFEVENTCTX_OTHER);
242 AssertLogRelRC(rc);
243
244 /*
245 * Clear the FF so we won't get confused later on.
246 */
247 VM_FF_CLEAR(pVM, VM_FF_DBGF);
248 }
249}
250
251
252/**
253 * Applies relocations to data and code managed by this
254 * component. This function will be called at init and
255 * whenever the VMM needs to relocate itself inside the GC.
256 *
257 * @param pVM The cross context VM structure.
258 * @param offDelta Relocation delta relative to old location.
259 */
260VMMR3_INT_DECL(void) DBGFR3Relocate(PVM pVM, RTGCINTPTR offDelta)
261{
262 dbgfR3TraceRelocate(pVM);
263 dbgfR3AsRelocate(pVM->pUVM, offDelta);
264}
265
266
267/**
268 * Waits a little while for a debugger to attach.
269 *
270 * @returns True if a debugger has attached.
271 * @param pVM The cross context VM structure.
272 * @param pVCpu The cross context per CPU structure.
273 * @param enmEvent Event.
274 *
275 * @thread EMT(pVCpu)
276 */
277bool dbgfR3WaitForAttach(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmEvent)
278{
279 /*
280 * First a message.
281 */
282#if !defined(DEBUG)
283 int cWait = 10;
284#else
285 int cWait = !VM_IS_RAW_MODE_ENABLED(pVM)
286 && ( enmEvent == DBGFEVENT_ASSERTION_HYPER
287 || enmEvent == DBGFEVENT_FATAL_ERROR)
288 && !RTEnvExist("VBOX_DBGF_WAIT_FOR_ATTACH")
289 ? 10
290 : 150;
291#endif
292 RTStrmPrintf(g_pStdErr, "DBGF: No debugger attached, waiting %d second%s for one to attach (event=%d)\n",
293 cWait / 10, cWait != 10 ? "s" : "", enmEvent);
294 RTStrmFlush(g_pStdErr);
295 while (cWait > 0)
296 {
297 RTThreadSleep(100);
298 if (pVM->dbgf.s.fAttached)
299 {
300 RTStrmPrintf(g_pStdErr, "Attached!\n");
301 RTStrmFlush(g_pStdErr);
302 return true;
303 }
304
305 /* Process rendezvous (debugger attaching involves such). */
306 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
307 {
308 int rc = VMMR3EmtRendezvousFF(pVM, pVCpu); AssertRC(rc);
309 if (rc != VINF_SUCCESS)
310 {
311 /** @todo Ignoring these could be bad. */
312 RTStrmPrintf(g_pStdErr, "[rcRendezvous=%Rrc, ignored!]", rc);
313 RTStrmFlush(g_pStdErr);
314 }
315 }
316
317 /* Process priority stuff. */
318 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
319 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
320 {
321 int rc = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, true /*fPriorityOnly*/);
322 if (rc == VINF_SUCCESS)
323 rc = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, true /*fPriorityOnly*/);
324 if (rc != VINF_SUCCESS)
325 {
326 /** @todo Ignoring these could be bad. */
327 RTStrmPrintf(g_pStdErr, "[rcReq=%Rrc, ignored!]", rc);
328 RTStrmFlush(g_pStdErr);
329 }
330 }
331
332 /* next */
333 if (!(cWait % 10))
334 {
335 RTStrmPrintf(g_pStdErr, "%d.", cWait / 10);
336 RTStrmFlush(g_pStdErr);
337 }
338 cWait--;
339 }
340
341 RTStrmPrintf(g_pStdErr, "Stopping the VM!\n");
342 RTStrmFlush(g_pStdErr);
343 return false;
344}
345
346
347/**
348 * Forced action callback.
349 *
350 * The VMM will call this from its main loop when either VM_FF_DBGF or
351 * VMCPU_FF_DBGF are set.
352 *
353 * The function checks for and executes pending commands from the debugger.
354 * Then it checks for pending debug events and serves these.
355 *
356 * @returns VINF_SUCCESS normally.
357 * @returns VERR_DBGF_RAISE_FATAL_ERROR to pretend a fatal error happened.
358 * @param pVM The cross context VM structure.
359 * @param pVCpu The cross context per CPU structure.
360 */
361VMMR3_INT_DECL(int) DBGFR3VMMForcedAction(PVM pVM, PVMCPU pVCpu)
362{
363 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
364
365 /*
366 * Dispatch pending events.
367 */
368 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_DBGF))
369 {
370 if ( pVCpu->dbgf.s.cEvents > 0
371 && pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState == DBGFEVENTSTATE_CURRENT)
372 {
373 rcStrict = DBGFR3EventHandlePending(pVM, pVCpu);
374 /** @todo may end up with VERR_DBGF_NOT_ATTACHED here, which will prove fatal... */
375 }
376
377 /*
378 * Command pending? Process it.
379 */
380 PUVMCPU pUVCpu = pVCpu->pUVCpu;
381 if (pUVCpu->dbgf.s.enmDbgfCmd != DBGFCMD_NO_COMMAND)
382 {
383 bool fResumeExecution;
384 DBGFCMDDATA CmdData = pUVCpu->dbgf.s.DbgfCmdData;
385 DBGFCMD enmCmd = dbgfR3CpuGetCmd(pUVCpu);
386 VBOXSTRICTRC rcStrict2 = dbgfR3CpuCmd(pVCpu, enmCmd, &CmdData, &fResumeExecution);
387 if (!fResumeExecution)
388 rcStrict2 = dbgfR3CpuWait(pVCpu);
389 if ( rcStrict2 != VINF_SUCCESS
390 && ( rcStrict == VINF_SUCCESS
391 || RT_FAILURE(rcStrict2)
392 || rcStrict2 < rcStrict) ) /** @todo oversimplified? */
393 rcStrict = rcStrict2;
394 }
395 }
396
397 return VBOXSTRICTRC_TODO(rcStrict);
398}
399
400
401/**
402 * Try to determine the event context.
403 *
404 * @returns debug event context.
405 * @param pVCpu The cross context vCPU structure.
406 */
407static DBGFEVENTCTX dbgfR3FigureEventCtx(PVMCPU pVCpu)
408{
409 switch (EMGetState(pVCpu))
410 {
411 case EMSTATE_HM:
412 case EMSTATE_NEM:
413 case EMSTATE_DEBUG_GUEST_HM:
414 case EMSTATE_DEBUG_GUEST_NEM:
415 return DBGFEVENTCTX_HM;
416
417 case EMSTATE_IEM:
418 case EMSTATE_RAW:
419 case EMSTATE_IEM_THEN_REM:
420 case EMSTATE_DEBUG_GUEST_IEM:
421 case EMSTATE_DEBUG_GUEST_RAW:
422 return DBGFEVENTCTX_RAW;
423
424
425 case EMSTATE_REM:
426 case EMSTATE_DEBUG_GUEST_REM:
427 return DBGFEVENTCTX_REM;
428
429 case EMSTATE_DEBUG_HYPER:
430 case EMSTATE_GURU_MEDITATION:
431 return DBGFEVENTCTX_HYPER;
432
433 default:
434 return DBGFEVENTCTX_OTHER;
435 }
436}
437
438
439/**
440 * Sends the event to the debugger (i.e. adds it to the event ring buffer).
441 *
442 * @returns VBox status code.
443 * @param pVM The cross context VM structure.
444 * @param pVCpu The CPU sending the event.
445 * @param enmType The event type to send.
446 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
447 * @param pvPayload Event payload (DBGFEVENT::u data), optional.
448 * @param cbPayload The size of the event payload, optional.
449 */
450static int dbgfR3SendEventWorker(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx,
451 void const *pvPayload, size_t cbPayload)
452{
453 PUVM pUVM = pVM->pUVM;
454 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID; /** @todo per vCPU stepping filter. */
455
456 /*
457 * Massage the input a little.
458 */
459 AssertStmt(cbPayload <= RT_SIZEOFMEMB(DBGFEVENT, u), cbPayload = RT_SIZEOFMEMB(DBGFEVENT, u));
460 if (enmCtx == DBGFEVENTCTX_INVALID)
461 enmCtx = dbgfR3FigureEventCtx(pVCpu);
462
463 /*
464 * Put the event into the ring buffer.
465 */
466 RTSemFastMutexRequest(pUVM->dbgf.s.hMtxDbgEvtWr);
467
468 uint32_t const cDbgEvtMax = RT_MAX(1, pUVM->dbgf.s.cDbgEvtMax);
469 uint32_t const idxDbgEvtWrite = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite);
470 uint32_t const idxDbgEvtRead = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtRead);
471 /** @todo Handle full buffer. */ RT_NOREF(idxDbgEvtRead);
472
473 PDBGFEVENT pEvent = &pUVM->dbgf.s.paDbgEvts[idxDbgEvtWrite % cDbgEvtMax];
474
475#ifdef DEBUG
476 ASMMemFill32(pEvent, sizeof(*pEvent), UINT32_C(0xdeadbeef));
477#endif
478 pEvent->enmType = enmType;
479 pEvent->enmCtx = enmCtx;
480 pEvent->idCpu = pVCpu->idCpu;
481 pEvent->uReserved = 0;
482 if (cbPayload)
483 memcpy(&pEvent->u, pvPayload, cbPayload);
484
485 ASMAtomicWriteU32(&pUVM->dbgf.s.idxDbgEvtWrite, (idxDbgEvtWrite + 1) % cDbgEvtMax);
486
487 RTSemFastMutexRelease(pUVM->dbgf.s.hMtxDbgEvtWr);
488
489 /*
490 * Signal the debugger.
491 */
492 return RTSemEventSignal(pUVM->dbgf.s.hEvtWait);
493}
494
495
496/**
497 * Send event and wait for the debugger to respond.
498 *
499 * @returns Strict VBox status code.
500 * @param pVM The cross context VM structure.
501 * @param pVCpu The CPU sending the event.
502 * @param enmType The event type to send.
503 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
504 */
505DECLINLINE(int) dbgfR3SendEventWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx)
506{
507 int rc = dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, NULL, 0);
508 if (RT_SUCCESS(rc))
509 rc = dbgfR3CpuWait(pVCpu);
510 return rc;
511}
512
513
514/**
515 * Send event and wait for the debugger to respond, extended version.
516 *
517 * @returns Strict VBox status code.
518 * @param pVM The cross context VM structure.
519 * @param pVCpu The CPU sending the event.
520 * @param enmType The event type to send.
521 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
522 * @param pvPayload Event payload (DBGFEVENT::u data), optional.
523 * @param cbPayload The size of the event payload, optional.
524 */
525DECLINLINE(int) dbgfR3SendEventWaitEx(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx,
526 void const *pvPayload, size_t cbPayload)
527{
528 int rc = dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, pvPayload, cbPayload);
529 if (RT_SUCCESS(rc))
530 rc = dbgfR3CpuWait(pVCpu);
531 return rc;
532}
533
534
535/**
536 * Send event but do NOT wait for the debugger.
537 *
538 * Currently only used by dbgfR3CpuCmd().
539 *
540 * @param pVM The cross context VM structure.
541 * @param pVCpu The CPU sending the event.
542 * @param enmType The event type to send.
543 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
544 */
545DECLINLINE(int) dbgfR3SendEventNoWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx)
546{
547 return dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, NULL, 0);
548}
549
550
551/**
552 * The common event prologue code.
553 *
554 * It will make sure someone is attached, and perhaps process any high priority
555 * pending actions (none yet).
556 *
557 * @returns VBox status code.
558 * @param pVM The cross context VM structure.
559 * @param pVCpu The vCPU cross context structure.
560 * @param enmEvent The event to be sent.
561 */
562static int dbgfR3EventPrologue(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmEvent)
563{
564 /*
565 * Check if a debugger is attached.
566 */
567 if ( !pVM->dbgf.s.fAttached
568 && !dbgfR3WaitForAttach(pVM, pVCpu, enmEvent))
569 {
570 Log(("dbgfR3EventPrologue: enmEvent=%d - debugger not attached\n", enmEvent));
571 return VERR_DBGF_NOT_ATTACHED;
572 }
573
574 /*
575 * Look through pending commands and finish those which make sense now.
576 */
577 /** @todo Process/purge pending commands. */
578 //int rc = DBGFR3VMMForcedAction(pVM);
579 return VINF_SUCCESS;
580}
581
582
583/**
584 * Processes a pending event on the current CPU.
585 *
586 * This is called by EM in response to VINF_EM_DBG_EVENT.
587 *
588 * @returns Strict VBox status code.
589 * @param pVM The cross context VM structure.
590 * @param pVCpu The cross context per CPU structure.
591 *
592 * @thread EMT(pVCpu)
593 */
594VMMR3_INT_DECL(VBOXSTRICTRC) DBGFR3EventHandlePending(PVM pVM, PVMCPU pVCpu)
595{
596 VMCPU_ASSERT_EMT(pVCpu);
597 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_DBGF);
598
599 /*
600 * Check that we've got an event first.
601 */
602 AssertReturn(pVCpu->dbgf.s.cEvents > 0, VINF_SUCCESS);
603 AssertReturn(pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState == DBGFEVENTSTATE_CURRENT, VINF_SUCCESS);
604 PDBGFEVENT pEvent = &pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].Event;
605
606 /*
607 * Make sure we've got a debugger and are allowed to speak to it.
608 */
609 int rc = dbgfR3EventPrologue(pVM, pVCpu, pEvent->enmType);
610 if (RT_FAILURE(rc))
611 {
612 /** @todo drop them events? */
613 return rc; /** @todo this will cause trouble if we're here via an FF! */
614 }
615
616 /*
617 * Send the event and mark it as ignore.
618 * ASSUMES no new events get generated while dbgfR3CpuWait is executing!
619 */
620 VBOXSTRICTRC rcStrict = dbgfR3SendEventWaitEx(pVM, pVCpu, pEvent->enmType, pEvent->enmCtx, &pEvent->u, sizeof(pEvent->u));
621 pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState = DBGFEVENTSTATE_IGNORE;
622 return rcStrict;
623}
624
625
626/**
627 * Send a generic debugger event which takes no data.
628 *
629 * @returns VBox status code.
630 * @param pVM The cross context VM structure.
631 * @param enmEvent The event to send.
632 * @internal
633 */
634VMMR3DECL(int) DBGFR3Event(PVM pVM, DBGFEVENTTYPE enmEvent)
635{
636 PVMCPU pVCpu = VMMGetCpu(pVM);
637 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
638
639 /*
640 * Do stepping filtering.
641 */
642 /** @todo Would be better if we did some of this inside the execution
643 * engines. */
644 if ( enmEvent == DBGFEVENT_STEPPED
645 || enmEvent == DBGFEVENT_STEPPED_HYPER)
646 {
647 if (!dbgfStepAreWeThereYet(pVM, pVCpu))
648 return VINF_EM_DBG_STEP;
649 }
650
651 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
652 if (RT_FAILURE(rc))
653 return rc;
654
655 /*
656 * Send the event and process the reply communication.
657 */
658 return dbgfR3SendEventWait(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID);
659}
660
661
662/**
663 * Send a debugger event which takes the full source file location.
664 *
665 * @returns VBox status code.
666 * @param pVM The cross context VM structure.
667 * @param enmEvent The event to send.
668 * @param pszFile Source file.
669 * @param uLine Line number in source file.
670 * @param pszFunction Function name.
671 * @param pszFormat Message which accompanies the event.
672 * @param ... Message arguments.
673 * @internal
674 */
675VMMR3DECL(int) DBGFR3EventSrc(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszFile, unsigned uLine, const char *pszFunction, const char *pszFormat, ...)
676{
677 va_list args;
678 va_start(args, pszFormat);
679 int rc = DBGFR3EventSrcV(pVM, enmEvent, pszFile, uLine, pszFunction, pszFormat, args);
680 va_end(args);
681 return rc;
682}
683
684
685/**
686 * Send a debugger event which takes the full source file location.
687 *
688 * @returns VBox status code.
689 * @param pVM The cross context VM structure.
690 * @param enmEvent The event to send.
691 * @param pszFile Source file.
692 * @param uLine Line number in source file.
693 * @param pszFunction Function name.
694 * @param pszFormat Message which accompanies the event.
695 * @param args Message arguments.
696 * @internal
697 */
698VMMR3DECL(int) DBGFR3EventSrcV(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszFile, unsigned uLine, const char *pszFunction, const char *pszFormat, va_list args)
699{
700 PVMCPU pVCpu = VMMGetCpu(pVM);
701 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
702
703 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
704 if (RT_FAILURE(rc))
705 return rc;
706
707 /*
708 * Format the message.
709 */
710 char *pszMessage = NULL;
711 char szMessage[8192];
712 if (pszFormat && *pszFormat)
713 {
714 pszMessage = &szMessage[0];
715 RTStrPrintfV(szMessage, sizeof(szMessage), pszFormat, args);
716 }
717
718 /*
719 * Send the event and process the reply communication.
720 */
721 DBGFEVENT DbgEvent; /** @todo split up DBGFEVENT so we can skip the dead wait on the stack? */
722 DbgEvent.u.Src.pszFile = pszFile;
723 DbgEvent.u.Src.uLine = uLine;
724 DbgEvent.u.Src.pszFunction = pszFunction;
725 DbgEvent.u.Src.pszMessage = pszMessage;
726 return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID, &DbgEvent.u, sizeof(DbgEvent.u.Src));
727}
728
729
730/**
731 * Send a debugger event which takes the two assertion messages.
732 *
733 * @returns VBox status code.
734 * @param pVM The cross context VM structure.
735 * @param enmEvent The event to send.
736 * @param pszMsg1 First assertion message.
737 * @param pszMsg2 Second assertion message.
738 */
739VMMR3_INT_DECL(int) DBGFR3EventAssertion(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszMsg1, const char *pszMsg2)
740{
741 PVMCPU pVCpu = VMMGetCpu(pVM);
742 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
743
744 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
745 if (RT_FAILURE(rc))
746 return rc;
747
748 /*
749 * Send the event and process the reply communication.
750 */
751 DBGFEVENT DbgEvent;
752 DbgEvent.u.Assert.pszMsg1 = pszMsg1;
753 DbgEvent.u.Assert.pszMsg2 = pszMsg2;
754 return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID, &DbgEvent.u, sizeof(DbgEvent.u.Assert));
755}
756
757
758/**
759 * Breakpoint was hit somewhere.
760 * Figure out which breakpoint it is and notify the debugger.
761 *
762 * @returns VBox status code.
763 * @param pVM The cross context VM structure.
764 * @param enmEvent DBGFEVENT_BREAKPOINT_HYPER or DBGFEVENT_BREAKPOINT.
765 */
766VMMR3_INT_DECL(int) DBGFR3EventBreakpoint(PVM pVM, DBGFEVENTTYPE enmEvent)
767{
768 PVMCPU pVCpu = VMMGetCpu(pVM);
769 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
770
771 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
772 if (RT_FAILURE(rc))
773 return rc;
774
775 /*
776 * Send the event and process the reply communication.
777 */
778 DBGFEVENT DbgEvent;
779 DbgEvent.u.Bp.hBp = pVCpu->dbgf.s.hBpActive;
780 pVCpu->dbgf.s.hBpActive = NIL_DBGFBP;
781 if (DbgEvent.u.Bp.hBp != NIL_DBGFBP)
782 {
783 DbgEvent.enmCtx = DBGFEVENTCTX_RAW;
784 return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_RAW, &DbgEvent.u, sizeof(DbgEvent.u.Bp));
785 }
786
787 return VERR_DBGF_IPE_1;
788}
789
790
791/**
792 * Returns whether the given vCPU is waiting for the debugger.
793 *
794 * @returns Flag indicating whether the vCPU is currently waiting for the debugger.
795 * @param pUVCpu The user mode vCPU structure.
796 */
797DECLINLINE(bool) dbgfR3CpuIsHalted(PUVMCPU pUVCpu)
798{
799 return ASMAtomicReadBool(&pUVCpu->dbgf.s.fStopped);
800}
801
802
803/**
804 * Checks whether the given vCPU is waiting in the debugger.
805 *
806 * @returns Flag whether the indicated vCPU is halted; when VMCPUID_ALL
807 * is given, true is returned when at least one vCPU is halted.
808 * @param pUVM The user mode VM structure.
809 * @param idCpu The CPU ID to check, VMCPUID_ALL to check all vCPUs.
810 */
811DECLINLINE(bool) dbgfR3CpuAreAnyHaltedByCpuId(PUVM pUVM, VMCPUID idCpu)
812{
813 AssertReturn(idCpu < pUVM->cCpus || idCpu == VMCPUID_ALL, false);
814
815 /* Check that either the given vCPU or all are actually halted. */
816 if (idCpu != VMCPUID_ALL)
817 return dbgfR3CpuIsHalted(&pUVM->aCpus[idCpu]);
818
819 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
820 if (dbgfR3CpuIsHalted(&pUVM->aCpus[i]))
821 return true;
822 return false;
823}
824
825
826/**
827 * Gets the pending debug command for this EMT/CPU, replacing it with
828 * DBGFCMD_NO_COMMAND.
829 *
830 * @returns Pending command.
831 * @param pUVCpu The user mode virtual CPU structure.
832 * @thread EMT(pUVCpu)
833 */
834DECLINLINE(DBGFCMD) dbgfR3CpuGetCmd(PUVMCPU pUVCpu)
835{
836 DBGFCMD enmCmd = (DBGFCMD)ASMAtomicXchgU32((uint32_t volatile *)(void *)&pUVCpu->dbgf.s.enmDbgfCmd, DBGFCMD_NO_COMMAND);
837 Log2(("DBGF: Getting command: %d\n", enmCmd));
838 return enmCmd;
839}
840
841
842/**
843 * Send a debug command to a CPU, making sure to notify it.
844 *
845 * @returns VBox status code.
846 * @param pUVCpu The user mode virtual CPU structure.
847 * @param enmCmd The command to submit to the CPU.
848 */
849DECLINLINE(int) dbgfR3CpuSetCmdAndNotify(PUVMCPU pUVCpu, DBGFCMD enmCmd)
850{
851 Log2(("DBGF: Setting command to %d\n", enmCmd));
852 Assert(enmCmd != DBGFCMD_NO_COMMAND);
853 AssertMsg(pUVCpu->dbgf.s.enmDbgfCmd == DBGFCMD_NO_COMMAND, ("enmCmd=%d enmDbgfCmd=%d\n", enmCmd, pUVCpu->dbgf.s.enmDbgfCmd));
854
855 ASMAtomicWriteU32((uint32_t volatile *)(void *)&pUVCpu->dbgf.s.enmDbgfCmd, enmCmd);
856 VMCPU_FF_SET(pUVCpu->pVCpu, VMCPU_FF_DBGF);
857
858 VMR3NotifyCpuFFU(pUVCpu, 0 /*fFlags*/);
859 return VINF_SUCCESS;
860}
861
862
863/**
864 * Waits for the debugger to respond.
865 *
866 * @returns VBox status code. (clarify)
867 * @param pVCpu The cross context vCPU structure.
868 */
869static int dbgfR3CpuWait(PVMCPU pVCpu)
870{
871 PVM pVM = pVCpu->CTX_SUFF(pVM);
872 PUVMCPU pUVCpu = pVCpu->pUVCpu;
873
874 LogFlow(("dbgfR3CpuWait:\n"));
875 int rcRet = VINF_SUCCESS;
876
877 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, true);
878
879 /*
880 * Waits for the debugger to reply (i.e. issue a command).
881 */
882 for (;;)
883 {
884 /*
885 * Wait.
886 */
887 for (;;)
888 {
889 /*
890 * Process forced flags before we go sleep.
891 */
892 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_DBGF | VMCPU_FF_REQUEST)
893 || VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VMCPU_FF_REQUEST | VM_FF_CHECK_VM_STATE))
894 {
895 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF))
896 break;
897
898 int rc;
899 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
900 rc = VMMR3EmtRendezvousFF(pVM, pVCpu);
901 else if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
902 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
903 {
904 LogFlow(("dbgfR3CpuWait: Processes requests...\n"));
905 rc = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
906 if (rc == VINF_SUCCESS)
907 rc = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
908 LogFlow(("dbgfR3CpuWait: VMR3ReqProcess -> %Rrc rcRet=%Rrc\n", rc, rcRet));
909 }
910 else if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
911 {
912 VMSTATE enmState = VMR3GetState(pVM);
913 switch (enmState)
914 {
915 case VMSTATE_FATAL_ERROR:
916 case VMSTATE_FATAL_ERROR_LS:
917 case VMSTATE_GURU_MEDITATION:
918 case VMSTATE_GURU_MEDITATION_LS:
919 rc = VINF_EM_SUSPEND;
920 break;
921 case VMSTATE_DESTROYING:
922 rc = VINF_EM_TERMINATE;
923 break;
924 default:
925 rc = VERR_DBGF_IPE_1;
926 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
927 }
928 }
929 else
930 rc = VINF_SUCCESS;
931 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
932 {
933 switch (rc)
934 {
935 case VINF_EM_DBG_BREAKPOINT:
936 case VINF_EM_DBG_STEPPED:
937 case VINF_EM_DBG_STEP:
938 case VINF_EM_DBG_STOP:
939 case VINF_EM_DBG_EVENT:
940 AssertMsgFailed(("rc=%Rrc\n", rc));
941 break;
942
943 /* return straight away */
944 case VINF_EM_TERMINATE:
945 case VINF_EM_OFF:
946 LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rc));
947 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
948 return rc;
949
950 /* remember return code. */
951 default:
952 AssertReleaseMsgFailed(("rc=%Rrc is not in the switch!\n", rc));
953 RT_FALL_THRU();
954 case VINF_EM_RESET:
955 case VINF_EM_SUSPEND:
956 case VINF_EM_HALT:
957 case VINF_EM_RESUME:
958 case VINF_EM_RESCHEDULE:
959 case VINF_EM_RESCHEDULE_REM:
960 case VINF_EM_RESCHEDULE_RAW:
961 if (rc < rcRet || rcRet == VINF_SUCCESS)
962 rcRet = rc;
963 break;
964 }
965 }
966 else if (RT_FAILURE(rc))
967 {
968 LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rc));
969 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
970 return rc;
971 }
972 }
973 else if (pVM->dbgf.s.fAttached)
974 {
975 int rc = VMR3WaitU(pUVCpu);
976 if (RT_FAILURE(rc))
977 {
978 LogFlow(("dbgfR3CpuWait: returns %Rrc (VMR3WaitU)\n", rc));
979 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
980 return rc;
981 }
982 }
983 else
984 {
985 LogFlow(("dbgfR3CpuWait: Debugger detached, continuing normal execution (%Rrc)\n", rcRet));
986 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
987 return rcRet;
988 }
989 }
990
991 /*
992 * Process the command.
993 */
994 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_DBGF);
995 bool fResumeExecution;
996 DBGFCMDDATA CmdData = pUVCpu->dbgf.s.DbgfCmdData;
997 DBGFCMD enmCmd = dbgfR3CpuGetCmd(pUVCpu);
998 int rc = dbgfR3CpuCmd(pVCpu, enmCmd, &CmdData, &fResumeExecution);
999 if (fResumeExecution)
1000 {
1001 if (RT_FAILURE(rc))
1002 rcRet = rc;
1003 else if ( rc >= VINF_EM_FIRST
1004 && rc <= VINF_EM_LAST
1005 && (rc < rcRet || rcRet == VINF_SUCCESS))
1006 rcRet = rc;
1007 LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rcRet));
1008 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
1009 return rcRet;
1010 }
1011 }
1012}
1013
1014
1015/**
1016 * Executes command from debugger.
1017 *
1018 * The caller is responsible for waiting or resuming execution based on the
1019 * value returned in the *pfResumeExecution indicator.
1020 *
1021 * @returns VBox status code. (clarify!)
1022 * @param pVCpu The cross context vCPU structure.
1023 * @param enmCmd The command in question.
1024 * @param pCmdData Pointer to the command data.
1025 * @param pfResumeExecution Where to store the resume execution / continue waiting indicator.
1026 */
1027static int dbgfR3CpuCmd(PVMCPU pVCpu, DBGFCMD enmCmd, PDBGFCMDDATA pCmdData, bool *pfResumeExecution)
1028{
1029 RT_NOREF(pCmdData); /* for later */
1030
1031 /*
1032 * The cases in this switch return directly if there is no event to send.
1033 */
1034 DBGFEVENTTYPE enmEvent;
1035 DBGFEVENTCTX enmCtx = DBGFEVENTCTX_INVALID;
1036 switch (enmCmd)
1037 {
1038 /*
1039 * Halt is answered by an event saying that we've halted.
1040 */
1041 case DBGFCMD_HALT:
1042 {
1043 *pfResumeExecution = false;
1044 enmEvent = DBGFEVENT_HALT_DONE;
1045 break;
1046 }
1047
1048
1049 /*
1050 * Resume is not answered, we just resume execution.
1051 */
1052 case DBGFCMD_GO:
1053 {
1054 pVCpu->dbgf.s.fSingleSteppingRaw = false;
1055 *pfResumeExecution = true;
1056 return VINF_SUCCESS;
1057 }
1058
1059 /** @todo implement (and define) the rest of the commands. */
1060
1061 /*
1062 * Single step, with trace into.
1063 */
1064 case DBGFCMD_SINGLE_STEP:
1065 {
1066 Log2(("Single step\n"));
1067 PVM pVM = pVCpu->CTX_SUFF(pVM);
1068 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_OVER)
1069 {
1070 if (dbgfStepGetCurInstrType(pVM, pVCpu) == DBGFSTEPINSTRTYPE_CALL)
1071 pVM->dbgf.s.SteppingFilter.uCallDepth++;
1072 }
1073 if (pVM->dbgf.s.SteppingFilter.cMaxSteps > 0)
1074 {
1075 pVCpu->dbgf.s.fSingleSteppingRaw = true;
1076 *pfResumeExecution = true;
1077 return VINF_EM_DBG_STEP;
1078 }
1079 /* Stop after zero steps. Nonsense, but whatever. */
1080 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;
1081 *pfResumeExecution = false;
1082 enmCtx = dbgfR3FigureEventCtx(pVCpu);
1083 enmEvent = enmCtx != DBGFEVENTCTX_HYPER ? DBGFEVENT_STEPPED : DBGFEVENT_STEPPED_HYPER;
1084 break;
1085 }
1086
1087 /*
1088 * Default is to send an invalid command event.
1089 */
1090 default:
1091 {
1092 *pfResumeExecution = false;
1093 enmEvent = DBGFEVENT_INVALID_COMMAND;
1094 break;
1095 }
1096 }
1097
1098 /*
1099 * Send the pending event.
1100 */
1101 Log2(("DBGF: Emulation thread: sending event %d\n", enmEvent));
1102 int rc = dbgfR3SendEventNoWait(pVCpu->CTX_SUFF(pVM), pVCpu, enmEvent, enmCtx);
1103 AssertRCStmt(rc, *pfResumeExecution = true);
1104 return rc;
1105}
1106
1107
1108/**
1109 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
1110 * EMT rendezvous worker for DBGFR3Attach - only called on one EMT.}
1111 */
1112static DECLCALLBACK(VBOXSTRICTRC) dbgfR3Attach(PVM pVM, PVMCPU pVCpu, void *pvUser)
1113{
1114 PUVM pUVM = pVM->pUVM;
1115 int *prcAttach = (int *)pvUser;
1116 RT_NOREF(pVCpu);
1117
1118 if (pVM->dbgf.s.fAttached)
1119 {
1120 Log(("dbgfR3Attach: Debugger already attached\n"));
1121 *prcAttach = VERR_DBGF_ALREADY_ATTACHED;
1122 return VINF_SUCCESS;
1123 }
1124
1125 /*
1126 * The per-CPU bits.
1127 */
1128 for (uint32_t i = 0; i < pUVM->cCpus; i++)
1129 {
1130 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1131
1132 pUVCpu->dbgf.s.enmDbgfCmd = DBGFCMD_NO_COMMAND;
1133 RT_ZERO(pUVCpu->dbgf.s.DbgfCmdData);
1134 }
1135
1136 /*
1137 * Init of the VM -> Debugger communication part living in the global VM structure.
1138 */
1139 pUVM->dbgf.s.cDbgEvtMax = pVM->cCpus * 5 + 10; /* Initial size of event ring, increased when being full. */
1140 pUVM->dbgf.s.idxDbgEvtWrite = 0;
1141 pUVM->dbgf.s.idxDbgEvtRead = 0;
1142 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1143 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1144 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1145 int rc;
1146 pUVM->dbgf.s.paDbgEvts = (PDBGFEVENT)MMR3HeapAllocU(pUVM, MM_TAG_DBGF, pUVM->dbgf.s.cDbgEvtMax * sizeof(DBGFEVENT));
1147 if (pUVM->dbgf.s.paDbgEvts)
1148 {
1149 rc = RTSemEventCreate(&pUVM->dbgf.s.hEvtWait);
1150 if (RT_SUCCESS(rc))
1151 {
1152 rc = RTSemFastMutexCreate(&pUVM->dbgf.s.hMtxDbgEvtWr);
1153 if (RT_SUCCESS(rc))
1154 {
1155 rc = RTSemEventMultiCreate(&pUVM->dbgf.s.hEvtRingBufFull);
1156 if (RT_SUCCESS(rc))
1157 {
1158 /*
1159 * At last, set the attached flag.
1160 */
1161 ASMAtomicWriteBool(&pVM->dbgf.s.fAttached, true);
1162 *prcAttach = VINF_SUCCESS;
1163 return VINF_SUCCESS;
1164 }
1165
1166 RTSemFastMutexDestroy(pUVM->dbgf.s.hMtxDbgEvtWr);
1167 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1168 }
1169 RTSemEventDestroy(pUVM->dbgf.s.hEvtWait);
1170 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1171 }
1172 }
1173 else
1174 rc = VERR_NO_MEMORY;
1175
1176 *prcAttach = rc;
1177 return VINF_SUCCESS;
1178}
1179
1180
1181/**
1182 * Attaches a debugger to the specified VM.
1183 *
1184 * Only one debugger at a time.
1185 *
1186 * @returns VBox status code.
1187 * @param pUVM The user mode VM handle.
1188 */
1189VMMR3DECL(int) DBGFR3Attach(PUVM pUVM)
1190{
1191 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1192 PVM pVM = pUVM->pVM;
1193 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1194
1195 /*
1196 * Call the VM, use EMT rendezvous for serialization.
1197 */
1198 int rcAttach = VERR_IPE_UNINITIALIZED_STATUS;
1199 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE | VMMEMTRENDEZVOUS_FLAGS_PRIORITY, dbgfR3Attach, &rcAttach);
1200 if (RT_SUCCESS(rc))
1201 rc = rcAttach;
1202
1203 return rc;
1204}
1205
1206
1207/**
1208 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
1209 * EMT rendezvous worker for DBGFR3Detach - called on all EMTs (why?).}
1210 */
1211static DECLCALLBACK(VBOXSTRICTRC) dbgfR3Detach(PVM pVM, PVMCPU pVCpu, void *pvUser)
1212{
1213 if (pVCpu->idCpu == 0)
1214 {
1215 PUVM pUVM = (PUVM)pvUser;
1216
1217 /*
1218 * Per-CPU cleanup.
1219 */
1220 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1221 {
1222 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1223
1224 pUVCpu->dbgf.s.enmDbgfCmd = DBGFCMD_NO_COMMAND;
1225 RT_ZERO(pUVCpu->dbgf.s.DbgfCmdData);
1226 }
1227
1228 /*
1229 * De-init of the VM -> Debugger communication part living in the global VM structure.
1230 */
1231 if (pUVM->dbgf.s.paDbgEvts)
1232 {
1233 MMR3HeapFree(pUVM->dbgf.s.paDbgEvts);
1234 pUVM->dbgf.s.paDbgEvts = NULL;
1235 }
1236
1237 if (pUVM->dbgf.s.hEvtWait != NIL_RTSEMEVENT)
1238 {
1239 RTSemEventDestroy(pUVM->dbgf.s.hEvtWait);
1240 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1241 }
1242
1243 if (pUVM->dbgf.s.hMtxDbgEvtWr != NIL_RTSEMFASTMUTEX)
1244 {
1245 RTSemFastMutexDestroy(pUVM->dbgf.s.hMtxDbgEvtWr);
1246 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1247 }
1248
1249 if (pUVM->dbgf.s.hEvtRingBufFull != NIL_RTSEMEVENTMULTI)
1250 {
1251 RTSemEventMultiDestroy(pUVM->dbgf.s.hEvtRingBufFull);
1252 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1253 }
1254
1255 pUVM->dbgf.s.cDbgEvtMax = 0;
1256 pUVM->dbgf.s.idxDbgEvtWrite = 0;
1257 pUVM->dbgf.s.idxDbgEvtRead = 0;
1258 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1259 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1260 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1261
1262 ASMAtomicWriteBool(&pVM->dbgf.s.fAttached, false);
1263 }
1264
1265 return VINF_SUCCESS;
1266}
1267
1268
1269/**
1270 * Detaches a debugger from the specified VM.
1271 *
1272 * Caller must be attached to the VM.
1273 *
1274 * @returns VBox status code.
1275 * @param pUVM The user mode VM handle.
1276 */
1277VMMR3DECL(int) DBGFR3Detach(PUVM pUVM)
1278{
1279 LogFlow(("DBGFR3Detach:\n"));
1280
1281 /*
1282 * Validate input. The UVM handle shall be valid, the VM handle might be
1283 * in the process of being destroyed already, so deal quietly with that.
1284 */
1285 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1286 PVM pVM = pUVM->pVM;
1287 if (!VM_IS_VALID_EXT(pVM))
1288 return VERR_INVALID_VM_HANDLE;
1289
1290 /*
1291 * Check if attached.
1292 */
1293 if (!pVM->dbgf.s.fAttached)
1294 return VERR_DBGF_NOT_ATTACHED;
1295
1296 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE | VMMEMTRENDEZVOUS_FLAGS_PRIORITY, dbgfR3Detach, pUVM);
1297}
1298
1299
1300/**
1301 * Wait for a debug event.
1302 *
1303 * @returns VBox status code. Will not return VBOX_INTERRUPTED.
1304 * @param pUVM The user mode VM handle.
1305 * @param cMillies Number of millis to wait.
1306 * @param pEvent Where to store the event data.
1307 */
1308VMMR3DECL(int) DBGFR3EventWait(PUVM pUVM, RTMSINTERVAL cMillies, PDBGFEVENT pEvent)
1309{
1310 /*
1311 * Check state.
1312 */
1313 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1314 PVM pVM = pUVM->pVM;
1315 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1316 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1317
1318 RT_BZERO(pEvent, sizeof(*pEvent));
1319
1320 /*
1321 * Wait for an event to arrive if there are none.
1322 */
1323 int rc = VINF_SUCCESS;
1324 uint32_t idxDbgEvtRead = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtRead);
1325 if (idxDbgEvtRead == ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite))
1326 {
1327 do
1328 {
1329 rc = RTSemEventWait(pUVM->dbgf.s.hEvtWait, cMillies);
1330 } while ( RT_SUCCESS(rc)
1331 && idxDbgEvtRead == ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite));
1332 }
1333
1334 if (RT_SUCCESS(rc))
1335 {
1336 Assert(idxDbgEvtRead != ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite));
1337
1338 uint32_t const cDbgEvtMax = RT_MAX(1, pUVM->dbgf.s.cDbgEvtMax);
1339 memcpy(pEvent, &pUVM->dbgf.s.paDbgEvts[idxDbgEvtRead % cDbgEvtMax], sizeof(*pEvent));
1340 ASMAtomicWriteU32(&pUVM->dbgf.s.idxDbgEvtRead, (idxDbgEvtRead + 1) % cDbgEvtMax);
1341 }
1342
1343 Log2(("DBGFR3EventWait: rc=%Rrc (event type %d)\n", rc, pEvent->enmType));
1344 return rc;
1345}
1346
1347
1348/**
1349 * Halts VM execution.
1350 *
1351 * After calling this the VM isn't actually halted until a DBGFEVENT_HALT_DONE
1352 * arrives. Until that time it's not possible to issue any new commands.
1353 *
1354 * @returns VBox status code.
1355 * @retval VWRN_DBGF_ALREADY_HALTED if @a idCpu is VMCPUID_ALL and all vCPUs
1356 * are halted.
1357 * @param pUVM The user mode VM handle.
1358 * @param idCpu The vCPU to halt, VMCPUID_ALL halts all still running vCPUs.
1359 */
1360VMMR3DECL(int) DBGFR3Halt(PUVM pUVM, VMCPUID idCpu)
1361{
1362 /*
1363 * Check state.
1364 */
1365 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1366 PVM pVM = pUVM->pVM;
1367 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1368 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1369 AssertReturn(idCpu == VMCPUID_ALL || idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
1370
1371 /*
1372 * Halt the requested CPUs as needed.
1373 */
1374 int rc;
1375 if (idCpu != VMCPUID_ALL)
1376 {
1377 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
1378 if (!dbgfR3CpuIsHalted(pUVCpu))
1379 {
1380 dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_HALT);
1381 rc = VINF_SUCCESS;
1382 }
1383 else
1384 rc = VWRN_DBGF_ALREADY_HALTED;
1385 }
1386 else
1387 {
1388 rc = VWRN_DBGF_ALREADY_HALTED;
1389 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1390 {
1391 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1392 if (!dbgfR3CpuIsHalted(pUVCpu))
1393 {
1394 dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_HALT);
1395 rc = VINF_SUCCESS;
1396 }
1397 }
1398 }
1399
1400 return rc;
1401}
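
/*
 * Illustrative sketch: pairing DBGFR3Halt() with the DBGFEVENT_HALT_DONE
 * confirmation mentioned above for a single vCPU.  The 30 second timeout is an
 * arbitrary choice, and a real front-end would handle the other event types it
 * may receive instead of simply skipping them.
 *
 * @code
 *      int rc = DBGFR3Halt(pUVM, 0);                       // request a halt of vCPU 0
 *      if (rc == VINF_SUCCESS)                             // VWRN_DBGF_ALREADY_HALTED: nothing to wait for
 *      {
 *          DBGFEVENT Event;
 *          do
 *              rc = DBGFR3EventWait(pUVM, 30000, &Event);  // 30 second timeout
 *          while (RT_SUCCESS(rc) && Event.enmType != DBGFEVENT_HALT_DONE);
 *      }
 * @endcode
 */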
1402
1403
1404/**
1405 * Checks if any of the specified vCPUs have been halted by the debugger.
1406 *
1407 * @returns True if at least one of the specified vCPUs is halted.
1408 * @returns False if none of the specified vCPUs are halted.
1409 * @param pUVM The user mode VM handle.
1410 * @param idCpu The CPU id to check for, VMCPUID_ALL will return true if
1411 * at least a single vCPU is halted in the debugger.
1412 */
1413VMMR3DECL(bool) DBGFR3IsHalted(PUVM pUVM, VMCPUID idCpu)
1414{
1415 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
1416 PVM pVM = pUVM->pVM;
1417 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
1418 AssertReturn(pVM->dbgf.s.fAttached, false);
1419
1420 return dbgfR3CpuAreAnyHaltedByCpuId(pUVM, idCpu);
1421}
1422
1423
1424/**
1425 * Checks if the debugger can wait for events or not.
1426 *
1427 * This function is only used by lazy, multiplexing debuggers. :-)
1428 *
1429 * @returns VBox status code.
1430 * @retval VINF_SUCCESS if waitable.
1431 * @retval VERR_SEM_OUT_OF_TURN if not waitable.
1432 * @retval VERR_INVALID_VM_HANDLE if the VM is being (/ has been) destroyed
1433 * (not asserted) or if the handle is invalid (asserted).
1434 * @retval VERR_DBGF_NOT_ATTACHED if not attached.
1435 *
1436 * @param pUVM The user mode VM handle.
1437 */
1438VMMR3DECL(int) DBGFR3QueryWaitable(PUVM pUVM)
1439{
1440 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1441
1442 /* Note! There is a slight race here, unfortunately. */
1443 PVM pVM = pUVM->pVM;
1444 if (!RT_VALID_PTR(pVM))
1445 return VERR_INVALID_VM_HANDLE;
1446 if (pVM->enmVMState >= VMSTATE_DESTROYING)
1447 return VERR_INVALID_VM_HANDLE;
1448 if (!pVM->dbgf.s.fAttached)
1449 return VERR_DBGF_NOT_ATTACHED;
1450
1451 /** @todo was: if (!RTSemPongShouldWait(...)) return VERR_SEM_OUT_OF_TURN; */
1452 return VINF_SUCCESS;
1453}
1454
1455
1456/**
1457 * Resumes VM execution.
1458 *
1459 * There is no receipt event on this command.
1460 *
1461 * @returns VBox status code.
1462 * @retval VWRN_DBGF_ALREADY_RUNNING if the specified vCPUs are all running.
1463 * @param pUVM The user mode VM handle.
1464 * @param idCpu The vCPU to resume, VMCPUID_ALL resumes all still halted vCPUs.
1465 */
1466VMMR3DECL(int) DBGFR3Resume(PUVM pUVM, VMCPUID idCpu)
1467{
1468 /*
1469 * Validate input and attachment state.
1470 */
1471 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1472 PVM pVM = pUVM->pVM;
1473 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1474 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1475
1476 /*
1477 * Ping the halted emulation threads, telling them to run.
1478 */
1479 int rc = VWRN_DBGF_ALREADY_RUNNING;
1480 if (idCpu != VMCPUID_ALL)
1481 {
1482 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
1483 if (dbgfR3CpuIsHalted(pUVCpu))
1484 {
1485 rc = dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_GO);
1486 AssertRC(rc);
1487 }
1488 }
1489 else
1490 {
1491 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1492 {
1493 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1494 if (dbgfR3CpuIsHalted(pUVCpu))
1495 {
1496 int rc2 = dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_GO);
1497 AssertRC(rc2);
1498 if (rc == VWRN_DBGF_ALREADY_RUNNING || RT_FAILURE(rc2))
1499 rc = rc2;
1500 }
1501 }
1502 }
1503
1504 return rc;
1505}
1506
1507
1508/**
1509 * Classifies the current instruction.
1510 *
1511 * @returns Type of instruction.
1512 * @param pVM The cross context VM structure.
1513 * @param pVCpu The current CPU.
1514 * @thread EMT(pVCpu)
1515 */
1516static DBGFSTEPINSTRTYPE dbgfStepGetCurInstrType(PVM pVM, PVMCPU pVCpu)
1517{
1518 /*
1519 * Read the instruction.
1520 */
1521 size_t cbRead = 0;
1522 uint8_t abOpcode[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1523 int rc = PGMR3DbgReadGCPtr(pVM, abOpcode, CPUMGetGuestFlatPC(pVCpu), sizeof(abOpcode) - 1, 0 /*fFlags*/, &cbRead);
1524 if (RT_SUCCESS(rc))
1525 {
1526 /*
1527 * Do minimal parsing. No real need to involve the disassembler here.
1528 */
1529 uint8_t *pb = abOpcode;
1530 for (;;)
1531 {
1532 switch (*pb++)
1533 {
1534 default:
1535 return DBGFSTEPINSTRTYPE_OTHER;
1536
1537 case 0xe8: /* call rel16/32 */
1538 case 0x9a: /* call farptr */
1539 case 0xcc: /* int3 */
1540 case 0xcd: /* int xx */
1541 // case 0xce: /* into */
1542 return DBGFSTEPINSTRTYPE_CALL;
1543
1544 case 0xc2: /* ret xx */
1545 case 0xc3: /* ret */
1546 case 0xca: /* retf xx */
1547 case 0xcb: /* retf */
1548 case 0xcf: /* iret */
1549 return DBGFSTEPINSTRTYPE_RET;
1550
1551 case 0xff:
1552 if ( ((*pb >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) == 2 /* call indir */
1553 || ((*pb >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) == 3) /* call indir-farptr */
1554 return DBGFSTEPINSTRTYPE_CALL;
1555 return DBGFSTEPINSTRTYPE_OTHER;
1556
1557 case 0x0f:
1558 switch (*pb++)
1559 {
1560 case 0x05: /* syscall */
1561 case 0x34: /* sysenter */
1562 return DBGFSTEPINSTRTYPE_CALL;
1563 case 0x07: /* sysret */
1564 case 0x35: /* sysexit */
1565 return DBGFSTEPINSTRTYPE_RET;
1566 }
1567 break;
1568
1569 /* Must handle some REX prefixes. So we do all normal prefixes. */
1570 case 0x40: case 0x41: case 0x42: case 0x43: case 0x44: case 0x45: case 0x46: case 0x47:
1571 case 0x48: case 0x49: case 0x4a: case 0x4b: case 0x4c: case 0x4d: case 0x4e: case 0x4f:
1572 if (!CPUMIsGuestIn64BitCode(pVCpu))
1573 return DBGFSTEPINSTRTYPE_OTHER;
1574 break;
1575
1576 case 0x2e: /* CS */
1577 case 0x36: /* SS */
1578 case 0x3e: /* DS */
1579 case 0x26: /* ES */
1580 case 0x64: /* FS */
1581 case 0x65: /* GS */
1582 case 0x66: /* op size */
1583 case 0x67: /* addr size */
1584 case 0xf0: /* lock */
1585 case 0xf2: /* REPNZ */
1586 case 0xf3: /* REPZ */
1587 break;
1588 }
1589 }
1590 }
1591
1592 return DBGFSTEPINSTRTYPE_INVALID;
1593}
1594
1595
1596/**
1597 * Checks if the stepping has reached a stop point.
1598 *
1599 * Called when raising a stepped event.
1600 *
1601 * @returns true if the event should be raised, false if we should take one more
1602 * step first.
1603 * @param pVM The cross context VM structure.
1604 * @param pVCpu The cross context per CPU structure of the calling EMT.
1605 * @thread EMT(pVCpu)
1606 */
1607static bool dbgfStepAreWeThereYet(PVM pVM, PVMCPU pVCpu)
1608{
1609 /*
1610 * Check valid pVCpu and that it matches the CPU one stepping.
1611 */
1612 if (pVCpu)
1613 {
1614 if (pVCpu->idCpu == pVM->dbgf.s.SteppingFilter.idCpu)
1615 {
1616 /*
1617 * Increase the number of steps and see if we've reached the max.
1618 */
1619 pVM->dbgf.s.SteppingFilter.cSteps++;
1620 if (pVM->dbgf.s.SteppingFilter.cSteps < pVM->dbgf.s.SteppingFilter.cMaxSteps)
1621 {
1622 /*
1623 * Check PC and SP address filtering.
1624 */
1625 if (pVM->dbgf.s.SteppingFilter.fFlags & (DBGF_STEP_F_STOP_ON_ADDRESS | DBGF_STEP_F_STOP_ON_STACK_POP))
1626 {
1627 if ( (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
1628 && pVM->dbgf.s.SteppingFilter.AddrPc == CPUMGetGuestFlatPC(pVCpu))
1629 return true;
1630 if ( (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
1631 && CPUMGetGuestFlatSP(pVCpu) - pVM->dbgf.s.SteppingFilter.AddrStackPop
1632 < pVM->dbgf.s.SteppingFilter.cbStackPop)
1633 return true;
1634 }
1635
1636 /*
1637 * Do step-over filtering separate from the step-into one.
1638 */
1639 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_OVER)
1640 {
1641 DBGFSTEPINSTRTYPE enmType = dbgfStepGetCurInstrType(pVM, pVCpu);
1642 switch (enmType)
1643 {
1644 default:
1645 if ( pVM->dbgf.s.SteppingFilter.uCallDepth != 0
1646 || (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_FILTER_MASK))
1647 break;
1648 return true;
1649 case DBGFSTEPINSTRTYPE_CALL:
1650 if ( (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_CALL)
1651 && pVM->dbgf.s.SteppingFilter.uCallDepth == 0)
1652 return true;
1653 pVM->dbgf.s.SteppingFilter.uCallDepth++;
1654 break;
1655 case DBGFSTEPINSTRTYPE_RET:
1656 if (pVM->dbgf.s.SteppingFilter.uCallDepth == 0)
1657 {
1658 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_RET)
1659 return true;
1660 /* If stopping after the return, we use the cMaxSteps limit to stop the next time. */
1661 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_AFTER_RET)
1662 pVM->dbgf.s.SteppingFilter.cMaxSteps = pVM->dbgf.s.SteppingFilter.cSteps + 1;
1663 }
1664 else if (pVM->dbgf.s.SteppingFilter.uCallDepth > 0)
1665 pVM->dbgf.s.SteppingFilter.uCallDepth--;
1666 break;
1667 }
1668 return false;
1669 }
1670 /*
1671 * Filtered step-into.
1672 */
1673 else if ( pVM->dbgf.s.SteppingFilter.fFlags
1674 & (DBGF_STEP_F_STOP_ON_CALL | DBGF_STEP_F_STOP_ON_RET | DBGF_STEP_F_STOP_AFTER_RET))
1675 {
1676 DBGFSTEPINSTRTYPE enmType = dbgfStepGetCurInstrType(pVM, pVCpu);
1677 switch (enmType)
1678 {
1679 default:
1680 break;
1681 case DBGFSTEPINSTRTYPE_CALL:
1682 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_CALL)
1683 return true;
1684 break;
1685 case DBGFSTEPINSTRTYPE_RET:
1686 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_RET)
1687 return true;
1688 /* If stopping after the return, we use the cMaxSteps limit to stop the next time. */
1689 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_AFTER_RET)
1690 pVM->dbgf.s.SteppingFilter.cMaxSteps = pVM->dbgf.s.SteppingFilter.cSteps + 1;
1691 break;
1692 }
1693 return false;
1694 }
1695 }
1696 }
1697 }
1698
1699 return true;
1700}
1701
1702
1703/**
1704 * Step Into.
1705 *
1706 * A single step event is generated from this command.
1707 * The current implementation is not reliable, so don't rely on the event coming.
1708 *
1709 * @returns VBox status code.
1710 * @param pUVM The user mode VM handle.
1711 * @param idCpu The ID of the CPU to single step on.
1712 */
1713VMMR3DECL(int) DBGFR3Step(PUVM pUVM, VMCPUID idCpu)
1714{
1715 return DBGFR3StepEx(pUVM, idCpu, DBGF_STEP_F_INTO, NULL, NULL, 0, 1);
1716}
1717
1718
1719/**
1720 * Full-fledged step.
1721 *
1722 * This extended stepping API allows for doing multiple steps before raising an
1723 * event, helping implementing step over, step out and other more advanced
1724 * features.
1725 *
1726 * Like the DBGFR3Step() API, this will normally generate a DBGFEVENT_STEPPED or
1727 * DBGFEVENT_STEPPED_EVENT. However the stepping may be interrupted by other
1728 * events, which will abort the stepping.
1729 *
1730 * The stop on pop area feature is for safeguarding step out.
1731 *
1732 * Please note, though, that it will always use stepping and never breakpoints.
1733 * While this allows for a much greater flexibility it can at times be rather
1734 * slow.
1735 *
1736 * @returns VBox status code.
1737 * @param pUVM The user mode VM handle.
1738 * @param idCpu The ID of the CPU to single step on.
1739 * @param fFlags Flags controlling the stepping, DBGF_STEP_F_XXX.
1740 * Either DBGF_STEP_F_INTO or DBGF_STEP_F_OVER must
1741 * always be specified.
1742 * @param pStopPcAddr Address to stop executing at. Completely ignored
1743 * unless DBGF_STEP_F_STOP_ON_ADDRESS is specified.
1744 * @param pStopPopAddr Stack address that SP must be lower than when
1745 * performing DBGF_STEP_F_STOP_ON_STACK_POP filtering.
1746 * @param cbStopPop The range starting at @a pStopPopAddr which is
1747 * considered to be within the same thread stack. Note
1748 * that the API allows @a pStopPopAddr and @a cbStopPop
1749 * to form an area that wraps around and it will
1750 * consider the part starting at 0 as included.
1751 * @param cMaxSteps The maximum number of steps to take. This is to
1752 * prevent stepping for ever, so passing UINT32_MAX is
1753 * not recommended.
1754 *
1755 * @remarks The two address arguments must be guest context virtual addresses,
1756 * or HMA. The code doesn't make much of a point out of HMA, though.
1757 */
1758VMMR3DECL(int) DBGFR3StepEx(PUVM pUVM, VMCPUID idCpu, uint32_t fFlags, PCDBGFADDRESS pStopPcAddr,
1759 PCDBGFADDRESS pStopPopAddr, RTGCUINTPTR cbStopPop, uint32_t cMaxSteps)
1760{
1761 /*
1762 * Check state.
1763 */
1764 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1765 PVM pVM = pUVM->pVM;
1766 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1767 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_PARAMETER);
1768 AssertReturn(!(fFlags & ~DBGF_STEP_F_VALID_MASK), VERR_INVALID_FLAGS);
1769 AssertReturn(RT_BOOL(fFlags & DBGF_STEP_F_INTO) != RT_BOOL(fFlags & DBGF_STEP_F_OVER), VERR_INVALID_FLAGS);
1770 if (fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
1771 {
1772 AssertReturn(RT_VALID_PTR(pStopPcAddr), VERR_INVALID_POINTER);
1773 AssertReturn(DBGFADDRESS_IS_VALID(pStopPcAddr), VERR_INVALID_PARAMETER);
1774 AssertReturn(DBGFADDRESS_IS_VIRT_GC(pStopPcAddr), VERR_INVALID_PARAMETER);
1775 }
1776 AssertReturn(!(fFlags & DBGF_STEP_F_STOP_ON_STACK_POP) || RT_VALID_PTR(pStopPopAddr), VERR_INVALID_POINTER);
1777 if (fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
1778 {
1779 AssertReturn(RT_VALID_PTR(pStopPopAddr), VERR_INVALID_POINTER);
1780 AssertReturn(DBGFADDRESS_IS_VALID(pStopPopAddr), VERR_INVALID_PARAMETER);
1781 AssertReturn(DBGFADDRESS_IS_VIRT_GC(pStopPopAddr), VERR_INVALID_PARAMETER);
1782 AssertReturn(cbStopPop > 0, VERR_INVALID_PARAMETER);
1783 }
1784
1785 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1786 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
1787 if (RT_LIKELY(dbgfR3CpuIsHalted(pUVCpu)))
1788 { /* likely */ }
1789 else
1790 return VERR_SEM_OUT_OF_TURN;
1791 Assert(pVM->dbgf.s.SteppingFilter.idCpu == NIL_VMCPUID);
1792
1793 /*
1794 * Send the emulation thread a single-step command.
1795 */
1796 if (fFlags == DBGF_STEP_F_INTO)
1797 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;
1798 else
1799 pVM->dbgf.s.SteppingFilter.idCpu = idCpu;
1800 pVM->dbgf.s.SteppingFilter.fFlags = fFlags;
1801 if (fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
1802 pVM->dbgf.s.SteppingFilter.AddrPc = pStopPcAddr->FlatPtr;
1803 else
1804 pVM->dbgf.s.SteppingFilter.AddrPc = 0;
1805 if (fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
1806 {
1807 pVM->dbgf.s.SteppingFilter.AddrStackPop = pStopPopAddr->FlatPtr;
1808 pVM->dbgf.s.SteppingFilter.cbStackPop = cbStopPop;
1809 }
1810 else
1811 {
1812 pVM->dbgf.s.SteppingFilter.AddrStackPop = 0;
1813 pVM->dbgf.s.SteppingFilter.cbStackPop = RTGCPTR_MAX;
1814 }
1815
1816 pVM->dbgf.s.SteppingFilter.cMaxSteps = cMaxSteps;
1817 pVM->dbgf.s.SteppingFilter.cSteps = 0;
1818 pVM->dbgf.s.SteppingFilter.uCallDepth = 0;
1819
1820 Assert(dbgfR3CpuIsHalted(pUVCpu));
1821 return dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_SINGLE_STEP);
1822}
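/*
 * A minimal usage sketch: step over instructions on VCPU 0 until a given
 * address is hit.  It assumes a valid pUVM handle is in scope, that the
 * target EMT is already halted in the debugger (otherwise VERR_SEM_OUT_OF_TURN
 * is returned), and that 0x00401000 is a purely illustrative flat guest
 * address built with the DBGFR3AddrFromFlat helper.
 *
 *      DBGFADDRESS StopPc;
 *      DBGFR3AddrFromFlat(pUVM, &StopPc, 0x00401000);
 *      int rc = DBGFR3StepEx(pUVM, 0, DBGF_STEP_F_OVER | DBGF_STEP_F_STOP_ON_ADDRESS,
 *                            &StopPc, NULL, 0, 10000);
 *
 * The call only queues the single-step command on the EMT; completion is
 * reported back through the regular debug event mechanism.
 */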
1823
1824
1825
1826/**
1827 * dbgfR3EventConfigEx argument packet.
1828 */
1829typedef struct DBGFR3EVENTCONFIGEXARGS
1830{
1831 PCDBGFEVENTCONFIG paConfigs;
1832 size_t cConfigs;
1833 int rc;
1834} DBGFR3EVENTCONFIGEXARGS;
1835/** Pointer to a dbgfR3EventConfigEx argument packet. */
1836typedef DBGFR3EVENTCONFIGEXARGS *PDBGFR3EVENTCONFIGEXARGS;
1837
1838
1839/**
1840 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Worker for DBGFR3EventConfigEx.}
1841 */
1842static DECLCALLBACK(VBOXSTRICTRC) dbgfR3EventConfigEx(PVM pVM, PVMCPU pVCpu, void *pvUser)
1843{
1844 if (pVCpu->idCpu == 0)
1845 {
1846 PDBGFR3EVENTCONFIGEXARGS pArgs = (PDBGFR3EVENTCONFIGEXARGS)pvUser;
1847 DBGFEVENTCONFIG volatile const *paConfigs = pArgs->paConfigs;
1848 size_t cConfigs = pArgs->cConfigs;
1849
1850 /*
1851 * Apply the changes.
1852 */
1853 unsigned cChanges = 0;
1854 for (uint32_t i = 0; i < cConfigs; i++)
1855 {
1856 DBGFEVENTTYPE enmType = paConfigs[i].enmType;
1857 AssertReturn(enmType >= DBGFEVENT_FIRST_SELECTABLE && enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
1858 if (paConfigs[i].fEnabled)
1859 cChanges += ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, enmType) == false;
1860 else
1861 cChanges += ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, enmType) == true;
1862 }
1863
1864 /*
1865 * Inform HM about changes.
1866 */
1867 if (cChanges > 0 && HMIsEnabled(pVM))
1868 {
1869 HMR3NotifyDebugEventChanged(pVM);
1870 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
1871 }
1872 }
1873 else if (HMIsEnabled(pVM))
1874 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
1875
1876 return VINF_SUCCESS;
1877}
1878
1879
1880/**
1881 * Configures (enables/disables) multiple selectable debug events.
1882 *
1883 * @returns VBox status code.
1884 * @param pUVM The user mode VM handle.
1885 * @param paConfigs The events to configure and their new state.
1886 * @param cConfigs Number of entries in @a paConfigs.
1887 */
1888VMMR3DECL(int) DBGFR3EventConfigEx(PUVM pUVM, PCDBGFEVENTCONFIG paConfigs, size_t cConfigs)
1889{
1890 /*
1891 * Validate input.
1892 */
1893 size_t i = cConfigs;
1894 while (i-- > 0)
1895 {
1896 AssertReturn(paConfigs[i].enmType >= DBGFEVENT_FIRST_SELECTABLE, VERR_INVALID_PARAMETER);
1897 AssertReturn(paConfigs[i].enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
1898 }
1899 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1900 PVM pVM = pUVM->pVM;
1901 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1902
1903 /*
1904 * Apply the changes in EMT(0) and rendezvous with the other CPUs so they
1905 * can sync their data and execution with new debug state.
1906 */
1907 DBGFR3EVENTCONFIGEXARGS Args = { paConfigs, cConfigs, VINF_SUCCESS };
1908 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING | VMMEMTRENDEZVOUS_FLAGS_PRIORITY,
1909 dbgfR3EventConfigEx, &Args);
1910 if (RT_SUCCESS(rc))
1911 rc = Args.rc;
1912 return rc;
1913}
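/*
 * A short sketch of batch-configuring selectable events.  The two event types
 * used here (DBGFEVENT_TRIPLE_FAULT and DBGFEVENT_XCPT_GP) are merely examples
 * of selectable events from dbgf.h, and pUVM is assumed to be a valid handle.
 *
 *      static const DBGFEVENTCONFIG s_aEvtCfg[] =
 *      {
 *          { DBGFEVENT_TRIPLE_FAULT, true },
 *          { DBGFEVENT_XCPT_GP,      true },
 *      };
 *      int rc = DBGFR3EventConfigEx(pUVM, s_aEvtCfg, RT_ELEMENTS(s_aEvtCfg));
 */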
1914
1915
1916/**
1917 * Enables or disables a selectable debug event.
1918 *
1919 * @returns VBox status code.
1920 * @param pUVM The user mode VM handle.
1921 * @param enmEvent The selectable debug event.
1922 * @param fEnabled The new state.
1923 */
1924VMMR3DECL(int) DBGFR3EventConfig(PUVM pUVM, DBGFEVENTTYPE enmEvent, bool fEnabled)
1925{
1926 /*
1927 * Convert to an array call.
1928 */
1929 DBGFEVENTCONFIG EvtCfg = { enmEvent, fEnabled };
1930 return DBGFR3EventConfigEx(pUVM, &EvtCfg, 1);
1931}
1932
1933
1934/**
1935 * Checks if the given selectable event is enabled.
1936 *
1937 * @returns true if enabled, false if not or invalid input.
1938 * @param pUVM The user mode VM handle.
1939 * @param enmEvent The selectable debug event.
1940 * @sa DBGFR3EventQuery
1941 */
1942VMMR3DECL(bool) DBGFR3EventIsEnabled(PUVM pUVM, DBGFEVENTTYPE enmEvent)
1943{
1944 /*
1945 * Validate input.
1946 */
1947 AssertReturn( enmEvent >= DBGFEVENT_HALT_DONE
1948 && enmEvent < DBGFEVENT_END, false);
1949 Assert( enmEvent >= DBGFEVENT_FIRST_SELECTABLE
1950 || enmEvent == DBGFEVENT_BREAKPOINT
1951 || enmEvent == DBGFEVENT_BREAKPOINT_IO
1952 || enmEvent == DBGFEVENT_BREAKPOINT_MMIO);
1953
1954 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
1955 PVM pVM = pUVM->pVM;
1956 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
1957
1958 /*
1959 * Check the event status.
1960 */
1961 return ASMBitTest(&pVM->dbgf.s.bmSelectedEvents, enmEvent);
1962}
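/*
 * The single-event wrapper above pairs naturally with DBGFR3EventIsEnabled; a
 * quick sketch, again using DBGFEVENT_TRIPLE_FAULT purely as an example type
 * and assuming a valid pUVM:
 *
 *      int rc = DBGFR3EventConfig(pUVM, DBGFEVENT_TRIPLE_FAULT, true);
 *      if (RT_SUCCESS(rc))
 *          Assert(DBGFR3EventIsEnabled(pUVM, DBGFEVENT_TRIPLE_FAULT));
 */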
1963
1964
1965/**
1966 * Queries the status of a set of events.
1967 *
1968 * @returns VBox status code.
1969 * @param pUVM The user mode VM handle.
1970 * @param paConfigs The events to query and where to return the state.
1971 * @param cConfigs The number of elements in @a paConfigs.
1972 * @sa DBGFR3EventIsEnabled, DBGF_IS_EVENT_ENABLED
1973 */
1974VMMR3DECL(int) DBGFR3EventQuery(PUVM pUVM, PDBGFEVENTCONFIG paConfigs, size_t cConfigs)
1975{
1976 /*
1977 * Validate input.
1978 */
1979 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1980 PVM pVM = pUVM->pVM;
1981 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1982
1983 for (size_t i = 0; i < cConfigs; i++)
1984 {
1985 DBGFEVENTTYPE enmType = paConfigs[i].enmType;
1986 AssertReturn( enmType >= DBGFEVENT_HALT_DONE
1987 && enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
1988 Assert( enmType >= DBGFEVENT_FIRST_SELECTABLE
1989 || enmType == DBGFEVENT_BREAKPOINT
1990 || enmType == DBGFEVENT_BREAKPOINT_IO
1991 || enmType == DBGFEVENT_BREAKPOINT_MMIO);
1992 paConfigs[i].fEnabled = ASMBitTest(&pVM->dbgf.s.bmSelectedEvents, paConfigs[i].enmType);
1993 }
1994
1995 return VINF_SUCCESS;
1996}
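/*
 * Query sketch: the same DBGFEVENTCONFIG array format is used for reading the
 * current state back.  The event types are illustrative and pUVM is assumed
 * valid; note that fEnabled is overwritten on return.
 *
 *      DBGFEVENTCONFIG aQuery[] =
 *      {
 *          { DBGFEVENT_TRIPLE_FAULT, false },
 *          { DBGFEVENT_XCPT_GP,      false },
 *      };
 *      int rc = DBGFR3EventQuery(pUVM, aQuery, RT_ELEMENTS(aQuery));
 *      if (RT_SUCCESS(rc))
 *          LogRel(("triple fault intercept: %RTbool\n", aQuery[0].fEnabled));
 */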
1997
1998
1999/**
2000 * dbgfR3InterruptConfigEx argument packet.
2001 */
2002typedef struct DBGFR3INTERRUPTCONFIGEXARGS
2003{
2004 PCDBGFINTERRUPTCONFIG paConfigs;
2005 size_t cConfigs;
2006 int rc;
2007} DBGFR3INTERRUPTCONFIGEXARGS;
2008/** Pointer to a dbgfR3InterruptConfigEx argument packet. */
2009typedef DBGFR3INTERRUPTCONFIGEXARGS *PDBGFR3INTERRUPTCONFIGEXARGS;
2010
2011/**
2012 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
2013 * Worker for DBGFR3InterruptConfigEx.}
2014 */
2015static DECLCALLBACK(VBOXSTRICTRC) dbgfR3InterruptConfigEx(PVM pVM, PVMCPU pVCpu, void *pvUser)
2016{
2017 if (pVCpu->idCpu == 0)
2018 {
2019 PDBGFR3INTERRUPTCONFIGEXARGS pArgs = (PDBGFR3INTERRUPTCONFIGEXARGS)pvUser;
2020 PCDBGFINTERRUPTCONFIG paConfigs = pArgs->paConfigs;
2021 size_t cConfigs = pArgs->cConfigs;
2022
2023 /*
2024 * Apply the changes.
2025 */
2026 bool fChanged = false;
2027 bool fThis;
2028 for (uint32_t i = 0; i < cConfigs; i++)
2029 {
2030 /*
2031 * Hardware interrupts.
2032 */
2033 if (paConfigs[i].enmHardState == DBGFINTERRUPTSTATE_ENABLED)
2034 {
2035 fChanged |= fThis = ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmHardIntBreakpoints, paConfigs[i].iInterrupt) == false;
2036 if (fThis)
2037 {
2038 Assert(pVM->dbgf.s.cHardIntBreakpoints < 256);
2039 pVM->dbgf.s.cHardIntBreakpoints++;
2040 }
2041 }
2042 else if (paConfigs[i].enmHardState == DBGFINTERRUPTSTATE_DISABLED)
2043 {
2044 fChanged |= fThis = ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmHardIntBreakpoints, paConfigs[i].iInterrupt) == true;
2045 if (fThis)
2046 {
2047 Assert(pVM->dbgf.s.cHardIntBreakpoints > 0);
2048 pVM->dbgf.s.cHardIntBreakpoints--;
2049 }
2050 }
2051
2052 /*
2053 * Software interrupts.
2054 */
2055 if (paConfigs[i].enmSoftState == DBGFINTERRUPTSTATE_ENABLED)
2056 {
2057 fChanged |= fThis = ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSoftIntBreakpoints, paConfigs[i].iInterrupt) == false;
2058 if (fThis)
2059 {
2060 Assert(pVM->dbgf.s.cSoftIntBreakpoints < 256);
2061 pVM->dbgf.s.cSoftIntBreakpoints++;
2062 }
2063 }
2064 else if (paConfigs[i].enmSoftState == DBGFINTERRUPTSTATE_DISABLED)
2065 {
2066 fChanged |= fThis = ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSoftIntBreakpoints, paConfigs[i].iInterrupt) == true;
2067 if (fThis)
2068 {
2069 Assert(pVM->dbgf.s.cSoftIntBreakpoints > 0);
2070 pVM->dbgf.s.cSoftIntBreakpoints--;
2071 }
2072 }
2073 }
2074
2075 /*
2076 * Update the event bitmap entries.
2077 */
2078 if (pVM->dbgf.s.cHardIntBreakpoints > 0)
2079 fChanged |= ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_HARDWARE) == false;
2080 else
2081 fChanged |= ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_HARDWARE) == true;
2082
2083 if (pVM->dbgf.s.cSoftIntBreakpoints > 0)
2084 fChanged |= ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_SOFTWARE) == false;
2085 else
2086 fChanged |= ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_SOFTWARE) == true;
2087
2088 /*
2089 * Inform HM about changes.
2090 */
2091 if (fChanged && HMIsEnabled(pVM))
2092 {
2093 HMR3NotifyDebugEventChanged(pVM);
2094 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2095 }
2096 }
2097 else if (HMIsEnabled(pVM))
2098 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2099
2100 return VINF_SUCCESS;
2101}
2102
2103
2104/**
2105 * Configures interception of hardware and software interrupts (multiple at once).
2106 *
2107 * @returns VBox status code.
2108 * @param pUVM The user mode VM handle.
2109 * @param paConfigs The interrupts to configure and their new state.
2110 * @param cConfigs The number of elements in @a paConfigs.
2111 * @sa DBGFR3InterruptHardwareConfig, DBGFR3InterruptSoftwareConfig
2112 */
2113VMMR3DECL(int) DBGFR3InterruptConfigEx(PUVM pUVM, PCDBGFINTERRUPTCONFIG paConfigs, size_t cConfigs)
2114{
2115 /*
2116 * Validate input.
2117 */
2118 size_t i = cConfigs;
2119 while (i-- > 0)
2120 {
2121 AssertReturn(paConfigs[i].enmHardState <= DBGFINTERRUPTSTATE_DONT_TOUCH, VERR_INVALID_PARAMETER);
2122 AssertReturn(paConfigs[i].enmSoftState <= DBGFINTERRUPTSTATE_DONT_TOUCH, VERR_INVALID_PARAMETER);
2123 }
2124
2125 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2126 PVM pVM = pUVM->pVM;
2127 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2128
2129 /*
2130 * Apply the changes in EMT(0) and rendezvous with the other CPUs so they
2131 * can sync their data and execution with new debug state.
2132 */
2133 DBGFR3INTERRUPTCONFIGEXARGS Args = { paConfigs, cConfigs, VINF_SUCCESS };
2134 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING | VMMEMTRENDEZVOUS_FLAGS_PRIORITY,
2135 dbgfR3InterruptConfigEx, &Args);
2136 if (RT_SUCCESS(rc))
2137 rc = Args.rc;
2138 return rc;
2139}
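/*
 * A sketch of configuring several interrupt breakpoints in one call.  The
 * vector numbers (0x70 hardware, 0x80 software) are purely illustrative and
 * pUVM is assumed valid; DBGFINTERRUPTSTATE_DONT_TOUCH leaves the other kind
 * of interception for that vector unchanged.
 *
 *      static const DBGFINTERRUPTCONFIG s_aIntCfg[] =
 *      {   // iInterrupt, enmHardState,                  enmSoftState
 *          { 0x70,        DBGFINTERRUPTSTATE_ENABLED,    DBGFINTERRUPTSTATE_DONT_TOUCH },
 *          { 0x80,        DBGFINTERRUPTSTATE_DONT_TOUCH, DBGFINTERRUPTSTATE_ENABLED    },
 *      };
 *      int rc = DBGFR3InterruptConfigEx(pUVM, s_aIntCfg, RT_ELEMENTS(s_aIntCfg));
 */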
2140
2141
2142/**
2143 * Configures interception of a hardware interrupt.
2144 *
2145 * @returns VBox status code.
2146 * @param pUVM The user mode VM handle.
2147 * @param iInterrupt The interrupt number.
2148 * @param fEnabled Whether interception is enabled or not.
2149 * @sa DBGFR3InterruptSoftwareConfig, DBGFR3InterruptConfigEx
2150 */
2151VMMR3DECL(int) DBGFR3InterruptHardwareConfig(PUVM pUVM, uint8_t iInterrupt, bool fEnabled)
2152{
2153 /*
2154 * Convert to DBGFR3InterruptConfigEx call.
2155 */
2156 DBGFINTERRUPTCONFIG IntCfg = { iInterrupt, (uint8_t)fEnabled, DBGFINTERRUPTSTATE_DONT_TOUCH };
2157 return DBGFR3InterruptConfigEx(pUVM, &IntCfg, 1);
2158}
2159
2160
2161/**
2162 * Configures interception of a software interrupt.
2163 *
2164 * @returns VBox status code.
2165 * @param pUVM The user mode VM handle.
2166 * @param iInterrupt The interrupt number.
2167 * @param fEnabled Whether interception is enabled or not.
2168 * @sa DBGFR3InterruptHardwareConfig, DBGFR3InterruptConfigEx
2169 */
2170VMMR3DECL(int) DBGFR3InterruptSoftwareConfig(PUVM pUVM, uint8_t iInterrupt, bool fEnabled)
2171{
2172 /*
2173 * Convert to DBGFR3InterruptConfigEx call.
2174 */
2175 DBGFINTERRUPTCONFIG IntCfg = { iInterrupt, DBGFINTERRUPTSTATE_DONT_TOUCH, (uint8_t)fEnabled };
2176 return DBGFR3InterruptConfigEx(pUVM, &IntCfg, 1);
2177}
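/*
 * The two wrappers above cover the common single-interrupt case; a minimal
 * sketch (illustrative vector numbers, valid pUVM assumed):
 *
 *      int rc = DBGFR3InterruptHardwareConfig(pUVM, 0x70, true);
 *      if (RT_SUCCESS(rc))
 *          rc = DBGFR3InterruptSoftwareConfig(pUVM, 0x80, true);
 */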
2178
2179
2180/**
2181 * Checks whether interception is enabled for a hardware interrupt.
2182 *
2183 * @returns true if enabled, false if not or invalid input.
2184 * @param pUVM The user mode VM handle.
2185 * @param iInterrupt The interrupt number.
2186 * @sa DBGFR3InterruptSoftwareIsEnabled, DBGF_IS_HARDWARE_INT_ENABLED,
2187 * DBGF_IS_SOFTWARE_INT_ENABLED
2188 */
2189VMMR3DECL(int) DBGFR3InterruptHardwareIsEnabled(PUVM pUVM, uint8_t iInterrupt)
2190{
2191 /*
2192 * Validate input.
2193 */
2194 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2195 PVM pVM = pUVM->pVM;
2196 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2197
2198 /*
2199 * Check it.
2200 */
2201 return ASMBitTest(&pVM->dbgf.s.bmHardIntBreakpoints, iInterrupt);
2202}
2203
2204
2205/**
2206 * Checks whether interception is enabled for a software interrupt.
2207 *
2208 * @returns true if enabled, false if not or invalid input.
2209 * @param pUVM The user mode VM handle.
2210 * @param iInterrupt The interrupt number.
2211 * @sa DBGFR3InterruptHardwareIsEnabled, DBGF_IS_SOFTWARE_INT_ENABLED,
2212 * DBGF_IS_HARDWARE_INT_ENABLED
2213 */
2214VMMR3DECL(int) DBGFR3InterruptSoftwareIsEnabled(PUVM pUVM, uint8_t iInterrupt)
2215{
2216 /*
2217 * Validate input.
2218 */
2219 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2220 PVM pVM = pUVM->pVM;
2221 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2222
2223 /*
2224 * Check it.
2225 */
2226 return ASMBitTest(&pVM->dbgf.s.bmSoftIntBreakpoints, iInterrupt);
2227}
2228
2229
2230
2231/**
2232 * Call this to single step programmatically.
2233 *
2234 * You must pass down the return code to the EM loop! That's
2235 * where the actual single stepping takes place (at least in the
2236 * current implementation).
2237 *
2238 * @returns VINF_EM_DBG_STEP
2239 *
2240 * @param pVCpu The cross context virtual CPU structure.
2241 *
2242 * @thread VCpu EMT
2243 * @internal
2244 */
2245VMMR3_INT_DECL(int) DBGFR3PrgStep(PVMCPU pVCpu)
2246{
2247 VMCPU_ASSERT_EMT(pVCpu);
2248
2249 pVCpu->dbgf.s.fSingleSteppingRaw = true;
2250 return VINF_EM_DBG_STEP;
2251}
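/*
 * A sketch of the intended call pattern from a hypothetical EMT-side caller,
 * forwarding the status code to the EM loop as required above:
 *
 *      int rc = DBGFR3PrgStep(pVCpu);  // always returns VINF_EM_DBG_STEP
 *      return rc;                      // must reach the EM loop for the step to happen
 */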
2252
2253
2254/**
2255 * Injects an NMI into a running VM (on the specified CPU).
2256 *
2257 * @returns VBox status code.
2258 * @param pUVM The user mode VM structure.
2259 * @param idCpu The ID of the CPU to inject the NMI on.
2260 */
2261VMMR3DECL(int) DBGFR3InjectNMI(PUVM pUVM, VMCPUID idCpu)
2262{
2263 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2264 PVM pVM = pUVM->pVM;
2265 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2266 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
2267
2268 /** @todo Implement generic NMI injection. */
2269 /** @todo NEM: NMI injection */
2270 if (!HMIsEnabled(pVM))
2271 return VERR_NOT_SUP_BY_NEM;
2272
2273 VMCPU_FF_SET(pVM->apCpusR3[idCpu], VMCPU_FF_INTERRUPT_NMI);
2274 return VINF_SUCCESS;
2275}
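/*
 * Usage sketch, assuming a valid pUVM for a running VM; the NMI is requested
 * on CPU 0 here, but any valid idCpu may be passed:
 *
 *      int rc = DBGFR3InjectNMI(pUVM, 0);
 *      AssertLogRelRC(rc);
 */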
2276