source: vbox/trunk/src/VBox/VMM/VMMR3/DBGF.cpp@ 87240

Last change on this file since 87240 was 86755, checked in by vboxsync, 4 years ago

VMM/DBGF,Debugger: Implement changes for the new breakpoint manager, bugref:9837

1/* $Id: DBGF.cpp 86755 2020-10-29 08:30:25Z vboxsync $ */
2/** @file
3 * DBGF - Debugger Facility.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_dbgf DBGF - The Debugger Facility
20 *
21 * The purpose of the DBGF is to provide an interface for debuggers to
22 * manipulate the VMM without having to mess up the source code for each of
23 * them. The DBGF is always built in and will always work when a debugger
24 * attaches to the VM. The DBGF provides the basic debugger features, such as
25 * halting execution, handling breakpoints, single step execution, instruction
26 * disassembly, info querying, OS specific diggers, symbol and module
27 * management.
28 *
29 * The interface works in a manner similar to the win32, linux and os2
30 * debugger interfaces. The interface has an asynchronous nature. This comes
31 * from the fact that the VMM and the Debugger are running in different threads.
32 * They are referred to as the "emulation thread" and the "debugger thread", or
33 * as the "ping thread" and the "pong thread", respectively. (The last set of
34 * names comes from the use of the Ping-Pong synchronization construct from the
35 * RTSem API.)
36 *
37 * @see grp_dbgf
38 *
39 *
40 * @section sec_dbgf_scenario Usage Scenario
41 *
42 * The debugger starts by attaching to the VM. For practical reasons we limit the
43 * number of concurrently attached debuggers to 1 per VM. The action of
44 * attaching to the VM causes the VM to check and generate debug events.
45 *
46 * The debugger will then wait/poll for debug events and issue commands.
47 *
48 * The waiting and polling is done by the DBGFR3EventWait() function. It will wait
49 * for the emulation thread to send a ping, thus indicating that there is an
50 * event waiting to be processed.
51 *
52 * An event can be a response to a command issued previously, the hitting of a
53 * breakpoint, or running into a bad/fatal VMM condition. The debugger now has
54 * the ping and must respond to the event at hand - the VMM is waiting. This
55 * usually means that the user of the debugger must do something, but it doesn't
56 * have to. The debugger is free to call any DBGF function (nearly at least)
57 * while processing the event.
58 *
59 * Typically the user will issue a request for the execution to be resumed, so
60 * the debugger calls DBGFR3Resume() and goes back to waiting/polling for events.
61 *
62 * When the user eventually terminates the debugging session or selects another
63 * VM, the debugger detaches from the VM. This means that breakpoints are
64 * disabled and that the emulation thread no longer polls for debugger commands.
65 *
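 * A minimal sketch of the debugger side of this scenario (illustrative only;
 * real debugger front-ends handle more event types and error paths, and the
 * 250 ms poll interval is just an example value):
 *
 * @code
 *      int rc = DBGFR3Attach(pUVM);
 *      while (RT_SUCCESS(rc))
 *      {
 *          DBGFEVENT Event;
 *          rc = DBGFR3EventWait(pUVM, 250, &Event);        // poll with a 250 ms timeout
 *          if (rc == VERR_TIMEOUT)
 *          {
 *              rc = VINF_SUCCESS;                          // nothing pending, poll again
 *              continue;
 *          }
 *          if (RT_FAILURE(rc) || Event.enmType == DBGFEVENT_POWERING_OFF)
 *              break;
 *          // ... inspect guest state, handle breakpoints, prompt the user ...
 *          rc = DBGFR3Resume(pUVM, VMCPUID_ALL);           // let the halted EMTs run again
 *      }
 *      DBGFR3Detach(pUVM);
 * @endcode
 *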
66 */
67
68
69/*********************************************************************************************************************************
70* Header Files *
71*********************************************************************************************************************************/
72#define LOG_GROUP LOG_GROUP_DBGF
73#include <VBox/vmm/dbgf.h>
74#include <VBox/vmm/selm.h>
75#include <VBox/vmm/em.h>
76#include <VBox/vmm/hm.h>
77#include <VBox/vmm/mm.h>
78#include "DBGFInternal.h"
79#include <VBox/vmm/vm.h>
80#include <VBox/vmm/uvm.h>
81#include <VBox/err.h>
82
83#include <VBox/log.h>
84#include <iprt/semaphore.h>
85#include <iprt/thread.h>
86#include <iprt/asm.h>
87#include <iprt/time.h>
88#include <iprt/assert.h>
89#include <iprt/stream.h>
90#include <iprt/env.h>
91
92
93/*********************************************************************************************************************************
94* Structures and Typedefs *
95*********************************************************************************************************************************/
96/**
97 * Instruction type returned by dbgfStepGetCurInstrType.
98 */
99typedef enum DBGFSTEPINSTRTYPE
100{
101 DBGFSTEPINSTRTYPE_INVALID = 0,
102 DBGFSTEPINSTRTYPE_OTHER,
103 DBGFSTEPINSTRTYPE_RET,
104 DBGFSTEPINSTRTYPE_CALL,
105 DBGFSTEPINSTRTYPE_END,
106 DBGFSTEPINSTRTYPE_32BIT_HACK = 0x7fffffff
107} DBGFSTEPINSTRTYPE;
108
109
110/*********************************************************************************************************************************
111* Internal Functions *
112*********************************************************************************************************************************/
113DECLINLINE(int) dbgfR3SendEventWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx);
114DECLINLINE(DBGFCMD) dbgfR3CpuGetCmd(PUVMCPU pUVCpu);
115static int dbgfR3CpuWait(PVMCPU pVCpu);
116static int dbgfR3CpuCmd(PVMCPU pVCpu, DBGFCMD enmCmd, PDBGFCMDDATA pCmdData, bool *pfResumeExecution);
117static DBGFSTEPINSTRTYPE dbgfStepGetCurInstrType(PVM pVM, PVMCPU pVCpu);
118static bool dbgfStepAreWeThereYet(PVM pVM, PVMCPU pVCpu);
119
120
121
122/**
123 * Initializes the DBGF.
124 *
125 * @returns VBox status code.
126 * @param pVM The cross context VM structure.
127 */
128VMMR3_INT_DECL(int) DBGFR3Init(PVM pVM)
129{
130 PUVM pUVM = pVM->pUVM;
131 AssertCompile(sizeof(pUVM->dbgf.s) <= sizeof(pUVM->dbgf.padding));
132 AssertCompile(sizeof(pUVM->aCpus[0].dbgf.s) <= sizeof(pUVM->aCpus[0].dbgf.padding));
133
134 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;
135
136 /*
137 * The usual sideways mountain climbing style of init:
138 */
139 int rc = dbgfR3InfoInit(pUVM); /* (First, initializes the shared critical section.) */
140 if (RT_SUCCESS(rc))
141 {
142 rc = dbgfR3TraceInit(pVM);
143 if (RT_SUCCESS(rc))
144 {
145 rc = dbgfR3RegInit(pUVM);
146 if (RT_SUCCESS(rc))
147 {
148 rc = dbgfR3AsInit(pUVM);
149 if (RT_SUCCESS(rc))
150 {
151#ifndef VBOX_WITH_LOTS_OF_DBGF_BPS
152 rc = dbgfR3BpInit(pVM);
153#else
154 rc = dbgfR3BpInit(pUVM);
155#endif
156 if (RT_SUCCESS(rc))
157 {
158 rc = dbgfR3OSInit(pUVM);
159 if (RT_SUCCESS(rc))
160 {
161 rc = dbgfR3PlugInInit(pUVM);
162 if (RT_SUCCESS(rc))
163 {
164 rc = dbgfR3BugCheckInit(pVM);
165 if (RT_SUCCESS(rc))
166 {
167#ifdef VBOX_WITH_DBGF_TRACING
168 rc = dbgfR3TracerInit(pVM);
169#endif
170 if (RT_SUCCESS(rc))
171 {
172 return VINF_SUCCESS;
173 }
174 }
175 dbgfR3PlugInTerm(pUVM);
176 }
177 dbgfR3OSTermPart1(pUVM);
178 dbgfR3OSTermPart2(pUVM);
179 }
180#ifdef VBOX_WITH_LOTS_OF_DBGF_BPS
181 dbgfR3BpTerm(pUVM);
182#endif
183 }
184 dbgfR3AsTerm(pUVM);
185 }
186 dbgfR3RegTerm(pUVM);
187 }
188 dbgfR3TraceTerm(pVM);
189 }
190 dbgfR3InfoTerm(pUVM);
191 }
192 return rc;
193}
194
195
196/**
197 * Terminates and cleans up resources allocated by the DBGF.
198 *
199 * @returns VBox status code.
200 * @param pVM The cross context VM structure.
201 */
202VMMR3_INT_DECL(int) DBGFR3Term(PVM pVM)
203{
204 PUVM pUVM = pVM->pUVM;
205
206#ifdef VBOX_WITH_DBGF_TRACING
207 dbgfR3TracerTerm(pVM);
208#endif
209 dbgfR3OSTermPart1(pUVM);
210 dbgfR3PlugInTerm(pUVM);
211 dbgfR3OSTermPart2(pUVM);
212#ifdef VBOX_WITH_LOTS_OF_DBGF_BPS
213 dbgfR3BpTerm(pUVM);
214#endif
215 dbgfR3AsTerm(pUVM);
216 dbgfR3RegTerm(pUVM);
217 dbgfR3TraceTerm(pVM);
218 dbgfR3InfoTerm(pUVM);
219
220 return VINF_SUCCESS;
221}
222
223
224/**
225 * This is for tstCFGM and others to avoid triggering leak detection.
226 *
228 * @param pUVM The user mode VM structure.
229 */
230VMMR3DECL(void) DBGFR3TermUVM(PUVM pUVM)
231{
232 dbgfR3InfoTerm(pUVM);
233}
234
235
236/**
237 * Called when the VM is powered off to detach debuggers.
238 *
239 * @param pVM The cross context VM structure.
240 */
241VMMR3_INT_DECL(void) DBGFR3PowerOff(PVM pVM)
242{
243 /*
244 * Send a termination event to any attached debugger.
245 */
246 if (pVM->dbgf.s.fAttached)
247 {
248 PVMCPU pVCpu = VMMGetCpu(pVM);
249 int rc = dbgfR3SendEventWait(pVM, pVCpu, DBGFEVENT_POWERING_OFF, DBGFEVENTCTX_OTHER);
250 AssertLogRelRC(rc);
251
252 /*
253 * Clear the FF so we won't get confused later on.
254 */
255 VM_FF_CLEAR(pVM, VM_FF_DBGF);
256 }
257}
258
259
260/**
261 * Applies relocations to data and code managed by this
262 * component. This function will be called at init and
263 * whenever the VMM needs to relocate itself inside the GC.
264 *
265 * @param pVM The cross context VM structure.
266 * @param offDelta Relocation delta relative to old location.
267 */
268VMMR3_INT_DECL(void) DBGFR3Relocate(PVM pVM, RTGCINTPTR offDelta)
269{
270 dbgfR3TraceRelocate(pVM);
271 dbgfR3AsRelocate(pVM->pUVM, offDelta);
272}
273
274
275/**
276 * Waits a little while for a debugger to attach.
277 *
278 * @returns True if a debugger has attached.
279 * @param pVM The cross context VM structure.
280 * @param pVCpu The cross context per CPU structure.
281 * @param enmEvent Event.
282 *
283 * @thread EMT(pVCpu)
284 */
285bool dbgfR3WaitForAttach(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmEvent)
286{
287 /*
288 * First a message.
289 */
290#if !defined(DEBUG)
291 int cWait = 10;
292#else
293 int cWait = !VM_IS_RAW_MODE_ENABLED(pVM)
294 && ( enmEvent == DBGFEVENT_ASSERTION_HYPER
295 || enmEvent == DBGFEVENT_FATAL_ERROR)
296 && !RTEnvExist("VBOX_DBGF_WAIT_FOR_ATTACH")
297 ? 10
298 : 150;
299#endif
300 RTStrmPrintf(g_pStdErr, "DBGF: No debugger attached, waiting %d second%s for one to attach (event=%d)\n",
301 cWait / 10, cWait != 10 ? "s" : "", enmEvent);
302 RTStrmFlush(g_pStdErr);
303 while (cWait > 0)
304 {
305 RTThreadSleep(100);
306 if (pVM->dbgf.s.fAttached)
307 {
308 RTStrmPrintf(g_pStdErr, "Attached!\n");
309 RTStrmFlush(g_pStdErr);
310 return true;
311 }
312
313 /* Process rendezvous (debugger attaching involves such). */
314 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
315 {
316 int rc = VMMR3EmtRendezvousFF(pVM, pVCpu); AssertRC(rc);
317 if (rc != VINF_SUCCESS)
318 {
319 /** @todo Ignoring these could be bad. */
320 RTStrmPrintf(g_pStdErr, "[rcRendezvous=%Rrc, ignored!]", rc);
321 RTStrmFlush(g_pStdErr);
322 }
323 }
324
325 /* Process priority stuff. */
326 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
327 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
328 {
329 int rc = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, true /*fPriorityOnly*/);
330 if (rc == VINF_SUCCESS)
331 rc = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, true /*fPriorityOnly*/);
332 if (rc != VINF_SUCCESS)
333 {
334 /** @todo Ignoring these could be bad. */
335 RTStrmPrintf(g_pStdErr, "[rcReq=%Rrc, ignored!]", rc);
336 RTStrmFlush(g_pStdErr);
337 }
338 }
339
340 /* next */
341 if (!(cWait % 10))
342 {
343 RTStrmPrintf(g_pStdErr, "%d.", cWait / 10);
344 RTStrmFlush(g_pStdErr);
345 }
346 cWait--;
347 }
348
349 RTStrmPrintf(g_pStdErr, "Stopping the VM!\n");
350 RTStrmFlush(g_pStdErr);
351 return false;
352}
353
354
355/**
356 * Forced action callback.
357 *
358 * The VMM will call this from its main loop when either VM_FF_DBGF or
359 * VMCPU_FF_DBGF is set.
360 *
361 * The function checks for and executes pending commands from the debugger.
362 * Then it checks for pending debug events and serves these.
363 *
364 * @returns VINF_SUCCESS normally.
365 * @returns VERR_DBGF_RAISE_FATAL_ERROR to pretend a fatal error happened.
366 * @param pVM The cross context VM structure.
367 * @param pVCpu The cross context per CPU structure.
368 */
369VMMR3_INT_DECL(int) DBGFR3VMMForcedAction(PVM pVM, PVMCPU pVCpu)
370{
371 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
372
373 /*
374 * Dispatch pending events.
375 */
376 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_DBGF))
377 {
378 if ( pVCpu->dbgf.s.cEvents > 0
379 && pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState == DBGFEVENTSTATE_CURRENT)
380 {
381 rcStrict = DBGFR3EventHandlePending(pVM, pVCpu);
382 /** @todo may end up with VERR_DBGF_NOT_ATTACHED here, which will prove fatal... */
383 }
384
385 /*
386 * Command pending? Process it.
387 */
388 PUVMCPU pUVCpu = pVCpu->pUVCpu;
389 if (pUVCpu->dbgf.s.enmDbgfCmd != DBGFCMD_NO_COMMAND)
390 {
391 bool fResumeExecution;
392 DBGFCMDDATA CmdData = pUVCpu->dbgf.s.DbgfCmdData;
393 DBGFCMD enmCmd = dbgfR3CpuGetCmd(pUVCpu);
394 VBOXSTRICTRC rcStrict2 = dbgfR3CpuCmd(pVCpu, enmCmd, &CmdData, &fResumeExecution);
395 if (!fResumeExecution)
396 rcStrict2 = dbgfR3CpuWait(pVCpu);
397 if ( rcStrict2 != VINF_SUCCESS
398 && ( rcStrict == VINF_SUCCESS
399 || RT_FAILURE(rcStrict2)
400 || rcStrict2 < rcStrict) ) /** @todo oversimplified? */
401 rcStrict = rcStrict2;
402 }
403 }
404
405 return VBOXSTRICTRC_TODO(rcStrict);
406}
407
408
409/**
410 * Try to determine the event context.
411 *
412 * @returns debug event context.
413 * @param pVCpu The cross context vCPU structure.
414 */
415static DBGFEVENTCTX dbgfR3FigureEventCtx(PVMCPU pVCpu)
416{
417 switch (EMGetState(pVCpu))
418 {
419 case EMSTATE_HM:
420 case EMSTATE_NEM:
421 case EMSTATE_DEBUG_GUEST_HM:
422 case EMSTATE_DEBUG_GUEST_NEM:
423 return DBGFEVENTCTX_HM;
424
425 case EMSTATE_IEM:
426 case EMSTATE_RAW:
427 case EMSTATE_IEM_THEN_REM:
428 case EMSTATE_DEBUG_GUEST_IEM:
429 case EMSTATE_DEBUG_GUEST_RAW:
430 return DBGFEVENTCTX_RAW;
431
432
433 case EMSTATE_REM:
434 case EMSTATE_DEBUG_GUEST_REM:
435 return DBGFEVENTCTX_REM;
436
437 case EMSTATE_DEBUG_HYPER:
438 case EMSTATE_GURU_MEDITATION:
439 return DBGFEVENTCTX_HYPER;
440
441 default:
442 return DBGFEVENTCTX_OTHER;
443 }
444}
445
446
447/**
448 * Sends the event to the debugger (i.e. adds it to the event ring buffer).
449 *
450 * @returns VBox status code.
451 * @param pVM The cross context VM structure.
452 * @param pVCpu The CPU sending the event.
453 * @param enmType The event type to send.
454 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
455 * @param pvPayload Event payload (DBGFEVENT::u data), optional.
456 * @param cbPayload The size of the event payload, optional.
457 */
458static int dbgfR3SendEventWorker(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx,
459 void const *pvPayload, size_t cbPayload)
460{
461 PUVM pUVM = pVM->pUVM;
462 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID; /** @todo per vCPU stepping filter. */
463
464 /*
465 * Massage the input a little.
466 */
467 AssertStmt(cbPayload <= RT_SIZEOFMEMB(DBGFEVENT, u), cbPayload = RT_SIZEOFMEMB(DBGFEVENT, u));
468 if (enmCtx == DBGFEVENTCTX_INVALID)
469 enmCtx = dbgfR3FigureEventCtx(pVCpu);
470
471 /*
472 * Put the event into the ring buffer.
473 */
474 RTSemFastMutexRequest(pUVM->dbgf.s.hMtxDbgEvtWr);
475
476 uint32_t const cDbgEvtMax = RT_MAX(1, pUVM->dbgf.s.cDbgEvtMax);
477 uint32_t const idxDbgEvtWrite = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite);
478 uint32_t const idxDbgEvtRead = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtRead);
479 /** @todo Handle full buffer. */ RT_NOREF(idxDbgEvtRead);
480
481 PDBGFEVENT pEvent = &pUVM->dbgf.s.paDbgEvts[idxDbgEvtWrite % cDbgEvtMax];
482
483#ifdef DEBUG
484 ASMMemFill32(pEvent, sizeof(*pEvent), UINT32_C(0xdeadbeef));
485#endif
486 pEvent->enmType = enmType;
487 pEvent->enmCtx = enmCtx;
488 pEvent->idCpu = pVCpu->idCpu;
489 pEvent->uReserved = 0;
490 if (cbPayload)
491 memcpy(&pEvent->u, pvPayload, cbPayload);
492
493 ASMAtomicWriteU32(&pUVM->dbgf.s.idxDbgEvtWrite, (idxDbgEvtWrite + 1) % cDbgEvtMax);
494
495 RTSemFastMutexRelease(pUVM->dbgf.s.hMtxDbgEvtWr);
496
497 /*
498 * Signal the debugger.
499 */
500 return RTSemEventSignal(pUVM->dbgf.s.hEvtWait);
501}
502
503
504/**
505 * Send event and wait for the debugger to respond.
506 *
507 * @returns Strict VBox status code.
508 * @param pVM The cross context VM structure.
509 * @param pVCpu The CPU sending the event.
510 * @param enmType The event type to send.
511 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
512 */
513DECLINLINE(int) dbgfR3SendEventWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx)
514{
515 int rc = dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, NULL, 0);
516 if (RT_SUCCESS(rc))
517 rc = dbgfR3CpuWait(pVCpu);
518 return rc;
519}
520
521
522/**
523 * Send event and wait for the debugger to respond, extended version.
524 *
525 * @returns Strict VBox status code.
526 * @param pVM The cross context VM structure.
527 * @param pVCpu The CPU sending the event.
528 * @param enmType The event type to send.
529 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
530 * @param pvPayload Event payload (DBGFEVENT::u data), optional.
531 * @param cbPayload The size of the event payload, optional.
532 */
533DECLINLINE(int) dbgfR3SendEventWaitEx(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx,
534 void const *pvPayload, size_t cbPayload)
535{
536 int rc = dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, pvPayload, cbPayload);
537 if (RT_SUCCESS(rc))
538 rc = dbgfR3CpuWait(pVCpu);
539 return rc;
540}
541
542
543/**
544 * Send event but do NOT wait for the debugger.
545 *
546 * Currently only used by dbgfR3CpuCmd().
547 *
548 * @param pVM The cross context VM structure.
549 * @param pVCpu The CPU sending the event.
550 * @param enmType The event type to send.
551 * @param enmCtx The event context, DBGFEVENTCTX_INVALID will be resolved.
552 */
553DECLINLINE(int) dbgfR3SendEventNoWait(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmType, DBGFEVENTCTX enmCtx)
554{
555 return dbgfR3SendEventWorker(pVM, pVCpu, enmType, enmCtx, NULL, 0);
556}
557
558
559/**
560 * The common event prologue code.
561 *
562 * It will make sure someone is attached, and perhaps process any high priority
563 * pending actions (none yet).
564 *
565 * @returns VBox status code.
566 * @param pVM The cross context VM structure.
567 * @param pVCpu The vCPU cross context structure.
568 * @param enmEvent The event to be sent.
569 */
570static int dbgfR3EventPrologue(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmEvent)
571{
572 /*
573 * Check if a debugger is attached.
574 */
575 if ( !pVM->dbgf.s.fAttached
576 && !dbgfR3WaitForAttach(pVM, pVCpu, enmEvent))
577 {
578 Log(("dbgfR3EventPrologue: enmEvent=%d - debugger not attached\n", enmEvent));
579 return VERR_DBGF_NOT_ATTACHED;
580 }
581
582 /*
583 * Look thru pending commands and finish those which make sense now.
584 */
585 /** @todo Process/purge pending commands. */
586 //int rc = DBGFR3VMMForcedAction(pVM);
587 return VINF_SUCCESS;
588}
589
590
591/**
592 * Processes a pending event on the current CPU.
593 *
594 * This is called by EM in response to VINF_EM_DBG_EVENT.
595 *
596 * @returns Strict VBox status code.
597 * @param pVM The cross context VM structure.
598 * @param pVCpu The cross context per CPU structure.
599 *
600 * @thread EMT(pVCpu)
601 */
602VMMR3_INT_DECL(VBOXSTRICTRC) DBGFR3EventHandlePending(PVM pVM, PVMCPU pVCpu)
603{
604 VMCPU_ASSERT_EMT(pVCpu);
605 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_DBGF);
606
607 /*
608 * Check that we've got an event first.
609 */
610 AssertReturn(pVCpu->dbgf.s.cEvents > 0, VINF_SUCCESS);
611 AssertReturn(pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState == DBGFEVENTSTATE_CURRENT, VINF_SUCCESS);
612 PDBGFEVENT pEvent = &pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].Event;
613
614 /*
615 * Make sure we've got a debugger and are allowed to speak to it.
616 */
617 int rc = dbgfR3EventPrologue(pVM, pVCpu, pEvent->enmType);
618 if (RT_FAILURE(rc))
619 {
620 /** @todo drop them events? */
621 return rc; /** @todo this will cause trouble if we're here via an FF! */
622 }
623
624 /*
625 * Send the event and mark it as ignore.
626 * ASSUMES no new events get generated while dbgfR3CpuWait is executing!
627 */
628 VBOXSTRICTRC rcStrict = dbgfR3SendEventWaitEx(pVM, pVCpu, pEvent->enmType, pEvent->enmCtx, &pEvent->u, sizeof(pEvent->u));
629 pVCpu->dbgf.s.aEvents[pVCpu->dbgf.s.cEvents - 1].enmState = DBGFEVENTSTATE_IGNORE;
630 return rcStrict;
631}
632
633
634/**
635 * Send a generic debugger event which takes no data.
636 *
637 * @returns VBox status code.
638 * @param pVM The cross context VM structure.
639 * @param enmEvent The event to send.
640 * @internal
641 */
642VMMR3DECL(int) DBGFR3Event(PVM pVM, DBGFEVENTTYPE enmEvent)
643{
644 PVMCPU pVCpu = VMMGetCpu(pVM);
645 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
646
647 /*
648 * Do stepping filtering.
649 */
650 /** @todo Would be better if we did some of this inside the execution
651 * engines. */
652 if ( enmEvent == DBGFEVENT_STEPPED
653 || enmEvent == DBGFEVENT_STEPPED_HYPER)
654 {
655 if (!dbgfStepAreWeThereYet(pVM, pVCpu))
656 return VINF_EM_DBG_STEP;
657 }
658
659 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
660 if (RT_FAILURE(rc))
661 return rc;
662
663 /*
664 * Send the event and process the reply communication.
665 */
666 return dbgfR3SendEventWait(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID);
667}
668
669
670/**
671 * Send a debugger event which takes the full source file location.
672 *
673 * @returns VBox status code.
674 * @param pVM The cross context VM structure.
675 * @param enmEvent The event to send.
676 * @param pszFile Source file.
677 * @param uLine Line number in source file.
678 * @param pszFunction Function name.
679 * @param pszFormat Message which accompanies the event.
680 * @param ... Message arguments.
681 * @internal
682 */
683VMMR3DECL(int) DBGFR3EventSrc(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszFile, unsigned uLine, const char *pszFunction, const char *pszFormat, ...)
684{
685 va_list args;
686 va_start(args, pszFormat);
687 int rc = DBGFR3EventSrcV(pVM, enmEvent, pszFile, uLine, pszFunction, pszFormat, args);
688 va_end(args);
689 return rc;
690}
691
692
693/**
694 * Send a debugger event which takes the full source file location.
695 *
696 * @returns VBox status code.
697 * @param pVM The cross context VM structure.
698 * @param enmEvent The event to send.
699 * @param pszFile Source file.
700 * @param uLine Line number in source file.
701 * @param pszFunction Function name.
702 * @param pszFormat Message which accompanies the event.
703 * @param args Message arguments.
704 * @internal
705 */
706VMMR3DECL(int) DBGFR3EventSrcV(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszFile, unsigned uLine, const char *pszFunction, const char *pszFormat, va_list args)
707{
708 PVMCPU pVCpu = VMMGetCpu(pVM);
709 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
710
711 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
712 if (RT_FAILURE(rc))
713 return rc;
714
715 /*
716 * Format the message.
717 */
718 char *pszMessage = NULL;
719 char szMessage[8192];
720 if (pszFormat && *pszFormat)
721 {
722 pszMessage = &szMessage[0];
723 RTStrPrintfV(szMessage, sizeof(szMessage), pszFormat, args);
724 }
725
726 /*
727 * Send the event and process the reply communication.
728 */
729 DBGFEVENT DbgEvent; /** @todo split up DBGFEVENT so we can skip the dead wait on the stack? */
730 DbgEvent.u.Src.pszFile = pszFile;
731 DbgEvent.u.Src.uLine = uLine;
732 DbgEvent.u.Src.pszFunction = pszFunction;
733 DbgEvent.u.Src.pszMessage = pszMessage;
734 return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID, &DbgEvent.u, sizeof(DbgEvent.u.Src));
735}
736
737
738/**
739 * Send a debugger event which takes the two assertion messages.
740 *
741 * @returns VBox status code.
742 * @param pVM The cross context VM structure.
743 * @param enmEvent The event to send.
744 * @param pszMsg1 First assertion message.
745 * @param pszMsg2 Second assertion message.
746 */
747VMMR3_INT_DECL(int) DBGFR3EventAssertion(PVM pVM, DBGFEVENTTYPE enmEvent, const char *pszMsg1, const char *pszMsg2)
748{
749 PVMCPU pVCpu = VMMGetCpu(pVM);
750 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
751
752 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
753 if (RT_FAILURE(rc))
754 return rc;
755
756 /*
757 * Send the event and process the reply communication.
758 */
759 DBGFEVENT DbgEvent;
760 DbgEvent.u.Assert.pszMsg1 = pszMsg1;
761 DbgEvent.u.Assert.pszMsg2 = pszMsg2;
762 return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_INVALID, &DbgEvent.u, sizeof(DbgEvent.u.Assert));
763}
764
765
766/**
767 * Breakpoint was hit somewhere.
768 * Figure out which breakpoint it is and notify the debugger.
769 *
770 * @returns VBox status code.
771 * @param pVM The cross context VM structure.
772 * @param enmEvent DBGFEVENT_BREAKPOINT_HYPER or DBGFEVENT_BREAKPOINT.
773 */
774VMMR3_INT_DECL(int) DBGFR3EventBreakpoint(PVM pVM, DBGFEVENTTYPE enmEvent)
775{
776 PVMCPU pVCpu = VMMGetCpu(pVM);
777 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
778
779 int rc = dbgfR3EventPrologue(pVM, pVCpu, enmEvent);
780 if (RT_FAILURE(rc))
781 return rc;
782
783 /*
784 * Send the event and process the reply communication.
785 */
786 DBGFEVENT DbgEvent;
787#ifndef VBOX_WITH_LOTS_OF_DBGF_BPS
788 RTUINT iBp = DbgEvent.u.Bp.hBp = pVCpu->dbgf.s.iActiveBp;
789 pVCpu->dbgf.s.iActiveBp = ~0U;
790 if (iBp != ~0U)
791 {
792 DbgEvent.enmCtx = DBGFEVENTCTX_RAW;
793 return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_RAW, &DbgEvent.u, sizeof(DbgEvent.u.Bp));
794 }
795#else
796 DbgEvent.u.Bp.hBp = pVCpu->dbgf.s.hBpActive;
797 pVCpu->dbgf.s.hBpActive = NIL_DBGFBP;
798 if (DbgEvent.u.Bp.hBp != NIL_DBGFBP)
799 {
800 DbgEvent.enmCtx = DBGFEVENTCTX_RAW;
801 return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_RAW, &DbgEvent.u, sizeof(DbgEvent.u.Bp));
802 }
803#endif
804
805#ifndef VBOX_WITH_LOTS_OF_DBGF_BPS
806 AssertFailed(); /** @todo this should be obsolete now... */
807
808 /* REM breakpoints have to be searched for. */
809#if 0 /** @todo get flat PC api! */
810 uint32_t eip = CPUMGetGuestEIP(pVM);
811#else
812 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
813 RTGCPTR eip = pCtx->rip + pCtx->cs.u64Base;
814#endif
815 for (size_t i = 0; i < RT_ELEMENTS(pVM->dbgf.s.aBreakpoints); i++)
816 if ( pVM->dbgf.s.aBreakpoints[i].enmType == DBGFBPTYPE_REM
817 && pVM->dbgf.s.aBreakpoints[i].u.Rem.GCPtr == eip)
818 {
819 DbgEvent.u.Bp.hBp = pVM->dbgf.s.aBreakpoints[i].iBp;
820 break;
821 }
822 AssertMsg(DbgEvent.u.Bp.hBp != ~0U, ("eip=%08x\n", eip));
823 return dbgfR3SendEventWaitEx(pVM, pVCpu, enmEvent, DBGFEVENTCTX_REM, &DbgEvent.u, sizeof(DbgEvent.u.Bp));
824#else
825 return VERR_DBGF_IPE_1;
826#endif
827}
828
829
830/**
831 * Returns whether the given vCPU is waiting for the debugger.
832 *
833 * @returns Flag whether the vCPU is currently waiting for the debugger.
834 * @param pUVCpu The user mode vCPU structure.
835 */
836DECLINLINE(bool) dbgfR3CpuIsHalted(PUVMCPU pUVCpu)
837{
838 return ASMAtomicReadBool(&pUVCpu->dbgf.s.fStopped);
839}
840
841
842/**
843 * Checks whether the given vCPU is waiting in the debugger.
844 *
845 * @returns Flag whether the indicated vCPU is halted; when VMCPUID_ALL
846 * is given, true is returned when at least one vCPU is halted.
847 * @param pUVM The user mode VM structure.
848 * @param idCpu The CPU ID to check, VMCPUID_ALL to check all vCPUs.
849 */
850DECLINLINE(bool) dbgfR3CpuAreAnyHaltedByCpuId(PUVM pUVM, VMCPUID idCpu)
851{
852 AssertReturn(idCpu < pUVM->cCpus || idCpu == VMCPUID_ALL, false);
853
854 /* Check that either the given vCPU or all are actually halted. */
855 if (idCpu != VMCPUID_ALL)
856 return dbgfR3CpuIsHalted(&pUVM->aCpus[idCpu]);
857
858 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
859 if (dbgfR3CpuIsHalted(&pUVM->aCpus[i]))
860 return true;
861 return false;
862}
863
864
865/**
866 * Gets the pending debug command for this EMT/CPU, replacing it with
867 * DBGFCMD_NO_COMMAND.
868 *
869 * @returns Pending command.
870 * @param pUVCpu The user mode virtual CPU structure.
871 * @thread EMT(pUVCpu)
872 */
873DECLINLINE(DBGFCMD) dbgfR3CpuGetCmd(PUVMCPU pUVCpu)
874{
875 DBGFCMD enmCmd = (DBGFCMD)ASMAtomicXchgU32((uint32_t volatile *)(void *)&pUVCpu->dbgf.s.enmDbgfCmd, DBGFCMD_NO_COMMAND);
876 Log2(("DBGF: Getting command: %d\n", enmCmd));
877 return enmCmd;
878}
879
880
881/**
882 * Send a debug command to a CPU, making sure to notify it.
883 *
884 * @returns VBox status code.
885 * @param pUVCpu The user mode virtual CPU structure.
886 * @param enmCmd The command to submit to the CPU.
887 */
888DECLINLINE(int) dbgfR3CpuSetCmdAndNotify(PUVMCPU pUVCpu, DBGFCMD enmCmd)
889{
890 Log2(("DBGF: Setting command to %d\n", enmCmd));
891 Assert(enmCmd != DBGFCMD_NO_COMMAND);
892 AssertMsg(pUVCpu->dbgf.s.enmDbgfCmd == DBGFCMD_NO_COMMAND, ("enmCmd=%d enmDbgfCmd=%d\n", enmCmd, pUVCpu->dbgf.s.enmDbgfCmd));
893
894 ASMAtomicWriteU32((uint32_t volatile *)(void *)&pUVCpu->dbgf.s.enmDbgfCmd, enmCmd);
895 VMCPU_FF_SET(pUVCpu->pVCpu, VMCPU_FF_DBGF);
896
897 VMR3NotifyCpuFFU(pUVCpu, 0 /*fFlags*/);
898 return VINF_SUCCESS;
899}
900
901
902/**
903 * Waits for the debugger to respond.
904 *
905 * @returns VBox status code. (clarify)
906 * @param pVCpu The cross context vCPU structure.
907 */
908static int dbgfR3CpuWait(PVMCPU pVCpu)
909{
910 PVM pVM = pVCpu->CTX_SUFF(pVM);
911 PUVMCPU pUVCpu = pVCpu->pUVCpu;
912
913 LogFlow(("dbgfR3CpuWait:\n"));
914 int rcRet = VINF_SUCCESS;
915
916 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, true);
917
918 /*
919 * Waits for the debugger to reply (i.e. issue a command).
920 */
921 for (;;)
922 {
923 /*
924 * Wait.
925 */
926 for (;;)
927 {
928 /*
929 * Process forced flags before we go sleep.
930 */
931 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_DBGF | VMCPU_FF_REQUEST)
932 || VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VMCPU_FF_REQUEST | VM_FF_CHECK_VM_STATE))
933 {
934 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF))
935 break;
936
937 int rc;
938 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
939 rc = VMMR3EmtRendezvousFF(pVM, pVCpu);
940 else if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
941 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
942 {
943 LogFlow(("dbgfR3CpuWait: Processes requests...\n"));
944 rc = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
945 if (rc == VINF_SUCCESS)
946 rc = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
947 LogFlow(("dbgfR3CpuWait: VMR3ReqProcess -> %Rrc rcRet=%Rrc\n", rc, rcRet));
948 }
949 else if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
950 {
951 VMSTATE enmState = VMR3GetState(pVM);
952 switch (enmState)
953 {
954 case VMSTATE_FATAL_ERROR:
955 case VMSTATE_FATAL_ERROR_LS:
956 case VMSTATE_GURU_MEDITATION:
957 case VMSTATE_GURU_MEDITATION_LS:
958 rc = VINF_EM_SUSPEND;
959 break;
960 case VMSTATE_DESTROYING:
961 rc = VINF_EM_TERMINATE;
962 break;
963 default:
964 rc = VERR_DBGF_IPE_1;
965 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
966 }
967 }
968 else
969 rc = VINF_SUCCESS;
970 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
971 {
972 switch (rc)
973 {
974 case VINF_EM_DBG_BREAKPOINT:
975 case VINF_EM_DBG_STEPPED:
976 case VINF_EM_DBG_STEP:
977 case VINF_EM_DBG_STOP:
978 case VINF_EM_DBG_EVENT:
979 AssertMsgFailed(("rc=%Rrc\n", rc));
980 break;
981
982 /* return straight away */
983 case VINF_EM_TERMINATE:
984 case VINF_EM_OFF:
985 LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rc));
986 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
987 return rc;
988
989 /* remember return code. */
990 default:
991 AssertReleaseMsgFailed(("rc=%Rrc is not in the switch!\n", rc));
992 RT_FALL_THRU();
993 case VINF_EM_RESET:
994 case VINF_EM_SUSPEND:
995 case VINF_EM_HALT:
996 case VINF_EM_RESUME:
997 case VINF_EM_RESCHEDULE:
998 case VINF_EM_RESCHEDULE_REM:
999 case VINF_EM_RESCHEDULE_RAW:
1000 if (rc < rcRet || rcRet == VINF_SUCCESS)
1001 rcRet = rc;
1002 break;
1003 }
1004 }
1005 else if (RT_FAILURE(rc))
1006 {
1007 LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rc));
1008 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
1009 return rc;
1010 }
1011 }
1012 else if (pVM->dbgf.s.fAttached)
1013 {
1014 int rc = VMR3WaitU(pUVCpu);
1015 if (RT_FAILURE(rc))
1016 {
1017 LogFlow(("dbgfR3CpuWait: returns %Rrc (VMR3WaitU)\n", rc));
1018 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
1019 return rc;
1020 }
1021 }
1022 else
1023 {
1024 LogFlow(("dbgfR3CpuWait: Debugger detached, continuing normal execution (%Rrc)\n", rcRet));
1025 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
1026 return rcRet;
1027 }
1028 }
1029
1030 /*
1031 * Process the command.
1032 */
1033 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_DBGF);
1034 bool fResumeExecution;
1035 DBGFCMDDATA CmdData = pUVCpu->dbgf.s.DbgfCmdData;
1036 DBGFCMD enmCmd = dbgfR3CpuGetCmd(pUVCpu);
1037 int rc = dbgfR3CpuCmd(pVCpu, enmCmd, &CmdData, &fResumeExecution);
1038 if (fResumeExecution)
1039 {
1040 if (RT_FAILURE(rc))
1041 rcRet = rc;
1042 else if ( rc >= VINF_EM_FIRST
1043 && rc <= VINF_EM_LAST
1044 && (rc < rcRet || rcRet == VINF_SUCCESS))
1045 rcRet = rc;
1046 LogFlow(("dbgfR3CpuWait: returns %Rrc\n", rcRet));
1047 ASMAtomicWriteBool(&pUVCpu->dbgf.s.fStopped, false);
1048 return rcRet;
1049 }
1050 }
1051}
1052
1053
1054/**
1056 * Executes a command from the debugger.
1056 *
1057 * The caller is responsible for waiting or resuming execution based on the
1058 * value returned in the *pfResumeExecution indicator.
1059 *
1060 * @returns VBox status code. (clarify!)
1061 * @param pVCpu The cross context vCPU structure.
1062 * @param enmCmd The command in question.
1063 * @param pCmdData Pointer to the command data.
1064 * @param pfResumeExecution Where to store the resume execution / continue waiting indicator.
1065 */
1066static int dbgfR3CpuCmd(PVMCPU pVCpu, DBGFCMD enmCmd, PDBGFCMDDATA pCmdData, bool *pfResumeExecution)
1067{
1068 RT_NOREF(pCmdData); /* for later */
1069
1070 /*
1071 * The cases in this switch return directly if there is no event to send.
1072 */
1073 DBGFEVENTTYPE enmEvent;
1074 DBGFEVENTCTX enmCtx = DBGFEVENTCTX_INVALID;
1075 switch (enmCmd)
1076 {
1077 /*
1078 * Halt is answered by an event saying that we've halted.
1079 */
1080 case DBGFCMD_HALT:
1081 {
1082 *pfResumeExecution = false;
1083 enmEvent = DBGFEVENT_HALT_DONE;
1084 break;
1085 }
1086
1087
1088 /*
1089 * Resume is not answered, we just resume execution.
1090 */
1091 case DBGFCMD_GO:
1092 {
1093 pVCpu->dbgf.s.fSingleSteppingRaw = false;
1094 *pfResumeExecution = true;
1095 return VINF_SUCCESS;
1096 }
1097
1098 /** @todo implement (and define) the rest of the commands. */
1099
1100 /*
1101 * Single step, with trace into.
1102 */
1103 case DBGFCMD_SINGLE_STEP:
1104 {
1105 Log2(("Single step\n"));
1106 PVM pVM = pVCpu->CTX_SUFF(pVM);
1107 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_OVER)
1108 {
1109 if (dbgfStepGetCurInstrType(pVM, pVCpu) == DBGFSTEPINSTRTYPE_CALL)
1110 pVM->dbgf.s.SteppingFilter.uCallDepth++;
1111 }
1112 if (pVM->dbgf.s.SteppingFilter.cMaxSteps > 0)
1113 {
1114 pVCpu->dbgf.s.fSingleSteppingRaw = true;
1115 *pfResumeExecution = true;
1116 return VINF_EM_DBG_STEP;
1117 }
1118 /* Stop after zero steps. Nonsense, but whatever. */
1119 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;
1120 *pfResumeExecution = false;
1121 enmCtx = dbgfR3FigureEventCtx(pVCpu);
1122 enmEvent = enmCtx != DBGFEVENTCTX_HYPER ? DBGFEVENT_STEPPED : DBGFEVENT_STEPPED_HYPER;
1123 break;
1124 }
1125
1126 /*
1127 * Default is to send an invalid command event.
1128 */
1129 default:
1130 {
1131 *pfResumeExecution = false;
1132 enmEvent = DBGFEVENT_INVALID_COMMAND;
1133 break;
1134 }
1135 }
1136
1137 /*
1138 * Send the pending event.
1139 */
1140 Log2(("DBGF: Emulation thread: sending event %d\n", enmEvent));
1141 int rc = dbgfR3SendEventNoWait(pVCpu->CTX_SUFF(pVM), pVCpu, enmEvent, enmCtx);
1142 AssertRCStmt(rc, *pfResumeExecution = true);
1143 return rc;
1144}
1145
1146
1147/**
1148 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
1149 * EMT rendezvous worker for DBGFR3Attach - only called on one EMT.}
1150 */
1151static DECLCALLBACK(VBOXSTRICTRC) dbgfR3Attach(PVM pVM, PVMCPU pVCpu, void *pvUser)
1152{
1153 PUVM pUVM = pVM->pUVM;
1154 int *prcAttach = (int *)pvUser;
1155 RT_NOREF(pVCpu);
1156
1157 if (pVM->dbgf.s.fAttached)
1158 {
1159 Log(("dbgfR3Attach: Debugger already attached\n"));
1160 *prcAttach = VERR_DBGF_ALREADY_ATTACHED;
1161 return VINF_SUCCESS;
1162 }
1163
1164 /*
1165 * The per-CPU bits.
1166 */
1167 for (uint32_t i = 0; i < pUVM->cCpus; i++)
1168 {
1169 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1170
1171 pUVCpu->dbgf.s.enmDbgfCmd = DBGFCMD_NO_COMMAND;
1172 RT_ZERO(pUVCpu->dbgf.s.DbgfCmdData);
1173 }
1174
1175 /*
1176 * Init of the VM -> Debugger communication part living in the global VM structure.
1177 */
1178 pUVM->dbgf.s.cDbgEvtMax = pVM->cCpus * 5 + 10; /* Initial size of event ring, increased when full. */
1179 pUVM->dbgf.s.idxDbgEvtWrite = 0;
1180 pUVM->dbgf.s.idxDbgEvtRead = 0;
1181 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1182 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1183 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1184 int rc;
1185 pUVM->dbgf.s.paDbgEvts = (PDBGFEVENT)MMR3HeapAllocU(pUVM, MM_TAG_DBGF, pUVM->dbgf.s.cDbgEvtMax * sizeof(DBGFEVENT));
1186 if (pUVM->dbgf.s.paDbgEvts)
1187 {
1188 rc = RTSemEventCreate(&pUVM->dbgf.s.hEvtWait);
1189 if (RT_SUCCESS(rc))
1190 {
1191 rc = RTSemFastMutexCreate(&pUVM->dbgf.s.hMtxDbgEvtWr);
1192 if (RT_SUCCESS(rc))
1193 {
1194 rc = RTSemEventMultiCreate(&pUVM->dbgf.s.hEvtRingBufFull);
1195 if (RT_SUCCESS(rc))
1196 {
1197 /*
1198 * At last, set the attached flag.
1199 */
1200 ASMAtomicWriteBool(&pVM->dbgf.s.fAttached, true);
1201 *prcAttach = VINF_SUCCESS;
1202 return VINF_SUCCESS;
1203 }
1204
1205 RTSemFastMutexDestroy(pUVM->dbgf.s.hMtxDbgEvtWr);
1206 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1207 }
1208 RTSemEventDestroy(pUVM->dbgf.s.hEvtWait);
1209 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1210 }
1211 }
1212 else
1213 rc = VERR_NO_MEMORY;
1214
1215 *prcAttach = rc;
1216 return VINF_SUCCESS;
1217}
1218
1219
1220/**
1221 * Attaches a debugger to the specified VM.
1222 *
1223 * Only one debugger at a time.
1224 *
1225 * @returns VBox status code.
1226 * @param pUVM The user mode VM handle.
1227 */
1228VMMR3DECL(int) DBGFR3Attach(PUVM pUVM)
1229{
1230 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1231 PVM pVM = pUVM->pVM;
1232 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1233
1234 /*
1235 * Call the VM, use EMT rendezvous for serialization.
1236 */
1237 int rcAttach = VERR_IPE_UNINITIALIZED_STATUS;
1238 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE | VMMEMTRENDEZVOUS_FLAGS_PRIORITY, dbgfR3Attach, &rcAttach);
1239 if (RT_SUCCESS(rc))
1240 rc = rcAttach;
1241
1242 return rc;
1243}
1244
1245
1246/**
1247 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
1248 * EMT rendezvous worker for DBGFR3Detach - called on all EMTs (why?).}
1249 */
1250static DECLCALLBACK(VBOXSTRICTRC) dbgfR3Detach(PVM pVM, PVMCPU pVCpu, void *pvUser)
1251{
1252 if (pVCpu->idCpu == 0)
1253 {
1254 PUVM pUVM = (PUVM)pvUser;
1255
1256 /*
1257 * Per-CPU cleanup.
1258 */
1259 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1260 {
1261 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1262
1263 pUVCpu->dbgf.s.enmDbgfCmd = DBGFCMD_NO_COMMAND;
1264 RT_ZERO(pUVCpu->dbgf.s.DbgfCmdData);
1265 }
1266
1267 /*
1268 * De-init of the VM -> Debugger communication part living in the global VM structure.
1269 */
1270 if (pUVM->dbgf.s.paDbgEvts)
1271 {
1272 MMR3HeapFree(pUVM->dbgf.s.paDbgEvts);
1273 pUVM->dbgf.s.paDbgEvts = NULL;
1274 }
1275
1276 if (pUVM->dbgf.s.hEvtWait != NIL_RTSEMEVENT)
1277 {
1278 RTSemEventDestroy(pUVM->dbgf.s.hEvtWait);
1279 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1280 }
1281
1282 if (pUVM->dbgf.s.hMtxDbgEvtWr != NIL_RTSEMFASTMUTEX)
1283 {
1284 RTSemFastMutexDestroy(pUVM->dbgf.s.hMtxDbgEvtWr);
1285 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1286 }
1287
1288 if (pUVM->dbgf.s.hEvtRingBufFull != NIL_RTSEMEVENTMULTI)
1289 {
1290 RTSemEventMultiDestroy(pUVM->dbgf.s.hEvtRingBufFull);
1291 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1292 }
1293
1294 pUVM->dbgf.s.cDbgEvtMax = 0;
1295 pUVM->dbgf.s.idxDbgEvtWrite = 0;
1296 pUVM->dbgf.s.idxDbgEvtRead = 0;
1297 pUVM->dbgf.s.hEvtWait = NIL_RTSEMEVENT;
1298 pUVM->dbgf.s.hEvtRingBufFull = NIL_RTSEMEVENTMULTI;
1299 pUVM->dbgf.s.hMtxDbgEvtWr = NIL_RTSEMFASTMUTEX;
1300
1301 ASMAtomicWriteBool(&pVM->dbgf.s.fAttached, false);
1302 }
1303
1304 return VINF_SUCCESS;
1305}
1306
1307
1308/**
1309 * Detaches a debugger from the specified VM.
1310 *
1311 * Caller must be attached to the VM.
1312 *
1313 * @returns VBox status code.
1314 * @param pUVM The user mode VM handle.
1315 */
1316VMMR3DECL(int) DBGFR3Detach(PUVM pUVM)
1317{
1318 LogFlow(("DBGFR3Detach:\n"));
1319
1320 /*
1321 * Validate input. The UVM handle shall be valid, the VM handle might be
1322 * in the process of being destroyed already, so deal quietly with that.
1323 */
1324 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1325 PVM pVM = pUVM->pVM;
1326 if (!VM_IS_VALID_EXT(pVM))
1327 return VERR_INVALID_VM_HANDLE;
1328
1329 /*
1330 * Check if attached.
1331 */
1332 if (!pVM->dbgf.s.fAttached)
1333 return VERR_DBGF_NOT_ATTACHED;
1334
1335 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE | VMMEMTRENDEZVOUS_FLAGS_PRIORITY, dbgfR3Detach, pUVM);
1336}
1337
1338
1339/**
1340 * Wait for a debug event.
1341 *
1342 * @returns VBox status code. Will not return VBOX_INTERRUPTED.
1343 * @param pUVM The user mode VM handle.
1344 * @param cMillies Number of millis to wait.
1345 * @param pEvent Where to store the event data.
1346 */
1347VMMR3DECL(int) DBGFR3EventWait(PUVM pUVM, RTMSINTERVAL cMillies, PDBGFEVENT pEvent)
1348{
1349 /*
1350 * Check state.
1351 */
1352 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1353 PVM pVM = pUVM->pVM;
1354 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1355 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1356
1357 RT_BZERO(pEvent, sizeof(*pEvent));
1358
1359 /*
1360 * Wait for an event to arrive if there are none.
1361 */
1362 int rc = VINF_SUCCESS;
1363 uint32_t idxDbgEvtRead = ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtRead);
1364 if (idxDbgEvtRead == ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite))
1365 {
1366 do
1367 {
1368 rc = RTSemEventWait(pUVM->dbgf.s.hEvtWait, cMillies);
1369 } while ( RT_SUCCESS(rc)
1370 && idxDbgEvtRead == ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite));
1371 }
1372
1373 if (RT_SUCCESS(rc))
1374 {
1375 Assert(idxDbgEvtRead != ASMAtomicReadU32(&pUVM->dbgf.s.idxDbgEvtWrite));
1376
1377 uint32_t const cDbgEvtMax = RT_MAX(1, pUVM->dbgf.s.cDbgEvtMax);
1378 memcpy(pEvent, &pUVM->dbgf.s.paDbgEvts[idxDbgEvtRead % cDbgEvtMax], sizeof(*pEvent));
1379 ASMAtomicWriteU32(&pUVM->dbgf.s.idxDbgEvtRead, (idxDbgEvtRead + 1) % cDbgEvtMax);
1380 }
1381
1382 Log2(("DBGFR3EventWait: rc=%Rrc (event type %d)\n", rc, pEvent->enmType));
1383 return rc;
1384}
1385
1386
1387/**
1388 * Halts VM execution.
1389 *
1390 * After calling this the VM isn't actually halted until a DBGFEVENT_HALT_DONE
1391 * arrives. Until that time it's not possible to issue any new commands.
1392 *
1393 * @returns VBox status code.
1394 * @retval VWRN_DBGF_ALREADY_HALTED if @a idCpu is VMCPUID_ALL and all vCPUs
1395 * are halted.
1396 * @param pUVM The user mode VM handle.
1397 * @param idCpu The vCPU to halt, VMCPUID_ALL halts all still running vCPUs.
1398 */
1399VMMR3DECL(int) DBGFR3Halt(PUVM pUVM, VMCPUID idCpu)
1400{
1401 /*
1402 * Check state.
1403 */
1404 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1405 PVM pVM = pUVM->pVM;
1406 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1407 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1408 AssertReturn(idCpu == VMCPUID_ALL || idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
1409
1410 /*
1411 * Halt the requested CPUs as needed.
1412 */
1413 int rc;
1414 if (idCpu != VMCPUID_ALL)
1415 {
1416 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
1417 if (!dbgfR3CpuIsHalted(pUVCpu))
1418 {
1419 dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_HALT);
1420 rc = VINF_SUCCESS;
1421 }
1422 else
1423 rc = VWRN_DBGF_ALREADY_HALTED;
1424 }
1425 else
1426 {
1427 rc = VWRN_DBGF_ALREADY_HALTED;
1428 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1429 {
1430 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1431 if (!dbgfR3CpuIsHalted(pUVCpu))
1432 {
1433 dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_HALT);
1434 rc = VINF_SUCCESS;
1435 }
1436 }
1437 }
1438
1439 return rc;
1440}
1441
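/*
 * Illustrative sketch only (not part of the VMM itself): a debugger front-end
 * would typically follow DBGFR3Halt with a wait for the corresponding
 * DBGFEVENT_HALT_DONE event before issuing further commands.
 *
 *      int rc = DBGFR3Halt(pUVM, VMCPUID_ALL);
 *      if (rc == VINF_SUCCESS)             // at least one vCPU was told to halt
 *      {
 *          DBGFEVENT Event;
 *          do
 *              rc = DBGFR3EventWait(pUVM, RT_INDEFINITE_WAIT, &Event);
 *          while (RT_SUCCESS(rc) && Event.enmType != DBGFEVENT_HALT_DONE);
 *          // With SMP, one DBGFEVENT_HALT_DONE arrives per vCPU that was told to halt.
 *      }
 */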
1442
1443/**
1444 * Checks if any of the specified vCPUs have been halted by the debugger.
1445 *
1446 * @returns True if at least one vCPU is halted.
1447 * @returns False if no vCPUs are halted.
1448 * @param pUVM The user mode VM handle.
1449 * @param idCpu The CPU id to check for, VMCPUID_ALL will return true if
1450 * at least a single vCPU is halted in the debugger.
1451 */
1452VMMR3DECL(bool) DBGFR3IsHalted(PUVM pUVM, VMCPUID idCpu)
1453{
1454 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
1455 PVM pVM = pUVM->pVM;
1456 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
1457 AssertReturn(pVM->dbgf.s.fAttached, false);
1458
1459 return dbgfR3CpuAreAnyHaltedByCpuId(pUVM, idCpu);
1460}
1461
1462
1463/**
1464 * Checks if the debugger can wait for events or not.
1465 *
1466 * This function is only used by lazy, multiplexing debuggers. :-)
1467 *
1468 * @returns VBox status code.
1469 * @retval VINF_SUCCESS if waitable.
1470 * @retval VERR_SEM_OUT_OF_TURN if not waitable.
1471 * @retval VERR_INVALID_VM_HANDLE if the VM is being (/ has been) destroyed
1472 * (not asserted) or if the handle is invalid (asserted).
1473 * @retval VERR_DBGF_NOT_ATTACHED if not attached.
1474 *
1475 * @param pUVM The user mode VM handle.
1476 */
1477VMMR3DECL(int) DBGFR3QueryWaitable(PUVM pUVM)
1478{
1479 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1480
1481 /* Note! There is a slight race here, unfortunately. */
1482 PVM pVM = pUVM->pVM;
1483 if (!RT_VALID_PTR(pVM))
1484 return VERR_INVALID_VM_HANDLE;
1485 if (pVM->enmVMState >= VMSTATE_DESTROYING)
1486 return VERR_INVALID_VM_HANDLE;
1487 if (!pVM->dbgf.s.fAttached)
1488 return VERR_DBGF_NOT_ATTACHED;
1489
1490 /** @todo was: if (!RTSemPongShouldWait(...)) return VERR_SEM_OUT_OF_TURN; */
1491 return VINF_SUCCESS;
1492}
1493
1494
1495/**
1496 * Resumes VM execution.
1497 *
1498 * There is no receipt event on this command.
1499 *
1500 * @returns VBox status code.
1501 * @retval VWRN_DBGF_ALREADY_RUNNING if the specified vCPUs are all running.
1502 * @param pUVM The user mode VM handle.
1503 * @param idCpu The vCPU to resume, VMCPUID_ALL resumes all still halted vCPUs.
1504 */
1505VMMR3DECL(int) DBGFR3Resume(PUVM pUVM, VMCPUID idCpu)
1506{
1507 /*
1508 * Validate input and attachment state.
1509 */
1510 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1511 PVM pVM = pUVM->pVM;
1512 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1513 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1514
1515 /*
1516 * Ping the halted emulation threads, telling them to run.
1517 */
1518 int rc = VWRN_DBGF_ALREADY_RUNNING;
1519 if (idCpu != VMCPUID_ALL)
1520 {
1521 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
1522 if (dbgfR3CpuIsHalted(pUVCpu))
1523 {
1524 rc = dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_GO);
1525 AssertRC(rc);
1526 }
1527 }
1528 else
1529 {
1530 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
1531 {
1532 PUVMCPU pUVCpu = &pUVM->aCpus[i];
1533 if (dbgfR3CpuIsHalted(pUVCpu))
1534 {
1535 int rc2 = dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_GO);
1536 AssertRC(rc2);
1537 if (rc == VWRN_DBGF_ALREADY_RUNNING || RT_FAILURE(rc2))
1538 rc = rc2;
1539 }
1540 }
1541 }
1542
1543 return rc;
1544}
1545
1546
1547/**
1548 * Classifies the current instruction.
1549 *
1550 * @returns Type of instruction.
1551 * @param pVM The cross context VM structure.
1552 * @param pVCpu The current CPU.
1553 * @thread EMT(pVCpu)
1554 */
1555static DBGFSTEPINSTRTYPE dbgfStepGetCurInstrType(PVM pVM, PVMCPU pVCpu)
1556{
1557 /*
1558 * Read the instruction.
1559 */
1560 size_t cbRead = 0;
1561 uint8_t abOpcode[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1562 int rc = PGMR3DbgReadGCPtr(pVM, abOpcode, CPUMGetGuestFlatPC(pVCpu), sizeof(abOpcode) - 1, 0 /*fFlags*/, &cbRead);
1563 if (RT_SUCCESS(rc))
1564 {
1565 /*
1566 * Do minimal parsing. No real need to involve the disassembler here.
1567 */
1568 uint8_t *pb = abOpcode;
1569 for (;;)
1570 {
1571 switch (*pb++)
1572 {
1573 default:
1574 return DBGFSTEPINSTRTYPE_OTHER;
1575
1576 case 0xe8: /* call rel16/32 */
1577 case 0x9a: /* call farptr */
1578 case 0xcc: /* int3 */
1579 case 0xcd: /* int xx */
1580 // case 0xce: /* into */
1581 return DBGFSTEPINSTRTYPE_CALL;
1582
1583 case 0xc2: /* ret xx */
1584 case 0xc3: /* ret */
1585 case 0xca: /* retf xx */
1586 case 0xcb: /* retf */
1587 case 0xcf: /* iret */
1588 return DBGFSTEPINSTRTYPE_RET;
1589
1590 case 0xff:
1591 if ( ((*pb >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) == 2 /* call indir */
1592 || ((*pb >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) == 3) /* call indir-farptr */
1593 return DBGFSTEPINSTRTYPE_CALL;
1594 return DBGFSTEPINSTRTYPE_OTHER;
1595
1596 case 0x0f:
1597 switch (*pb++)
1598 {
1599 case 0x05: /* syscall */
1600 case 0x34: /* sysenter */
1601 return DBGFSTEPINSTRTYPE_CALL;
1602 case 0x07: /* sysret */
1603 case 0x35: /* sysexit */
1604 return DBGFSTEPINSTRTYPE_RET;
1605 }
1606 break;
1607
1608 /* Must handle some REX prefixes. So we do all normal prefixes. */
1609 case 0x40: case 0x41: case 0x42: case 0x43: case 0x44: case 0x45: case 0x46: case 0x47:
1610 case 0x48: case 0x49: case 0x4a: case 0x4b: case 0x4c: case 0x4d: case 0x4e: case 0x4f:
1611 if (!CPUMIsGuestIn64BitCode(pVCpu))
1612 return DBGFSTEPINSTRTYPE_OTHER;
1613 break;
1614
1615 case 0x2e: /* CS */
1616 case 0x36: /* SS */
1617 case 0x3e: /* DS */
1618 case 0x26: /* ES */
1619 case 0x64: /* FS */
1620 case 0x65: /* GS */
1621 case 0x66: /* op size */
1622 case 0x67: /* addr size */
1623 case 0xf0: /* lock */
1624 case 0xf2: /* REPNZ */
1625 case 0xf3: /* REPZ */
1626 break;
1627 }
1628 }
1629 }
1630
1631 return DBGFSTEPINSTRTYPE_INVALID;
1632}
1633
1634
1635/**
1636 * Checks if the stepping has reached a stop point.
1637 *
1638 * Called when raising a stepped event.
1639 *
1640 * @returns true if the event should be raised, false if we should take one more
1641 * step first.
1642 * @param pVM The cross context VM structure.
1643 * @param pVCpu The cross context per CPU structure of the calling EMT.
1644 * @thread EMT(pVCpu)
1645 */
1646static bool dbgfStepAreWeThereYet(PVM pVM, PVMCPU pVCpu)
1647{
1648 /*
1649 * Check valid pVCpu and that it matches the CPU one stepping.
1650 */
1651 if (pVCpu)
1652 {
1653 if (pVCpu->idCpu == pVM->dbgf.s.SteppingFilter.idCpu)
1654 {
1655 /*
1656 * Increase the number of steps and see if we've reached the max.
1657 */
1658 pVM->dbgf.s.SteppingFilter.cSteps++;
1659 if (pVM->dbgf.s.SteppingFilter.cSteps < pVM->dbgf.s.SteppingFilter.cMaxSteps)
1660 {
1661 /*
1662 * Check PC and SP address filtering.
1663 */
1664 if (pVM->dbgf.s.SteppingFilter.fFlags & (DBGF_STEP_F_STOP_ON_ADDRESS | DBGF_STEP_F_STOP_ON_STACK_POP))
1665 {
1666 if ( (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
1667 && pVM->dbgf.s.SteppingFilter.AddrPc == CPUMGetGuestFlatPC(pVCpu))
1668 return true;
1669 if ( (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
1670 && CPUMGetGuestFlatSP(pVCpu) - pVM->dbgf.s.SteppingFilter.AddrStackPop
1671 < pVM->dbgf.s.SteppingFilter.cbStackPop)
1672 return true;
1673 }
1674
1675 /*
1676 * Do step-over filtering separate from the step-into one.
1677 */
1678 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_OVER)
1679 {
1680 DBGFSTEPINSTRTYPE enmType = dbgfStepGetCurInstrType(pVM, pVCpu);
1681 switch (enmType)
1682 {
1683 default:
1684 if ( pVM->dbgf.s.SteppingFilter.uCallDepth != 0
1685 || (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_FILTER_MASK))
1686 break;
1687 return true;
1688 case DBGFSTEPINSTRTYPE_CALL:
1689 if ( (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_CALL)
1690 && pVM->dbgf.s.SteppingFilter.uCallDepth == 0)
1691 return true;
1692 pVM->dbgf.s.SteppingFilter.uCallDepth++;
1693 break;
1694 case DBGFSTEPINSTRTYPE_RET:
1695 if (pVM->dbgf.s.SteppingFilter.uCallDepth == 0)
1696 {
1697 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_RET)
1698 return true;
1699 /* If after return, we use the cMaxStep limit to stop the next time. */
1700 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_AFTER_RET)
1701 pVM->dbgf.s.SteppingFilter.cMaxSteps = pVM->dbgf.s.SteppingFilter.cSteps + 1;
1702 }
1703 else if (pVM->dbgf.s.SteppingFilter.uCallDepth > 0)
1704 pVM->dbgf.s.SteppingFilter.uCallDepth--;
1705 break;
1706 }
1707 return false;
1708 }
1709 /*
1710 * Filtered step-into.
1711 */
1712 else if ( pVM->dbgf.s.SteppingFilter.fFlags
1713 & (DBGF_STEP_F_STOP_ON_CALL | DBGF_STEP_F_STOP_ON_RET | DBGF_STEP_F_STOP_AFTER_RET))
1714 {
1715 DBGFSTEPINSTRTYPE enmType = dbgfStepGetCurInstrType(pVM, pVCpu);
1716 switch (enmType)
1717 {
1718 default:
1719 break;
1720 case DBGFSTEPINSTRTYPE_CALL:
1721 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_CALL)
1722 return true;
1723 break;
1724 case DBGFSTEPINSTRTYPE_RET:
1725 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_ON_RET)
1726 return true;
1727 /* If after return, we use the cMaxStep limit to stop the next time. */
1728 if (pVM->dbgf.s.SteppingFilter.fFlags & DBGF_STEP_F_STOP_AFTER_RET)
1729 pVM->dbgf.s.SteppingFilter.cMaxSteps = pVM->dbgf.s.SteppingFilter.cSteps + 1;
1730 break;
1731 }
1732 return false;
1733 }
1734 }
1735 }
1736 }
1737
1738 return true;
1739}
1740
1741
1742/**
1743 * Step Into.
1744 *
1745 * A single step event is generated from this command.
1746 * The current implementation is not reliable, so don't rely on the event coming.
1747 *
1748 * @returns VBox status code.
1749 * @param pUVM The user mode VM handle.
1750 * @param idCpu The ID of the CPU to single step on.
1751 */
1752VMMR3DECL(int) DBGFR3Step(PUVM pUVM, VMCPUID idCpu)
1753{
1754 return DBGFR3StepEx(pUVM, idCpu, DBGF_STEP_F_INTO, NULL, NULL, 0, 1);
1755}
1756
1757
1758/**
1759 * Full fledged step.
1760 *
1761 * This extended stepping API allows for doing multiple steps before raising an
1762 * event, helping implementing step over, step out and other more advanced
1763 * features.
1764 *
1765 * Like the DBGFR3Step() API, this will normally generate a DBGFEVENT_STEPPED or
1766 * DBGFEVENT_STEPPED_HYPER. However, the stepping may be interrupted by other
1767 * events, which will abort the stepping.
1768 *
1769 * The stop on pop area feature is for safeguarding step out.
1770 *
1771 * Please note though, that it will always use stepping and never breakpoints.
1772 * While this allows for a much greater flexibility it can at times be rather
1773 * slow.
1774 *
1775 * @returns VBox status code.
1776 * @param pUVM The user mode VM handle.
1777 * @param idCpu The ID of the CPU to single step on.
1778 * @param fFlags Flags controlling the stepping, DBGF_STEP_F_XXX.
1779 * Either DBGF_STEP_F_INTO or DBGF_STEP_F_OVER must
1780 * always be specified.
1781 * @param pStopPcAddr Address to stop executing at. Completely ignored
1782 * unless DBGF_STEP_F_STOP_ON_ADDRESS is specified.
1783 * @param pStopPopAddr Stack address that SP must be lower than when
1784 * performing DBGF_STEP_F_STOP_ON_STACK_POP filtering.
1785 * @param cbStopPop The range starting at @a pStopPopAddr which is
1786 * considered to be within the same thread stack. Note
1787 * that the API allows @a pStopPopAddr and @a cbStopPop
1788 * to form an area that wraps around and it will
1789 * consider the part starting at 0 as included.
1790 * @param cMaxSteps The maximum number of steps to take. This is to
1791 * prevent stepping for ever, so passing UINT32_MAX is
1792 * not recommended.
1793 *
1794 * @remarks The two address arguments must be guest context virtual addresses,
1795 * or HMA. The code doesn't make much of a point out of HMA, though.
1796 */
1797VMMR3DECL(int) DBGFR3StepEx(PUVM pUVM, VMCPUID idCpu, uint32_t fFlags, PCDBGFADDRESS pStopPcAddr,
1798 PCDBGFADDRESS pStopPopAddr, RTGCUINTPTR cbStopPop, uint32_t cMaxSteps)
1799{
1800 /*
1801 * Check state.
1802 */
1803 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1804 PVM pVM = pUVM->pVM;
1805 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1806 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_PARAMETER);
1807 AssertReturn(!(fFlags & ~DBGF_STEP_F_VALID_MASK), VERR_INVALID_FLAGS);
1808 AssertReturn(RT_BOOL(fFlags & DBGF_STEP_F_INTO) != RT_BOOL(fFlags & DBGF_STEP_F_OVER), VERR_INVALID_FLAGS);
1809 if (fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
1810 {
1811 AssertReturn(RT_VALID_PTR(pStopPcAddr), VERR_INVALID_POINTER);
1812 AssertReturn(DBGFADDRESS_IS_VALID(pStopPcAddr), VERR_INVALID_PARAMETER);
1813 AssertReturn(DBGFADDRESS_IS_VIRT_GC(pStopPcAddr), VERR_INVALID_PARAMETER);
1814 }
1815 AssertReturn(!(fFlags & DBGF_STEP_F_STOP_ON_STACK_POP) || RT_VALID_PTR(pStopPopAddr), VERR_INVALID_POINTER);
1816 if (fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
1817 {
1818 AssertReturn(RT_VALID_PTR(pStopPopAddr), VERR_INVALID_POINTER);
1819 AssertReturn(DBGFADDRESS_IS_VALID(pStopPopAddr), VERR_INVALID_PARAMETER);
1820 AssertReturn(DBGFADDRESS_IS_VIRT_GC(pStopPopAddr), VERR_INVALID_PARAMETER);
1821 AssertReturn(cbStopPop > 0, VERR_INVALID_PARAMETER);
1822 }
1823
1824 AssertReturn(pVM->dbgf.s.fAttached, VERR_DBGF_NOT_ATTACHED);
1825 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
1826 if (RT_LIKELY(dbgfR3CpuIsHalted(pUVCpu)))
1827 { /* likely */ }
1828 else
1829 return VERR_SEM_OUT_OF_TURN;
1830 Assert(pVM->dbgf.s.SteppingFilter.idCpu == NIL_VMCPUID);
1831
1832 /*
1833 * Send the emulation thread a single-step command.
1834 */
1835 if (fFlags == DBGF_STEP_F_INTO)
1836 pVM->dbgf.s.SteppingFilter.idCpu = NIL_VMCPUID;
1837 else
1838 pVM->dbgf.s.SteppingFilter.idCpu = idCpu;
1839 pVM->dbgf.s.SteppingFilter.fFlags = fFlags;
1840 if (fFlags & DBGF_STEP_F_STOP_ON_ADDRESS)
1841 pVM->dbgf.s.SteppingFilter.AddrPc = pStopPcAddr->FlatPtr;
1842 else
1843 pVM->dbgf.s.SteppingFilter.AddrPc = 0;
1844 if (fFlags & DBGF_STEP_F_STOP_ON_STACK_POP)
1845 {
1846 pVM->dbgf.s.SteppingFilter.AddrStackPop = pStopPopAddr->FlatPtr;
1847 pVM->dbgf.s.SteppingFilter.cbStackPop = cbStopPop;
1848 }
1849 else
1850 {
1851 pVM->dbgf.s.SteppingFilter.AddrStackPop = 0;
1852 pVM->dbgf.s.SteppingFilter.cbStackPop = RTGCPTR_MAX;
1853 }
1854
1855 pVM->dbgf.s.SteppingFilter.cMaxSteps = cMaxSteps;
1856 pVM->dbgf.s.SteppingFilter.cSteps = 0;
1857 pVM->dbgf.s.SteppingFilter.uCallDepth = 0;
1858
1859 Assert(dbgfR3CpuIsHalted(pUVCpu));
1860 return dbgfR3CpuSetCmdAndNotify(pUVCpu, DBGFCMD_SINGLE_STEP);
1861}
1862
1863
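/*
 * Editor's illustration (not part of the original source): a rough "step out"
 * built on the stop-on-stack-pop filter.  Assumptions for this sketch: an
 * x86/AMD64 guest (DBGFREG_RSP), a downward growing stack, and an arbitrary
 * 4 KiB stack window and 4096 step budget chosen purely for illustration.
 */
#if 0 /* illustration only */
    uint64_t uRsp = 0;
    int rc = DBGFR3RegCpuQueryU64(pUVM, idCpu, DBGFREG_RSP, &uRsp);
    if (RT_SUCCESS(rc))
    {
        DBGFADDRESS AddrStackPop;
        DBGFR3AddrFromFlat(pUVM, &AddrStackPop, uRsp);
        rc = DBGFR3StepEx(pUVM, idCpu, DBGF_STEP_F_OVER | DBGF_STEP_F_STOP_ON_STACK_POP,
                          NULL /*pStopPcAddr*/, &AddrStackPop, _4K /*cbStopPop*/, 4096 /*cMaxSteps*/);
    }
#endif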
1864
1865/**
1866 * dbgfR3EventConfigEx argument packet.
1867 */
1868typedef struct DBGFR3EVENTCONFIGEXARGS
1869{
1870 PCDBGFEVENTCONFIG paConfigs;
1871 size_t cConfigs;
1872 int rc;
1873} DBGFR3EVENTCONFIGEXARGS;
1874/** Pointer to a dbgfR3EventConfigEx argument packet. */
1875typedef DBGFR3EVENTCONFIGEXARGS *PDBGFR3EVENTCONFIGEXARGS;
1876
1877
1878/**
1879 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Worker for DBGFR3EventConfigEx.}
1880 */
1881static DECLCALLBACK(VBOXSTRICTRC) dbgfR3EventConfigEx(PVM pVM, PVMCPU pVCpu, void *pvUser)
1882{
1883 if (pVCpu->idCpu == 0)
1884 {
1885 PDBGFR3EVENTCONFIGEXARGS pArgs = (PDBGFR3EVENTCONFIGEXARGS)pvUser;
1886 DBGFEVENTCONFIG volatile const *paConfigs = pArgs->paConfigs;
1887 size_t cConfigs = pArgs->cConfigs;
1888
1889 /*
1890 * Apply the changes.
1891 */
1892 unsigned cChanges = 0;
1893 for (uint32_t i = 0; i < cConfigs; i++)
1894 {
1895 DBGFEVENTTYPE enmType = paConfigs[i].enmType;
1896 AssertReturn(enmType >= DBGFEVENT_FIRST_SELECTABLE && enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
1897 if (paConfigs[i].fEnabled)
1898 cChanges += ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, enmType) == false;
1899 else
1900 cChanges += ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, enmType) == true;
1901 }
1902
1903 /*
1904 * Inform HM about changes.
1905 */
1906 if (cChanges > 0 && HMIsEnabled(pVM))
1907 {
1908 HMR3NotifyDebugEventChanged(pVM);
1909 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
1910 }
1911 }
1912 else if (HMIsEnabled(pVM))
1913 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
1914
1915 return VINF_SUCCESS;
1916}
1917
1918
1919/**
1920 * Configures (enables/disables) multiple selectable debug events.
1921 *
1922 * @returns VBox status code.
1923 * @param pUVM The user mode VM handle.
1924 * @param paConfigs The events to configure and their new states.
1925 * @param cConfigs Number of entries in @a paConfigs.
1926 */
1927VMMR3DECL(int) DBGFR3EventConfigEx(PUVM pUVM, PCDBGFEVENTCONFIG paConfigs, size_t cConfigs)
1928{
1929 /*
1930 * Validate input.
1931 */
1932 size_t i = cConfigs;
1933 while (i-- > 0)
1934 {
1935 AssertReturn(paConfigs[i].enmType >= DBGFEVENT_FIRST_SELECTABLE, VERR_INVALID_PARAMETER);
1936 AssertReturn(paConfigs[i].enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
1937 }
1938 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1939 PVM pVM = pUVM->pVM;
1940 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1941
1942 /*
1943 * Apply the changes in EMT(0) and rendezvous with the other CPUs so they
1944 * can sync their data and execution with new debug state.
1945 */
1946 DBGFR3EVENTCONFIGEXARGS Args = { paConfigs, cConfigs, VINF_SUCCESS };
1947 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING | VMMEMTRENDEZVOUS_FLAGS_PRIORITY,
1948 dbgfR3EventConfigEx, &Args);
1949 if (RT_SUCCESS(rc))
1950 rc = Args.rc;
1951 return rc;
1952}
1953
1954
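/*
 * Editor's illustration (not part of the original source): enabling a couple
 * of selectable events in a single call.  The exception events used here are
 * assumed to be members of the selectable range and serve only as examples.
 */
#if 0 /* illustration only */
    DBGFEVENTCONFIG aCfgs[] =
    {
        { DBGFEVENT_XCPT_GP, true },
        { DBGFEVENT_XCPT_PF, true },
    };
    int rc = DBGFR3EventConfigEx(pUVM, aCfgs, RT_ELEMENTS(aCfgs));
#endif
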
1955/**
1956 * Enables or disables a selectable debug event.
1957 *
1958 * @returns VBox status code.
1959 * @param pUVM The user mode VM handle.
1960 * @param enmEvent The selectable debug event.
1961 * @param fEnabled The new state.
1962 */
1963VMMR3DECL(int) DBGFR3EventConfig(PUVM pUVM, DBGFEVENTTYPE enmEvent, bool fEnabled)
1964{
1965 /*
1966 * Convert to an array call.
1967 */
1968 DBGFEVENTCONFIG EvtCfg = { enmEvent, fEnabled };
1969 return DBGFR3EventConfigEx(pUVM, &EvtCfg, 1);
1970}
1971
1972
1973/**
1974 * Checks if the given selectable event is enabled.
1975 *
1976 * @returns true if enabled, false if not or invalid input.
1977 * @param pUVM The user mode VM handle.
1978 * @param enmEvent The selectable debug event.
1979 * @sa DBGFR3EventQuery
1980 */
1981VMMR3DECL(bool) DBGFR3EventIsEnabled(PUVM pUVM, DBGFEVENTTYPE enmEvent)
1982{
1983 /*
1984 * Validate input.
1985 */
1986 AssertReturn( enmEvent >= DBGFEVENT_HALT_DONE
1987 && enmEvent < DBGFEVENT_END, false);
1988 Assert( enmEvent >= DBGFEVENT_FIRST_SELECTABLE
1989 || enmEvent == DBGFEVENT_BREAKPOINT
1990 || enmEvent == DBGFEVENT_BREAKPOINT_IO
1991 || enmEvent == DBGFEVENT_BREAKPOINT_MMIO);
1992
1993 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
1994 PVM pVM = pUVM->pVM;
1995 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
1996
1997 /*
1998 * Check the event status.
1999 */
2000 return ASMBitTest(&pVM->dbgf.s.bmSelectedEvents, enmEvent);
2001}
2002
2003
2004/**
2005 * Queries the status of a set of events.
2006 *
2007 * @returns VBox status code.
2008 * @param pUVM The user mode VM handle.
2009 * @param paConfigs The events to query and where to return the state.
2010 * @param cConfigs The number of elements in @a paConfigs.
2011 * @sa DBGFR3EventIsEnabled, DBGF_IS_EVENT_ENABLED
2012 */
2013VMMR3DECL(int) DBGFR3EventQuery(PUVM pUVM, PDBGFEVENTCONFIG paConfigs, size_t cConfigs)
2014{
2015 /*
2016 * Validate input.
2017 */
2018 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2019 PVM pVM = pUVM->pVM;
2020 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2021
2022 for (size_t i = 0; i < cConfigs; i++)
2023 {
2024 DBGFEVENTTYPE enmType = paConfigs[i].enmType;
2025 AssertReturn( enmType >= DBGFEVENT_HALT_DONE
2026 && enmType < DBGFEVENT_END, VERR_INVALID_PARAMETER);
2027 Assert( enmType >= DBGFEVENT_FIRST_SELECTABLE
2028 || enmType == DBGFEVENT_BREAKPOINT
2029 || enmType == DBGFEVENT_BREAKPOINT_IO
2030 || enmType == DBGFEVENT_BREAKPOINT_MMIO);
2031 paConfigs[i].fEnabled = ASMBitTest(&pVM->dbgf.s.bmSelectedEvents, paConfigs[i].enmType);
2032 }
2033
2034 return VINF_SUCCESS;
2035}
2036
2037
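/*
 * Editor's illustration (not part of the original source): querying the state
 * of a selectable event both ways.  DBGFEVENT_XCPT_PF is just an assumed
 * example member of the selectable range.
 */
#if 0 /* illustration only */
    DBGFEVENTCONFIG QueryCfg = { DBGFEVENT_XCPT_PF, false };
    if (   RT_SUCCESS(DBGFR3EventQuery(pUVM, &QueryCfg, 1))
        && QueryCfg.fEnabled)
        LogRel(("#PF interception is enabled\n"));
    bool const fEnabled = DBGFR3EventIsEnabled(pUVM, DBGFEVENT_XCPT_PF); /* same answer, single event */
#endif
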
2038/**
2039 * dbgfR3InterruptConfigEx argument packet.
2040 */
2041typedef struct DBGFR3INTERRUPTCONFIGEXARGS
2042{
2043 PCDBGFINTERRUPTCONFIG paConfigs;
2044 size_t cConfigs;
2045 int rc;
2046} DBGFR3INTERRUPTCONFIGEXARGS;
2047/** Pointer to a dbgfR3InterruptConfigEx argument packet. */
2048typedef DBGFR3INTERRUPTCONFIGEXARGS *PDBGFR3INTERRUPTCONFIGEXARGS;
2049
2050/**
2051 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
2052 * Worker for DBGFR3InterruptConfigEx.}
2053 */
2054static DECLCALLBACK(VBOXSTRICTRC) dbgfR3InterruptConfigEx(PVM pVM, PVMCPU pVCpu, void *pvUser)
2055{
2056 if (pVCpu->idCpu == 0)
2057 {
2058 PDBGFR3INTERRUPTCONFIGEXARGS pArgs = (PDBGFR3INTERRUPTCONFIGEXARGS)pvUser;
2059 PCDBGFINTERRUPTCONFIG paConfigs = pArgs->paConfigs;
2060 size_t cConfigs = pArgs->cConfigs;
2061
2062 /*
2063 * Apply the changes.
2064 */
2065 bool fChanged = false;
2066 bool fThis;
2067 for (uint32_t i = 0; i < cConfigs; i++)
2068 {
2069 /*
2070 * Hardware interrupts.
2071 */
2072 if (paConfigs[i].enmHardState == DBGFINTERRUPTSTATE_ENABLED)
2073 {
2074 fChanged |= fThis = ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmHardIntBreakpoints, paConfigs[i].iInterrupt) == false;
2075 if (fThis)
2076 {
2077 Assert(pVM->dbgf.s.cHardIntBreakpoints < 256);
2078 pVM->dbgf.s.cHardIntBreakpoints++;
2079 }
2080 }
2081 else if (paConfigs[i].enmHardState == DBGFINTERRUPTSTATE_DISABLED)
2082 {
2083 fChanged |= fThis = ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmHardIntBreakpoints, paConfigs[i].iInterrupt) == true;
2084 if (fThis)
2085 {
2086 Assert(pVM->dbgf.s.cHardIntBreakpoints > 0);
2087 pVM->dbgf.s.cHardIntBreakpoints--;
2088 }
2089 }
2090
2091 /*
2092 * Software interrupts.
2093 */
2094 if (paConfigs[i].enmSoftState == DBGFINTERRUPTSTATE_ENABLED)
2095 {
2096 fChanged |= fThis = ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSoftIntBreakpoints, paConfigs[i].iInterrupt) == false;
2097 if (fThis)
2098 {
2099 Assert(pVM->dbgf.s.cSoftIntBreakpoints < 256);
2100 pVM->dbgf.s.cSoftIntBreakpoints++;
2101 }
2102 }
2103 else if (paConfigs[i].enmSoftState == DBGFINTERRUPTSTATE_DISABLED)
2104 {
2105 fChanged |= fThis = ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSoftIntBreakpoints, paConfigs[i].iInterrupt) == true;
2106 if (fThis)
2107 {
2108 Assert(pVM->dbgf.s.cSoftIntBreakpoints > 0);
2109 pVM->dbgf.s.cSoftIntBreakpoints--;
2110 }
2111 }
2112 }
2113
2114 /*
2115 * Update the event bitmap entries.
2116 */
2117 if (pVM->dbgf.s.cHardIntBreakpoints > 0)
2118 fChanged |= ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_HARDWARE) == false;
2119 else
2120 fChanged |= ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_HARDWARE) == true;
2121
2122 if (pVM->dbgf.s.cSoftIntBreakpoints > 0)
2123 fChanged |= ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_SOFTWARE) == false;
2124 else
2125 fChanged |= ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_SOFTWARE) == true;
2126
2127 /*
2128 * Inform HM about changes.
2129 */
2130 if (fChanged && HMIsEnabled(pVM))
2131 {
2132 HMR3NotifyDebugEventChanged(pVM);
2133 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2134 }
2135 }
2136 else if (HMIsEnabled(pVM))
2137 HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
2138
2139 return VINF_SUCCESS;
2140}
2141
2142
2143/**
2144 * Configures interception of hardware and software interrupts (multiple at once).
2145 *
2146 * @returns VBox status code.
2147 * @param pUVM The user mode VM handle.
2148 * @param paConfigs The interrupts to configure and their new states.
2149 * @param cConfigs The number of elements in @a paConfigs.
2150 * @sa DBGFR3InterruptHardwareConfig, DBGFR3InterruptSoftwareConfig
2151 */
2152VMMR3DECL(int) DBGFR3InterruptConfigEx(PUVM pUVM, PCDBGFINTERRUPTCONFIG paConfigs, size_t cConfigs)
2153{
2154 /*
2155 * Validate input.
2156 */
2157 size_t i = cConfigs;
2158 while (i-- > 0)
2159 {
2160 AssertReturn(paConfigs[i].enmHardState <= DBGFINTERRUPTSTATE_DONT_TOUCH, VERR_INVALID_PARAMETER);
2161 AssertReturn(paConfigs[i].enmSoftState <= DBGFINTERRUPTSTATE_DONT_TOUCH, VERR_INVALID_PARAMETER);
2162 }
2163
2164 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2165 PVM pVM = pUVM->pVM;
2166 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2167
2168 /*
2169 * Apply the changes in EMT(0) and rendezvous with the other CPUs so they
2170 * can sync their data and execution with new debug state.
2171 */
2172 DBGFR3INTERRUPTCONFIGEXARGS Args = { paConfigs, cConfigs, VINF_SUCCESS };
2173 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING | VMMEMTRENDEZVOUS_FLAGS_PRIORITY,
2174 dbgfR3InterruptConfigEx, &Args);
2175 if (RT_SUCCESS(rc))
2176 rc = Args.rc;
2177 return rc;
2178}
2179
2180
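/*
 * Editor's illustration (not part of the original source): intercepting a
 * hardware and a software interrupt in one call.  Vectors 0x0e and 0x80 are
 * arbitrary illustration values.
 */
#if 0 /* illustration only */
    DBGFINTERRUPTCONFIG aIntCfgs[] =
    {
        { 0x0e /*iInterrupt*/, DBGFINTERRUPTSTATE_ENABLED,    DBGFINTERRUPTSTATE_DONT_TOUCH },
        { 0x80 /*iInterrupt*/, DBGFINTERRUPTSTATE_DONT_TOUCH, DBGFINTERRUPTSTATE_ENABLED    },
    };
    int rc = DBGFR3InterruptConfigEx(pUVM, aIntCfgs, RT_ELEMENTS(aIntCfgs));
#endif
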
2181/**
2182 * Configures interception of a hardware interrupt.
2183 *
2184 * @returns VBox status code.
2185 * @param pUVM The user mode VM handle.
2186 * @param iInterrupt The interrupt number.
2187 * @param fEnabled Whether interception is enabled or not.
2188 * @sa DBGFR3InterruptSoftwareConfig, DBGFR3InterruptConfigEx
2189 */
2190VMMR3DECL(int) DBGFR3InterruptHardwareConfig(PUVM pUVM, uint8_t iInterrupt, bool fEnabled)
2191{
2192 /*
2193 * Convert to DBGFR3InterruptConfigEx call.
2194 */
2195 DBGFINTERRUPTCONFIG IntCfg = { iInterrupt, (uint8_t)fEnabled, DBGFINTERRUPTSTATE_DONT_TOUCH };
2196 return DBGFR3InterruptConfigEx(pUVM, &IntCfg, 1);
2197}
2198
2199
2200/**
2201 * Configures interception of a software interrupt.
2202 *
2203 * @returns VBox status code.
2204 * @param pUVM The user mode VM handle.
2205 * @param iInterrupt The interrupt number.
2206 * @param fEnabled Whether interception is enabled or not.
2207 * @sa DBGFR3InterruptHardwareConfig, DBGFR3InterruptConfigEx
2208 */
2209VMMR3DECL(int) DBGFR3InterruptSoftwareConfig(PUVM pUVM, uint8_t iInterrupt, bool fEnabled)
2210{
2211 /*
2212 * Convert to DBGFR3InterruptConfigEx call.
2213 */
2214 DBGFINTERRUPTCONFIG IntCfg = { iInterrupt, DBGFINTERRUPTSTATE_DONT_TOUCH, (uint8_t)fEnabled };
2215 return DBGFR3InterruptConfigEx(pUVM, &IntCfg, 1);
2216}
2217
2218
2219/**
2220 * Checks whether interception is enabled for a hardware interrupt.
2221 *
2222 * @returns true if enabled, false if not or invalid input.
2223 * @param pUVM The user mode VM handle.
2224 * @param iInterrupt The interrupt number.
2225 * @sa DBGFR3InterruptSoftwareIsEnabled, DBGF_IS_HARDWARE_INT_ENABLED,
2226 * DBGF_IS_SOFTWARE_INT_ENABLED
2227 */
2228VMMR3DECL(bool) DBGFR3InterruptHardwareIsEnabled(PUVM pUVM, uint8_t iInterrupt)
2229{
2230 /*
2231 * Validate input.
2232 */
2233 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2234 PVM pVM = pUVM->pVM;
2235 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2236
2237 /*
2238 * Check it.
2239 */
2240 return ASMBitTest(&pVM->dbgf.s.bmHardIntBreakpoints, iInterrupt);
2241}
2242
2243
2244/**
2245 * Checks whether interception is enabled for a software interrupt.
2246 *
2247 * @returns true if enabled, false if not or invalid input.
2248 * @param pUVM The user mode VM handle.
2249 * @param iInterrupt The interrupt number.
2250 * @sa DBGFR3InterruptHardwareIsEnabled, DBGF_IS_SOFTWARE_INT_ENABLED,
2251 * DBGF_IS_HARDWARE_INT_ENABLED,
2252 */
2253VMMR3DECL(bool) DBGFR3InterruptSoftwareIsEnabled(PUVM pUVM, uint8_t iInterrupt)
2254{
2255 /*
2256 * Validate input.
2257 */
2258 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2259 PVM pVM = pUVM->pVM;
2260 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2261
2262 /*
2263 * Check it.
2264 */
2265 return ASMBitTest(&pVM->dbgf.s.bmSoftIntBreakpoints, iInterrupt);
2266}
2267
2268
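/*
 * Editor's illustration (not part of the original source): checking whether an
 * interrupt vector is currently intercepted.  Vector 0x0e is an arbitrary
 * illustration value.
 */
#if 0 /* illustration only */
    if (DBGFR3InterruptHardwareIsEnabled(pUVM, 0x0e /*iInterrupt*/))
        LogRel(("Hardware interrupt 0x0e is intercepted\n"));
#endif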
2269
2270/**
2271 * Call this to single step programmatically.
2272 *
2273 * You must pass down the return code to the EM loop! That's
2274 * where the actual single stepping takes place (at least in the
2275 * current implementation).
2276 *
2277 * @returns VINF_EM_DBG_STEP
2278 *
2279 * @param pVCpu The cross context virtual CPU structure.
2280 *
2281 * @thread VCpu EMT
2282 * @internal
2283 */
2284VMMR3_INT_DECL(int) DBGFR3PrgStep(PVMCPU pVCpu)
2285{
2286 VMCPU_ASSERT_EMT(pVCpu);
2287
2288 pVCpu->dbgf.s.fSingleSteppingRaw = true;
2289 return VINF_EM_DBG_STEP;
2290}
2291
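/*
 * Editor's illustration (not part of the original source): how a hypothetical
 * caller running on the EMT would use DBGFR3PrgStep, handing the status
 * straight back so the EM loop performs the actual step.
 */
#if 0 /* illustration only, inside a function running on the EMT */
    return DBGFR3PrgStep(pVCpu);
#endif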
2292
2293/**
2294 * Inject an NMI into a running VM (only VCPU 0!)
2295 *
2296 * @returns VBox status code.
2297 * @param pUVM The user mode VM structure.
2298 * @param idCpu The ID of the CPU to inject the NMI on.
2299 */
2300VMMR3DECL(int) DBGFR3InjectNMI(PUVM pUVM, VMCPUID idCpu)
2301{
2302 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2303 PVM pVM = pUVM->pVM;
2304 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2305 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
2306
2307 /** @todo Implement generic NMI injection. */
2308 /** @todo NEM: NMI injection */
2309 if (!HMIsEnabled(pVM))
2310 return VERR_NOT_SUP_BY_NEM;
2311
2312 VMCPU_FF_SET(pVM->apCpusR3[idCpu], VMCPU_FF_INTERRUPT_NMI);
2313 return VINF_SUCCESS;
2314}
2315