VirtualBox

source: vbox/trunk/src/VBox/VMM/include/EMHandleRCTmpl.h@76561

Last change on this file since 76561 was 76561, checked in by vboxsync, 6 years ago

VMM: Use VMM_INCLUDED_SRC_ as header guard prefix for files in the VMM subtree, while the CPU profiles continue using the current VBOX_CPUDB_ prefix.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 14.2 KB
/* $Id: EMHandleRCTmpl.h 76561 2019-01-01 03:13:40Z vboxsync $ */
/** @file
 * EM - emR3[Raw|Hm|Nem]HandleRC template.
 */

/*
 * Copyright (C) 2006-2019 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#ifndef VMM_INCLUDED_SRC_include_EMHandleRCTmpl_h
#define VMM_INCLUDED_SRC_include_EMHandleRCTmpl_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

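/* Note: in an ISO C #if expression, defined(X) evaluates to 1 when X is
   defined and 0 otherwise, so the sum below counts how many of the three
   mode macros are defined and enforces that exactly one of them is. */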
#if defined(EMHANDLERC_WITH_PATM) + defined(EMHANDLERC_WITH_HM) + defined(EMHANDLERC_WITH_NEM) != 1
# error "Exactly one of these must be defined: EMHANDLERC_WITH_PATM, EMHANDLERC_WITH_HM, EMHANDLERC_WITH_NEM"
#endif


/**
 * Process a subset of the raw-mode, HM and NEM return codes.
 *
 * Since we have to share this with raw-mode single stepping, this inline
 * function has been created to avoid code duplication.
 *
 * @returns VINF_SUCCESS if it's ok to continue raw mode.
 * @returns VBox status code to return to the EM main loop.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   rc      The return code.
 */
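/* Exactly one of the three definitions below is compiled into a given
   translation unit; the DOXYGEN_RUNNING case makes the HM variant the one
   picked up by the documentation run. */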
#ifdef EMHANDLERC_WITH_PATM
int emR3RawHandleRC(PVM pVM, PVMCPU pVCpu, int rc)
#elif defined(EMHANDLERC_WITH_HM) || defined(DOXYGEN_RUNNING)
int emR3HmHandleRC(PVM pVM, PVMCPU pVCpu, int rc)
#elif defined(EMHANDLERC_WITH_NEM)
int emR3NemHandleRC(PVM pVM, PVMCPU pVCpu, int rc)
#endif
{
    switch (rc)
    {
        /*
         * Common & simple ones.
         */
        case VINF_SUCCESS:
            break;
        case VINF_EM_RESCHEDULE_RAW:
        case VINF_EM_RESCHEDULE_HM:
        case VINF_EM_RAW_INTERRUPT:
        case VINF_EM_RAW_TO_R3:
        case VINF_EM_RAW_TIMER_PENDING:
        case VINF_EM_PENDING_REQUEST:
            rc = VINF_SUCCESS;
            break;

#ifdef EMHANDLERC_WITH_PATM
        /*
         * Privileged instruction.
         */
        case VINF_EM_RAW_EXCEPTION_PRIVILEGED:
        case VINF_PATM_PATCH_TRAP_GP:
            rc = emR3RawPrivileged(pVM, pVCpu);
            break;

        case VINF_EM_RAW_GUEST_TRAP:
            /*
             * Got a trap which needs dispatching.
             */
            if (PATMR3IsInsidePatchJump(pVM, pVCpu->cpum.GstCtx.eip, NULL))
            {
                AssertReleaseMsgFailed(("FATAL ERROR: executing random instruction inside generated patch jump %08X\n", CPUMGetGuestEIP(pVCpu)));
                rc = VERR_EM_RAW_PATCH_CONFLICT;
                break;
            }
            rc = emR3RawGuestTrap(pVM, pVCpu);
            break;

        /*
         * Trap in patch code.
         */
        case VINF_PATM_PATCH_TRAP_PF:
        case VINF_PATM_PATCH_INT3:
            rc = emR3RawPatchTrap(pVM, pVCpu, rc);
            break;

        case VINF_PATM_DUPLICATE_FUNCTION:
            Assert(PATMIsPatchGCAddr(pVM, pVCpu->cpum.GstCtx.eip));
            rc = PATMR3DuplicateFunctionRequest(pVM, &pVCpu->cpum.GstCtx);
            AssertRC(rc);
            rc = VINF_SUCCESS;
            break;

        case VINF_PATM_CHECK_PATCH_PAGE:
            rc = PATMR3HandleMonitoredPage(pVM);
            AssertRC(rc);
            rc = VINF_SUCCESS;
            break;

        /*
         * Patch manager.
         */
        case VERR_EM_RAW_PATCH_CONFLICT:
            AssertReleaseMsgFailed(("%Rrc handling is not yet implemented\n", rc));
            break;

        /*
         * Memory mapped I/O access - attempt to patch the instruction
         */
        case VINF_PATM_HC_MMIO_PATCH_READ:
            rc = PATMR3InstallPatch(pVM, SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pVCpu->cpum.GstCtx.eip),
                                    PATMFL_MMIO_ACCESS
                                    | (CPUMGetGuestCodeBits(pVCpu) == 32 ? PATMFL_CODE32 : 0));
            if (RT_FAILURE(rc))
                rc = emR3ExecuteInstruction(pVM, pVCpu, "MMIO");
            break;

        case VINF_PATM_HC_MMIO_PATCH_WRITE:
            AssertFailed(); /* not yet implemented. */
            rc = emR3ExecuteInstruction(pVM, pVCpu, "MMIO");
            break;
#endif /* EMHANDLERC_WITH_PATM */

#ifndef EMHANDLERC_WITH_NEM
        /*
         * Conflict or out of page tables.
         *
         * VM_FF_PGM_SYNC_CR3 is set by the hypervisor and all we need to
         * do here is to execute the pending forced actions.
         */
        case VINF_PGM_SYNC_CR3:
            AssertMsg(VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL),
                      ("VINF_PGM_SYNC_CR3 and no VMCPU_FF_PGM_SYNC_CR3*!\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * PGM pool flush pending (guest SMP only).
         */
        /** @todo jumping back and forth between ring 0 and 3 can burn a lot of cycles
         * if the EMT thread that's supposed to handle the flush is currently not active
         * (e.g. waiting to be scheduled) -> fix this properly!
         *
         * bird: Since the clearing is global and done via a rendezvous any CPU can do
         *       it. They would have to choose who to call VMMR3EmtRendezvous and send
         *       the rest to VMMR3EmtRendezvousFF ... Hmm ... that's not going to work
         *       all that well since the latter will race the setup done by the
         *       first. Guess that means we need some new magic in that area for
         *       handling this case. :/
         */
        case VINF_PGM_POOL_FLUSH_PENDING:
            rc = VINF_SUCCESS;
            break;

        /*
         * Paging mode change.
         */
        case VINF_PGM_CHANGE_MODE:
            CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
            rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER);
            if (rc == VINF_SUCCESS)
                rc = VINF_EM_RESCHEDULE;
            AssertMsg(RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST), ("%Rrc\n", rc));
            break;
#endif /* !EMHANDLERC_WITH_NEM */

#ifdef EMHANDLERC_WITH_PATM
        /*
         * CSAM wants to perform a task in ring-3. It has set an FF action flag.
         */
        case VINF_CSAM_PENDING_ACTION:
            rc = VINF_SUCCESS;
            break;

        /*
         * Invoked Interrupt gate - must directly (!) go to the recompiler.
         */
        case VINF_EM_RAW_INTERRUPT_PENDING:
        case VINF_EM_RAW_RING_SWITCH_INT:
            Assert(TRPMHasTrap(pVCpu));
            Assert(!PATMIsPatchGCAddr(pVM, pVCpu->cpum.GstCtx.eip));

            if (TRPMHasTrap(pVCpu))
            {
                /* If the guest gate is marked unpatched, then we will check again if we can patch it. */
                uint8_t u8Interrupt = TRPMGetTrapNo(pVCpu);
                if (TRPMR3GetGuestTrapHandler(pVM, u8Interrupt) == TRPM_INVALID_HANDLER)
                {
                    CSAMR3CheckGates(pVM, u8Interrupt, 1);
                    Log(("emR3RawHandleRC: recheck gate %x -> valid=%d\n", u8Interrupt, TRPMR3GetGuestTrapHandler(pVM, u8Interrupt) != TRPM_INVALID_HANDLER));
                    /* Note: If it was successful, then we could go back to raw mode, but let's keep things simple for now. */
                }
            }
            rc = VINF_EM_RESCHEDULE_REM;
            break;

        /*
         * Other ring switch types.
         */
        case VINF_EM_RAW_RING_SWITCH:
            rc = emR3RawRingSwitch(pVM, pVCpu);
            break;
#endif /* EMHANDLERC_WITH_PATM */

        /*
         * I/O Port access - emulate the instruction.
         */
        case VINF_IOM_R3_IOPORT_READ:
        case VINF_IOM_R3_IOPORT_WRITE:
        case VINF_EM_RESUME_R3_HISTORY_EXEC: /* Resume EMHistoryExec after VMCPU_FF_IOM. */
            rc = emR3ExecuteIOInstruction(pVM, pVCpu);
            break;

        /*
         * Execute pending I/O Port access.
         */
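        /* These complete an I/O access recorded as pending on the VCpu state,
           rather than re-decoding the guest instruction (see the
           emR3ExecutePendingIoPortWrite/Read helpers used below). */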
        case VINF_EM_PENDING_R3_IOPORT_WRITE:
            rc = VBOXSTRICTRC_TODO(emR3ExecutePendingIoPortWrite(pVM, pVCpu));
            break;
        case VINF_EM_PENDING_R3_IOPORT_READ:
            rc = VBOXSTRICTRC_TODO(emR3ExecutePendingIoPortRead(pVM, pVCpu));
            break;

        /*
         * Memory mapped I/O access - emulate the instruction.
         */
        case VINF_IOM_R3_MMIO_READ:
        case VINF_IOM_R3_MMIO_WRITE:
        case VINF_IOM_R3_MMIO_READ_WRITE:
            rc = emR3ExecuteInstruction(pVM, pVCpu, "MMIO");
            break;

        /*
         * Machine specific register access - emulate the instruction.
         */
        case VINF_CPUM_R3_MSR_READ:
        case VINF_CPUM_R3_MSR_WRITE:
            rc = emR3ExecuteInstruction(pVM, pVCpu, "MSR");
            break;

        /*
         * GIM hypercall.
         */
        case VINF_GIM_R3_HYPERCALL:
            rc = emR3ExecuteInstruction(pVM, pVCpu, "Hypercall");
            break;

#ifdef EMHANDLERC_WITH_HM
        case VINF_EM_HM_PATCH_TPR_INSTR:
            rc = HMR3PatchTprInstr(pVM, pVCpu);
            break;
#endif

#ifdef EMHANDLERC_WITH_PATM
        /*
         * Execute instruction.
         */
        case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
            rc = emR3ExecuteInstruction(pVM, pVCpu, "LDT FAULT: ");
            break;
        case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
            rc = emR3ExecuteInstruction(pVM, pVCpu, "GDT FAULT: ");
            break;
        case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
            rc = emR3ExecuteInstruction(pVM, pVCpu, "IDT FAULT: ");
            break;
        case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
            rc = emR3ExecuteInstruction(pVM, pVCpu, "TSS FAULT: ");
            break;

        case VINF_PATM_PENDING_IRQ_AFTER_IRET:
            rc = emR3ExecuteInstruction(pVM, pVCpu, "EMUL: ", VINF_PATM_PENDING_IRQ_AFTER_IRET);
            break;

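        /* In PATM builds the patch-emulation request joins the generic
           "EMUL:" case below; in HM/NEM builds VINF_EM_RAW_GUEST_TRAP takes
           that slot instead. */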
        case VINF_PATCH_EMULATE_INSTR:
#else
        case VINF_EM_RAW_GUEST_TRAP:
#endif
        case VINF_EM_RAW_EMULATE_INSTR:
            rc = emR3ExecuteInstruction(pVM, pVCpu, "EMUL: ");
            break;

        case VINF_EM_RAW_INJECT_TRPM_EVENT:
            CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
            rc = VBOXSTRICTRC_VAL(IEMInjectTrpmEvent(pVCpu));
            /* The following condition should be removed when IEM_IMPLEMENTS_TASKSWITCH becomes true. */
            if (rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
                rc = emR3ExecuteInstruction(pVM, pVCpu, "EVENT: ");
            break;


#ifdef EMHANDLERC_WITH_PATM
        /*
         * Stale selector and iret traps => REM.
         */
        case VINF_EM_RAW_STALE_SELECTOR:
        case VINF_EM_RAW_IRET_TRAP:
            /* We will not go to the recompiler if EIP points to patch code. */
            if (PATMIsPatchGCAddr(pVM, pVCpu->cpum.GstCtx.eip))
            {
                pVCpu->cpum.GstCtx.eip = PATMR3PatchToGCPtr(pVM, (RTGCPTR)pVCpu->cpum.GstCtx.eip, 0);
            }
            LogFlow(("emR3RawHandleRC: %Rrc -> %Rrc\n", rc, VINF_EM_RESCHEDULE_REM));
            rc = VINF_EM_RESCHEDULE_REM;
            break;

        /*
         * Conflict in GDT, resync and continue.
         */
        case VINF_SELM_SYNC_GDT:
            AssertMsg(VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_SELM_SYNC_TSS),
                      ("VINF_SELM_SYNC_GDT without VMCPU_FF_SELM_SYNC_GDT/LDT/TSS!\n"));
            rc = VINF_SUCCESS;
            break;
#endif

        /*
         * Up a level.
         */
        case VINF_EM_TERMINATE:
        case VINF_EM_OFF:
        case VINF_EM_RESET:
        case VINF_EM_SUSPEND:
        case VINF_EM_HALT:
        case VINF_EM_RESUME:
        case VINF_EM_NO_MEMORY:
        case VINF_EM_RESCHEDULE:
        case VINF_EM_RESCHEDULE_REM:
        case VINF_EM_WAIT_SIPI:
            break;

        /*
         * Up a level and invoke the debugger.
         */
        case VINF_EM_DBG_STEPPED:
        case VINF_EM_DBG_BREAKPOINT:
        case VINF_EM_DBG_STEP:
        case VINF_EM_DBG_HYPER_BREAKPOINT:
        case VINF_EM_DBG_HYPER_STEPPED:
        case VINF_EM_DBG_HYPER_ASSERTION:
        case VINF_EM_DBG_STOP:
        case VINF_EM_DBG_EVENT:
            break;

        /*
         * Up a level, dump and debug.
         */
        case VERR_TRPM_DONT_PANIC:
        case VERR_TRPM_PANIC:
        case VERR_VMM_RING0_ASSERTION:
        case VINF_EM_TRIPLE_FAULT:
        case VERR_VMM_HYPER_CR3_MISMATCH:
        case VERR_VMM_RING3_CALL_DISABLED:
        case VERR_IEM_INSTR_NOT_IMPLEMENTED:
        case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
        case VERR_EM_GUEST_CPU_HANG:
            break;

#ifdef EMHANDLERC_WITH_HM
        /*
         * Up a level, after HM has done some release logging.
         */
        case VERR_VMX_INVALID_VMCS_FIELD:
        case VERR_VMX_INVALID_VMCS_PTR:
        case VERR_VMX_INVALID_VMXON_PTR:
        case VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE:
        case VERR_VMX_UNEXPECTED_EXCEPTION:
        case VERR_VMX_UNEXPECTED_EXIT:
        case VERR_VMX_INVALID_GUEST_STATE:
        case VERR_VMX_UNABLE_TO_START_VM:
        case VERR_SVM_UNKNOWN_EXIT:
        case VERR_SVM_UNEXPECTED_EXIT:
        case VERR_SVM_UNEXPECTED_PATCH_TYPE:
        case VERR_SVM_UNEXPECTED_XCPT_EXIT:
            HMR3CheckError(pVM, rc);
            break;

        /* Up a level; fatal */
        case VERR_VMX_IN_VMX_ROOT_MODE:
        case VERR_SVM_IN_USE:
        case VERR_SVM_UNABLE_TO_START_VM:
            break;
#endif

        /*
         * These two should be handled via the force flag already, but just in
         * case they end up here deal with it.
         */
        case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
        case VINF_IOM_R3_MMIO_COMMIT_WRITE:
            AssertFailed();
            rc = VBOXSTRICTRC_TODO(IOMR3ProcessForceFlag(pVM, pVCpu, rc));
            break;

        /*
         * Anything which is not known to us means an internal error
         * and the termination of the VM!
         */
        default:
            AssertMsgFailed(("Unknown GC return code: %Rra\n", rc));
            break;
    }
    return rc;
}

#endif

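For context, a minimal sketch of how this template is consumed. A consumer source file defines exactly one mode macro and then includes the header, which stamps out the matching handler for that translation unit; the file and call-site names here are assumptions for illustration, not taken from this header.

    /* Hypothetical consumer, e.g. an HM-flavoured EM source file. */
    #define EMHANDLERC_WITH_HM          /* select the HM variant        */
    #include "EMHandleRCTmpl.h"         /* emits emR3HmHandleRC()       */

    /* The EM main loop for that mode can then funnel status codes
       through the generated handler: */
    rc = emR3HmHandleRC(pVM, pVCpu, rc);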