VirtualBox

source: vbox/trunk/src/VBox/VMM/include/EMHandleRCTmpl.h@ 73092

Last change on this file since 73092 was 72983, checked in by vboxsync, 7 years ago

VMM/HM, HMVMX: bugref:9193 Stop passing pCtx around and use pVCpu->cpum.GstCtx instead where possible.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 14.3 KB
Line 
1/* $Id: EMHandleRCTmpl.h 72983 2018-07-08 16:15:47Z vboxsync $ */
2/** @file
3 * EM - emR3[Raw|Hm|Nem]HandleRC template.
4 */
5
6/*
7 * Copyright (C) 2006-2018 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifndef ___EMHandleRCTmpl_h
19#define ___EMHandleRCTmpl_h
20
21#if defined(EMHANDLERC_WITH_PATM) + defined(EMHANDLERC_WITH_HM) + defined(EMHANDLERC_WITH_NEM) != 1
22# error "Exactly one of these must be defined: EMHANDLERC_WITH_PATM, EMHANDLERC_WITH_HM, EMHANDLERC_WITH_NEM"
23#endif
24
25
/**
 * Process a subset of the raw-mode, HM and NEM return codes.
 *
 * Since we have to share this with raw-mode single stepping, this inline
 * function has been created to avoid code duplication.
 *
 * Exactly one of EMHANDLERC_WITH_PATM, EMHANDLERC_WITH_HM or
 * EMHANDLERC_WITH_NEM is defined by the including translation unit (enforced
 * by the #error check earlier in this header); it selects which of the three
 * function names below this single body is compiled as, and which of the
 * conditional case groups in the switch are included.
 *
 * @returns VINF_SUCCESS if it's ok to continue raw mode.
 * @returns VBox status code to return to the EM main loop.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   rc      The return code.
 */
#ifdef EMHANDLERC_WITH_PATM
int emR3RawHandleRC(PVM pVM, PVMCPU pVCpu, int rc)
#elif defined(EMHANDLERC_WITH_HM) || defined(DOXYGEN_RUNNING)
int emR3HmHandleRC(PVM pVM, PVMCPU pVCpu, int rc)
#elif defined(EMHANDLERC_WITH_NEM)
int emR3NemHandleRC(PVM pVM, PVMCPU pVCpu, int rc)
#endif
{
    switch (rc)
    {
        /*
         * Common & simple ones: either already fine (VINF_SUCCESS) or codes
         * whose work is done via force-action flags, so continuing is safe.
         */
        case VINF_SUCCESS:
            break;
        case VINF_EM_RESCHEDULE_RAW:
        case VINF_EM_RESCHEDULE_HM:
        case VINF_EM_RAW_INTERRUPT:
        case VINF_EM_RAW_TO_R3:
        case VINF_EM_RAW_TIMER_PENDING:
        case VINF_EM_PENDING_REQUEST:
            rc = VINF_SUCCESS;
            break;

#ifdef EMHANDLERC_WITH_PATM
        /*
         * Privileged instruction.
         */
        case VINF_EM_RAW_EXCEPTION_PRIVILEGED:
        case VINF_PATM_PATCH_TRAP_GP:
            rc = emR3RawPrivileged(pVM, pVCpu);
            break;

        case VINF_EM_RAW_GUEST_TRAP:
            /*
             * Got a trap which needs dispatching.  Landing inside a generated
             * patch jump is fatal: control should never stop there.
             */
            if (PATMR3IsInsidePatchJump(pVM, pVCpu->cpum.GstCtx.eip, NULL))
            {
                AssertReleaseMsgFailed(("FATAL ERROR: executing random instruction inside generated patch jump %08X\n", CPUMGetGuestEIP(pVCpu)));
                rc = VERR_EM_RAW_PATCH_CONFLICT;
                break;
            }
            rc = emR3RawGuestTrap(pVM, pVCpu);
            break;

        /*
         * Trap in patch code.
         */
        case VINF_PATM_PATCH_TRAP_PF:
        case VINF_PATM_PATCH_INT3:
            rc = emR3RawPatchTrap(pVM, pVCpu, rc);
            break;

        case VINF_PATM_DUPLICATE_FUNCTION:
            Assert(PATMIsPatchGCAddr(pVM, pVCpu->cpum.GstCtx.eip));
            rc = PATMR3DuplicateFunctionRequest(pVM, &pVCpu->cpum.GstCtx);
            AssertRC(rc);
            rc = VINF_SUCCESS;
            break;

        case VINF_PATM_CHECK_PATCH_PAGE:
            rc = PATMR3HandleMonitoredPage(pVM);
            AssertRC(rc);
            rc = VINF_SUCCESS;
            break;

        /*
         * Patch manager.
         */
        case VERR_EM_RAW_PATCH_CONFLICT:
            AssertReleaseMsgFailed(("%Rrc handling is not yet implemented\n", rc));
            break;

        /*
         * Memory mapped I/O access - attempt to patch the instruction,
         * falling back to plain emulation if patching fails.
         */
        case VINF_PATM_HC_MMIO_PATCH_READ:
            rc = PATMR3InstallPatch(pVM, SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pVCpu->cpum.GstCtx.eip),
                                    PATMFL_MMIO_ACCESS
                                    | (CPUMGetGuestCodeBits(pVCpu) == 32 ? PATMFL_CODE32 : 0));
            if (RT_FAILURE(rc))
                rc = emR3ExecuteInstruction(pVM, pVCpu, "MMIO");
            break;

        case VINF_PATM_HC_MMIO_PATCH_WRITE:
            AssertFailed(); /* not yet implemented. */
            rc = emR3ExecuteInstruction(pVM, pVCpu, "MMIO");
            break;
#endif /* EMHANDLERC_WITH_PATM */

#ifndef EMHANDLERC_WITH_NEM
        /*
         * Conflict or out of page tables.
         *
         * VM_FF_PGM_SYNC_CR3 is set by the hypervisor and all we need to
         * do here is to execute the pending forced actions.
         */
        case VINF_PGM_SYNC_CR3:
            AssertMsg(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL),
                      ("VINF_PGM_SYNC_CR3 and no VMCPU_FF_PGM_SYNC_CR3*!\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * PGM pool flush pending (guest SMP only).
         */
        /** @todo jumping back and forth between ring 0 and 3 can burn a lot of cycles
         * if the EMT thread that's supposed to handle the flush is currently not active
         * (e.g. waiting to be scheduled) -> fix this properly!
         *
         * bird: Since the clearing is global and done via a rendezvous any CPU can do
         *       it.  They would have to choose who to call VMMR3EmtRendezvous and send
         *       the rest to VMMR3EmtRendezvousFF ... Hmm ... that's not going to work
         *       all that well since the latter will race the setup done by the
         *       first.  Guess that means we need some new magic in that area for
         *       handling this case. :/
         */
        case VINF_PGM_POOL_FLUSH_PENDING:
            rc = VINF_SUCCESS;
            break;

        /*
         * Paging mode change.  On success we force a reschedule so EM can
         * pick the right execution mode for the new paging setup.
         */
        case VINF_PGM_CHANGE_MODE:
            CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
            rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER);
            if (rc == VINF_SUCCESS)
                rc = VINF_EM_RESCHEDULE;
            AssertMsg(RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST), ("%Rrc\n", rc));
            break;
#endif /* !EMHANDLERC_WITH_NEM */

#ifdef EMHANDLERC_WITH_PATM
        /*
         * CSAM wants to perform a task in ring-3.  It has set an FF action flag.
         */
        case VINF_CSAM_PENDING_ACTION:
            rc = VINF_SUCCESS;
            break;

        /*
         * Invoked Interrupt gate - must directly (!) go to the recompiler.
         */
        case VINF_EM_RAW_INTERRUPT_PENDING:
        case VINF_EM_RAW_RING_SWITCH_INT:
            Assert(TRPMHasTrap(pVCpu));
            Assert(!PATMIsPatchGCAddr(pVM, pVCpu->cpum.GstCtx.eip));

            if (TRPMHasTrap(pVCpu))
            {
                /* If the guest gate is marked unpatched, then we will check again if we can patch it. */
                uint8_t u8Interrupt = TRPMGetTrapNo(pVCpu);
                if (TRPMR3GetGuestTrapHandler(pVM, u8Interrupt) == TRPM_INVALID_HANDLER)
                {
                    CSAMR3CheckGates(pVM, u8Interrupt, 1);
                    Log(("emR3RawHandleRC: recheck gate %x -> valid=%d\n", u8Interrupt, TRPMR3GetGuestTrapHandler(pVM, u8Interrupt) != TRPM_INVALID_HANDLER));
                    /* Note: If it was successful, then we could go back to raw mode, but let's keep things simple for now. */
                }
            }
            rc = VINF_EM_RESCHEDULE_REM;
            break;

        /*
         * Other ring switch types.
         */
        case VINF_EM_RAW_RING_SWITCH:
            rc = emR3RawRingSwitch(pVM, pVCpu);
            break;
#endif /* EMHANDLERC_WITH_PATM */

        /*
         * I/O Port access - emulate the instruction.
         */
        case VINF_IOM_R3_IOPORT_READ:
        case VINF_IOM_R3_IOPORT_WRITE:
        case VINF_EM_RESUME_R3_HISTORY_EXEC: /* Resume EMHistoryExec after VMCPU_FF_IOM. */
            rc = emR3ExecuteIOInstruction(pVM, pVCpu);
            break;

        /*
         * Execute pending I/O Port access.
         */
        case VINF_EM_PENDING_R3_IOPORT_WRITE:
            rc = VBOXSTRICTRC_TODO(emR3ExecutePendingIoPortWrite(pVM, pVCpu));
            break;
        case VINF_EM_PENDING_R3_IOPORT_READ:
            rc = VBOXSTRICTRC_TODO(emR3ExecutePendingIoPortRead(pVM, pVCpu));
            break;

        /*
         * Memory mapped I/O access - emulate the instruction.
         */
        case VINF_IOM_R3_MMIO_READ:
        case VINF_IOM_R3_MMIO_WRITE:
        case VINF_IOM_R3_MMIO_READ_WRITE:
            rc = emR3ExecuteInstruction(pVM, pVCpu, "MMIO");
            break;

        /*
         * Machine specific register access - emulate the instruction.
         */
        case VINF_CPUM_R3_MSR_READ:
        case VINF_CPUM_R3_MSR_WRITE:
            rc = emR3ExecuteInstruction(pVM, pVCpu, "MSR");
            break;

        /*
         * GIM hypercall.
         */
        case VINF_GIM_R3_HYPERCALL:
            rc = emR3ExecuteInstruction(pVM, pVCpu, "Hypercall");
            break;

#ifdef EMHANDLERC_WITH_HM
        /*
         * (MM)IO intensive code block detected; fall back to the recompiler for better performance
         */
        case VINF_EM_RAW_EMULATE_IO_BLOCK:
            rc = HMR3EmulateIoBlock(pVM, &pVCpu->cpum.GstCtx);
            break;

        case VINF_EM_HM_PATCH_TPR_INSTR:
            rc = HMR3PatchTprInstr(pVM, pVCpu);
            break;
#endif

#ifdef EMHANDLERC_WITH_PATM
        /*
         * Execute instruction.
         */
        case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
            rc = emR3ExecuteInstruction(pVM, pVCpu, "LDT FAULT: ");
            break;
        case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
            rc = emR3ExecuteInstruction(pVM, pVCpu, "GDT FAULT: ");
            break;
        case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
            rc = emR3ExecuteInstruction(pVM, pVCpu, "IDT FAULT: ");
            break;
        case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
            rc = emR3ExecuteInstruction(pVM, pVCpu, "TSS FAULT: ");
            break;

        case VINF_PATM_PENDING_IRQ_AFTER_IRET:
            rc = emR3ExecuteInstruction(pVM, pVCpu, "EMUL: ", VINF_PATM_PENDING_IRQ_AFTER_IRET);
            break;

        case VINF_PATCH_EMULATE_INSTR: /* deliberate fallthrough into VINF_EM_RAW_EMULATE_INSTR below */
#else
        case VINF_EM_RAW_GUEST_TRAP:   /* with PATM this code is dispatched separately above; here it just means emulate */
#endif
        case VINF_EM_RAW_EMULATE_INSTR:
            rc = emR3ExecuteInstruction(pVM, pVCpu, "EMUL: ");
            break;

        case VINF_EM_RAW_INJECT_TRPM_EVENT:
            CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
            rc = VBOXSTRICTRC_VAL(IEMInjectTrpmEvent(pVCpu));
            /* The following condition should be removed when IEM_IMPLEMENTS_TASKSWITCH becomes true. */
            if (rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
                rc = emR3ExecuteInstruction(pVM, pVCpu, "EVENT: ");
            break;


#ifdef EMHANDLERC_WITH_PATM
        /*
         * Stale selector and iret traps => REM.
         */
        case VINF_EM_RAW_STALE_SELECTOR:
        case VINF_EM_RAW_IRET_TRAP:
            /* We will not go to the recompiler if EIP points to patch code;
               translate the patch address back to the guest address first. */
            if (PATMIsPatchGCAddr(pVM, pVCpu->cpum.GstCtx.eip))
            {
                pVCpu->cpum.GstCtx.eip = PATMR3PatchToGCPtr(pVM, (RTGCPTR)pVCpu->cpum.GstCtx.eip, 0);
            }
            LogFlow(("emR3RawHandleRC: %Rrc -> %Rrc\n", rc, VINF_EM_RESCHEDULE_REM));
            rc = VINF_EM_RESCHEDULE_REM;
            break;

        /*
         * Conflict in GDT, resync and continue.
         */
        case VINF_SELM_SYNC_GDT:
            AssertMsg(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_SELM_SYNC_TSS),
                      ("VINF_SELM_SYNC_GDT without VMCPU_FF_SELM_SYNC_GDT/LDT/TSS!\n"));
            rc = VINF_SUCCESS;
            break;
#endif

        /*
         * Up a level.  These are returned to the EM main loop as-is.
         */
        case VINF_EM_TERMINATE:
        case VINF_EM_OFF:
        case VINF_EM_RESET:
        case VINF_EM_SUSPEND:
        case VINF_EM_HALT:
        case VINF_EM_RESUME:
        case VINF_EM_NO_MEMORY:
        case VINF_EM_RESCHEDULE:
        case VINF_EM_RESCHEDULE_REM:
        case VINF_EM_WAIT_SIPI:
            break;

        /*
         * Up a level and invoke the debugger.
         */
        case VINF_EM_DBG_STEPPED:
        case VINF_EM_DBG_BREAKPOINT:
        case VINF_EM_DBG_STEP:
        case VINF_EM_DBG_HYPER_BREAKPOINT:
        case VINF_EM_DBG_HYPER_STEPPED:
        case VINF_EM_DBG_HYPER_ASSERTION:
        case VINF_EM_DBG_STOP:
        case VINF_EM_DBG_EVENT:
            break;

        /*
         * Up a level, dump and debug.
         */
        case VERR_TRPM_DONT_PANIC:
        case VERR_TRPM_PANIC:
        case VERR_VMM_RING0_ASSERTION:
        case VINF_EM_TRIPLE_FAULT:
        case VERR_VMM_HYPER_CR3_MISMATCH:
        case VERR_VMM_RING3_CALL_DISABLED:
        case VERR_IEM_INSTR_NOT_IMPLEMENTED:
        case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
        case VERR_EM_GUEST_CPU_HANG:
            break;

#ifdef EMHANDLERC_WITH_HM
        /*
         * Up a level, after HM has done some release logging.
         */
        case VERR_VMX_INVALID_VMCS_FIELD:
        case VERR_VMX_INVALID_VMCS_PTR:
        case VERR_VMX_INVALID_VMXON_PTR:
        case VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE:
        case VERR_VMX_UNEXPECTED_EXCEPTION:
        case VERR_VMX_UNEXPECTED_EXIT:
        case VERR_VMX_INVALID_GUEST_STATE:
        case VERR_VMX_UNABLE_TO_START_VM:
        case VERR_SVM_UNKNOWN_EXIT:
        case VERR_SVM_UNEXPECTED_EXIT:
        case VERR_SVM_UNEXPECTED_PATCH_TYPE:
        case VERR_SVM_UNEXPECTED_XCPT_EXIT:
            HMR3CheckError(pVM, rc);
            break;

        /* Up a level; fatal */
        case VERR_VMX_IN_VMX_ROOT_MODE:
        case VERR_SVM_IN_USE:
        case VERR_SVM_UNABLE_TO_START_VM:
            break;
#endif

        /*
         * These two should be handled via the force flag already, but just in
         * case they end up here deal with it.
         */
        case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
        case VINF_IOM_R3_MMIO_COMMIT_WRITE:
            AssertFailed();
            rc = VBOXSTRICTRC_TODO(IOMR3ProcessForceFlag(pVM, pVCpu, rc));
            break;

        /*
         * Anything which is not known to us means an internal error
         * and the termination of the VM!
         */
        default:
            AssertMsgFailed(("Unknown GC return code: %Rra\n", rc));
            break;
    }
    return rc;
}
418
419#endif
420
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette