/* $Id: EMHandleRCTmpl.h 43394 2012-09-21 11:11:17Z vboxsync $ */
/** @file
 * EM - emR3[Raw|Hm]HandleRC template.
 */

/*
 * Copyright (C) 2006-2009 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#ifndef ___EMHandleRCTmpl_h
#define ___EMHandleRCTmpl_h

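/*
 * Usage sketch (illustrative): this template is meant to be #included from an
 * execution-loop source file with exactly one of the mode macros defined,
 * e.g.:
 *
 *     #define EMHANDLERC_WITH_PATM
 *     #include "EMHandleRCTmpl.h"
 *
 * The resulting function is then fed each status code returned by guest
 * execution, e.g. rc = emR3RawHandleRC(pVM, pVCpu, pCtx, rc);
 */
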
/**
 * Process a subset of the raw-mode and HM return codes.
 *
 * Since we have to share this with raw-mode single stepping, this inline
 * function has been created to avoid code duplication.
 *
 * @returns VINF_SUCCESS if it's ok to continue raw mode.
 * @returns VBox status code to return to the EM main loop.
 *
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   pCtx    Pointer to the guest CPU context.
 * @param   rc      The return code.
 */
#ifdef EMHANDLERC_WITH_PATM
int emR3RawHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc)
#elif defined(EMHANDLERC_WITH_HM)
int emR3HmHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc)
#endif
{
    switch (rc)
    {
        /*
         * Common & simple ones.
         */
        case VINF_SUCCESS:
            break;
        case VINF_EM_RESCHEDULE_RAW:
        case VINF_EM_RESCHEDULE_HM:
        case VINF_EM_RAW_INTERRUPT:
        case VINF_EM_RAW_TO_R3:
        case VINF_EM_RAW_TIMER_PENDING:
        case VINF_EM_PENDING_REQUEST:
            rc = VINF_SUCCESS;
            break;

#ifdef EMHANDLERC_WITH_PATM
        /*
         * Privileged instruction.
         */
        case VINF_EM_RAW_EXCEPTION_PRIVILEGED:
        case VINF_PATM_PATCH_TRAP_GP:
            rc = emR3RawPrivileged(pVM, pVCpu);
            break;

        case VINF_EM_RAW_GUEST_TRAP:
            /*
             * Got a trap which needs dispatching.
             */
            if (PATMR3IsInsidePatchJump(pVM, pCtx->eip, NULL))
            {
                AssertReleaseMsgFailed(("FATAL ERROR: executing random instruction inside generated patch jump %08X\n", CPUMGetGuestEIP(pVCpu)));
                rc = VERR_EM_RAW_PATCH_CONFLICT;
                break;
            }
            rc = emR3RawGuestTrap(pVM, pVCpu);
            break;

        /*
         * Trap in patch code.
         */
        case VINF_PATM_PATCH_TRAP_PF:
        case VINF_PATM_PATCH_INT3:
            rc = emR3PatchTrap(pVM, pVCpu, pCtx, rc);
            break;

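        /*
         * Patch code has requested duplication of a guest function; let the
         * patch manager handle it (PATMR3DuplicateFunctionRequest) and continue.
         */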
        case VINF_PATM_DUPLICATE_FUNCTION:
            Assert(PATMIsPatchGCAddr(pVM, pCtx->eip));
            rc = PATMR3DuplicateFunctionRequest(pVM, pCtx);
            AssertRC(rc);
            rc = VINF_SUCCESS;
            break;

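        /*
         * PATM asked us to check one of its monitored pages; let the patch
         * manager handle it (PATMR3HandleMonitoredPage) and continue.
         */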
        case VINF_PATM_CHECK_PATCH_PAGE:
            rc = PATMR3HandleMonitoredPage(pVM);
            AssertRC(rc);
            rc = VINF_SUCCESS;
            break;

        /*
         * Patch manager.
         */
        case VERR_EM_RAW_PATCH_CONFLICT:
            AssertReleaseMsgFailed(("%Rrc handling is not yet implemented\n", rc));
            break;
#endif /* EMHANDLERC_WITH_PATM */

#ifdef EMHANDLERC_WITH_PATM
        /*
         * Memory mapped I/O access - attempt to patch the instruction
         */
        case VINF_PATM_HC_MMIO_PATCH_READ:
            rc = PATMR3InstallPatch(pVM, SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pCtx->eip),
                                    PATMFL_MMIO_ACCESS
                                    | (CPUMGetGuestCodeBits(pVCpu) == 32 ? PATMFL_CODE32 : 0));
            if (RT_FAILURE(rc))
                rc = emR3ExecuteInstruction(pVM, pVCpu, "MMIO");
            break;

        case VINF_PATM_HC_MMIO_PATCH_WRITE:
            AssertFailed(); /* not yet implemented. */
            rc = emR3ExecuteInstruction(pVM, pVCpu, "MMIO");
            break;
#endif /* EMHANDLERC_WITH_PATM */

        /*
         * Conflict or out of page tables.
         *
         * VMCPU_FF_PGM_SYNC_CR3 is set by the hypervisor and all we need to
         * do here is to execute the pending forced actions.
         */
        case VINF_PGM_SYNC_CR3:
            AssertMsg(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL),
                      ("VINF_PGM_SYNC_CR3 and no VMCPU_FF_PGM_SYNC_CR3*!\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * PGM pool flush pending (guest SMP only).
         */
        /** @todo jumping back and forth between ring 0 and 3 can burn a lot of cycles
         * if the EMT thread that's supposed to handle the flush is currently not active
         * (e.g. waiting to be scheduled) -> fix this properly!
         *
         * bird: Since the clearing is global and done via a rendezvous any CPU can do
         * it. They would have to choose who to call VMMR3EmtRendezvous and send
         * the rest to VMMR3EmtRendezvousFF ... Hmm ... that's not going to work
         * all that well since the latter will race the setup done by the
         * first. Guess that means we need some new magic in that area for
         * handling this case. :/
         */
        case VINF_PGM_POOL_FLUSH_PENDING:
            rc = VINF_SUCCESS;
            break;

        /*
         * Paging mode change.
         */
        case VINF_PGM_CHANGE_MODE:
            rc = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
            if (rc == VINF_SUCCESS)
                rc = VINF_EM_RESCHEDULE;
            AssertMsg(RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST), ("%Rrc\n", rc));
            break;

#ifdef EMHANDLERC_WITH_PATM
        /*
         * CSAM wants to perform a task in ring-3. It has set an FF action flag.
         */
        case VINF_CSAM_PENDING_ACTION:
            rc = VINF_SUCCESS;
            break;

        /*
         * Invoked Interrupt gate - must directly (!) go to the recompiler.
         */
        case VINF_EM_RAW_INTERRUPT_PENDING:
        case VINF_EM_RAW_RING_SWITCH_INT:
            Assert(TRPMHasTrap(pVCpu));
            Assert(!PATMIsPatchGCAddr(pVM, pCtx->eip));

            if (TRPMHasTrap(pVCpu))
            {
                /* If the guest gate is marked unpatched, then we will check again if we can patch it. */
                uint8_t u8Interrupt = TRPMGetTrapNo(pVCpu);
                if (TRPMR3GetGuestTrapHandler(pVM, u8Interrupt) == TRPM_INVALID_HANDLER)
                {
                    CSAMR3CheckGates(pVM, u8Interrupt, 1);
                    Log(("emR3RawHandleRC: recheck gate %x -> valid=%d\n", u8Interrupt, TRPMR3GetGuestTrapHandler(pVM, u8Interrupt) != TRPM_INVALID_HANDLER));
                    /* Note: If it was successful, then we could go back to raw mode, but let's keep things simple for now. */
                }
            }
            rc = VINF_EM_RESCHEDULE_REM;
            break;

        /*
         * Other ring switch types.
         */
        case VINF_EM_RAW_RING_SWITCH:
            rc = emR3RawRingSwitch(pVM, pVCpu);
            break;
#endif /* EMHANDLERC_WITH_PATM */

        /*
         * I/O Port access - emulate the instruction.
         */
        case VINF_IOM_R3_IOPORT_READ:
        case VINF_IOM_R3_IOPORT_WRITE:
            rc = emR3ExecuteIOInstruction(pVM, pVCpu);
            break;

        /*
         * Memory mapped I/O access - emulate the instruction.
         */
        case VINF_IOM_R3_MMIO_READ:
        case VINF_IOM_R3_MMIO_WRITE:
        case VINF_IOM_R3_MMIO_READ_WRITE:
            rc = emR3ExecuteInstruction(pVM, pVCpu, "MMIO");
            break;

#ifdef EMHANDLERC_WITH_HM
        /*
         * (MM)IO intensive code block detected; fall back to the recompiler for better performance
         */
        case VINF_EM_RAW_EMULATE_IO_BLOCK:
            rc = HMR3EmulateIoBlock(pVM, pCtx);
            break;

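        /*
         * HM wants a guest TPR access instruction patched to reduce the
         * number of exits; let HMR3PatchTprInstr take care of it.
         */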
        case VINF_EM_HM_PATCH_TPR_INSTR:
            rc = HMR3PatchTprInstr(pVM, pVCpu, pCtx);
            break;
#endif

#ifdef EMHANDLERC_WITH_PATM
        /*
         * Execute instruction.
         */
        case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
            rc = emR3ExecuteInstruction(pVM, pVCpu, "LDT FAULT: ");
            break;
        case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
            rc = emR3ExecuteInstruction(pVM, pVCpu, "GDT FAULT: ");
            break;
        case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
            rc = emR3ExecuteInstruction(pVM, pVCpu, "IDT FAULT: ");
            break;
        case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
            rc = emR3ExecuteInstruction(pVM, pVCpu, "TSS FAULT: ");
            break;
        case VINF_EM_RAW_EMULATE_INSTR_PD_FAULT:
            rc = emR3ExecuteInstruction(pVM, pVCpu, "PD FAULT: ");
            break;
        case VINF_EM_RAW_EMULATE_INSTR_HLT:
            /** @todo skip instruction and go directly to the halt state. (see REM for implementation details) */
            rc = emR3RawPrivileged(pVM, pVCpu);
            break;
#endif

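        /*
         * Plain instruction emulation: hand the current instruction to the
         * interpreter.  In the VINF_PATM_PENDING_IRQ_AFTER_IRET case the
         * status code is forwarded so the pending interrupt can be taken
         * care of after the emulated IRET.
         */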
#ifdef EMHANDLERC_WITH_PATM
        case VINF_PATM_PENDING_IRQ_AFTER_IRET:
            rc = emR3ExecuteInstruction(pVM, pVCpu, "EMUL: ", VINF_PATM_PENDING_IRQ_AFTER_IRET);
            break;

        case VINF_PATCH_EMULATE_INSTR:
#else
        case VINF_EM_RAW_GUEST_TRAP:
#endif
        case VINF_EM_RAW_EMULATE_INSTR:
            rc = emR3ExecuteInstruction(pVM, pVCpu, "EMUL: ");
            break;

#ifdef EMHANDLERC_WITH_PATM
        /*
         * Stale selector and iret traps => REM.
         */
        case VINF_EM_RAW_STALE_SELECTOR:
        case VINF_EM_RAW_IRET_TRAP:
            /* The recompiler cannot run patch code, so if EIP points into a patch,
               convert it back to the original guest address before rescheduling. */
            if (PATMIsPatchGCAddr(pVM, pCtx->eip))
            {
                pCtx->eip = PATMR3PatchToGCPtr(pVM, (RTGCPTR)pCtx->eip, 0);
            }
            LogFlow(("emR3RawHandleRC: %Rrc -> %Rrc\n", rc, VINF_EM_RESCHEDULE_REM));
            rc = VINF_EM_RESCHEDULE_REM;
            break;

        /*
         * Conflict in GDT, resync and continue.
         */
        case VINF_SELM_SYNC_GDT:
            AssertMsg(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_SELM_SYNC_TSS),
                      ("VINF_SELM_SYNC_GDT without VMCPU_FF_SELM_SYNC_GDT/LDT/TSS!\n"));
            rc = VINF_SUCCESS;
            break;
#endif

        /*
         * Up a level.
         */
        case VINF_EM_TERMINATE:
        case VINF_EM_OFF:
        case VINF_EM_RESET:
        case VINF_EM_SUSPEND:
        case VINF_EM_HALT:
        case VINF_EM_RESUME:
        case VINF_EM_NO_MEMORY:
        case VINF_EM_RESCHEDULE:
        case VINF_EM_RESCHEDULE_REM:
        case VINF_EM_WAIT_SIPI:
            break;

        /*
         * Up a level and invoke the debugger.
         */
        case VINF_EM_DBG_STEPPED:
        case VINF_EM_DBG_BREAKPOINT:
        case VINF_EM_DBG_STEP:
        case VINF_EM_DBG_HYPER_BREAKPOINT:
        case VINF_EM_DBG_HYPER_STEPPED:
        case VINF_EM_DBG_HYPER_ASSERTION:
        case VINF_EM_DBG_STOP:
            break;

        /*
         * Up a level, dump and debug.
         */
        case VERR_TRPM_DONT_PANIC:
        case VERR_TRPM_PANIC:
        case VERR_VMM_RING0_ASSERTION:
        case VERR_VMM_HYPER_CR3_MISMATCH:
        case VERR_VMM_RING3_CALL_DISABLED:
        case VERR_IEM_INSTR_NOT_IMPLEMENTED:
        case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
            break;

#ifdef EMHANDLERC_WITH_HM
        /*
         * Up a level after HM has done some release logging.
         */
        case VERR_VMX_INVALID_VMCS_FIELD:
        case VERR_VMX_INVALID_VMCS_PTR:
        case VERR_VMX_INVALID_VMXON_PTR:
        case VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_CODE:
        case VERR_VMX_UNEXPECTED_EXCEPTION:
        case VERR_VMX_UNEXPECTED_EXIT_CODE:
        case VERR_VMX_INVALID_GUEST_STATE:
        case VERR_VMX_UNABLE_TO_START_VM:
        case VERR_VMX_UNABLE_TO_RESUME_VM:
            HMR3CheckError(pVM, rc);
            break;

        /* Up a level; fatal */
        case VERR_VMX_IN_VMX_ROOT_MODE:
        case VERR_SVM_IN_USE:
        case VERR_SVM_UNABLE_TO_START_VM:
            break;
#endif

        /*
         * Anything which is not known to us means an internal error
         * and the termination of the VM!
         */
        default:
            AssertMsgFailed(("Unknown GC return code: %Rra\n", rc));
            break;
    }
    return rc;
}

#endif
