VirtualBox

source: vbox/trunk/src/VBox/VMM/EMHandleRCTmpl.h@21192

Last change on this file since 21192 was 21192, checked in by vboxsync, 15 years ago

Export

File size: 11.3 KB
/* $Id: EMHandleRCTmpl.h 48532 2009-06-13 20:53:44Z bird $ */
/** @file
 * EM - emR3RawHandleRC template
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/**
 * Process a subset of the raw-mode return codes.
 *
 * Since we have to share this with raw-mode single stepping, this template
 * function has been created to avoid code duplication.
 *
 * @returns VINF_SUCCESS if it's OK to continue raw mode.
 * @returns VBox status code to return to the EM main loop.
 *
 * @param   pVM     The VM handle.
 * @param   pVCpu   The VMCPU handle.
 * @param   pCtx    The guest CPU context.
 * @param   rc      The return code.
 */
static int EMHANDLERC_NAME(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc)
{
    switch (rc)
    {
        /*
         * Common & simple ones.
         */
        case VINF_SUCCESS:
            break;
        case VINF_EM_RESCHEDULE_RAW:
        case VINF_EM_RESCHEDULE_HWACC:
        case VINF_EM_RAW_INTERRUPT:
        case VINF_EM_RAW_TO_R3:
        case VINF_EM_RAW_TIMER_PENDING:
        case VINF_EM_PENDING_REQUEST:
            rc = VINF_SUCCESS;
            break;

        /*
         * Privileged instruction.
         */
        case VINF_EM_RAW_EXCEPTION_PRIVILEGED:
#ifdef EMHANDLERC_WITH_PATM
        case VINF_PATM_PATCH_TRAP_GP:
#endif
            rc = emR3RawPrivileged(pVM, pVCpu);
            break;

        /*
         * Got a trap which needs dispatching.
         */
        case VINF_EM_RAW_GUEST_TRAP:
#ifdef EMHANDLERC_WITH_PATM
            if (PATMR3IsInsidePatchJump(pVM, pCtx->eip, NULL))
            {
                AssertReleaseMsgFailed(("FATAL ERROR: executing random instruction inside generated patch jump %08X\n", CPUMGetGuestEIP(pVCpu)));
                rc = VERR_EM_RAW_PATCH_CONFLICT;
                break;
            }
#endif
            rc = emR3RawGuestTrap(pVM, pVCpu);
            break;

#ifdef EMHANDLERC_WITH_PATM
        /*
         * Trap in patch code.
         */
        case VINF_PATM_PATCH_TRAP_PF:
        case VINF_PATM_PATCH_INT3:
            rc = emR3PatchTrap(pVM, pVCpu, pCtx, rc);
            break;

        case VINF_PATM_DUPLICATE_FUNCTION:
            Assert(PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtx->eip));
            rc = PATMR3DuplicateFunctionRequest(pVM, pCtx);
            AssertRC(rc);
            rc = VINF_SUCCESS;
            break;

        case VINF_PATM_CHECK_PATCH_PAGE:
            rc = PATMR3HandleMonitoredPage(pVM);
            AssertRC(rc);
            rc = VINF_SUCCESS;
            break;

        /*
         * Patch manager.
         */
        case VERR_EM_RAW_PATCH_CONFLICT:
            AssertReleaseMsgFailed(("%Rrc handling is not yet implemented\n", rc));
            break;
#endif /* EMHANDLERC_WITH_PATM */

#ifdef VBOX_WITH_VMI
        /*
         * PARAV function.
         */
        case VINF_EM_RESCHEDULE_PARAV:
            rc = PARAVCallFunction(pVM);
            break;
#endif

#ifdef EMHANDLERC_WITH_PATM
        /*
         * Memory mapped I/O access - attempt to patch the instruction
         */
        case VINF_PATM_HC_MMIO_PATCH_READ:
            rc = PATMR3InstallPatch(pVM, SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pCtx->eip),
                                    PATMFL_MMIO_ACCESS | ((SELMGetCpuModeFromSelector(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid) == CPUMODE_32BIT) ? PATMFL_CODE32 : 0));
            if (RT_FAILURE(rc))
                rc = emR3RawExecuteInstruction(pVM, pVCpu, "MMIO");
            break;

        case VINF_PATM_HC_MMIO_PATCH_WRITE:
            AssertFailed(); /* not yet implemented. */
            rc = emR3RawExecuteInstruction(pVM, pVCpu, "MMIO");
            break;
#endif /* EMHANDLERC_WITH_PATM */

        /*
         * Conflict or out of page tables.
         *
         * VMCPU_FF_PGM_SYNC_CR3 is set by the hypervisor and all we need to
         * do here is to execute the pending forced actions.
         */
        case VINF_PGM_SYNC_CR3:
            AssertMsg(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL),
                      ("VINF_PGM_SYNC_CR3 and no VMCPU_FF_PGM_SYNC_CR3*!\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * Paging mode change.
         */
        case VINF_PGM_CHANGE_MODE:
            rc = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
            if (rc == VINF_SUCCESS)
                rc = VINF_EM_RESCHEDULE;
            AssertMsg(RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST), ("%Rrc\n", rc));
            break;

#ifdef EMHANDLERC_WITH_PATM
        /*
         * CSAM wants to perform a task in ring-3. It has set an FF action flag.
         */
        case VINF_CSAM_PENDING_ACTION:
            rc = VINF_SUCCESS;
            break;

        /*
         * Invoked Interrupt gate - must directly (!) go to the recompiler.
         */
        case VINF_EM_RAW_INTERRUPT_PENDING:
        case VINF_EM_RAW_RING_SWITCH_INT:
            Assert(TRPMHasTrap(pVCpu));
            Assert(!PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtx->eip));

            if (TRPMHasTrap(pVCpu))
            {
                /* If the guest gate is marked unpatched, then we will check again if we can patch it. */
                uint8_t u8Interrupt = TRPMGetTrapNo(pVCpu);
                if (TRPMR3GetGuestTrapHandler(pVM, u8Interrupt) == TRPM_INVALID_HANDLER)
                {
                    CSAMR3CheckGates(pVM, u8Interrupt, 1);
                    Log(("emR3RawHandleRC: recheck gate %x -> valid=%d\n", u8Interrupt, TRPMR3GetGuestTrapHandler(pVM, u8Interrupt) != TRPM_INVALID_HANDLER));
                    /* Note: If it was successful, then we could go back to raw mode, but let's keep things simple for now. */
                }
            }
            rc = VINF_EM_RESCHEDULE_REM;
            break;
#endif /* EMHANDLERC_WITH_PATM */

        /*
         * Other ring switch types.
         */
        case VINF_EM_RAW_RING_SWITCH:
            rc = emR3RawRingSwitch(pVM, pVCpu);
            break;

        /*
         * I/O Port access - emulate the instruction.
         */
        case VINF_IOM_HC_IOPORT_READ:
        case VINF_IOM_HC_IOPORT_WRITE:
            rc = emR3RawExecuteIOInstruction(pVM, pVCpu);
            break;

        /*
         * Memory mapped I/O access - emulate the instruction.
         */
        case VINF_IOM_HC_MMIO_READ:
        case VINF_IOM_HC_MMIO_WRITE:
        case VINF_IOM_HC_MMIO_READ_WRITE:
            rc = emR3RawExecuteInstruction(pVM, pVCpu, "MMIO");
            break;

        /*
         * (MM)IO intensive code block detected; fall back to the recompiler for better performance
         */
        case VINF_EM_RAW_EMULATE_IO_BLOCK:
            rc = HWACCMR3EmulateIoBlock(pVM, pCtx);
            break;

#ifdef EMHANDLERC_WITH_PATM
        /*
         * Execute instruction.
         */
        case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
            rc = emR3RawExecuteInstruction(pVM, pVCpu, "LDT FAULT: ");
            break;
        case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
            rc = emR3RawExecuteInstruction(pVM, pVCpu, "GDT FAULT: ");
            break;
        case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
            rc = emR3RawExecuteInstruction(pVM, pVCpu, "IDT FAULT: ");
            break;
        case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
            rc = emR3RawExecuteInstruction(pVM, pVCpu, "TSS FAULT: ");
            break;
        case VINF_EM_RAW_EMULATE_INSTR_PD_FAULT:
            rc = emR3RawExecuteInstruction(pVM, pVCpu, "PD FAULT: ");
            break;
#endif

        case VINF_EM_RAW_EMULATE_INSTR_HLT:
            /** @todo skip instruction and go directly to the halt state. (see REM for implementation details) */
            rc = emR3RawPrivileged(pVM, pVCpu);
            break;

#ifdef EMHANDLERC_WITH_PATM
        case VINF_PATM_PENDING_IRQ_AFTER_IRET:
            rc = emR3RawExecuteInstruction(pVM, pVCpu, "EMUL: ", VINF_PATM_PENDING_IRQ_AFTER_IRET);
            break;

        case VINF_PATCH_EMULATE_INSTR:
#endif
        case VINF_EM_RAW_EMULATE_INSTR:
            rc = emR3RawExecuteInstruction(pVM, pVCpu, "EMUL: ");
            break;

#ifdef EMHANDLERC_WITH_PATM
        /*
         * Stale selector and iret traps => REM.
         */
        case VINF_EM_RAW_STALE_SELECTOR:
        case VINF_EM_RAW_IRET_TRAP:
            /* We will not go to the recompiler if EIP points to patch code. */
            if (PATMIsPatchGCAddr(pVM, pCtx->eip))
            {
                pCtx->eip = PATMR3PatchToGCPtr(pVM, (RTGCPTR)pCtx->eip, 0);
            }
            LogFlow(("emR3RawHandleRC: %Rrc -> %Rrc\n", rc, VINF_EM_RESCHEDULE_REM));
            rc = VINF_EM_RESCHEDULE_REM;
            break;
#endif

        /*
         * Up a level.
         */
        case VINF_EM_TERMINATE:
        case VINF_EM_OFF:
        case VINF_EM_RESET:
        case VINF_EM_SUSPEND:
        case VINF_EM_HALT:
        case VINF_EM_RESUME:
        case VINF_EM_NO_MEMORY:
        case VINF_EM_RESCHEDULE:
        case VINF_EM_RESCHEDULE_REM:
        case VINF_EM_WAIT_SIPI:
            break;

        /*
         * Up a level and invoke the debugger.
         */
        case VINF_EM_DBG_STEPPED:
        case VINF_EM_DBG_BREAKPOINT:
        case VINF_EM_DBG_STEP:
        case VINF_EM_DBG_HYPER_BREAKPOINT:
        case VINF_EM_DBG_HYPER_STEPPED:
        case VINF_EM_DBG_HYPER_ASSERTION:
        case VINF_EM_DBG_STOP:
            break;

        /*
         * Up a level, dump and debug.
         */
        case VERR_TRPM_DONT_PANIC:
        case VERR_TRPM_PANIC:
        case VERR_VMM_RING0_ASSERTION:
        case VERR_VMM_HYPER_CR3_MISMATCH:
        case VERR_VMM_RING3_CALL_DISABLED:
            break;

#ifndef EMHANDLERC_WITH_PATM
        /*
         * Up a level, after HwAccM has done some release logging.
         */
        case VERR_VMX_INVALID_VMCS_FIELD:
        case VERR_VMX_INVALID_VMCS_PTR:
        case VERR_VMX_INVALID_VMXON_PTR:
        case VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_CODE:
        case VERR_VMX_UNEXPECTED_EXCEPTION:
        case VERR_VMX_UNEXPECTED_EXIT_CODE:
        case VERR_VMX_INVALID_GUEST_STATE:
        case VERR_VMX_UNABLE_TO_START_VM:
        case VERR_VMX_UNABLE_TO_RESUME_VM:
            HWACCMR3CheckError(pVM, rc);
            break;
#endif

        /*
         * Anything which is not known to us means an internal error
         * and the termination of the VM!
         */
        default:
            AssertMsgFailed(("Unknown GC return code: %Rra\n", rc));
            break;
    }
    return rc;
}

#undef EMHANDLERC_NAME
#undef EMHANDLERC_WITH_PATM

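/*
 * Usage sketch (a hedged illustration, not part of the original file): this
 * header is a code template rather than an ordinary include, which is why
 * EMHANDLERC_NAME and EMHANDLERC_WITH_PATM are #undef'ed above.  A source
 * file instantiates it by defining those macros before including it.  The
 * name emR3RawHandleRC is taken from the log statements above; the second
 * function name and the include sites are assumptions for illustration only.
 *
 * @code
 *      // Raw-mode instantiation, with patch manager (PATM) handling:
 *      #define EMHANDLERC_WITH_PATM
 *      #define EMHANDLERC_NAME     emR3RawHandleRC
 *      #include "EMHandleRCTmpl.h"
 *
 *      // Hardware-assisted (HWACCM) instantiation, without PATM, typically
 *      // in a separate source file (function name assumed):
 *      #define EMHANDLERC_NAME     emR3HwaccmHandleRC
 *      #include "EMHandleRCTmpl.h"
 *
 *      // The EM execution loop then feeds the status code returned by the
 *      // guest-execution call through the instantiated handler, e.g.:
 *      rc = emR3RawHandleRC(pVM, pVCpu, pCtx, rc);
 * @endcode
 */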