VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMRC/PATMRC.cpp @ 60189

Last change on this file since 60189 was 58126, checked in by vboxsync, 9 years ago

VMM: Fixed almost all the Doxygen warnings.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 24.6 KB
/* $Id: PATMRC.cpp 58126 2015-10-08 20:59:48Z vboxsync $ */
/** @file
 * PATM - Dynamic Guest OS Patching Manager - Raw-mode Context.
 */

/*
 * Copyright (C) 2006-2015 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                  *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_PATM
#include <VBox/vmm/patm.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/em.h>
#ifdef VBOX_WITH_IEM
# include <VBox/vmm/iem.h>
#endif
#include <VBox/vmm/selm.h>
#include <VBox/vmm/mm.h>
#include "PATMInternal.h"
#include "PATMA.h"
#include <VBox/vmm/vm.h>
#include <VBox/dbg.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>


/**
 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
 *      PATM all access handler callback.}
 *
 * @remarks The @a pvUser argument is the base address of the page being
 *          monitored.
 */
DECLEXPORT(VBOXSTRICTRC) patmRCVirtPagePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore,
                                                 RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
{
    NOREF(pVCpu); NOREF(uErrorCode); NOREF(pCtxCore); NOREF(pvFault); NOREF(pvRange); NOREF(offRange);

    Assert(pvUser);
    Assert(!((uintptr_t)pvUser & PAGE_OFFSET_MASK));
    Assert(((uintptr_t)pvUser + (pvFault & PAGE_OFFSET_MASK)) == pvRange + offRange);

    pVM->patm.s.pvFaultMonitor = (RTRCPTR)(pvRange + offRange);
    return VINF_PATM_CHECK_PATCH_PAGE;
}


/**
 * Checks if the write is located on a page which was patched before.
 * (if so, then we are not allowed to turn on r/w)
 *
 * @returns Strict VBox status code.
 * @retval  VINF_SUCCESS if access interpreted (@a pCtxCore != NULL).
 * @retval  VINF_PGM_HANDLER_DO_DEFAULT (@a pCtxCore == NULL).
 * @retval  VINF_EM_RAW_EMULATE_INSTR on needing to go to ring-3 to do this.
 * @retval  VERR_PATCH_NOT_FOUND if no patch was found.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCtxCore    CPU context if \#PF, NULL if other write.
 * @param   GCPtr       GC pointer to write address.
 * @param   cbWrite     Number of bytes to write.
 *
 */
VMMRC_INT_DECL(VBOXSTRICTRC) PATMRCHandleWriteToPatchPage(PVM pVM, PCPUMCTXCORE pCtxCore, RTRCPTR GCPtr, uint32_t cbWrite)
{
    Assert(cbWrite > 0);

    /* Quick boundary check */
    if (    PAGE_ADDRESS(GCPtr) < PAGE_ADDRESS(pVM->patm.s.pPatchedInstrGCLowest)
        ||  PAGE_ADDRESS(GCPtr) > PAGE_ADDRESS(pVM->patm.s.pPatchedInstrGCHighest))
        return VERR_PATCH_NOT_FOUND;

    STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWriteDetect, a);

    /*
     * Lookup the patch page record for the write.
     */
    RTRCUINTPTR pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
    RTRCUINTPTR pWritePageEnd   = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;

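    /* Note: a write may straddle a page boundary, so the patch page record is
       looked up for both the first and the last page touched below. */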
    PPATMPATCHPAGE pPatchPage;
    pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.CTXSUFF(PatchLookupTree)->PatchTreeByPage, pWritePageStart);
    if (    !pPatchPage
        &&  pWritePageStart != pWritePageEnd)
        pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.CTXSUFF(PatchLookupTree)->PatchTreeByPage, pWritePageEnd);
    if (pPatchPage)
    {
        Log(("PATMGCHandleWriteToPatchPage: Found page %RRv for write to %RRv %d bytes (page low:high %RRv:%RRv\n",
             pPatchPage->Core.Key, GCPtr, cbWrite, pPatchPage->pLowestAddrGC, pPatchPage->pHighestAddrGC));
        if (    (RTRCUINTPTR)pPatchPage->pLowestAddrGC  > (RTRCUINTPTR)GCPtr + cbWrite - 1U
            ||  (RTRCUINTPTR)pPatchPage->pHighestAddrGC < (RTRCUINTPTR)GCPtr)
        {
            /* This part of the page was not patched; try to emulate the instruction / tell the caller to do so. */
            if (!pCtxCore)
            {
                LogFlow(("PATMHandleWriteToPatchPage: Allow writing %RRv LB %#x\n", GCPtr, cbWrite));
                STAM_COUNTER_INC(&pVM->patm.s.StatPatchWriteInterpreted);
                STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWriteDetect, a);
                return VINF_PGM_HANDLER_DO_DEFAULT;
            }
            LogFlow(("PATMHandleWriteToPatchPage: Interpret %#x accessing %RRv\n", pCtxCore->eip, GCPtr));
            int rc = VBOXSTRICTRC_TODO(EMInterpretInstruction(VMMGetCpu0(pVM), pCtxCore, (RTGCPTR)(RTRCUINTPTR)GCPtr));
            if (rc == VINF_SUCCESS)
            {
                STAM_COUNTER_INC(&pVM->patm.s.StatPatchWriteInterpreted);
                STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWriteDetect, a);
                return VINF_SUCCESS;
            }
            STAM_COUNTER_INC(&pVM->patm.s.StatPatchWriteInterpretedFailed);
        }
        R3PTRTYPE(PPATCHINFO) *paPatch = (R3PTRTYPE(PPATCHINFO) *)MMHyperR3ToRC(pVM, pPatchPage->papPatch);

        /* Increase the invalid write counter for each patch that's registered for that page. */
        for (uint32_t i = 0; i < pPatchPage->cCount; i++)
        {
            PPATCHINFO pPatch = (PPATCHINFO)MMHyperR3ToRC(pVM, paPatch[i]);

            pPatch->cInvalidWrites++;
        }

        STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWriteDetect, a);
        return VINF_EM_RAW_EMULATE_INSTR;
    }

    STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWriteDetect, a);
    return VERR_PATCH_NOT_FOUND;
}


/**
 * Checks if the illegal instruction was caused by a patched instruction.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCtxCore    The relevant core context.
 */
VMMRC_INT_DECL(int) PATMRCHandleIllegalInstrTrap(PVM pVM, PCPUMCTXCORE pCtxCore)
{
    PPATMPATCHREC pRec;
    PVMCPU pVCpu = VMMGetCpu0(pVM);
    int rc;

    /* Very important check -> otherwise we have a security leak. */
    AssertReturn(!pCtxCore->eflags.Bits.u1VM && (pCtxCore->ss.Sel & X86_SEL_RPL) <= (EMIsRawRing1Enabled(pVM) ? 2U : 1U),
                 VERR_ACCESS_DENIED);
    Assert(PATMIsPatchGCAddr(pVM, pCtxCore->eip));

    /* OP_ILLUD2 in PATM generated code? */
    if (CTXSUFF(pVM->patm.s.pGCState)->uPendingAction)
    {
        LogFlow(("PATMRC: Pending action %x at %x\n", CTXSUFF(pVM->patm.s.pGCState)->uPendingAction, pCtxCore->eip));

        /* Private PATM interface (@todo hack due to lack of anything generic). */
        /* Parameters:
         *      eax = Pending action (currently PATM_ACTION_LOOKUP_ADDRESS)
         *      ecx = PATM_ACTION_MAGIC
         */
        if (    (pCtxCore->eax & CTXSUFF(pVM->patm.s.pGCState)->uPendingAction)
            &&  pCtxCore->ecx == PATM_ACTION_MAGIC
           )
        {
            CTXSUFF(pVM->patm.s.pGCState)->uPendingAction = 0;

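            /* eax selects the requested service below.  On success most actions simply
               advance EIP past the illegal-instruction marker (PATM_ILLEGAL_INSTR_SIZE);
               the interrupt dispatch actions redirect EIP to the guest address in edi
               instead. */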
            switch (pCtxCore->eax)
            {
                case PATM_ACTION_LOOKUP_ADDRESS:
                {
                    /* Parameters:
                     *      edx = GC address to find
                     *      edi = PATCHJUMPTABLE ptr
                     */
                    AssertMsg(!pCtxCore->edi || PATMIsPatchGCAddr(pVM, pCtxCore->edi), ("edi = %x\n", pCtxCore->edi));

                    Log(("PATMRC: lookup %x jump table=%x\n", pCtxCore->edx, pCtxCore->edi));

                    pRec = patmQueryFunctionPatch(pVM, (RTRCPTR)pCtxCore->edx);
                    if (pRec)
                    {
                        if (pRec->patch.uState == PATCH_ENABLED)
                        {
                            RTGCUINTPTR pRelAddr = pRec->patch.pPatchBlockOffset;   /* make it relative */
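                            /* Record the guest address -> patch offset pair in the jump
                               table (edi) so later lookups can be resolved from patch code
                               without trapping here again. */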
                            rc = patmAddBranchToLookupCache(pVM, (RTRCPTR)pCtxCore->edi, (RTRCPTR)pCtxCore->edx, pRelAddr);
                            if (rc == VINF_SUCCESS)
                            {
                                Log(("Patch block %RRv called as function\n", pRec->patch.pPrivInstrGC));
                                pRec->patch.flags |= PATMFL_CODE_REFERENCED;

                                pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                                pCtxCore->eax = pRelAddr;
                                STAM_COUNTER_INC(&pVM->patm.s.StatFunctionFound);
                                return VINF_SUCCESS;
                            }
                            AssertFailed();
                        }
                        else
                        {
                            pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                            pCtxCore->eax = 0;      /* make it fault */
                            STAM_COUNTER_INC(&pVM->patm.s.StatFunctionNotFound);
                            return VINF_SUCCESS;
                        }
                    }
                    else
                    {
                        /* Check first before trying to generate a function/trampoline patch. */
                        if (pVM->patm.s.fOutOfMemory)
                        {
                            pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                            pCtxCore->eax = 0;      /* make it fault */
                            STAM_COUNTER_INC(&pVM->patm.s.StatFunctionNotFound);
                            return VINF_SUCCESS;
                        }
                        STAM_COUNTER_INC(&pVM->patm.s.StatFunctionNotFound);
                        return VINF_PATM_DUPLICATE_FUNCTION;
                    }
                }

                case PATM_ACTION_DISPATCH_PENDING_IRQ:
                    /* Parameters:
                     *      edi = GC address to jump to
                     */
                    Log(("PATMRC: Dispatch pending interrupt; eip=%x->%x\n", pCtxCore->eip, pCtxCore->edi));

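                    /* The patch code stashed the original EAX/ECX/EDI values in the GC
                       state before trapping here (see the Restore.uFlags assertion below);
                       they are restored once EIP has been redirected. */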
                    /* Change EIP to the guest address the patch would normally jump to after setting IF. */
                    pCtxCore->eip = pCtxCore->edi;

                    Assert(pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags == (PATM_RESTORE_EAX|PATM_RESTORE_ECX|PATM_RESTORE_EDI));
                    Assert(pVM->patm.s.CTXSUFF(pGCState)->fPIF == 0);

                    pCtxCore->eax = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEAX;
                    pCtxCore->ecx = pVM->patm.s.CTXSUFF(pGCState)->Restore.uECX;
                    pCtxCore->edi = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEDI;

                    pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags = 0;

                    /* We are no longer executing PATM code; set PIF again. */
                    pVM->patm.s.CTXSUFF(pGCState)->fPIF = 1;

                    STAM_COUNTER_INC(&pVM->patm.s.StatCheckPendingIRQ);

                    /* The caller will call trpmGCExitTrap, which will dispatch pending interrupts for us. */
                    return VINF_SUCCESS;

                case PATM_ACTION_PENDING_IRQ_AFTER_IRET:
                    /* Parameters:
                     *      edi = GC address to jump to
                     */
                    Log(("PATMRC: Dispatch pending interrupt (iret); eip=%x->%x\n", pCtxCore->eip, pCtxCore->edi));
                    Assert(pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags == (PATM_RESTORE_EAX|PATM_RESTORE_ECX|PATM_RESTORE_EDI));
                    Assert(pVM->patm.s.CTXSUFF(pGCState)->fPIF == 0);

                    /* Change EIP to the guest address of the iret. */
                    pCtxCore->eip = pCtxCore->edi;

                    pCtxCore->eax = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEAX;
                    pCtxCore->ecx = pVM->patm.s.CTXSUFF(pGCState)->Restore.uECX;
                    pCtxCore->edi = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEDI;
                    pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags = 0;

                    /* We are no longer executing PATM code; set PIF again. */
                    pVM->patm.s.CTXSUFF(pGCState)->fPIF = 1;

                    return VINF_PATM_PENDING_IRQ_AFTER_IRET;

                case PATM_ACTION_DO_V86_IRET:
                {
                    Log(("PATMRC: Do iret to V86 code; eip=%x\n", pCtxCore->eip));
                    Assert(pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags == (PATM_RESTORE_EAX|PATM_RESTORE_ECX));
                    Assert(pVM->patm.s.CTXSUFF(pGCState)->fPIF == 0);

                    pCtxCore->eax = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEAX;
                    pCtxCore->ecx = pVM->patm.s.CTXSUFF(pGCState)->Restore.uECX;
                    pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags = 0;

                    rc = EMInterpretIretV86ForPatm(pVM, pVCpu, pCtxCore);
                    if (RT_SUCCESS(rc))
                    {
                        STAM_COUNTER_INC(&pVM->patm.s.StatEmulIret);

                        /* We are no longer executing PATM code; set PIF again. */
                        pVM->patm.s.CTXSUFF(pGCState)->fPIF = 1;
                        PGMRZDynMapReleaseAutoSet(pVCpu);
                        CPUMGCCallV86Code(pCtxCore);
                        /* does not return */
                    }
                    else
                        STAM_COUNTER_INC(&pVM->patm.s.StatEmulIretFailed);
                    return rc;
                }

#ifdef DEBUG
                case PATM_ACTION_LOG_CLI:
                    Log(("PATMRC: CLI at %x (current IF=%d iopl=%d)\n", pCtxCore->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags) ));
                    pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_STI:
                    Log(("PATMRC: STI at %x (current IF=%d iopl=%d)\n", pCtxCore->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags) ));
                    pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_POPF_IF1:
                    Log(("PATMRC: POPF setting IF at %x (current IF=%d iopl=%d)\n", pCtxCore->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags)));
                    pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_POPF_IF0:
                    Log(("PATMRC: POPF at %x (current IF=%d iopl=%d)\n", pCtxCore->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags)));
                    pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_PUSHF:
                    Log(("PATMRC: PUSHF at %x (current IF=%d iopl=%d)\n", pCtxCore->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags) ));
                    pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_IF1:
                    Log(("PATMRC: IF=1 escape from %x\n", pCtxCore->eip));
                    pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_IRET:
                {
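                    /* edx points to the iret frame the guest is about to consume.  The
                       layout is the architectural one: EIP, CS and EFLAGS, followed by
                       ESP and SS when returning to ring 3 or V86 mode, and ES, DS, FS
                       and GS for a V86 return. */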
                    char *pIretFrame = (char *)pCtxCore->edx;
                    uint32_t eip, selCS, uEFlags;

                    rc  = MMGCRamRead(pVM, &eip,     pIretFrame, 4);
                    rc |= MMGCRamRead(pVM, &selCS,   pIretFrame + 4, 4);
                    rc |= MMGCRamRead(pVM, &uEFlags, pIretFrame + 8, 4);
                    if (rc == VINF_SUCCESS)
                    {
                        if (    (uEFlags & X86_EFL_VM)
                            ||  (selCS & X86_SEL_RPL) == 3)
                        {
                            uint32_t selSS, esp;

                            rc |= MMGCRamRead(pVM, &esp,   pIretFrame + 12, 4);
                            rc |= MMGCRamRead(pVM, &selSS, pIretFrame + 16, 4);

                            if (uEFlags & X86_EFL_VM)
                            {
                                uint32_t selDS, selES, selFS, selGS;
                                rc  = MMGCRamRead(pVM, &selES, pIretFrame + 20, 4);
                                rc |= MMGCRamRead(pVM, &selDS, pIretFrame + 24, 4);
                                rc |= MMGCRamRead(pVM, &selFS, pIretFrame + 28, 4);
                                rc |= MMGCRamRead(pVM, &selGS, pIretFrame + 32, 4);
                                if (rc == VINF_SUCCESS)
                                {
                                    Log(("PATMRC: IRET->VM stack frame: return address %04X:%x eflags=%08x ss:esp=%04X:%x\n", selCS, eip, uEFlags, selSS, esp));
                                    Log(("PATMRC: IRET->VM stack frame: DS=%04X ES=%04X FS=%04X GS=%04X\n", selDS, selES, selFS, selGS));
                                }
                            }
                            else
                                Log(("PATMRC: IRET stack frame: return address %04X:%x eflags=%08x ss:esp=%04X:%x\n", selCS, eip, uEFlags, selSS, esp));
                        }
                        else
                            Log(("PATMRC: IRET stack frame: return address %04X:%x eflags=%08x\n", selCS, eip, uEFlags));
                    }
                    Log(("PATMRC: IRET from %x (IF->1) current eflags=%x\n", pCtxCore->eip, pVM->patm.s.CTXSUFF(pGCState)->uVMFlags));
                    pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;
                }

                case PATM_ACTION_LOG_GATE_ENTRY:
                {
                    char *pIretFrame = (char *)pCtxCore->edx;
                    uint32_t eip, selCS, uEFlags;

                    rc  = MMGCRamRead(pVM, &eip,     pIretFrame, 4);
                    rc |= MMGCRamRead(pVM, &selCS,   pIretFrame + 4, 4);
                    rc |= MMGCRamRead(pVM, &uEFlags, pIretFrame + 8, 4);
                    if (rc == VINF_SUCCESS)
                    {
                        if (    (uEFlags & X86_EFL_VM)
                            ||  (selCS & X86_SEL_RPL) == 3)
                        {
                            uint32_t selSS, esp;

                            rc |= MMGCRamRead(pVM, &esp,   pIretFrame + 12, 4);
                            rc |= MMGCRamRead(pVM, &selSS, pIretFrame + 16, 4);

                            if (uEFlags & X86_EFL_VM)
                            {
                                uint32_t selDS, selES, selFS, selGS;
                                rc  = MMGCRamRead(pVM, &selES, pIretFrame + 20, 4);
                                rc |= MMGCRamRead(pVM, &selDS, pIretFrame + 24, 4);
                                rc |= MMGCRamRead(pVM, &selFS, pIretFrame + 28, 4);
                                rc |= MMGCRamRead(pVM, &selGS, pIretFrame + 32, 4);
                                if (rc == VINF_SUCCESS)
                                {
                                    Log(("PATMRC: GATE->VM stack frame: return address %04X:%x eflags=%08x ss:esp=%04X:%x\n", selCS, eip, uEFlags, selSS, esp));
                                    Log(("PATMRC: GATE->VM stack frame: DS=%04X ES=%04X FS=%04X GS=%04X\n", selDS, selES, selFS, selGS));
                                }
                            }
                            else
                                Log(("PATMRC: GATE stack frame: return address %04X:%x eflags=%08x ss:esp=%04X:%x\n", selCS, eip, uEFlags, selSS, esp));
                        }
                        else
                            Log(("PATMRC: GATE stack frame: return address %04X:%x eflags=%08x\n", selCS, eip, uEFlags));
                    }
                    pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;
                }

                case PATM_ACTION_LOG_RET:
                    Log(("PATMRC: RET from %x to %x ESP=%x iopl=%d\n", pCtxCore->eip, pCtxCore->edx, pCtxCore->ebx, X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags)));
                    pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_CALL:
                    Log(("PATMRC: CALL to %RRv return addr %RRv ESP=%x iopl=%d\n", pVM->patm.s.CTXSUFF(pGCState)->GCCallPatchTargetAddr, pVM->patm.s.CTXSUFF(pGCState)->GCCallReturnAddr, pCtxCore->edx, X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags)));
                    pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;
#endif
                default:
                    AssertFailed();
                    break;
            }
        }
        else
            AssertFailed();
        CTXSUFF(pVM->patm.s.pGCState)->uPendingAction = 0;
    }
    AssertMsgFailed(("Unexpected OP_ILLUD2 in patch code at %x (pending action %x)!!!!\n", pCtxCore->eip, CTXSUFF(pVM->patm.s.pGCState)->uPendingAction));
    return VINF_EM_RAW_EMULATE_INSTR;
}

/**
 * Checks if the int 3 was caused by a patched instruction.
 *
 * @returns Strict VBox status, includes all statuses that
 *          EMInterpretInstructionDisasState and IEMExecOneBypassWithPrefetchedByPC
 *          may return.
 * @retval  VINF_SUCCESS
 * @retval  VINF_PATM_PATCH_INT3
 * @retval  VINF_EM_RAW_EMULATE_INSTR
 * @retval  VERR_PATCH_NOT_FOUND
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCtxCore    The relevant core context.
 */
VMMRC_INT_DECL(int) PATMRCHandleInt3PatchTrap(PVM pVM, PCPUMCTXCORE pCtxCore)
{
    PPATMPATCHREC pRec;
    int rc;

    AssertReturn(!pCtxCore->eflags.Bits.u1VM
                 && (   (pCtxCore->ss.Sel & X86_SEL_RPL) == 1
                     || (EMIsRawRing1Enabled(pVM) && (pCtxCore->ss.Sel & X86_SEL_RPL) == 2)), VERR_ACCESS_DENIED);

    /* Int 3 in PATM generated code? (most common case) */
    if (PATMIsPatchGCAddr(pVM, pCtxCore->eip))
    {
        /* Note! Hardcoded assumption about it being a single byte int 3 instruction. */
        pCtxCore->eip--;
        return VINF_PATM_PATCH_INT3;
    }

    /** @todo could use simple caching here to speed things up. */
    pRec = (PPATMPATCHREC)RTAvloU32Get(&CTXSUFF(pVM->patm.s.PatchLookupTree)->PatchTree, (AVLOU32KEY)(pCtxCore->eip - 1));  /* eip is pointing to the instruction *after* 'int 3' already */
    if (pRec && pRec->patch.uState == PATCH_ENABLED)
    {
        if (pRec->patch.flags & PATMFL_INT3_REPLACEMENT_BLOCK)
        {
            Assert(pRec->patch.opcode == OP_CLI);
            /* This is a special cli block that was turned into an int 3 patch. We jump to the generated code manually. */
            pCtxCore->eip = (uint32_t)PATCHCODE_PTR_GC(&pRec->patch);
            STAM_COUNTER_INC(&pVM->patm.s.StatInt3BlockRun);
            return VINF_SUCCESS;
        }
        if (pRec->patch.flags & PATMFL_INT3_REPLACEMENT)
        {
            /* eip is pointing to the instruction *after* 'int 3' already */
            pCtxCore->eip = pCtxCore->eip - 1;

            PATM_STAT_RUN_INC(&pRec->patch);

            Log(("PATMHandleInt3PatchTrap found int3 for %s at %x\n", patmGetInstructionString(pRec->patch.opcode, 0), pCtxCore->eip));

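            /* Only the original opcodes below are interpreted on the spot; anything else,
               as well as any interpretation failure, is handed back to the caller with
               VINF_EM_RAW_EMULATE_INSTR. */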
            switch (pRec->patch.opcode)
            {
                case OP_CPUID:
                case OP_IRET:
#ifdef VBOX_WITH_RAW_RING1
                case OP_SMSW:
                case OP_MOV:        /* mov xx, CS */
#endif
                    break;

                case OP_STR:
                case OP_SGDT:
                case OP_SLDT:
                case OP_SIDT:
                case OP_LSL:
                case OP_LAR:
#ifndef VBOX_WITH_RAW_RING1
                case OP_SMSW:
#endif
                case OP_VERW:
                case OP_VERR:
                default:
                    PATM_STAT_FAULT_INC(&pRec->patch);
                    pRec->patch.cTraps++;
                    return VINF_EM_RAW_EMULATE_INSTR;
            }

            PVMCPU pVCpu = VMMGetCpu0(pVM);
            DISCPUMODE enmCpuMode = CPUMGetGuestDisMode(pVCpu);
            if (enmCpuMode != DISCPUMODE_32BIT)
            {
                AssertFailed();
                return VINF_EM_RAW_EMULATE_INSTR;
            }

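            /* Interpret the original (pre-patch) instruction bytes saved in the patch
               record rather than the int 3 now sitting in guest memory: with IEM the
               prefetched bytes are executed directly, otherwise they are disassembled
               and handed to the older EM interpreter. */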
#ifdef VBOX_WITH_IEM
            VBOXSTRICTRC rcStrict;
            rcStrict = IEMExecOneBypassWithPrefetchedByPC(pVCpu, pCtxCore, pCtxCore->rip,
                                                          pRec->patch.aPrivInstr, pRec->patch.cbPrivInstr);
            rc = VBOXSTRICTRC_TODO(rcStrict);
#else
            uint32_t cbOp;
            DISCPUSTATE cpu;
            rc = DISInstr(&pRec->patch.aPrivInstr[0], enmCpuMode, &cpu, &cbOp);
            if (RT_FAILURE(rc))
            {
                Log(("DISCoreOne failed with %Rrc\n", rc));
                PATM_STAT_FAULT_INC(&pRec->patch);
                pRec->patch.cTraps++;
                return VINF_EM_RAW_EMULATE_INSTR;
            }

            rc = VBOXSTRICTRC_TODO(EMInterpretInstructionDisasState(pVCpu, &cpu, pCtxCore, 0 /* not relevant here */,
                                                                    EMCODETYPE_SUPERVISOR));
#endif
            if (RT_FAILURE(rc))
            {
                Log(("EMInterpretInstructionCPU failed with %Rrc\n", rc));
                PATM_STAT_FAULT_INC(&pRec->patch);
                pRec->patch.cTraps++;
                return VINF_EM_RAW_EMULATE_INSTR;
            }
            return rc;
        }
    }
    return VERR_PATCH_NOT_FOUND;
}
