VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMRC/PATMRC.cpp@ 72372

Last change on this file since 72372 was 71720, checked in by vboxsync, 7 years ago

PATM: Don't try to pass stuff to patmRCVirtPagePfHandler that's not needed, as it seems the page address (pPage) isn't a hypervisor address, but a guest one.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 23.9 KB
Line 
1/* $Id: PATMRC.cpp 71720 2018-04-06 18:51:44Z vboxsync $ */
2/** @file
3 * PATM - Dynamic Guest OS Patching Manager - Raw-mode Context.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PATM
23#include <VBox/vmm/patm.h>
24#include <VBox/vmm/cpum.h>
25#include <VBox/vmm/stam.h>
26#include <VBox/vmm/pgm.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/iem.h>
30#include <VBox/vmm/selm.h>
31#include <VBox/vmm/mm.h>
32#include "PATMInternal.h"
33#include "PATMA.h"
34#include <VBox/vmm/vm.h>
35#include <VBox/dbg.h>
36#include <VBox/dis.h>
37#include <VBox/disopcode.h>
38#include <VBox/err.h>
39#include <VBox/log.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/string.h>
43
44
45/**
46 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
47 * PATM all access handler callback.}
48 *
49 * @remarks pvUser is NULL.
50 */
51DECLEXPORT(VBOXSTRICTRC) patmRCVirtPagePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore,
52 RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
53{
54 NOREF(pVCpu); NOREF(uErrorCode); NOREF(pCtxCore); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); RT_NOREF_PV(pvUser);
55
56 pVM->patm.s.pvFaultMonitor = (RTRCPTR)(pvRange + offRange);
57 return VINF_PATM_CHECK_PATCH_PAGE;
58}
59
60
/**
 * Checks if the write is located on a page which was patched before.
 * (if so, then we are not allowed to turn on r/w)
 *
 * @returns Strict VBox status code.
 * @retval  VINF_SUCCESS if access interpreted (@a pCtxCore != NULL).
 * @retval  VINF_PGM_HANDLER_DO_DEFAULT (@a pCtxCore == NULL).
 * @retval  VINF_EM_RAW_EMULATE_INSTR on needing to go to ring-3 to do this.
 * @retval  VERR_PATCH_NOT_FOUND if no patch was found.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCtxCore    CPU context if \#PF, NULL if other write.
 * @param   GCPtr       GC pointer to write address.
 * @param   cbWrite     Number of bytes to write.
 *
 */
VMMRC_INT_DECL(VBOXSTRICTRC) PATMRCHandleWriteToPatchPage(PVM pVM, PCPUMCTXCORE pCtxCore, RTRCPTR GCPtr, uint32_t cbWrite)
{
    Assert(cbWrite > 0);

    /* Quick boundary check: a write outside the span covering all patched
       instructions can never hit a patch, so bail out cheaply. */
    if (    PAGE_ADDRESS(GCPtr) < PAGE_ADDRESS(pVM->patm.s.pPatchedInstrGCLowest)
        ||  PAGE_ADDRESS(GCPtr) > PAGE_ADDRESS(pVM->patm.s.pPatchedInstrGCHighest))
        return VERR_PATCH_NOT_FOUND;

    STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWriteDetect, a);

    /*
     * Lookup the patch page record for the write.
     */
    RTRCUINTPTR pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
    RTRCUINTPTR pWritePageEnd   = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;

    PPATMPATCHPAGE pPatchPage;
    pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.CTXSUFF(PatchLookupTree)->PatchTreeByPage, pWritePageStart);
    if (    !pPatchPage
        &&  pWritePageStart != pWritePageEnd)
        /* The write crosses a page boundary; the second page may be the monitored one. */
        pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.CTXSUFF(PatchLookupTree)->PatchTreeByPage, pWritePageEnd);
    if (pPatchPage)
    {
        Log(("PATMGCHandleWriteToPatchPage: Found page %RRv for write to %RRv %d bytes (page low:high %RRv:%RRv\n",
             pPatchPage->Core.Key, GCPtr, cbWrite, pPatchPage->pLowestAddrGC, pPatchPage->pHighestAddrGC));
        if (    (RTRCUINTPTR)pPatchPage->pLowestAddrGC  > (RTRCUINTPTR)GCPtr + cbWrite - 1U
            ||  (RTRCUINTPTR)pPatchPage->pHighestAddrGC < (RTRCUINTPTR)GCPtr)
        {
            /* This part of the page was not patched; try to emulate the instruction / tell the caller to do so. */
            if (!pCtxCore)
            {
                LogFlow(("PATMHandleWriteToPatchPage: Allow writing %RRv LB %#x\n", GCPtr, cbWrite));
                STAM_COUNTER_INC(&pVM->patm.s.StatPatchWriteInterpreted);
                STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWriteDetect, a);
                return VINF_PGM_HANDLER_DO_DEFAULT;
            }
            LogFlow(("PATMHandleWriteToPatchPage: Interpret %#x accessing %RRv\n", pCtxCore->eip, GCPtr));
            int rc = VBOXSTRICTRC_TODO(EMInterpretInstruction(VMMGetCpu0(pVM), pCtxCore, (RTGCPTR)(RTRCUINTPTR)GCPtr));
            if (rc == VINF_SUCCESS)
            {
                STAM_COUNTER_INC(&pVM->patm.s.StatPatchWriteInterpreted);
                STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWriteDetect, a);
                return VINF_SUCCESS;
            }
            STAM_COUNTER_INC(&pVM->patm.s.StatPatchWriteInterpretedFailed);
            /* Interpretation failed; fall through and let ring-3 emulate it. */
        }
        /* The patch table lives in ring-3 memory; translate to an RC address before touching it. */
        R3PTRTYPE(PPATCHINFO) *paPatch = (R3PTRTYPE(PPATCHINFO) *)MMHyperR3ToRC(pVM, pPatchPage->papPatch);

        /* Increase the invalid write counter for each patch that's registered for that page. */
        for (uint32_t i=0;i<pPatchPage->cCount;i++)
        {
            PPATCHINFO pPatch = (PPATCHINFO)MMHyperR3ToRC(pVM, paPatch[i]);

            pPatch->cInvalidWrites++;
        }

        STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWriteDetect, a);
        return VINF_EM_RAW_EMULATE_INSTR;
    }

    STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWriteDetect, a);
    return VERR_PATCH_NOT_FOUND;
}
141
142
/**
 * Checks if the illegal instruction was caused by a patched instruction
 *
 * @returns VBox status
 * @retval  VINF_SUCCESS if a pending PATM action was carried out.
 * @retval  VINF_PATM_DUPLICATE_FUNCTION on an address lookup miss ring-3 may patch.
 * @retval  VINF_PATM_PENDING_IRQ_AFTER_IRET when an IRQ must be dispatched after the iret.
 * @retval  VINF_EM_RAW_EMULATE_INSTR for unexpected illegal instructions.
 * @retval  VERR_ACCESS_DENIED if the trap did not originate from supervisor code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCtxCore    The relevant core context.
 */
VMMRC_INT_DECL(int) PATMRCHandleIllegalInstrTrap(PVM pVM, PCPUMCTXCORE pCtxCore)
{
    PPATMPATCHREC pRec;
    PVMCPU pVCpu = VMMGetCpu0(pVM);
    int rc;

    /* Very important check -> otherwise we have a security leak. */
    AssertReturn(!pCtxCore->eflags.Bits.u1VM && (pCtxCore->ss.Sel & X86_SEL_RPL) <= (EMIsRawRing1Enabled(pVM) ? 2U : 1U),
                 VERR_ACCESS_DENIED);
    Assert(PATMIsPatchGCAddr(pVM, pCtxCore->eip));

    /* OP_ILLUD2 in PATM generated code? */
    if (CTXSUFF(pVM->patm.s.pGCState)->uPendingAction)
    {
        LogFlow(("PATMRC: Pending action %x at %x\n", CTXSUFF(pVM->patm.s.pGCState)->uPendingAction, pCtxCore->eip));

        /* Private PATM interface (@todo hack due to lack of anything generic). */
        /* Parameters:
         *      eax = Pending action (currently PATM_ACTION_LOOKUP_ADDRESS)
         *      ecx = PATM_ACTION_MAGIC
         */
        if (    (pCtxCore->eax & CTXSUFF(pVM->patm.s.pGCState)->uPendingAction)
            &&  pCtxCore->ecx == PATM_ACTION_MAGIC
            )
        {
            /* Acknowledge the action before dispatching it. */
            CTXSUFF(pVM->patm.s.pGCState)->uPendingAction = 0;

            switch (pCtxCore->eax)
            {
                case PATM_ACTION_LOOKUP_ADDRESS:
                {
                    /* Parameters:
                     *      edx = GC address to find
                     *      edi = PATCHJUMPTABLE ptr
                     */
                    AssertMsg(!pCtxCore->edi || PATMIsPatchGCAddr(pVM, pCtxCore->edi), ("edi = %x\n", pCtxCore->edi));

                    Log(("PATMRC: lookup %x jump table=%x\n", pCtxCore->edx, pCtxCore->edi));

                    pRec = patmQueryFunctionPatch(pVM, (RTRCPTR)pCtxCore->edx);
                    if (pRec)
                    {
                        if (pRec->patch.uState == PATCH_ENABLED)
                        {
                            RTGCUINTPTR pRelAddr = pRec->patch.pPatchBlockOffset;   /* make it relative */
                            rc = patmAddBranchToLookupCache(pVM, (RTRCPTR)pCtxCore->edi, (RTRCPTR)pCtxCore->edx, pRelAddr);
                            if (rc == VINF_SUCCESS)
                            {
                                Log(("Patch block %RRv called as function\n", pRec->patch.pPrivInstrGC));
                                pRec->patch.flags |= PATMFL_CODE_REFERENCED;

                                /* Skip the illegal instruction and hand the relative patch address back in eax. */
                                pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                                pCtxCore->eax = pRelAddr;
                                STAM_COUNTER_INC(&pVM->patm.s.StatFunctionFound);
                                return VINF_SUCCESS;
                            }
                            AssertFailed();
                            return rc;
                        }
                        else
                        {
                            pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                            pCtxCore->eax = 0;      /* make it fault */
                            STAM_COUNTER_INC(&pVM->patm.s.StatFunctionNotFound);
                            return VINF_SUCCESS;
                        }
                    }
                    else
                    {
                        /* Check first before trying to generate a function/trampoline patch. */
                        if (pVM->patm.s.fOutOfMemory)
                        {
                            pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                            pCtxCore->eax = 0;      /* make it fault */
                            STAM_COUNTER_INC(&pVM->patm.s.StatFunctionNotFound);
                            return VINF_SUCCESS;
                        }
                        STAM_COUNTER_INC(&pVM->patm.s.StatFunctionNotFound);
                        return VINF_PATM_DUPLICATE_FUNCTION;
                    }
                }

                case PATM_ACTION_DISPATCH_PENDING_IRQ:
                    /* Parameters:
                     *      edi = GC address to jump to
                     */
                    Log(("PATMRC: Dispatch pending interrupt; eip=%x->%x\n", pCtxCore->eip, pCtxCore->edi));

                    /* Change EIP to the guest address the patch would normally jump to after setting IF. */
                    pCtxCore->eip = pCtxCore->edi;

                    Assert(pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags == (PATM_RESTORE_EAX|PATM_RESTORE_ECX|PATM_RESTORE_EDI));
                    Assert(pVM->patm.s.CTXSUFF(pGCState)->fPIF == 0);

                    /* Restore the guest registers the patch code borrowed for the action call. */
                    pCtxCore->eax = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEAX;
                    pCtxCore->ecx = pVM->patm.s.CTXSUFF(pGCState)->Restore.uECX;
                    pCtxCore->edi = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEDI;

                    pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags = 0;

                    /* We are no longer executing PATM code; set PIF again. */
                    pVM->patm.s.CTXSUFF(pGCState)->fPIF = 1;

                    STAM_COUNTER_INC(&pVM->patm.s.StatCheckPendingIRQ);

                    /* The caller will call trpmGCExitTrap, which will dispatch pending interrupts for us. */
                    return VINF_SUCCESS;

                case PATM_ACTION_PENDING_IRQ_AFTER_IRET:
                    /* Parameters:
                     *      edi = GC address to jump to
                     */
                    Log(("PATMRC: Dispatch pending interrupt (iret); eip=%x->%x\n", pCtxCore->eip, pCtxCore->edi));
                    Assert(pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags == (PATM_RESTORE_EAX|PATM_RESTORE_ECX|PATM_RESTORE_EDI));
                    Assert(pVM->patm.s.CTXSUFF(pGCState)->fPIF == 0);

                    /* Change EIP to the guest address of the iret. */
                    pCtxCore->eip = pCtxCore->edi;

                    /* Restore the guest registers the patch code borrowed for the action call. */
                    pCtxCore->eax = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEAX;
                    pCtxCore->ecx = pVM->patm.s.CTXSUFF(pGCState)->Restore.uECX;
                    pCtxCore->edi = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEDI;
                    pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags = 0;

                    /* We are no longer executing PATM code; set PIF again. */
                    pVM->patm.s.CTXSUFF(pGCState)->fPIF = 1;

                    return VINF_PATM_PENDING_IRQ_AFTER_IRET;

                case PATM_ACTION_DO_V86_IRET:
                {
                    Log(("PATMRC: Do iret to V86 code; eip=%x\n", pCtxCore->eip));
                    Assert(pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags == (PATM_RESTORE_EAX|PATM_RESTORE_ECX));
                    Assert(pVM->patm.s.CTXSUFF(pGCState)->fPIF == 0);

                    pCtxCore->eax = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEAX;
                    pCtxCore->ecx = pVM->patm.s.CTXSUFF(pGCState)->Restore.uECX;
                    pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags = 0;

                    rc = EMInterpretIretV86ForPatm(pVM, pVCpu, pCtxCore);
                    if (RT_SUCCESS(rc))
                    {
                        STAM_COUNTER_INC(&pVM->patm.s.StatEmulIret);

                        /* We are no longer executing PATM code; set PIF again. */
                        pVM->patm.s.CTXSUFF(pGCState)->fPIF = 1;
                        PGMRZDynMapReleaseAutoSet(pVCpu);
                        CPUMGCCallV86Code(pCtxCore);
                        /* does not return */
                    }
                    else
                        STAM_COUNTER_INC(&pVM->patm.s.StatEmulIretFailed);
                    return rc;
                }

#ifdef DEBUG
                /* The remaining actions are debug-build logging aids: dump state and
                   resume right after the illegal instruction marker. */
                case PATM_ACTION_LOG_CLI:
                    Log(("PATMRC: CLI at %x (current IF=%d iopl=%d)\n", pCtxCore->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags) ));
                    pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_STI:
                    Log(("PATMRC: STI at %x (current IF=%d iopl=%d)\n", pCtxCore->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags) ));
                    pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_POPF_IF1:
                    Log(("PATMRC: POPF setting IF at %x (current IF=%d iopl=%d)\n", pCtxCore->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags)));
                    pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_POPF_IF0:
                    Log(("PATMRC: POPF at %x (current IF=%d iopl=%d)\n", pCtxCore->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags)));
                    pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_PUSHF:
                    Log(("PATMRC: PUSHF at %x (current IF=%d iopl=%d)\n", pCtxCore->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags) ));
                    pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_IF1:
                    Log(("PATMRC: IF=1 escape from %x\n", pCtxCore->eip));
                    pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_IRET:
                {
                    /* edx points at the guest iret frame; read it via MMGCRamRead so a
                       bad pointer cannot fault us. */
                    char *pIretFrame = (char *)pCtxCore->edx;
                    uint32_t eip, selCS, uEFlags;

                    rc  = MMGCRamRead(pVM, &eip,     pIretFrame, 4);
                    rc |= MMGCRamRead(pVM, &selCS,   pIretFrame + 4, 4);
                    rc |= MMGCRamRead(pVM, &uEFlags, pIretFrame + 8, 4);
                    if (rc == VINF_SUCCESS)
                    {
                        if (    (uEFlags & X86_EFL_VM)
                            ||  (selCS & X86_SEL_RPL) == 3)
                        {
                            /* Returning to a less privileged mode: the frame also holds ss:esp. */
                            uint32_t selSS, esp;

                            rc |= MMGCRamRead(pVM, &esp,   pIretFrame + 12, 4);
                            rc |= MMGCRamRead(pVM, &selSS, pIretFrame + 16, 4);

                            if (uEFlags & X86_EFL_VM)
                            {
                                /* V86 frames additionally carry the data segment registers. */
                                uint32_t selDS, selES, selFS, selGS;
                                rc  = MMGCRamRead(pVM, &selES, pIretFrame + 20, 4);
                                rc |= MMGCRamRead(pVM, &selDS, pIretFrame + 24, 4);
                                rc |= MMGCRamRead(pVM, &selFS, pIretFrame + 28, 4);
                                rc |= MMGCRamRead(pVM, &selGS, pIretFrame + 32, 4);
                                if (rc == VINF_SUCCESS)
                                {
                                    Log(("PATMRC: IRET->VM stack frame: return address %04X:%x eflags=%08x ss:esp=%04X:%x\n", selCS, eip, uEFlags, selSS, esp));
                                    Log(("PATMRC: IRET->VM stack frame: DS=%04X ES=%04X FS=%04X GS=%04X\n", selDS, selES, selFS, selGS));
                                }
                            }
                            else
                                Log(("PATMRC: IRET stack frame: return address %04X:%x eflags=%08x ss:esp=%04X:%x\n", selCS, eip, uEFlags, selSS, esp));
                        }
                        else
                            Log(("PATMRC: IRET stack frame: return address %04X:%x eflags=%08x\n", selCS, eip, uEFlags));
                    }
                    Log(("PATMRC: IRET from %x (IF->1) current eflags=%x\n", pCtxCore->eip, pVM->patm.s.CTXSUFF(pGCState)->uVMFlags));
                    pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;
                }

                case PATM_ACTION_LOG_GATE_ENTRY:
                {
                    /* Same frame-dumping logic as PATM_ACTION_LOG_IRET, for gate entry. */
                    char *pIretFrame = (char *)pCtxCore->edx;
                    uint32_t eip, selCS, uEFlags;

                    rc  = MMGCRamRead(pVM, &eip,     pIretFrame, 4);
                    rc |= MMGCRamRead(pVM, &selCS,   pIretFrame + 4, 4);
                    rc |= MMGCRamRead(pVM, &uEFlags, pIretFrame + 8, 4);
                    if (rc == VINF_SUCCESS)
                    {
                        if (    (uEFlags & X86_EFL_VM)
                            ||  (selCS & X86_SEL_RPL) == 3)
                        {
                            uint32_t selSS, esp;

                            rc |= MMGCRamRead(pVM, &esp,   pIretFrame + 12, 4);
                            rc |= MMGCRamRead(pVM, &selSS, pIretFrame + 16, 4);

                            if (uEFlags & X86_EFL_VM)
                            {
                                uint32_t selDS, selES, selFS, selGS;
                                rc  = MMGCRamRead(pVM, &selES, pIretFrame + 20, 4);
                                rc |= MMGCRamRead(pVM, &selDS, pIretFrame + 24, 4);
                                rc |= MMGCRamRead(pVM, &selFS, pIretFrame + 28, 4);
                                rc |= MMGCRamRead(pVM, &selGS, pIretFrame + 32, 4);
                                if (rc == VINF_SUCCESS)
                                {
                                    Log(("PATMRC: GATE->VM stack frame: return address %04X:%x eflags=%08x ss:esp=%04X:%x\n", selCS, eip, uEFlags, selSS, esp));
                                    Log(("PATMRC: GATE->VM stack frame: DS=%04X ES=%04X FS=%04X GS=%04X\n", selDS, selES, selFS, selGS));
                                }
                            }
                            else
                                Log(("PATMRC: GATE stack frame: return address %04X:%x eflags=%08x ss:esp=%04X:%x\n", selCS, eip, uEFlags, selSS, esp));
                        }
                        else
                            Log(("PATMRC: GATE stack frame: return address %04X:%x eflags=%08x\n", selCS, eip, uEFlags));
                    }
                    pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;
                }

                case PATM_ACTION_LOG_RET:
                    Log(("PATMRC: RET from %x to %x ESP=%x iopl=%d\n", pCtxCore->eip, pCtxCore->edx, pCtxCore->ebx, X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags)));
                    pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;

                case PATM_ACTION_LOG_CALL:
                    Log(("PATMRC: CALL to %RRv return addr %RRv ESP=%x iopl=%d\n", pVM->patm.s.CTXSUFF(pGCState)->GCCallPatchTargetAddr, pVM->patm.s.CTXSUFF(pGCState)->GCCallReturnAddr, pCtxCore->edx, X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags)));
                    pCtxCore->eip += PATM_ILLEGAL_INSTR_SIZE;
                    return VINF_SUCCESS;
#endif
                default:
                    AssertFailed();
                    break;
            }
        }
        else
            AssertFailed();
        /* Unknown/mismatched action: clear it so we don't loop on it. */
        CTXSUFF(pVM->patm.s.pGCState)->uPendingAction = 0;
    }
    AssertMsgFailed(("Unexpected OP_ILLUD2 in patch code at %x (pending action %x)!!!!\n", pCtxCore->eip, CTXSUFF(pVM->patm.s.pGCState)->uPendingAction));
    return VINF_EM_RAW_EMULATE_INSTR;
}
442
/**
 * Checks if the int 3 was caused by a patched instruction
 *
 * @returns Strict VBox status, including any status returned by
 *          IEMExecOneBypassWithPrefetchedByPC for int3-replacement patches.
 * @retval  VINF_SUCCESS
 * @retval  VINF_PATM_PATCH_INT3
 * @retval  VINF_EM_RAW_EMULATE_INSTR
 * @retval  VERR_PATCH_NOT_FOUND if the int 3 doesn't belong to a patch.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCtxCore    The relevant core context.
 */
VMMRC_INT_DECL(int) PATMRCHandleInt3PatchTrap(PVM pVM, PCPUMCTXCORE pCtxCore)
{
    PPATMPATCHREC pRec;

    /* Security check: only accept traps raised from supervisor code (ring 1,
       or ring 2 when raw ring-1 execution is enabled), never from V86 mode. */
    AssertReturn(!pCtxCore->eflags.Bits.u1VM
                 && (    (pCtxCore->ss.Sel & X86_SEL_RPL) == 1
                     ||  (EMIsRawRing1Enabled(pVM) && (pCtxCore->ss.Sel & X86_SEL_RPL) == 2)), VERR_ACCESS_DENIED);

    /* Int 3 in PATM generated code? (most common case) */
    if (PATMIsPatchGCAddr(pVM, pCtxCore->eip))
    {
        /* Note! Hardcoded assumption about it being a single byte int 3 instruction. */
        pCtxCore->eip--;
        return VINF_PATM_PATCH_INT3;
    }

    /** @todo could use simple caching here to speed things up. */
    pRec = (PPATMPATCHREC)RTAvloU32Get(&CTXSUFF(pVM->patm.s.PatchLookupTree)->PatchTree, (AVLOU32KEY)(pCtxCore->eip - 1));  /* eip is pointing to the instruction *after* 'int 3' already */
    if (pRec && pRec->patch.uState == PATCH_ENABLED)
    {
        if (pRec->patch.flags & PATMFL_INT3_REPLACEMENT_BLOCK)
        {
            Assert(pRec->patch.opcode == OP_CLI);
            /* This is a special cli block that was turned into an int 3 patch. We jump to the generated code manually. */
            pCtxCore->eip = (uint32_t)PATCHCODE_PTR_GC(&pRec->patch);
            STAM_COUNTER_INC(&pVM->patm.s.StatInt3BlockRun);
            return VINF_SUCCESS;
        }
        if (pRec->patch.flags & PATMFL_INT3_REPLACEMENT)
        {
            /* eip is pointing to the instruction *after* 'int 3' already */
            pCtxCore->eip = pCtxCore->eip - 1;

            PATM_STAT_RUN_INC(&pRec->patch);

            Log(("PATMHandleInt3PatchTrap found int3 for %s at %x\n", patmGetInstructionString(pRec->patch.opcode, 0), pCtxCore->eip));

            /* Only a whitelisted set of replaced opcodes is executed via the IEM
               bypass below; everything else goes to ring-3 emulation. */
            switch(pRec->patch.opcode)
            {
                case OP_CPUID:
                case OP_IRET:
#ifdef VBOX_WITH_RAW_RING1
                case OP_SMSW:
                case OP_MOV:        /* mov xx, CS */
#endif
                    break;

                case OP_STR:
                case OP_SGDT:
                case OP_SLDT:
                case OP_SIDT:
                case OP_LSL:
                case OP_LAR:
#ifndef VBOX_WITH_RAW_RING1
                case OP_SMSW:
#endif
                case OP_VERW:
                case OP_VERR:
                default:
                    PATM_STAT_FAULT_INC(&pRec->patch);
                    pRec->patch.cTraps++;
                    return VINF_EM_RAW_EMULATE_INSTR;
            }

            PVMCPU pVCpu = VMMGetCpu0(pVM);
            DISCPUMODE enmCpuMode = CPUMGetGuestDisMode(pVCpu);
            if (enmCpuMode != DISCPUMODE_32BIT)
            {
                AssertFailed();
                return VINF_EM_RAW_EMULATE_INSTR;
            }

            /* Execute the original (saved) instruction bytes instead of the int 3
               that currently sits at EIP. */
            VBOXSTRICTRC rcStrict;
            rcStrict = IEMExecOneBypassWithPrefetchedByPC(pVCpu, pCtxCore, pCtxCore->rip,
                                                          pRec->patch.aPrivInstr, pRec->patch.cbPrivInstr);
            if (RT_SUCCESS(rcStrict))
            {
                if (rcStrict != VINF_SUCCESS)
                    Log(("PATMRCHandleInt3PatchTrap: returns %Rrc\n", VBOXSTRICTRC_TODO(rcStrict)));
                return VBOXSTRICTRC_TODO(rcStrict);
            }

            Log(("IEMExecOneBypassWithPrefetchedByPC failed with %Rrc\n", VBOXSTRICTRC_TODO(rcStrict)));
            PATM_STAT_FAULT_INC(&pRec->patch);
            pRec->patch.cTraps++;
            return VINF_EM_RAW_EMULATE_INSTR;
        }
    }
    return VERR_PATCH_NOT_FOUND;
}
545
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette