VirtualBox

source: vbox/trunk/src/VBox/VMM/PATM/VMMAll/PATMAll.cpp@ 1104

Last change on this file since 1104 was 1104, checked in by vboxsync, 18 years ago

Correction for same privilege level returns.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 34.2 KB
1/* $Id: PATMAll.cpp 1104 2007-02-28 13:05:24Z vboxsync $ */
2/** @file
3 * PATM - The Patch Manager, all contexts.
4 */
5
6/*
7 * Copyright (C) 2006 InnoTek Systemberatung GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * If you received this file as part of a commercial VirtualBox
18 * distribution, then only the terms of your commercial VirtualBox
19 * license agreement apply instead of the previous paragraph.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_PATM
26#include <VBox/patm.h>
27#include <VBox/cpum.h>
28#include <VBox/dis.h>
29#include <VBox/disopcode.h>
30#include <VBox/em.h>
31#include <VBox/err.h>
32#include <VBox/selm.h>
33#include <VBox/mm.h>
34#include "PATMInternal.h"
35#include <VBox/vm.h>
36#include "PATMA.h"
37
38#include <VBox/log.h>
39#include <iprt/assert.h>
40
41
42/**
43 * Load virtualized flags.
44 *
45 * This function is called from CPUMRawEnter(). It doesn't have to update the
46 * IF and IOPL eflags bits; the caller will force IF to be set and IOPL to 0, respectively.
47 *
48 * @param pVM VM handle.
49 * @param pCtxCore The cpu context core.
50 * @see pg_raw
51 */
52PATMDECL(void) PATMRawEnter(PVM pVM, PCPUMCTXCORE pCtxCore)
53{
54 bool fPatchCode = PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtxCore->eip);
55
56 /*
57 * Currently we don't bother to check whether PATM is enabled or not.
58 * For all cases where it isn't, IOPL will be safe and IF will be set.
59 */
60 register uint32_t efl = pCtxCore->eflags.u32;
61 CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = efl & PATM_VIRTUAL_FLAGS_MASK;
62 AssertMsg((efl & X86_EFL_IF) || PATMShouldUseRawMode(pVM, (RTGCPTR)pCtxCore->eip), ("X86_EFL_IF is clear and PATM is disabled! (eip=%VGv eflags=%08x fPATM=%d pPATMGC=%VGv-%VGv\n", pCtxCore->eip, pCtxCore->eflags.u32, PATMIsEnabled(pVM), pVM->patm.s.pPatchMemGC, pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem));
63
64 AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || fPatchCode, ("fPIF=%d eip=%VGv\n", CTXSUFF(pVM->patm.s.pGCState)->fPIF, pCtxCore->eip));
65
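/* The guest's view of IF and IOPL now lives in uVMFlags; below we force the real IF to 1 so the host keeps receiving interrupts while guest code runs in raw mode (guest cli/sti are virtualized by the patches). */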
66 efl &= ~PATM_VIRTUAL_FLAGS_MASK;
67 efl |= X86_EFL_IF;
68 pCtxCore->eflags.u32 = efl;
69
70#ifdef IN_RING3
71#ifdef PATM_EMULATE_SYSENTER
72 PCPUMCTX pCtx;
73 int rc;
74
75 /* Check if the sysenter handler has changed. */
76 rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
77 AssertRC(rc);
78 if ( rc == VINF_SUCCESS
79 && pCtx->SysEnter.cs != 0
80 && pCtx->SysEnter.eip != 0
81 )
82 {
83 if (pVM->patm.s.pfnSysEnterGC != (RTGCPTR)pCtx->SysEnter.eip)
84 {
85 pVM->patm.s.pfnSysEnterPatchGC = 0;
86 pVM->patm.s.pfnSysEnterGC = 0;
87
88 Log2(("PATMRawEnter: installing sysenter patch for %VGv\n", pCtx->SysEnter.eip));
89 pVM->patm.s.pfnSysEnterPatchGC = PATMR3QueryPatchGCPtr(pVM, pCtx->SysEnter.eip);
90 if (pVM->patm.s.pfnSysEnterPatchGC == 0)
91 {
92 rc = PATMR3InstallPatch(pVM, pCtx->SysEnter.eip, PATMFL_SYSENTER | PATMFL_CODE32);
93 if (rc == VINF_SUCCESS)
94 {
95 pVM->patm.s.pfnSysEnterPatchGC = PATMR3QueryPatchGCPtr(pVM, pCtx->SysEnter.eip);
96 pVM->patm.s.pfnSysEnterGC = (RTGCPTR)pCtx->SysEnter.eip;
97 Assert(pVM->patm.s.pfnSysEnterPatchGC);
98 }
99 }
100 else
101 pVM->patm.s.pfnSysEnterGC = (RTGCPTR)pCtx->SysEnter.eip;
102 }
103 }
104 else
105 {
106 pVM->patm.s.pfnSysEnterPatchGC = 0;
107 pVM->patm.s.pfnSysEnterGC = 0;
108 }
109#endif
110#endif
111}
112
113
114/**
115 * Restores virtualized flags.
116 *
117 * This function is called from CPUMRawLeave(). It will update the eflags register.
118 *
119 * @note Only here are we allowed to switch back to guest code (barring a special reason such as a trap in patch code)!
120 *
121 * @param pVM VM handle.
122 * @param pCtxCore The cpu context core.
123 * @param rawRC Raw mode return code
124 * @see @ref pg_raw
125 */
126PATMDECL(void) PATMRawLeave(PVM pVM, PCPUMCTXCORE pCtxCore, int rawRC)
127{
128 bool fPatchCode = PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtxCore->eip);
129 /*
130 * We will only be called if PATMRawEnter was previously called.
131 */
132 register uint32_t efl = pCtxCore->eflags.u32;
133 efl = (efl & ~PATM_VIRTUAL_FLAGS_MASK) | (CTXSUFF(pVM->patm.s.pGCState)->uVMFlags & PATM_VIRTUAL_FLAGS_MASK);
134 pCtxCore->eflags.u32 = efl;
135 CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = X86_EFL_IF;
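/* The virtualized flags were merged back into eflags above; reset the shadow copy to its neutral state (just IF set). */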
136
137 AssertReleaseMsg((efl & X86_EFL_IF) || fPatchCode || rawRC == VINF_PATM_PENDING_IRQ_AFTER_IRET, ("Inconsistent state at %VGv\n", pCtxCore->eip));
138 AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || fPatchCode, ("fPIF=%d eip=%VGv\n", CTXSUFF(pVM->patm.s.pGCState)->fPIF, pCtxCore->eip));
139
140#ifdef IN_RING3
141 if ( (efl & X86_EFL_IF)
142 && fPatchCode
143 )
144 {
145 if ( rawRC < VINF_PATM_LEAVEGC_FIRST
146 || rawRC > VINF_PATM_LEAVEGC_LAST)
147 {
148 /*
149 * Golden rules:
150 * - Don't interrupt special patch streams that replace special instructions
151 * - Don't break instruction fusing (sti, pop ss, mov ss)
152 * - Don't go back to an instruction that has been overwritten by a patch jump
153 * - Don't interrupt an idt handler on entry (1st instruction); technically incorrect
154 *
155 */
156 if (CTXSUFF(pVM->patm.s.pGCState)->fPIF == 1) /* consistent patch instruction state */
157 {
158 PATMTRANSSTATE enmState;
159 RTGCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtxCore->eip, &enmState);
160
161 AssertRelease(pOrgInstrGC);
162
163 Assert(enmState != PATMTRANS_OVERWRITTEN);
164 if (enmState == PATMTRANS_SAFE)
165 {
166 Assert(!PATMFindActivePatchByEntrypoint(pVM, pOrgInstrGC));
167 Log(("Switchback from %VGv to %VGv (Psp=%x)\n", pCtxCore->eip, pOrgInstrGC, CTXSUFF(pVM->patm.s.pGCState)->Psp));
168 STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBack);
169 pCtxCore->eip = pOrgInstrGC;
170 fPatchCode = false; /* to reset the stack ptr */
171
172 CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0; /* reset this pointer; safe to do so, as otherwise the state would have been PATMTRANS_INHIBITIRQ */
173 }
174 else
175 {
176 LogFlow(("Patch address %VGv can't be interrupted (state=%d)!\n", pCtxCore->eip, enmState));
177 STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBackFail);
178 }
179 }
180 else
181 {
182 LogFlow(("Patch address %VGv can't be interrupted (fPIF=%d)!\n", pCtxCore->eip, CTXSUFF(pVM->patm.s.pGCState)->fPIF));
183 STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBackFail);
184 }
185 }
186 }
187#else /* !IN_RING3 */
188 AssertMsgFailed(("!IN_RING3"));
189#endif /* !IN_RING3 */
190
191 if (!fPatchCode)
192 {
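/* If the patch recorded an interrupt-inhibit address (e.g. right after sti or mov ss) and we are returning to exactly that guest address, hand the inhibition over to EM so the next instruction is still protected from interrupts. */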
193 if (CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts == (RTGCPTR)pCtxCore->eip)
194 {
195 EMSetInhibitInterruptsPC(pVM, pCtxCore->eip);
196 }
197 CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0;
198
199 /* Reset the stack pointer to the top of the stack. */
200#ifdef DEBUG
201 if (CTXSUFF(pVM->patm.s.pGCState)->Psp != PATM_STACK_SIZE)
202 {
203 LogFlow(("PATMRawLeave: Reset PATM stack (Psp = %x)\n", CTXSUFF(pVM->patm.s.pGCState)->Psp));
204 }
205#endif
206 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
207 }
208}
209
210/**
211 * Get the EFLAGS.
212 * This is a worker for CPUMRawGetEFlags().
213 *
214 * @returns The eflags.
215 * @param pVM The VM handle.
216 * @param pCtxCore The context core.
217 */
218PATMDECL(uint32_t) PATMRawGetEFlags(PVM pVM, PCCPUMCTXCORE pCtxCore)
219{
220 uint32_t efl = pCtxCore->eflags.u32;
221 efl &= ~PATM_VIRTUAL_FLAGS_MASK;
222 efl |= pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & PATM_VIRTUAL_FLAGS_MASK;
223 return efl;
224}
225
226/**
227 * Updates the EFLAGS.
228 * This is a worker for CPUMRawSetEFlags().
229 *
230 * @param pVM The VM handle.
231 * @param pCtxCore The context core.
232 * @param efl The new EFLAGS value.
233 */
234PATMDECL(void) PATMRawSetEFlags(PVM pVM, PCPUMCTXCORE pCtxCore, uint32_t efl)
235{
236 pVM->patm.s.CTXSUFF(pGCState)->uVMFlags = efl & PATM_VIRTUAL_FLAGS_MASK;
237 efl &= ~PATM_VIRTUAL_FLAGS_MASK;
238 efl |= X86_EFL_IF;
239 pCtxCore->eflags.u32 = efl;
240}
241
242/**
243 * Check if we must use raw mode (patch code being executed)
244 *
245 * @param pVM VM handle.
246 * @param pAddrGC Guest context address
247 */
248PATMDECL(bool) PATMShouldUseRawMode(PVM pVM, RTGCPTR pAddrGC)
249{
250 return ( PATMIsEnabled(pVM)
251 && ((pAddrGC >= pVM->patm.s.pPatchMemGC && pAddrGC < pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem))) ? true : false;
252}
253
254/**
255 * Returns the guest context pointer to the PATM GC state structure
256 *
257 * @returns GC pointer to the PATM GC state.
258 * @param pVM The VM to operate on.
259 */
260PATMDECL(GCPTRTYPE(PPATMGCSTATE)) PATMQueryGCState(PVM pVM)
261{
262 return pVM->patm.s.pGCStateGC;
263}
264
265/**
266 * Checks whether the GC address is part of our patch region
267 *
268 * @returns true if the address lies within the patch memory region, false otherwise.
269 * @param pVM The VM to operate on.
270 * @param pAddrGC Guest context address
271 */
272PATMDECL(bool) PATMIsPatchGCAddr(PVM pVM, RTGCPTR pAddrGC)
273{
274 return (PATMIsEnabled(pVM) && pAddrGC >= pVM->patm.s.pPatchMemGC && pAddrGC < pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem) ? true : false;
275}
276
277/**
278 * Set parameters for pending MMIO patch operation
279 *
280 * @returns VBox status code.
281 * @param pVM The VM to operate on.
282 * @param GCPhys MMIO physical address
283 * @param pCachedData GC pointer to cached data
284 */
285PATMDECL(int) PATMSetMMIOPatchInfo(PVM pVM, RTGCPHYS GCPhys, RTGCPTR pCachedData)
286{
287 pVM->patm.s.mmio.GCPhys = GCPhys;
288 pVM->patm.s.mmio.pCachedData = pCachedData;
289
290 return VINF_SUCCESS;
291}
292
293/**
294 * Checks if the interrupt flag is enabled or not.
295 *
296 * @returns true if it's enabled.
297 * @returns false if it's disabled.
298 *
299 * @param pVM The VM handle.
300 */
301PATMDECL(bool) PATMAreInterruptsEnabled(PVM pVM)
302{
303 PCPUMCTX pCtx = 0;
304 int rc;
305
306 rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
307 AssertRC(rc);
308
309 return PATMAreInterruptsEnabledByCtxCore(pVM, CPUMCTX2CORE(pCtx));
310}
311
312/**
313 * Checks if the interrupt flag is enabled or not.
314 *
315 * @returns true if it's enabled.
316 * @returns false if it's disabled.
317 *
318 * @param pVM The VM handle.
319 * @param pCtxCore CPU context
320 */
321PATMDECL(bool) PATMAreInterruptsEnabledByCtxCore(PVM pVM, PCPUMCTXCORE pCtxCore)
322{
323 if (PATMIsEnabled(pVM))
324 {
325 if (PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtxCore->eip))
326 return false;
327 }
328 return !!(pCtxCore->eflags.u32 & X86_EFL_IF);
329}
330
331/**
332 * Check if the instruction is patched as a duplicated function
333 *
334 * @returns patch record
335 * @param pVM The VM to operate on.
336 * @param pInstrGC Guest context pointer to the instruction
337 *
338 */
339PATMDECL(PPATMPATCHREC) PATMQueryFunctionPatch(PVM pVM, RTGCPTR pInstrGC)
340{
341 PPATMPATCHREC pRec;
342
343 pRec = (PPATMPATCHREC)RTAvloGCPtrGet(&CTXSUFF(pVM->patm.s.PatchLookupTree)->PatchTree, pInstrGC);
344 if ( pRec
345 && (pRec->patch.uState == PATCH_ENABLED)
346 && (pRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CALLABLE_AS_FUNCTION))
347 )
348 return pRec;
349 return 0;
350}
351
352/**
353 * Checks if the int 3 was caused by a patched instruction
354 *
355 * @returns true if the int 3 was caused by a patched instruction, false otherwise
356 *
357 * @param pVM The VM handle.
358 * @param pInstrGC Instruction pointer
359 * @param pOpcode Original instruction opcode (out, optional)
360 * @param pSize Original instruction size (out, optional)
361 */
362PATMDECL(bool) PATMIsInt3Patch(PVM pVM, RTGCPTR pInstrGC, uint32_t *pOpcode, uint32_t *pSize)
363{
364 PPATMPATCHREC pRec;
365
366 pRec = (PPATMPATCHREC)RTAvloGCPtrGet(&CTXSUFF(pVM->patm.s.PatchLookupTree)->PatchTree, pInstrGC);
367 if ( pRec
368 && (pRec->patch.uState == PATCH_ENABLED)
369 && (pRec->patch.flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
370 )
371 {
372 if (pOpcode) *pOpcode = pRec->patch.opcode;
373 if (pSize) *pSize = pRec->patch.cbPrivInstr;
374 return true;
375 }
376 return false;
377}
378
379/**
380 * Emulate sysenter, sysexit and syscall instructions
381 *
382 * @returns VBox status
383 *
384 * @param pVM The VM handle.
385 * @param pRegFrame The relevant core context.
386 * @param pCpu Disassembly context
387 */
388PATMDECL(int) PATMSysCall(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
389{
390 PCPUMCTX pCtx;
391 int rc;
392
393 rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
394 AssertRCReturn(rc, VINF_EM_RAW_RING_SWITCH);
395
396 if (pCpu->pCurInstr->opcode == OP_SYSENTER)
397 {
398 if ( pCtx->SysEnter.cs == 0
399 || (pRegFrame->cs & X86_SEL_RPL) != 3
400 || pVM->patm.s.pfnSysEnterPatchGC == 0
401 || pVM->patm.s.pfnSysEnterGC != (RTGCPTR)pCtx->SysEnter.eip
402 || !(PATMRawGetEFlags(pVM, pRegFrame) & X86_EFL_IF))
403 goto end;
404
405 Log2(("PATMSysCall: sysenter from %VGv to %VGv\n", pRegFrame->eip, pVM->patm.s.pfnSysEnterPatchGC));
406 /** @todo the base and limit are forced to 0 & 4G-1 resp. We assume the selector is wide open here. */
407 /** @note The Intel manual suggests that the OS is responsible for this. */
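/* Mimic SYSENTER: CS comes from the SYSENTER_CS MSR with SS = CS + 8 (flat selectors), ESP from SYSENTER_ESP, and EIP is redirected into the installed patch. The RPL is forced to 1 rather than 0, presumably because guest ring-0 code executes in ring 1 under raw mode. */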
408 pRegFrame->cs = (pCtx->SysEnter.cs & ~X86_SEL_RPL) | 1;
409 pRegFrame->eip = /** @todo ugly conversion! */(uint32_t)pVM->patm.s.pfnSysEnterPatchGC;
410 pRegFrame->ss = pRegFrame->cs + 8; /* SysEnter.cs + 8 */
411 pRegFrame->esp = pCtx->SysEnter.esp;
412 pRegFrame->eflags.u32 &= ~(X86_EFL_VM|X86_EFL_RF);
413 pRegFrame->eflags.u32 |= X86_EFL_IF;
414
415 /* Turn off interrupts. */
416 pVM->patm.s.CTXSUFF(pGCState)->uVMFlags &= ~X86_EFL_IF;
417
418 STAM_COUNTER_INC(&pVM->patm.s.StatSysEnter);
419
420 return VINF_SUCCESS;
421 }
422 else
423 if (pCpu->pCurInstr->opcode == OP_SYSEXIT)
424 {
425 if ( pCtx->SysEnter.cs == 0
426 || (pRegFrame->cs & X86_SEL_RPL) != 1
427 || !(PATMRawGetEFlags(pVM, pRegFrame) & X86_EFL_IF))
428 goto end;
429
430 Log2(("PATMSysCall: sysexit from %VGv to %VGv\n", pRegFrame->eip, pRegFrame->edx));
431
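/* Mimic SYSEXIT: CS = SYSENTER_CS + 16 with RPL 3, SS = CS + 8, EIP = EDX and ESP = ECX, as the instruction defines; we return to ring 3. */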
432 pRegFrame->cs = ((pCtx->SysEnter.cs + 16) & ~X86_SEL_RPL) | 3;
433 pRegFrame->eip = pRegFrame->edx;
434 pRegFrame->ss = pRegFrame->cs + 8; /* SysEnter.cs + 24 */
435 pRegFrame->esp = pRegFrame->ecx;
436
437 STAM_COUNTER_INC(&pVM->patm.s.StatSysExit);
438
439 return VINF_SUCCESS;
440 }
441 else
442 if (pCpu->pCurInstr->opcode == OP_SYSCALL)
443 {
444 /** @todo implement syscall */
445 }
446 else
447 if (pCpu->pCurInstr->opcode == OP_SYSRET)
448 {
449 /** @todo implement sysret */
450 }
451
452end:
453 return VINF_EM_RAW_RING_SWITCH;
454}
455
456/**
457 * Checks if the illegal instruction was caused by a patched instruction
458 *
459 * @returns VBox status
460 *
461 * @param pVM The VM handle.
462 * @param pRegFrame The relevant core context.
463 */
464PATMDECL(int) PATMHandleIllegalInstrTrap(PVM pVM, PCPUMCTXCORE pRegFrame)
465{
466 PPATMPATCHREC pRec;
467 int rc;
468
469 /* Very important check -> otherwise we have a security leak. */
470 AssertReturn((pRegFrame->ss & X86_SEL_RPL) == 1, VERR_ACCESS_DENIED);
471 Assert(PATMIsPatchGCAddr(pVM, (RTGCPTR)pRegFrame->eip));
472
473 /* OP_ILLUD2 in PATM generated code? */
474 if (CTXSUFF(pVM->patm.s.pGCState)->uPendingAction)
475 {
476 LogFlow(("PATMGC: Pending action %x at %VGv\n", CTXSUFF(pVM->patm.s.pGCState)->uPendingAction, pRegFrame->eip));
477
478 /* Private PATM interface (@todo hack due to lack of anything generic). */
479 /* Parameters:
480 * eax = Pending action (currently PATM_ACTION_LOOKUP_ADDRESS)
481 * ecx = PATM_ACTION_MAGIC
482 */
483 if ( (pRegFrame->eax & CTXSUFF(pVM->patm.s.pGCState)->uPendingAction)
484 && pRegFrame->ecx == PATM_ACTION_MAGIC
485 )
486 {
487 CTXSUFF(pVM->patm.s.pGCState)->uPendingAction = 0;
488
489 switch (pRegFrame->eax)
490 {
491 case PATM_ACTION_LOOKUP_ADDRESS:
492 {
493 /* Parameters:
494 * edx = GC address to find
495 * edi = PATCHJUMPTABLE ptr
496 */
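/* On success the patch-relative address of the duplicated function is added to the caller's lookup table and returned in eax; eax = 0 makes the patch code fault so the case is handled elsewhere. */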
497 AssertMsg(!pRegFrame->edi || PATMIsPatchGCAddr(pVM, (RTGCPTR)pRegFrame->edi), ("edi = %VGv\n", pRegFrame->edi));
498
499 Log(("PATMGC: lookup %VGv jump table=%VGv\n", pRegFrame->edx, pRegFrame->edi));
500
501 pRec = PATMQueryFunctionPatch(pVM, (RTGCPTR)(pRegFrame->edx));
502 if (pRec)
503 {
504 if (pRec->patch.uState == PATCH_ENABLED)
505 {
506 RTGCUINTPTR pRelAddr = pRec->patch.pPatchBlockOffset; /* make it relative */
507 rc = PATMAddBranchToLookupCache(pVM, (RTGCPTR)pRegFrame->edi, (RTGCPTR)pRegFrame->edx, pRelAddr);
508 if (rc == VINF_SUCCESS)
509 {
510 pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
511 pRegFrame->eax = pRelAddr;
512 STAM_COUNTER_INC(&pVM->patm.s.StatFunctionFound);
513 return VINF_SUCCESS;
514 }
515 AssertFailed();
516 }
517 else
518 {
519 pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
520 pRegFrame->eax = 0; /* make it fault */
521 STAM_COUNTER_INC(&pVM->patm.s.StatFunctionNotFound);
522 return VINF_SUCCESS;
523 }
524 }
525 else
526 {
527#if 0
528 if (pRegFrame->edx == 0x806eca98)
529 {
530 pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
531 pRegFrame->eax = 0; /* make it fault */
532 STAM_COUNTER_INC(&pVM->patm.s.StatFunctionNotFound);
533 return VINF_SUCCESS;
534 }
535#endif
536 STAM_COUNTER_INC(&pVM->patm.s.StatFunctionNotFound);
537 return VINF_PATM_DUPLICATE_FUNCTION;
538 }
539 }
540
541 case PATM_ACTION_DISPATCH_PENDING_IRQ:
542 /* Parameters:
543 * edi = GC address to jump to
544 */
545 Log(("PATMGC: Dispatch pending interrupt; eip=%VGv->%VGv\n", pRegFrame->eip, pRegFrame->edi));
546
547 /* Change EIP to the guest address the patch would normally jump to after setting IF. */
548 pRegFrame->eip = pRegFrame->edi;
549
550 Assert(pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags == (PATM_RESTORE_EAX|PATM_RESTORE_ECX|PATM_RESTORE_EDI));
551 Assert(pVM->patm.s.CTXSUFF(pGCState)->fPIF == 0);
552
553 pRegFrame->eax = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEAX;
554 pRegFrame->ecx = pVM->patm.s.CTXSUFF(pGCState)->Restore.uECX;
555 pRegFrame->edi = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEDI;
556
557 pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags = 0;
558
559 /* We are no longer executing PATM code; set PIF again. */
560 pVM->patm.s.CTXSUFF(pGCState)->fPIF = 1;
561
562 STAM_COUNTER_INC(&pVM->patm.s.StatCheckPendingIRQ);
563
564 /* The caller will call trpmGCExitTrap, which will dispatch pending interrupts for us. */
565 return VINF_SUCCESS;
566
567 case PATM_ACTION_PENDING_IRQ_AFTER_IRET:
568 /* Parameters:
569 * edi = GC address to jump to
570 */
571 Log(("PATMGC: Dispatch pending interrupt (iret); eip=%VGv->%VGv\n", pRegFrame->eip, pRegFrame->edi));
572 Assert(pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags == (PATM_RESTORE_EAX|PATM_RESTORE_ECX|PATM_RESTORE_EDI));
573 Assert(pVM->patm.s.CTXSUFF(pGCState)->fPIF == 0);
574
575 /* Change EIP to the guest address of the iret. */
576 pRegFrame->eip = pRegFrame->edi;
577
578 pRegFrame->eax = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEAX;
579 pRegFrame->ecx = pVM->patm.s.CTXSUFF(pGCState)->Restore.uECX;
580 pRegFrame->edi = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEDI;
581 pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags = 0;
582
583 /* We are no longer executing PATM code; set PIF again. */
584 pVM->patm.s.CTXSUFF(pGCState)->fPIF = 1;
585
586 return VINF_PATM_PENDING_IRQ_AFTER_IRET;
587
588#ifdef DEBUG
589 case PATM_ACTION_LOG_CLI:
590 Log(("PATMGC: CLI at %VGv (current IF=%d iopl=%d)\n", pRegFrame->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags) ));
591 pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
592 return VINF_SUCCESS;
593
594 case PATM_ACTION_LOG_STI:
595 Log(("PATMGC: STI at %VGv (current IF=%d iopl=%d)\n", pRegFrame->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags) ));
596 pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
597 return VINF_SUCCESS;
598
599 case PATM_ACTION_LOG_POPF_IF1:
600 Log(("PATMGC: POPF setting IF at %VGv (current IF=%d iopl=%d)\n", pRegFrame->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags)));
601 pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
602 return VINF_SUCCESS;
603
604 case PATM_ACTION_LOG_POPF_IF0:
605 Log(("PATMGC: POPF at %VGv (current IF=%d iopl=%d)\n", pRegFrame->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags)));
606 pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
607 return VINF_SUCCESS;
608
609 case PATM_ACTION_LOG_PUSHF:
610 Log(("PATMGC: PUSHF at %VGv (current IF=%d iopl=%d)\n", pRegFrame->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags) ));
611 pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
612 return VINF_SUCCESS;
613
614 case PATM_ACTION_LOG_IF1:
615 Log(("PATMGC: IF=1 escape from %VGv\n", pRegFrame->eip));
616 pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
617 return VINF_SUCCESS;
618
619 case PATM_ACTION_LOG_IRET:
620 {
621#ifdef IN_GC
622 char *pIretFrame = (char *)pRegFrame->edx;
623 uint32_t eip, selCS, uEFlags, selSS, esp;
624
625 rc = MMGCRamRead(pVM, &eip, pIretFrame, 4);
626 rc |= MMGCRamRead(pVM, &selCS, pIretFrame + 4, 4);
627 rc |= MMGCRamRead(pVM, &uEFlags, pIretFrame + 8, 4);
628 rc |= MMGCRamRead(pVM, &esp, pIretFrame + 12, 4);
629 rc |= MMGCRamRead(pVM, &selSS, pIretFrame + 16, 4);
630 if (rc == VINF_SUCCESS)
631 {
632 if ( (uEFlags & X86_EFL_VM)
633 || (selCS & X86_SEL_RPL) == 3)
634 {
635 Log(("PATMGC: IRET stack frame: return address %04X:%VGv eflags=%08x ss:esp=%04X:%VGv\n", selCS, eip, uEFlags, selSS, esp));
636 if (uEFlags & X86_EFL_VM)
637 {
638 uint32_t selDS, selES, selFS, selGS;
639 rc = MMGCRamRead(pVM, &selES, pIretFrame + 20, 4);
640 rc |= MMGCRamRead(pVM, &selDS, pIretFrame + 24, 4);
641 rc |= MMGCRamRead(pVM, &selFS, pIretFrame + 28, 4);
642 rc |= MMGCRamRead(pVM, &selGS, pIretFrame + 32, 4);
643 if (rc == VINF_SUCCESS)
644 Log(("PATMGC: IRET stack frame: DS=%04X ES=%04X FS=%04X GS=%04X\n", selDS, selES, selFS, selGS));
645 }
646 }
647 else
648 Log(("PATMGC: IRET stack frame: return address %04X:%VGv eflags=%08x\n", selCS, eip, uEFlags));
649 }
650#endif
651 Log(("PATMGC: IRET from %VGv (IF->1) current eflags=%x\n", pRegFrame->eip, pVM->patm.s.CTXSUFF(pGCState)->uVMFlags));
652 pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
653 return VINF_SUCCESS;
654 }
655
656 case PATM_ACTION_LOG_RET:
657 Log(("PATMGC: RET to %VGv ESP=%VGv iopl=%d\n", pRegFrame->edx, pRegFrame->ebx, X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags)));
658 pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
659 return VINF_SUCCESS;
660
661 case PATM_ACTION_LOG_CALL:
662 Log(("PATMGC: CALL to %VGv return addr %VGv ESP=%VGv iopl=%d\n", pVM->patm.s.CTXSUFF(pGCState)->GCCallPatchTargetAddr, pVM->patm.s.CTXSUFF(pGCState)->GCCallReturnAddr, pRegFrame->edx, X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags)));
663 pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
664 return VINF_SUCCESS;
665#endif
666 default:
667 AssertFailed();
668 break;
669 }
670 }
671 else
672 AssertFailed();
673 CTXSUFF(pVM->patm.s.pGCState)->uPendingAction = 0;
674 }
675 AssertMsgFailed(("Unexpected OP_ILLUD2 in patch code at %VGv (pending action %x)!!!!\n", pRegFrame->eip, CTXSUFF(pVM->patm.s.pGCState)->uPendingAction));
676 return VINF_EM_RAW_EMULATE_INSTR;
677}
678
679/**
680 * Checks if the int 3 was caused by a patched instruction
681 *
682 * @returns VBox status
683 *
684 * @param pVM The VM handle.
685 * @param pRegFrame The relevant core context.
686 */
687PATMDECL(int) PATMHandleInt3PatchTrap(PVM pVM, PCPUMCTXCORE pRegFrame)
688{
689 PPATMPATCHREC pRec;
690 int rc;
691
692 Assert((pRegFrame->ss & X86_SEL_RPL) == 1);
693
694 /* Int 3 in PATM generated code? (most common case) */
695 if (PATMIsPatchGCAddr(pVM, (RTGCPTR)pRegFrame->eip))
696 {
697 /* @note hardcoded assumption about it being a single byte int 3 instruction. */
698 pRegFrame->eip--;
699 return VINF_PATM_PATCH_INT3;
700 }
701
702 /** @todo could use simple caching here to speed things up. */
703 pRec = (PPATMPATCHREC)RTAvloGCPtrGet(&CTXSUFF(pVM->patm.s.PatchLookupTree)->PatchTree, (RTGCPTR)(pRegFrame->eip - 1)); /* eip is pointing to the instruction *after* 'int 3' already */
704 if (pRec && pRec->patch.uState == PATCH_ENABLED)
705 {
706 if (pRec->patch.flags & PATMFL_INT3_REPLACEMENT_BLOCK)
707 {
708 Assert(pRec->patch.opcode == OP_CLI);
709 /* This is a special cli block that was turned into an int 3 patch. We jump to the generated code manually. */
710 pRegFrame->eip = (uint32_t)PATCHCODE_PTR_GC(&pRec->patch);
711 STAM_COUNTER_INC(&pVM->patm.s.StatInt3BlockRun);
712 return VINF_SUCCESS;
713 }
714 else
715 if (pRec->patch.flags & PATMFL_INT3_REPLACEMENT)
716 {
717 uint32_t size, cbOp;
718 DISCPUSTATE cpu;
719
720 /* eip is pointing to the instruction *after* 'int 3' already */
721 pRegFrame->eip = pRegFrame->eip - 1;
722
723 PATM_STAT_RUN_INC(&pRec->patch);
724
725 Log(("PATMHandleInt3PatchTrap found int3 for %s at %VGv\n", patmGetInstructionString(pRec->patch.opcode, 0), pRegFrame->eip));
726
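/* Only instructions whose original bytes were saved and can safely be re-interpreted are handled below; everything else is punted back to full instruction emulation. */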
727 switch(pRec->patch.opcode)
728 {
729 case OP_CPUID:
730 case OP_IRET:
731 break;
732
733 case OP_STR:
734 case OP_SGDT:
735 case OP_SLDT:
736 case OP_SIDT:
737 case OP_LSL:
738 case OP_LAR:
739 case OP_SMSW:
740 case OP_VERW:
741 case OP_VERR:
742 default:
743 PATM_STAT_FAULT_INC(&pRec->patch);
744 pRec->patch.cTraps++;
745 return VINF_EM_RAW_EMULATE_INSTR;
746 }
747
748 cpu.mode = SELMIsSelector32Bit(pVM, pRegFrame->cs, 0) ? CPUMODE_32BIT : CPUMODE_16BIT;
749 if (cpu.mode != CPUMODE_32BIT)
750 {
751 AssertFailed();
752 return VINF_EM_RAW_EMULATE_INSTR;
753 }
754 rc = DISCoreOne(&cpu, (RTUINTPTR)&pRec->patch.aPrivInstr[0], &cbOp);
755 if (VBOX_FAILURE(rc))
756 {
757 Log(("DISCoreOne failed with %Vrc\n", rc));
758 PATM_STAT_FAULT_INC(&pRec->patch);
759 pRec->patch.cTraps++;
760 return VINF_EM_RAW_EMULATE_INSTR;
761 }
762
763 rc = EMInterpretInstructionCPU(pVM, &cpu, pRegFrame, 0 /* not relevant here */, &size);
764 if (rc != VINF_SUCCESS)
765 {
766 Log(("EMInterpretInstructionCPU failed with %Vrc\n", rc));
767 PATM_STAT_FAULT_INC(&pRec->patch);
768 pRec->patch.cTraps++;
769 return VINF_EM_RAW_EMULATE_INSTR;
770 }
771
772 pRegFrame->eip += cpu.opsize;
773 return VINF_SUCCESS;
774 }
775 }
776 return VERR_PATCH_NOT_FOUND;
777}
778
779/**
780 * Adds branch pair to the lookup cache of the particular branch instruction
781 *
782 * @returns VBox status
783 * @param pVM The VM to operate on.
784 * @param pJumpTableGC Pointer to branch instruction lookup cache
785 * @param pBranchTarget Original branch target
786 * @param pRelBranchPatch Relative duplicated function address
787 */
788int PATMAddBranchToLookupCache(PVM pVM, RTGCPTR pJumpTableGC, RTGCPTR pBranchTarget, RTGCUINTPTR pRelBranchPatch)
789{
790 PPATCHJUMPTABLE pJumpTable;
791
792 Log(("PATMAddBranchToLookupCache: Adding (%VGv->%VGv (%VGv)) to table %VGv\n", pBranchTarget, pRelBranchPatch + pVM->patm.s.pPatchMemGC, pRelBranchPatch, pJumpTableGC));
793
794 AssertReturn(PATMIsPatchGCAddr(pVM, pJumpTableGC), VERR_INVALID_PARAMETER);
795
796#ifdef IN_GC
797 pJumpTable = (PPATCHJUMPTABLE) pJumpTableGC;
798#else
799 pJumpTable = (PPATCHJUMPTABLE) (pJumpTableGC - pVM->patm.s.pPatchMemGC + pVM->patm.s.pPatchMemHC);
800#endif
801 Log(("Nr addresses = %d, insert pos = %d\n", pJumpTable->cAddresses, pJumpTable->ulInsertPos));
802 if (pJumpTable->cAddresses < pJumpTable->nrSlots)
803 {
804 uint32_t i;
805
806 for (i=0;i<pJumpTable->nrSlots;i++)
807 {
808 if (pJumpTable->Slot[i].pInstrGC == 0)
809 {
810 pJumpTable->Slot[i].pInstrGC = pBranchTarget;
811 /* Relative address - eases relocation */
812 pJumpTable->Slot[i].pRelPatchGC = pRelBranchPatch;
813 pJumpTable->cAddresses++;
814 break;
815 }
816 }
817 AssertReturn(i < pJumpTable->nrSlots, VERR_INTERNAL_ERROR);
818#ifdef VBOX_WITH_STATISTICS
819 STAM_COUNTER_INC(&pVM->patm.s.StatFunctionLookupInsert);
820 if (pVM->patm.s.StatU32FunctionMaxSlotsUsed < i)
821 pVM->patm.s.StatU32FunctionMaxSlotsUsed = i + 1;
822#endif
823 }
824 else
825 {
826 /* Replace an old entry. */
827 /** @todo replacement strategy isn't really bright. change to something better if required. */
828 Assert(pJumpTable->ulInsertPos < pJumpTable->nrSlots);
829 Assert((pJumpTable->nrSlots & 1) == 0);
830
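/* Round-robin replacement. Note: the wrap-around below masks with (nrSlots - 1), which presumably relies on nrSlots being a power of two; the assert above only checks that it is even. */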
831 pJumpTable->ulInsertPos &= (pJumpTable->nrSlots-1);
832 pJumpTable->Slot[pJumpTable->ulInsertPos].pInstrGC = pBranchTarget;
833 /* Relative address - eases relocation */
834 pJumpTable->Slot[pJumpTable->ulInsertPos].pRelPatchGC = pRelBranchPatch;
835
836 pJumpTable->ulInsertPos = (pJumpTable->ulInsertPos+1) & (pJumpTable->nrSlots-1);
837
838 STAM_COUNTER_INC(&pVM->patm.s.StatFunctionLookupReplace);
839 }
840
841 return VINF_SUCCESS;
842}
843
844/**
845 * Return the name of the patched instruction
846 *
847 * @returns instruction name
848 *
849 * @param opcode DIS instruction opcode
850 * @param fPatchFlags Patch flags
851 */
852PATMDECL(char *)patmGetInstructionString(uint32_t opcode, uint32_t fPatchFlags)
853{
854 char *pszInstr = NULL;
855
856 switch (opcode)
857 {
858 case OP_CLI:
859 pszInstr = "cli";
860 break;
861 case OP_PUSHF:
862 pszInstr = "pushf";
863 break;
864 case OP_POPF:
865 pszInstr = "popf";
866 break;
867 case OP_STR:
868 pszInstr = "str";
869 break;
870 case OP_LSL:
871 pszInstr = "lsl";
872 break;
873 case OP_LAR:
874 pszInstr = "lar";
875 break;
876 case OP_SGDT:
877 pszInstr = "sgdt";
878 break;
879 case OP_SLDT:
880 pszInstr = "sldt";
881 break;
882 case OP_SIDT:
883 pszInstr = "sidt";
884 break;
885 case OP_SMSW:
886 pszInstr = "smsw";
887 break;
888 case OP_VERW:
889 pszInstr = "verw";
890 break;
891 case OP_VERR:
892 pszInstr = "verr";
893 break;
894 case OP_CPUID:
895 pszInstr = "cpuid";
896 break;
897 case OP_JMP:
898 pszInstr = "jmp";
899 break;
900 case OP_JO:
901 pszInstr = "jo";
902 break;
903 case OP_JNO:
904 pszInstr = "jno";
905 break;
906 case OP_JC:
907 pszInstr = "jc";
908 break;
909 case OP_JNC:
910 pszInstr = "jnc";
911 break;
912 case OP_JE:
913 pszInstr = "je";
914 break;
915 case OP_JNE:
916 pszInstr = "jne";
917 break;
918 case OP_JBE:
919 pszInstr = "jbe";
920 break;
921 case OP_JNBE:
922 pszInstr = "jnbe";
923 break;
924 case OP_JS:
925 pszInstr = "js";
926 break;
927 case OP_JNS:
928 pszInstr = "jns";
929 break;
930 case OP_JP:
931 pszInstr = "jp";
932 break;
933 case OP_JNP:
934 pszInstr = "jnp";
935 break;
936 case OP_JL:
937 pszInstr = "jl";
938 break;
939 case OP_JNL:
940 pszInstr = "jnl";
941 break;
942 case OP_JLE:
943 pszInstr = "jle";
944 break;
945 case OP_JNLE:
946 pszInstr = "jnle";
947 break;
948 case OP_JECXZ:
949 pszInstr = "jecxz";
950 break;
951 case OP_LOOP:
952 pszInstr = "loop";
953 break;
954 case OP_LOOPNE:
955 pszInstr = "loopne";
956 break;
957 case OP_LOOPE:
958 pszInstr = "loope";
959 break;
960 case OP_MOV:
961 if (fPatchFlags & PATMFL_IDTHANDLER)
962 {
963 pszInstr = "mov (Int/Trap Handler)";
964 }
965 break;
966 case OP_SYSENTER:
967 pszInstr = "sysenter";
968 break;
969 case OP_PUSH:
970 pszInstr = "push (cs)";
971 break;
972 case OP_CALL:
973 pszInstr = "call";
974 break;
975 case OP_IRET:
976 pszInstr = "iret";
977 break;
978 }
979 return pszInstr;
980}