VirtualBox

source: vbox/trunk/src/VBox/VMM/PATM/VMMAll/PATMAll.cpp@ 9212

Last change on this file since 9212 was 9212, checked in by vboxsync, 17 years ago

Major changes for making sizeof(RTGCPTR) == sizeof(uint64_t).
Introduced RCPTRTYPE for pointers valid in raw mode only (RTGCPTR32).

Disabled by default. Enable by adding VBOX_WITH_64_BITS_GUESTS to your LocalConfig.kmk.
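
For example, a minimal LocalConfig.kmk entry (a sketch; the usual kBuild convention is to define the macro to 1) would be:

    # LocalConfig.kmk
    VBOX_WITH_64_BITS_GUESTS = 1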

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 20.5 KB
/* $Id: PATMAll.cpp 9212 2008-05-29 09:38:38Z vboxsync $ */
/** @file
 * PATM - The Patch Manager, all contexts.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PATM
#include <VBox/patm.h>
#include <VBox/cpum.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include <VBox/em.h>
#include <VBox/err.h>
#include <VBox/selm.h>
#include <VBox/mm.h>
#include "PATMInternal.h"
#include <VBox/vm.h>
#include "PATMA.h"

#include <VBox/log.h>
#include <iprt/assert.h>


/**
 * Load virtualized flags.
 *
 * This function is called from CPUMRawEnter(). It doesn't have to update the
 * IF and IOPL eflags bits; the caller will enforce those to be set and 0
 * respectively.
 *
 * @param   pVM         VM handle.
 * @param   pCtxCore    The cpu context core.
 * @see     pg_raw
 */
PATMDECL(void) PATMRawEnter(PVM pVM, PCPUMCTXCORE pCtxCore)
{
    bool fPatchCode = PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtxCore->eip);

    /*
     * Currently we don't bother to check whether PATM is enabled or not.
     * For all cases where it isn't, IOPL will be safe and IF will be set.
     */
    register uint32_t efl = pCtxCore->eflags.u32;
    CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = efl & PATM_VIRTUAL_FLAGS_MASK;
    AssertMsg((efl & X86_EFL_IF) || PATMShouldUseRawMode(pVM, (RTGCPTR32)pCtxCore->eip), ("X86_EFL_IF is clear and PATM is disabled! (eip=%VGv eflags=%08x fPATM=%d pPATMGC=%VGv-%VGv\n", pCtxCore->eip, pCtxCore->eflags.u32, PATMIsEnabled(pVM), pVM->patm.s.pPatchMemGC, pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem));

    AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || fPatchCode, ("fPIF=%d eip=%VGv\n", CTXSUFF(pVM->patm.s.pGCState)->fPIF, pCtxCore->eip));

    efl &= ~PATM_VIRTUAL_FLAGS_MASK;
    efl |= X86_EFL_IF;
    pCtxCore->eflags.u32 = efl;

#ifdef IN_RING3
#ifdef PATM_EMULATE_SYSENTER
    PCPUMCTX pCtx;
    int rc;

    /* Check if the sysenter handler has changed. */
    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    AssertRC(rc);
    if (    rc == VINF_SUCCESS
        &&  pCtx->SysEnter.cs != 0
        &&  pCtx->SysEnter.eip != 0
       )
    {
        if (pVM->patm.s.pfnSysEnterGC != (RTGCPTR)pCtx->SysEnter.eip)
        {
            pVM->patm.s.pfnSysEnterPatchGC = 0;
            pVM->patm.s.pfnSysEnterGC = 0;

            Log2(("PATMRawEnter: installing sysenter patch for %VGv\n", pCtx->SysEnter.eip));
            pVM->patm.s.pfnSysEnterPatchGC = PATMR3QueryPatchGCPtr(pVM, pCtx->SysEnter.eip);
            if (pVM->patm.s.pfnSysEnterPatchGC == 0)
            {
                rc = PATMR3InstallPatch(pVM, pCtx->SysEnter.eip, PATMFL_SYSENTER | PATMFL_CODE32);
                if (rc == VINF_SUCCESS)
                {
                    pVM->patm.s.pfnSysEnterPatchGC = PATMR3QueryPatchGCPtr(pVM, pCtx->SysEnter.eip);
                    pVM->patm.s.pfnSysEnterGC = (RTGCPTR)pCtx->SysEnter.eip;
                    Assert(pVM->patm.s.pfnSysEnterPatchGC);
                }
            }
            else
                pVM->patm.s.pfnSysEnterGC = (RTGCPTR)pCtx->SysEnter.eip;
        }
    }
    else
    {
        pVM->patm.s.pfnSysEnterPatchGC = 0;
        pVM->patm.s.pfnSysEnterGC = 0;
    }
#endif
#endif
}
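
/*
 * Illustrative note (not part of the original file): inside raw mode the
 * guest's IF/IOPL live in pGCState->uVMFlags while the real EFLAGS.IF is
 * forced to 1, so a hypothetical caller would bracket raw-mode execution
 * roughly like this:
 *
 *     PATMRawEnter(pVM, pCtxCore);        // stash guest IF/IOPL, force IF=1
 *     rc = RunRawModeGuest(pVM);          // hypothetical raw-mode executor
 *     PATMRawLeave(pVM, pCtxCore, rc);    // restore the virtualized flags
 */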


/**
 * Restores virtualized flags.
 *
 * This function is called from CPUMRawLeave(). It will update the eflags register.
 *
 * @note Only here are we allowed to switch back to guest code (without a special reason such as a trap in patch code)!!
 *
 * @param   pVM         VM handle.
 * @param   pCtxCore    The cpu context core.
 * @param   rawRC       Raw mode return code.
 * @see     @ref pg_raw
 */
PATMDECL(void) PATMRawLeave(PVM pVM, PCPUMCTXCORE pCtxCore, int rawRC)
{
    bool fPatchCode = PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtxCore->eip);
    /*
     * We will only be called if PATMRawEnter was previously called.
     */
    register uint32_t efl = pCtxCore->eflags.u32;
    efl = (efl & ~PATM_VIRTUAL_FLAGS_MASK) | (CTXSUFF(pVM->patm.s.pGCState)->uVMFlags & PATM_VIRTUAL_FLAGS_MASK);
    pCtxCore->eflags.u32 = efl;
    CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = X86_EFL_IF;

    AssertReleaseMsg((efl & X86_EFL_IF) || fPatchCode || rawRC == VINF_PATM_PENDING_IRQ_AFTER_IRET || VBOX_FAILURE(rawRC), ("Inconsistent state at %VGv rc=%Vrc\n", pCtxCore->eip, rawRC));
    AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || fPatchCode || VBOX_FAILURE(rawRC), ("fPIF=%d eip=%VGv rc=%Vrc\n", CTXSUFF(pVM->patm.s.pGCState)->fPIF, pCtxCore->eip, rawRC));

#ifdef IN_RING3
    if (    (efl & X86_EFL_IF)
        &&  fPatchCode
       )
    {
        if (    rawRC < VINF_PATM_LEAVEGC_FIRST
            ||  rawRC > VINF_PATM_LEAVEGC_LAST)
        {
            /*
             * Golden rules:
             * - Don't interrupt special patch streams that replace special instructions
             * - Don't break instruction fusing (sti, pop ss, mov ss)
             * - Don't go back to an instruction that has been overwritten by a patch jump
             * - Don't interrupt an idt handler on entry (1st instruction); technically incorrect
             */
            if (CTXSUFF(pVM->patm.s.pGCState)->fPIF == 1)   /* consistent patch instruction state */
            {
                PATMTRANSSTATE enmState;
                RTGCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtxCore->eip, &enmState);

                AssertRelease(pOrgInstrGC);

                Assert(enmState != PATMTRANS_OVERWRITTEN);
                if (enmState == PATMTRANS_SAFE)
                {
                    Assert(!PATMFindActivePatchByEntrypoint(pVM, pOrgInstrGC));
                    Log(("Switchback from %VGv to %VGv (Psp=%x)\n", pCtxCore->eip, pOrgInstrGC, CTXSUFF(pVM->patm.s.pGCState)->Psp));
                    STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBack);
                    pCtxCore->eip = pOrgInstrGC;
                    fPatchCode = false; /* to reset the stack ptr */

                    CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0;  /* Reset this pointer; safe, otherwise the state would be PATMTRANS_INHIBITIRQ. */
                }
                else
                {
                    LogFlow(("Patch address %VGv can't be interrupted (state=%d)!\n", pCtxCore->eip, enmState));
                    STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBackFail);
                }
            }
            else
            {
                LogFlow(("Patch address %VGv can't be interrupted (fPIF=%d)!\n", pCtxCore->eip, CTXSUFF(pVM->patm.s.pGCState)->fPIF));
                STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBackFail);
            }
        }
    }
#else  /* !IN_RING3 */
    AssertMsgFailed(("!IN_RING3"));
#endif /* !IN_RING3 */

    if (!fPatchCode)
    {
        if (CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts == (RTGCPTR)pCtxCore->eip)
        {
            EMSetInhibitInterruptsPC(pVM, pCtxCore->eip);
        }
        CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0;

        /* Reset the stack pointer to the top of the stack. */
#ifdef DEBUG
        if (CTXSUFF(pVM->patm.s.pGCState)->Psp != PATM_STACK_SIZE)
        {
            LogFlow(("PATMRawLeave: Reset PATM stack (Psp = %x)\n", CTXSUFF(pVM->patm.s.pGCState)->Psp));
        }
#endif
        CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
    }
}

/**
 * Get the EFLAGS.
 * This is a worker for CPUMRawGetEFlags().
 *
 * @returns The eflags.
 * @param   pVM         The VM handle.
 * @param   pCtxCore    The context core.
 */
PATMDECL(uint32_t) PATMRawGetEFlags(PVM pVM, PCCPUMCTXCORE pCtxCore)
{
    uint32_t efl = pCtxCore->eflags.u32;
    efl &= ~PATM_VIRTUAL_FLAGS_MASK;
    efl |= pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & PATM_VIRTUAL_FLAGS_MASK;
    return efl;
}

/**
 * Updates the EFLAGS.
 * This is a worker for CPUMRawSetEFlags().
 *
 * @param   pVM         The VM handle.
 * @param   pCtxCore    The context core.
 * @param   efl         The new EFLAGS value.
 */
PATMDECL(void) PATMRawSetEFlags(PVM pVM, PCPUMCTXCORE pCtxCore, uint32_t efl)
{
    pVM->patm.s.CTXSUFF(pGCState)->uVMFlags = efl & PATM_VIRTUAL_FLAGS_MASK;
    efl &= ~PATM_VIRTUAL_FLAGS_MASK;
    efl |= X86_EFL_IF;
    pCtxCore->eflags.u32 = efl;
}
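
/*
 * Illustrative note (not part of the original file): these two workers keep
 * the virtualized flag bits round-trip consistent, i.e. a hypothetical
 * caller should always observe:
 *
 *     PATMRawSetEFlags(pVM, pCtxCore, efl);
 *     Assert(   (PATMRawGetEFlags(pVM, pCtxCore) & PATM_VIRTUAL_FLAGS_MASK)
 *            == (efl & PATM_VIRTUAL_FLAGS_MASK));
 *
 * while the real pCtxCore->eflags always carries X86_EFL_IF set.
 */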

/**
 * Check if we must use raw mode (patch code being executed).
 *
 * @returns true if raw mode is required, false otherwise.
 * @param   pVM         VM handle.
 * @param   pAddrGC     Guest context address.
 */
PATMDECL(bool) PATMShouldUseRawMode(PVM pVM, RTGCPTR pAddrGC)
{
    return (   PATMIsEnabled(pVM)
            && ((pAddrGC >= (RTGCPTR)pVM->patm.s.pPatchMemGC && pAddrGC < (RTGCPTR)pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem))) ? true : false;
}

/**
 * Returns the guest context pointer to the GC state structure.
 *
 * @returns GC pointer to the PATM GC state structure.
 * @param   pVM         The VM to operate on.
 */
PATMDECL(RCPTRTYPE(PPATMGCSTATE)) PATMQueryGCState(PVM pVM)
{
    return pVM->patm.s.pGCStateGC;
}

/**
 * Checks whether the GC address is part of our patch region.
 *
 * @returns true if the address lies within the patch region, false otherwise.
 * @param   pVM         The VM to operate on.
 * @param   pAddrGC     Guest context address.
 */
PATMDECL(bool) PATMIsPatchGCAddr(PVM pVM, RTGCPTR pAddrGC)
{
    RTGCPTR32 pAddrGC32 = (RTGCPTR32)pAddrGC;

    return (PATMIsEnabled(pVM) && pAddrGC32 >= (RTGCPTR)pVM->patm.s.pPatchMemGC && pAddrGC32 < (RTGCPTR)pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem) ? true : false;
}

/**
 * Set parameters for pending MMIO patch operation.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   GCPhys      MMIO physical address.
 * @param   pCachedData GC pointer to cached data.
 */
PATMDECL(int) PATMSetMMIOPatchInfo(PVM pVM, RTGCPHYS GCPhys, RTGCPTR pCachedData)
{
    pVM->patm.s.mmio.GCPhys = GCPhys;
    pVM->patm.s.mmio.pCachedData = (RTGCPTR32)pCachedData;

    return VINF_SUCCESS;
}

/**
 * Checks if the interrupt flag is enabled or not.
 *
 * @returns true if it's enabled.
 * @returns false if it's disabled.
 *
 * @param   pVM         The VM handle.
 */
PATMDECL(bool) PATMAreInterruptsEnabled(PVM pVM)
{
    PCPUMCTX pCtx = 0;
    int rc;

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    AssertRC(rc);

    return PATMAreInterruptsEnabledByCtxCore(pVM, CPUMCTX2CORE(pCtx));
}

/**
 * Checks if the interrupt flag is enabled or not.
 *
 * @returns true if it's enabled.
 * @returns false if it's disabled.
 *
 * @param   pVM         The VM handle.
 * @param   pCtxCore    CPU context.
 */
PATMDECL(bool) PATMAreInterruptsEnabledByCtxCore(PVM pVM, PCPUMCTXCORE pCtxCore)
{
    if (PATMIsEnabled(pVM))
    {
        if (PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtxCore->eip))
            return false;
    }
    return !!(pCtxCore->eflags.u32 & X86_EFL_IF);
}
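
/*
 * Illustrative note (not part of the original file): while eip is inside the
 * patch region the real EFLAGS.IF has been forced to 1 by PATMRawEnter, so
 * the hardware flag says nothing about the guest's view; reporting
 * "disabled" is the conservative answer there, and the guest's actual IF
 * lives in pGCState->uVMFlags (see PATMRawGetEFlags above).
 */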

/**
 * Check if the instruction is patched as a duplicated function.
 *
 * @returns Patch record if so, NULL otherwise.
 * @param   pVM         The VM to operate on.
 * @param   pInstrGC    Guest context pointer to the instruction.
 */
PATMDECL(PPATMPATCHREC) PATMQueryFunctionPatch(PVM pVM, RTGCPTR pInstrGC)
{
    PPATMPATCHREC pRec;

    pRec = (PPATMPATCHREC)RTAvloGCPtrGet(&CTXSUFF(pVM->patm.s.PatchLookupTree)->PatchTree, pInstrGC);
    if (    pRec
        && (pRec->patch.uState == PATCH_ENABLED)
        && (pRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CALLABLE_AS_FUNCTION))
       )
        return pRec;
    return 0;
}

/**
 * Checks if the int 3 was caused by a patched instruction.
 *
 * @returns true if so, false otherwise.
 *
 * @param   pVM         The VM handle.
 * @param   pInstrGC    Instruction pointer.
 * @param   pOpcode     Original instruction opcode (out, optional).
 * @param   pSize       Original instruction size (out, optional).
 */
PATMDECL(bool) PATMIsInt3Patch(PVM pVM, RTGCPTR pInstrGC, uint32_t *pOpcode, uint32_t *pSize)
{
    PPATMPATCHREC pRec;

    pRec = (PPATMPATCHREC)RTAvloGCPtrGet(&CTXSUFF(pVM->patm.s.PatchLookupTree)->PatchTree, pInstrGC);
    if (    pRec
        && (pRec->patch.uState == PATCH_ENABLED)
        && (pRec->patch.flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
       )
    {
        if (pOpcode) *pOpcode = pRec->patch.opcode;
        if (pSize)   *pSize   = pRec->patch.cbPrivInstr;
        return true;
    }
    return false;
}
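
/*
 * Illustrative note (not part of the original file): a hypothetical #BP trap
 * handler would use this roughly as follows to tell a PATM int 3 replacement
 * apart from a genuine guest breakpoint:
 *
 *     uint32_t opcode, cb;
 *     if (PATMIsInt3Patch(pVM, pCtxCore->eip, &opcode, &cb))
 *         // emulate the original instruction (opcode, cb bytes) instead of
 *         // reflecting the #BP to the guest
 */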

/**
 * Emulate sysenter, sysexit and syscall instructions.
 *
 * @returns VBox status.
 *
 * @param   pVM         The VM handle.
 * @param   pRegFrame   The relevant core context.
 * @param   pCpu        Disassembly context.
 */
PATMDECL(int) PATMSysCall(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
{
    PCPUMCTX pCtx;
    int rc;

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    AssertRCReturn(rc, VINF_EM_RAW_RING_SWITCH);

    if (pCpu->pCurInstr->opcode == OP_SYSENTER)
    {
        if (    pCtx->SysEnter.cs == 0
            ||  pRegFrame->eflags.Bits.u1VM
            ||  (pRegFrame->cs & X86_SEL_RPL) != 3
            ||  pVM->patm.s.pfnSysEnterPatchGC == 0
            ||  pVM->patm.s.pfnSysEnterGC != (RTGCPTR32)pCtx->SysEnter.eip
            ||  !(PATMRawGetEFlags(pVM, pRegFrame) & X86_EFL_IF))
            goto end;

        Log2(("PATMSysCall: sysenter from %VGv to %VGv\n", pRegFrame->eip, pVM->patm.s.pfnSysEnterPatchGC));
        /** @todo the base and limit are forced to 0 & 4G-1 resp. We assume the selector is wide open here. */
        /** @note The Intel manual suggests that the OS is responsible for this. */
        pRegFrame->cs = (pCtx->SysEnter.cs & ~X86_SEL_RPL) | 1;
        pRegFrame->eip = /** @todo ugly conversion! */(uint32_t)pVM->patm.s.pfnSysEnterPatchGC;
        pRegFrame->ss = pRegFrame->cs + 8;      /* SysEnter.cs + 8 */
        pRegFrame->esp = pCtx->SysEnter.esp;
        pRegFrame->eflags.u32 &= ~(X86_EFL_VM|X86_EFL_RF);
        pRegFrame->eflags.u32 |= X86_EFL_IF;

        /* Turn off interrupts. */
        pVM->patm.s.CTXSUFF(pGCState)->uVMFlags &= ~X86_EFL_IF;

        STAM_COUNTER_INC(&pVM->patm.s.StatSysEnter);

        return VINF_SUCCESS;
    }
    else
    if (pCpu->pCurInstr->opcode == OP_SYSEXIT)
    {
        if (    pCtx->SysEnter.cs == 0
            ||  (pRegFrame->cs & X86_SEL_RPL) != 1
            ||  pRegFrame->eflags.Bits.u1VM
            ||  !(PATMRawGetEFlags(pVM, pRegFrame) & X86_EFL_IF))
            goto end;

        Log2(("PATMSysCall: sysexit from %VGv to %VGv\n", pRegFrame->eip, pRegFrame->edx));

        pRegFrame->cs = ((pCtx->SysEnter.cs + 16) & ~X86_SEL_RPL) | 3;
        pRegFrame->eip = pRegFrame->edx;
        pRegFrame->ss = pRegFrame->cs + 8;      /* SysEnter.cs + 24 */
        pRegFrame->esp = pRegFrame->ecx;

        STAM_COUNTER_INC(&pVM->patm.s.StatSysExit);

        return VINF_SUCCESS;
    }
    else
    if (pCpu->pCurInstr->opcode == OP_SYSCALL)
    {
        /** @todo implement syscall */
    }
    else
    if (pCpu->pCurInstr->opcode == OP_SYSRET)
    {
        /** @todo implement sysret */
    }

end:
    return VINF_EM_RAW_RING_SWITCH;
}
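
/*
 * Illustrative note (not part of the original file): per the Intel SDM,
 * SYSENTER/SYSEXIT assume a fixed flat-segment layout anchored at
 * IA32_SYSENTER_CS:
 *
 *     ring-0 CS = SYSENTER_CS         ring-0 SS = SYSENTER_CS + 8
 *     ring-3 CS = SYSENTER_CS + 16    ring-3 SS = SYSENTER_CS + 24
 *
 * which is why the emulation above derives cs/ss with the +8/+16 offsets
 * and only patches up the RPL bits.
 */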

/**
 * Adds branch pair to the lookup cache of the particular branch instruction.
 *
 * @returns VBox status.
 * @param   pVM             The VM to operate on.
 * @param   pJumpTableGC    Pointer to branch instruction lookup cache.
 * @param   pBranchTarget   Original branch target.
 * @param   pRelBranchPatch Relative duplicated function address.
 */
PATMDECL(int) PATMAddBranchToLookupCache(PVM pVM, RTGCPTR pJumpTableGC, RTGCPTR pBranchTarget, RTGCUINTPTR pRelBranchPatch)
{
    PPATCHJUMPTABLE pJumpTable;

    Log(("PATMAddBranchToLookupCache: Adding (%VGv->%VGv (%VGv)) to table %VGv\n", pBranchTarget, pRelBranchPatch + pVM->patm.s.pPatchMemGC, pRelBranchPatch, pJumpTableGC));

    AssertReturn(PATMIsPatchGCAddr(pVM, pJumpTableGC), VERR_INVALID_PARAMETER);

#ifdef IN_GC
    pJumpTable = (PPATCHJUMPTABLE)pJumpTableGC;
#else
    pJumpTable = (PPATCHJUMPTABLE)(pJumpTableGC - pVM->patm.s.pPatchMemGC + pVM->patm.s.pPatchMemHC);
#endif
    Log(("Nr addresses = %d, insert pos = %d\n", pJumpTable->cAddresses, pJumpTable->ulInsertPos));
    if (pJumpTable->cAddresses < pJumpTable->nrSlots)
    {
        uint32_t i;

        for (i = 0; i < pJumpTable->nrSlots; i++)
        {
            if (pJumpTable->Slot[i].pInstrGC == 0)
            {
                pJumpTable->Slot[i].pInstrGC = pBranchTarget;
                /* Relative address - eases relocation */
                pJumpTable->Slot[i].pRelPatchGC = pRelBranchPatch;
                pJumpTable->cAddresses++;
                break;
            }
        }
        AssertReturn(i < pJumpTable->nrSlots, VERR_INTERNAL_ERROR);
#ifdef VBOX_WITH_STATISTICS
        STAM_COUNTER_INC(&pVM->patm.s.StatFunctionLookupInsert);
        if (pVM->patm.s.StatU32FunctionMaxSlotsUsed < i)
            pVM->patm.s.StatU32FunctionMaxSlotsUsed = i + 1;
#endif
    }
    else
    {
        /* Replace an old entry. */
        /** @todo replacement strategy isn't really bright. change to something better if required. */
        Assert(pJumpTable->ulInsertPos < pJumpTable->nrSlots);
        Assert((pJumpTable->nrSlots & 1) == 0);

        pJumpTable->ulInsertPos &= (pJumpTable->nrSlots - 1);
        pJumpTable->Slot[pJumpTable->ulInsertPos].pInstrGC = pBranchTarget;
        /* Relative address - eases relocation */
        pJumpTable->Slot[pJumpTable->ulInsertPos].pRelPatchGC = pRelBranchPatch;

        pJumpTable->ulInsertPos = (pJumpTable->ulInsertPos + 1) & (pJumpTable->nrSlots - 1);

        STAM_COUNTER_INC(&pVM->patm.s.StatFunctionLookupReplace);
    }

    return VINF_SUCCESS;
}
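
/*
 * Illustrative note (not part of the original file): the wrap-around above,
 *
 *     ulInsertPos = (ulInsertPos + 1) & (nrSlots - 1);
 *
 * only behaves as a ring buffer when nrSlots is a power of two (e.g. with
 * nrSlots == 8 the mask 7 wraps 7 -> 0); merely being even, which is all the
 * Assert checks, is not sufficient in general.
 */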


/**
 * Return the name of the patched instruction.
 *
 * @returns Instruction name.
 *
 * @param   opcode      DIS instruction opcode.
 * @param   fPatchFlags Patch flags.
 */
PATMDECL(const char *) patmGetInstructionString(uint32_t opcode, uint32_t fPatchFlags)
{
    const char *pszInstr = NULL;

    switch (opcode)
    {
        case OP_CLI:
            pszInstr = "cli";
            break;
        case OP_PUSHF:
            pszInstr = "pushf";
            break;
        case OP_POPF:
            pszInstr = "popf";
            break;
        case OP_STR:
            pszInstr = "str";
            break;
        case OP_LSL:
            pszInstr = "lsl";
            break;
        case OP_LAR:
            pszInstr = "lar";
            break;
        case OP_SGDT:
            pszInstr = "sgdt";
            break;
        case OP_SLDT:
            pszInstr = "sldt";
            break;
        case OP_SIDT:
            pszInstr = "sidt";
            break;
        case OP_SMSW:
            pszInstr = "smsw";
            break;
        case OP_VERW:
            pszInstr = "verw";
            break;
        case OP_VERR:
            pszInstr = "verr";
            break;
        case OP_CPUID:
            pszInstr = "cpuid";
            break;
        case OP_JMP:
            pszInstr = "jmp";
            break;
        case OP_JO:
            pszInstr = "jo";
            break;
        case OP_JNO:
            pszInstr = "jno";
            break;
        case OP_JC:
            pszInstr = "jc";
            break;
        case OP_JNC:
            pszInstr = "jnc";
            break;
        case OP_JE:
            pszInstr = "je";
            break;
        case OP_JNE:
            pszInstr = "jne";
            break;
        case OP_JBE:
            pszInstr = "jbe";
            break;
        case OP_JNBE:
            pszInstr = "jnbe";
            break;
        case OP_JS:
            pszInstr = "js";
            break;
        case OP_JNS:
            pszInstr = "jns";
            break;
        case OP_JP:
            pszInstr = "jp";
            break;
        case OP_JNP:
            pszInstr = "jnp";
            break;
        case OP_JL:
            pszInstr = "jl";
            break;
        case OP_JNL:
            pszInstr = "jnl";
            break;
        case OP_JLE:
            pszInstr = "jle";
            break;
        case OP_JNLE:
            pszInstr = "jnle";
            break;
        case OP_JECXZ:
            pszInstr = "jecxz";
            break;
        case OP_LOOP:
            pszInstr = "loop";
            break;
        case OP_LOOPNE:
            pszInstr = "loopne";
            break;
        case OP_LOOPE:
            pszInstr = "loope";
            break;
        case OP_MOV:
            if (fPatchFlags & PATMFL_IDTHANDLER)
            {
                pszInstr = "mov (Int/Trap Handler)";
            }
            break;
        case OP_SYSENTER:
            pszInstr = "sysenter";
            break;
        case OP_PUSH:
            pszInstr = "push (cs)";
            break;
        case OP_CALL:
            pszInstr = "call";
            break;
        case OP_IRET:
            pszInstr = "iret";
            break;
    }
    return pszInstr;
}