VirtualBox

source: vbox/trunk/src/VBox/VMM/PATM/VMMAll/PATMAll.cpp@ 17130

Last change on this file since 17130 was 13832, checked in by vboxsync, 16 years ago

IN_GC -> IN_RC.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 20.4 KB
Line 
1/* $Id: PATMAll.cpp 13832 2008-11-05 02:01:12Z vboxsync $ */
2/** @file
3 * PATM - The Patch Manager, all contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_PATM
26#include <VBox/patm.h>
27#include <VBox/cpum.h>
28#include <VBox/dis.h>
29#include <VBox/disopcode.h>
30#include <VBox/em.h>
31#include <VBox/err.h>
32#include <VBox/selm.h>
33#include <VBox/mm.h>
34#include "PATMInternal.h"
35#include <VBox/vm.h>
36#include "PATMA.h"
37
38#include <VBox/log.h>
39#include <iprt/assert.h>
40
41
42/**
43 * Load virtualized flags.
44 *
45 * This function is called from CPUMRawEnter(). It doesn't have to update the
46 * IF and IOPL eflags bits, the caller will enforce those to set and 0 repectively.
47 *
48 * @param pVM VM handle.
49 * @param pCtxCore The cpu context core.
50 * @see pg_raw
51 */
52VMMDECL(void) PATMRawEnter(PVM pVM, PCPUMCTXCORE pCtxCore)
53{
54 bool fPatchCode = PATMIsPatchGCAddr(pVM, (RTRCPTR)pCtxCore->eip);
55
56 /*
57 * Currently we don't bother to check whether PATM is enabled or not.
58 * For all cases where it isn't, IOPL will be safe and IF will be set.
59 */
60 register uint32_t efl = pCtxCore->eflags.u32;
61 CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = efl & PATM_VIRTUAL_FLAGS_MASK;
62 AssertMsg((efl & X86_EFL_IF) || PATMShouldUseRawMode(pVM, (RTRCPTR)pCtxCore->eip), ("X86_EFL_IF is clear and PATM is disabled! (eip=%RRv eflags=%08x fPATM=%d pPATMGC=%RRv-%RRv\n", pCtxCore->eip, pCtxCore->eflags.u32, PATMIsEnabled(pVM), pVM->patm.s.pPatchMemGC, pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem));
63
64 AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || fPatchCode, ("fPIF=%d eip=%RRv\n", CTXSUFF(pVM->patm.s.pGCState)->fPIF, pCtxCore->eip));
65
66 efl &= ~PATM_VIRTUAL_FLAGS_MASK;
67 efl |= X86_EFL_IF;
68 pCtxCore->eflags.u32 = efl;
69
70#ifdef IN_RING3
71#ifdef PATM_EMULATE_SYSENTER
72 PCPUMCTX pCtx;
73
74 /* Check if the sysenter handler has changed. */
75 pCtx = CPUMQueryGuestCtxPtr(pVM);
76 if ( pCtx->SysEnter.cs != 0
77 && pCtx->SysEnter.eip != 0
78 )
79 {
80 if (pVM->patm.s.pfnSysEnterGC != (RTRCPTR)pCtx->SysEnter.eip)
81 {
82 pVM->patm.s.pfnSysEnterPatchGC = 0;
83 pVM->patm.s.pfnSysEnterGC = 0;
84
85 Log2(("PATMRawEnter: installing sysenter patch for %RRv\n", pCtx->SysEnter.eip));
86 pVM->patm.s.pfnSysEnterPatchGC = PATMR3QueryPatchGCPtr(pVM, pCtx->SysEnter.eip);
87 if (pVM->patm.s.pfnSysEnterPatchGC == 0)
88 {
89 rc = PATMR3InstallPatch(pVM, pCtx->SysEnter.eip, PATMFL_SYSENTER | PATMFL_CODE32);
90 if (rc == VINF_SUCCESS)
91 {
92 pVM->patm.s.pfnSysEnterPatchGC = PATMR3QueryPatchGCPtr(pVM, pCtx->SysEnter.eip);
93 pVM->patm.s.pfnSysEnterGC = (RTRCPTR)pCtx->SysEnter.eip;
94 Assert(pVM->patm.s.pfnSysEnterPatchGC);
95 }
96 }
97 else
98 pVM->patm.s.pfnSysEnterGC = (RTRCPTR)pCtx->SysEnter.eip;
99 }
100 }
101 else
102 {
103 pVM->patm.s.pfnSysEnterPatchGC = 0;
104 pVM->patm.s.pfnSysEnterGC = 0;
105 }
106#endif
107#endif
108}
109
110
/**
 * Restores virtualized flags.
 *
 * This function is called from CPUMRawLeave(). It will update the eflags register.
 *
 ** @note Only here we are allowed to switch back to guest code (without a special reason such as a trap in patch code)!!
 *
 * @param   pVM         VM handle.
 * @param   pCtxCore    The cpu context core.
 * @param   rawRC       Raw mode return code.
 * @see     @ref pg_raw
 */
VMMDECL(void) PATMRawLeave(PVM pVM, PCPUMCTXCORE pCtxCore, int rawRC)
{
    bool fPatchCode = PATMIsPatchGCAddr(pVM, (RTRCPTR)pCtxCore->eip);
    /*
     * We will only be called if PATMRawEnter was previously called.
     */
    /* Merge the virtualized flag bits (stashed by PATMRawEnter) back into the real eflags. */
    register uint32_t efl = pCtxCore->eflags.u32;
    efl = (efl & ~PATM_VIRTUAL_FLAGS_MASK) | (CTXSUFF(pVM->patm.s.pGCState)->uVMFlags & PATM_VIRTUAL_FLAGS_MASK);
    pCtxCore->eflags.u32 = efl;
    /* Reset the GC state flags to the neutral "interrupts enabled" value. */
    CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = X86_EFL_IF;

    AssertReleaseMsg((efl & X86_EFL_IF) || fPatchCode || rawRC == VINF_PATM_PENDING_IRQ_AFTER_IRET || RT_FAILURE(rawRC), ("Inconsistent state at %RRv rc=%Rrc\n", pCtxCore->eip, rawRC));
    AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || fPatchCode || RT_FAILURE(rawRC), ("fPIF=%d eip=%RRv rc=%Rrc\n", CTXSUFF(pVM->patm.s.pGCState)->fPIF, pCtxCore->eip, rawRC));

#ifdef IN_RING3
    /* Try to switch eip from patch code back to the original guest instruction. */
    if (   (efl & X86_EFL_IF)
        && fPatchCode
       )
    {
        /* Skip switch-back for dedicated PATM leave-GC return codes. */
        if (   rawRC < VINF_PATM_LEAVEGC_FIRST
            || rawRC > VINF_PATM_LEAVEGC_LAST)
        {
            /*
             * Golden rules:
             * - Don't interrupt special patch streams that replace special instructions
             * - Don't break instruction fusing (sti, pop ss, mov ss)
             * - Don't go back to an instruction that has been overwritten by a patch jump
             * - Don't interrupt an idt handler on entry (1st instruction); technically incorrect
             *
             */
            if (CTXSUFF(pVM->patm.s.pGCState)->fPIF == 1) /* consistent patch instruction state */
            {
                PATMTRANSSTATE enmState;
                RTRCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtxCore->eip, &enmState);

                AssertRelease(pOrgInstrGC);

                Assert(enmState != PATMTRANS_OVERWRITTEN);
                if (enmState == PATMTRANS_SAFE)
                {
                    Assert(!PATMFindActivePatchByEntrypoint(pVM, pOrgInstrGC));
                    Log(("Switchback from %RRv to %RRv (Psp=%x)\n", pCtxCore->eip, pOrgInstrGC, CTXSUFF(pVM->patm.s.pGCState)->Psp));
                    STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBack);
                    pCtxCore->eip = pOrgInstrGC;
                    fPatchCode = false; /* to reset the stack ptr */

                    CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0;  /* reset this pointer; safe otherwise the state would be PATMTRANS_INHIBITIRQ */
                }
                else
                {
                    LogFlow(("Patch address %RRv can't be interrupted (state=%d)!\n", pCtxCore->eip, enmState));
                    STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBackFail);
                }
            }
            else
            {
                LogFlow(("Patch address %RRv can't be interrupted (fPIF=%d)!\n", pCtxCore->eip, CTXSUFF(pVM->patm.s.pGCState)->fPIF));
                STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBackFail);
            }
        }
    }
#else /* !IN_RING3 */
    AssertMsgFailed(("!IN_RING3"));
#endif /* !IN_RING3 */

    /* Not in (or no longer in) patch code: hand inhibit-interrupts info to EM and reset the PATM stack. */
    if (!fPatchCode)
    {
        if (CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts == (RTRCPTR)pCtxCore->eip)
        {
            EMSetInhibitInterruptsPC(pVM, pCtxCore->eip);
        }
        CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0;

        /* Reset the stack pointer to the top of the stack. */
#ifdef DEBUG
        if (CTXSUFF(pVM->patm.s.pGCState)->Psp != PATM_STACK_SIZE)
        {
            LogFlow(("PATMRawLeave: Reset PATM stack (Psp = %x)\n", CTXSUFF(pVM->patm.s.pGCState)->Psp));
        }
#endif
        CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
    }
}
206
207/**
208 * Get the EFLAGS.
209 * This is a worker for CPUMRawGetEFlags().
210 *
211 * @returns The eflags.
212 * @param pVM The VM handle.
213 * @param pCtxCore The context core.
214 */
215VMMDECL(uint32_t) PATMRawGetEFlags(PVM pVM, PCCPUMCTXCORE pCtxCore)
216{
217 uint32_t efl = pCtxCore->eflags.u32;
218 efl &= ~PATM_VIRTUAL_FLAGS_MASK;
219 efl |= pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & PATM_VIRTUAL_FLAGS_MASK;
220 return efl;
221}
222
223/**
224 * Updates the EFLAGS.
225 * This is a worker for CPUMRawSetEFlags().
226 *
227 * @param pVM The VM handle.
228 * @param pCtxCore The context core.
229 * @param efl The new EFLAGS value.
230 */
231VMMDECL(void) PATMRawSetEFlags(PVM pVM, PCPUMCTXCORE pCtxCore, uint32_t efl)
232{
233 pVM->patm.s.CTXSUFF(pGCState)->uVMFlags = efl & PATM_VIRTUAL_FLAGS_MASK;
234 efl &= ~PATM_VIRTUAL_FLAGS_MASK;
235 efl |= X86_EFL_IF;
236 pCtxCore->eflags.u32 = efl;
237}
238
239/**
240 * Check if we must use raw mode (patch code being executed)
241 *
242 * @param pVM VM handle.
243 * @param pAddrGC Guest context address
244 */
245VMMDECL(bool) PATMShouldUseRawMode(PVM pVM, RTRCPTR pAddrGC)
246{
247 return ( PATMIsEnabled(pVM)
248 && ((pAddrGC >= (RTRCPTR)pVM->patm.s.pPatchMemGC && pAddrGC < (RTRCPTR)((RTRCUINTPTR)pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem)))) ? true : false;
249}
250
251/**
252 * Returns the guest context pointer and size of the GC context structure
253 *
254 * @returns VBox status code.
255 * @param pVM The VM to operate on.
256 */
257VMMDECL(RCPTRTYPE(PPATMGCSTATE)) PATMQueryGCState(PVM pVM)
258{
259 return pVM->patm.s.pGCStateGC;
260}
261
262/**
263 * Checks whether the GC address is part of our patch region
264 *
265 * @returns VBox status code.
266 * @param pVM The VM to operate on.
267 * @param pAddrGC Guest context address
268 */
269VMMDECL(bool) PATMIsPatchGCAddr(PVM pVM, RTRCPTR pAddrGC)
270{
271 return (PATMIsEnabled(pVM) && pAddrGC >= pVM->patm.s.pPatchMemGC && pAddrGC < (RTRCPTR)((RTRCUINTPTR)pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem)) ? true : false;
272}
273
274/**
275 * Set parameters for pending MMIO patch operation
276 *
277 * @returns VBox status code.
278 * @param pDevIns Device instance.
279 * @param GCPhys MMIO physical address
280 * @param pCachedData GC pointer to cached data
281 */
282VMMDECL(int) PATMSetMMIOPatchInfo(PVM pVM, RTGCPHYS GCPhys, RTRCPTR pCachedData)
283{
284 pVM->patm.s.mmio.GCPhys = GCPhys;
285 pVM->patm.s.mmio.pCachedData = (RTRCPTR)pCachedData;
286
287 return VINF_SUCCESS;
288}
289
290/**
291 * Checks if the interrupt flag is enabled or not.
292 *
293 * @returns true if it's enabled.
294 * @returns false if it's diabled.
295 *
296 * @param pVM The VM handle.
297 */
298VMMDECL(bool) PATMAreInterruptsEnabled(PVM pVM)
299{
300 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVM);
301
302 return PATMAreInterruptsEnabledByCtxCore(pVM, CPUMCTX2CORE(pCtx));
303}
304
305/**
306 * Checks if the interrupt flag is enabled or not.
307 *
308 * @returns true if it's enabled.
309 * @returns false if it's diabled.
310 *
311 * @param pVM The VM handle.
312 * @param pCtxCore CPU context
313 */
314VMMDECL(bool) PATMAreInterruptsEnabledByCtxCore(PVM pVM, PCPUMCTXCORE pCtxCore)
315{
316 if (PATMIsEnabled(pVM))
317 {
318 if (PATMIsPatchGCAddr(pVM, (RTRCPTR)pCtxCore->eip))
319 return false;
320 }
321 return !!(pCtxCore->eflags.u32 & X86_EFL_IF);
322}
323
324/**
325 * Check if the instruction is patched as a duplicated function
326 *
327 * @returns patch record
328 * @param pVM The VM to operate on.
329 * @param pInstrGC Guest context point to the instruction
330 *
331 */
332VMMDECL(PPATMPATCHREC) PATMQueryFunctionPatch(PVM pVM, RTRCPTR pInstrGC)
333{
334 PPATMPATCHREC pRec;
335
336 AssertCompile(sizeof(AVLOU32KEY) == sizeof(pInstrGC));
337 pRec = (PPATMPATCHREC)RTAvloU32Get(&CTXSUFF(pVM->patm.s.PatchLookupTree)->PatchTree, (AVLOU32KEY)pInstrGC);
338 if ( pRec
339 && (pRec->patch.uState == PATCH_ENABLED)
340 && (pRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CALLABLE_AS_FUNCTION))
341 )
342 return pRec;
343 return 0;
344}
345
346/**
347 * Checks if the int 3 was caused by a patched instruction
348 *
349 * @returns VBox status
350 *
351 * @param pVM The VM handle.
352 * @param pInstrGC Instruction pointer
353 * @param pOpcode Original instruction opcode (out, optional)
354 * @param pSize Original instruction size (out, optional)
355 */
356VMMDECL(bool) PATMIsInt3Patch(PVM pVM, RTRCPTR pInstrGC, uint32_t *pOpcode, uint32_t *pSize)
357{
358 PPATMPATCHREC pRec;
359
360 pRec = (PPATMPATCHREC)RTAvloU32Get(&CTXSUFF(pVM->patm.s.PatchLookupTree)->PatchTree, (AVLOU32KEY)pInstrGC);
361 if ( pRec
362 && (pRec->patch.uState == PATCH_ENABLED)
363 && (pRec->patch.flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
364 )
365 {
366 if (pOpcode) *pOpcode = pRec->patch.opcode;
367 if (pSize) *pSize = pRec->patch.cbPrivInstr;
368 return true;
369 }
370 return false;
371}
372
/**
 * Emulate sysenter, sysexit and syscall instructions
 *
 * @returns VBox status: VINF_SUCCESS when the instruction was handled via a
 *          patch, VINF_EM_RAW_RING_SWITCH otherwise.
 *
 * @param   pVM         The VM handle.
 * @param   pRegFrame   The relevant core context.
 * @param   pCpu        Disassembly context
 */
VMMDECL(int) PATMSysCall(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
{
    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVM);

    if (pCpu->pCurInstr->opcode == OP_SYSENTER)
    {
        /* Only take the patch path when a sysenter patch is installed, the guest
           is a ring-3 protected-mode caller, and interrupts are (virtually) on. */
        if (    pCtx->SysEnter.cs == 0
            ||  pRegFrame->eflags.Bits.u1VM
            ||  (pRegFrame->cs & X86_SEL_RPL) != 3
            ||  pVM->patm.s.pfnSysEnterPatchGC == 0
            ||  pVM->patm.s.pfnSysEnterGC != (RTRCPTR)pCtx->SysEnter.eip
            ||  !(PATMRawGetEFlags(pVM, pRegFrame) & X86_EFL_IF))
            goto end;

        Log2(("PATMSysCall: sysenter from %RRv to %RRv\n", pRegFrame->eip, pVM->patm.s.pfnSysEnterPatchGC));
        /** @todo the base and limit are forced to 0 & 4G-1 resp. We assume the selector is wide open here. */
        /** @note The Intel manual suggests that the OS is responsible for this. */
        pRegFrame->cs = (pCtx->SysEnter.cs & ~X86_SEL_RPL) | 1;
        pRegFrame->eip = /** @todo ugly conversion! */(uint32_t)pVM->patm.s.pfnSysEnterPatchGC;
        pRegFrame->ss = pRegFrame->cs + 8;   /* SysEnter.cs + 8 */
        pRegFrame->esp = pCtx->SysEnter.esp;
        pRegFrame->eflags.u32 &= ~(X86_EFL_VM|X86_EFL_RF);
        pRegFrame->eflags.u32 |= X86_EFL_IF;

        /* Turn off interrupts. */
        pVM->patm.s.CTXSUFF(pGCState)->uVMFlags &= ~X86_EFL_IF;

        STAM_COUNTER_INC(&pVM->patm.s.StatSysEnter);

        return VINF_SUCCESS;
    }
    else
    if (pCpu->pCurInstr->opcode == OP_SYSEXIT)
    {
        /* Only handle a ring-1 non-V86 caller with (virtual) interrupts enabled. */
        if (    pCtx->SysEnter.cs == 0
            ||  (pRegFrame->cs & X86_SEL_RPL) != 1
            ||  pRegFrame->eflags.Bits.u1VM
            ||  !(PATMRawGetEFlags(pVM, pRegFrame) & X86_EFL_IF))
            goto end;

        Log2(("PATMSysCall: sysexit from %RRv to %RRv\n", pRegFrame->eip, pRegFrame->edx));

        pRegFrame->cs = ((pCtx->SysEnter.cs + 16) & ~X86_SEL_RPL) | 3;
        pRegFrame->eip = pRegFrame->edx;    /* sysexit: edx holds the return eip */
        pRegFrame->ss = pRegFrame->cs + 8;  /* SysEnter.cs + 24 */
        pRegFrame->esp = pRegFrame->ecx;    /* sysexit: ecx holds the return esp */

        STAM_COUNTER_INC(&pVM->patm.s.StatSysExit);

        return VINF_SUCCESS;
    }
    else
    if (pCpu->pCurInstr->opcode == OP_SYSCALL)
    {
        /** @todo implement syscall */
    }
    else
    if (pCpu->pCurInstr->opcode == OP_SYSRET)
    {
        /** @todo implement sysret */
    }

end:
    /* Not handled here; let the caller perform a regular ring switch. */
    return VINF_EM_RAW_RING_SWITCH;
}
447
/**
 * Adds branch pair to the lookup cache of the particular branch instruction
 *
 * @returns VBox status
 * @param   pVM             The VM to operate on.
 * @param   pJumpTableGC    Pointer to branch instruction lookup cache
 * @param   pBranchTarget   Original branch target
 * @param   pRelBranchPatch Relative duplicated function address
 */
VMMDECL(int) PATMAddBranchToLookupCache(PVM pVM, RTRCPTR pJumpTableGC, RTRCPTR pBranchTarget, RTRCUINTPTR pRelBranchPatch)
{
    PPATCHJUMPTABLE pJumpTable;

    Log(("PATMAddBranchToLookupCache: Adding (%RRv->%RRv (%RRv)) to table %RRv\n", pBranchTarget, pRelBranchPatch + pVM->patm.s.pPatchMemGC, pRelBranchPatch, pJumpTableGC));

    AssertReturn(PATMIsPatchGCAddr(pVM, pJumpTableGC), VERR_INVALID_PARAMETER);

#ifdef IN_RC
    /* In raw-mode context the GC address is directly dereferenceable. */
    pJumpTable = (PPATCHJUMPTABLE) pJumpTableGC;
#else
    /* In other contexts, translate the GC patch-memory address to its HC mapping. */
    pJumpTable = (PPATCHJUMPTABLE) (pJumpTableGC - pVM->patm.s.pPatchMemGC + pVM->patm.s.pPatchMemHC);
#endif
    Log(("Nr addresses = %d, insert pos = %d\n", pJumpTable->cAddresses, pJumpTable->ulInsertPos));
    if (pJumpTable->cAddresses < pJumpTable->nrSlots)
    {
        uint32_t i;

        /* Find the first free slot; pInstrGC == 0 marks a slot as free. */
        for (i=0;i<pJumpTable->nrSlots;i++)
        {
            if (pJumpTable->Slot[i].pInstrGC == 0)
            {
                pJumpTable->Slot[i].pInstrGC = pBranchTarget;
                /* Relative address - eases relocation */
                pJumpTable->Slot[i].pRelPatchGC = pRelBranchPatch;
                pJumpTable->cAddresses++;
                break;
            }
        }
        /* cAddresses < nrSlots guaranteed a free slot existed, so the loop must have broken. */
        AssertReturn(i < pJumpTable->nrSlots, VERR_INTERNAL_ERROR);
#ifdef VBOX_WITH_STATISTICS
        STAM_COUNTER_INC(&pVM->patm.s.StatFunctionLookupInsert);
        if (pVM->patm.s.StatU32FunctionMaxSlotsUsed < i)
            pVM->patm.s.StatU32FunctionMaxSlotsUsed = i + 1;
#endif
    }
    else
    {
        /* Replace an old entry. */
        /** @todo replacement strategy isn't really bright. change to something better if required. */
        Assert(pJumpTable->ulInsertPos < pJumpTable->nrSlots);
        Assert((pJumpTable->nrSlots & 1) == 0);

        /* NOTE(review): masking with nrSlots-1 assumes nrSlots is a power of two;
           the assert above only checks evenness — confirm table sizing elsewhere. */
        pJumpTable->ulInsertPos &= (pJumpTable->nrSlots-1);
        pJumpTable->Slot[pJumpTable->ulInsertPos].pInstrGC = pBranchTarget;
        /* Relative address - eases relocation */
        pJumpTable->Slot[pJumpTable->ulInsertPos].pRelPatchGC = pRelBranchPatch;

        /* Advance the round-robin insertion position. */
        pJumpTable->ulInsertPos = (pJumpTable->ulInsertPos+1) & (pJumpTable->nrSlots-1);

        STAM_COUNTER_INC(&pVM->patm.s.StatFunctionLookupReplace);
    }

    return VINF_SUCCESS;
}
512
513
#if defined(VBOX_WITH_STATISTICS) || defined(LOG_ENABLED)
/**
 * Return the name of the patched instruction
 *
 * @returns Instruction name, or NULL for opcodes not patched by PATM.
 *
 * @param   opcode          DIS instruction opcode
 * @param   fPatchFlags     Patch flags
 */
VMMDECL(const char *) patmGetInstructionString(uint32_t opcode, uint32_t fPatchFlags)
{
    switch (opcode)
    {
        case OP_CLI:        return "cli";
        case OP_PUSHF:      return "pushf";
        case OP_POPF:       return "popf";
        case OP_STR:        return "str";
        case OP_LSL:        return "lsl";
        case OP_LAR:        return "lar";
        case OP_SGDT:       return "sgdt";
        case OP_SLDT:       return "sldt";
        case OP_SIDT:       return "sidt";
        case OP_SMSW:       return "smsw";
        case OP_VERW:       return "verw";
        case OP_VERR:       return "verr";
        case OP_CPUID:      return "cpuid";
        case OP_JMP:        return "jmp";
        case OP_JO:         return "jo";
        case OP_JNO:        return "jno";
        case OP_JC:         return "jc";
        case OP_JNC:        return "jnc";
        case OP_JE:         return "je";
        case OP_JNE:        return "jne";
        case OP_JBE:        return "jbe";
        case OP_JNBE:       return "jnbe";
        case OP_JS:         return "js";
        case OP_JNS:        return "jns";
        case OP_JP:         return "jp";
        case OP_JNP:        return "jnp";
        case OP_JL:         return "jl";
        case OP_JNL:        return "jnl";
        case OP_JLE:        return "jle";
        case OP_JNLE:       return "jnle";
        case OP_JECXZ:      return "jecxz";
        case OP_LOOP:       return "loop";
        case OP_LOOPNE:     return "loopne";
        case OP_LOOPE:      return "loope";
        case OP_MOV:
            /* Only named when it is an interrupt/trap handler patch. */
            if (fPatchFlags & PATMFL_IDTHANDLER)
                return "mov (Int/Trap Handler)";
            return NULL;
        case OP_SYSENTER:   return "sysenter";
        case OP_PUSH:       return "push (cs)";
        case OP_CALL:       return "call";
        case OP_IRET:       return "iret";
        default:            return NULL;
    }
}
#endif
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette