VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PATMAll.cpp@ 55747

Last change on this file since 55747 was 55001, checked in by vboxsync, 10 years ago

CPUMCTXCORE elimination.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 23.4 KB
Line 
1/* $Id: PATMAll.cpp 55001 2015-03-29 16:59:20Z vboxsync $ */
2/** @file
3 * PATM - The Patch Manager, all contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_PATM
22#include <VBox/vmm/patm.h>
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/em.h>
25#include <VBox/vmm/hm.h>
26#include <VBox/vmm/selm.h>
27#include <VBox/vmm/mm.h>
28#include "PATMInternal.h"
29#include <VBox/vmm/vm.h>
30#include <VBox/vmm/vmm.h>
31#include "PATMA.h"
32
33#include <VBox/dis.h>
34#include <VBox/disopcode.h>
35#include <VBox/err.h>
36#include <VBox/log.h>
37#include <iprt/assert.h>
38#include <iprt/string.h>
39
40
41/**
42 * Load virtualized flags.
43 *
44 * This function is called from CPUMRawEnter(). It doesn't have to update the
45 * IF and IOPL eflags bits, the caller will enforce those to set and 0 respectively.
46 *
47 * @param pVM Pointer to the VM.
48 * @param pCtx The cpu context.
49 * @see pg_raw
50 */
51VMM_INT_DECL(void) PATMRawEnter(PVM pVM, PCPUMCTX pCtx)
52{
53 Assert(!HMIsEnabled(pVM));
54
55 /*
56 * Currently we don't bother to check whether PATM is enabled or not.
57 * For all cases where it isn't, IOPL will be safe and IF will be set.
58 */
59 uint32_t efl = pCtx->eflags.u32;
60 CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = efl & PATM_VIRTUAL_FLAGS_MASK;
61
62 AssertMsg((efl & X86_EFL_IF) || PATMShouldUseRawMode(pVM, (RTRCPTR)pCtx->eip),
63 ("X86_EFL_IF is clear and PATM is disabled! (eip=%RRv eflags=%08x fPATM=%d pPATMGC=%RRv-%RRv\n",
64 pCtx->eip, pCtx->eflags.u32, PATMIsEnabled(pVM), pVM->patm.s.pPatchMemGC,
65 pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem));
66
67 AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || PATMIsPatchGCAddr(pVM, pCtx->eip),
68 ("fPIF=%d eip=%RRv\n", pVM->patm.s.CTXSUFF(pGCState)->fPIF, pCtx->eip));
69
70 efl &= ~PATM_VIRTUAL_FLAGS_MASK;
71 efl |= X86_EFL_IF;
72 pCtx->eflags.u32 = efl;
73
74#ifdef IN_RING3
75# ifdef PATM_EMULATE_SYSENTER
76 PCPUMCTX pCtx;
77
78 /* Check if the sysenter handler has changed. */
79 pCtx = CPUMQueryGuestCtxPtr(pVM);
80 if ( pCtx->SysEnter.cs != 0
81 && pCtx->SysEnter.eip != 0
82 )
83 {
84 if (pVM->patm.s.pfnSysEnterGC != (RTRCPTR)pCtx->SysEnter.eip)
85 {
86 pVM->patm.s.pfnSysEnterPatchGC = 0;
87 pVM->patm.s.pfnSysEnterGC = 0;
88
89 Log2(("PATMRawEnter: installing sysenter patch for %RRv\n", pCtx->SysEnter.eip));
90 pVM->patm.s.pfnSysEnterPatchGC = PATMR3QueryPatchGCPtr(pVM, pCtx->SysEnter.eip);
91 if (pVM->patm.s.pfnSysEnterPatchGC == 0)
92 {
93 rc = PATMR3InstallPatch(pVM, pCtx->SysEnter.eip, PATMFL_SYSENTER | PATMFL_CODE32);
94 if (rc == VINF_SUCCESS)
95 {
96 pVM->patm.s.pfnSysEnterPatchGC = PATMR3QueryPatchGCPtr(pVM, pCtx->SysEnter.eip);
97 pVM->patm.s.pfnSysEnterGC = (RTRCPTR)pCtx->SysEnter.eip;
98 Assert(pVM->patm.s.pfnSysEnterPatchGC);
99 }
100 }
101 else
102 pVM->patm.s.pfnSysEnterGC = (RTRCPTR)pCtx->SysEnter.eip;
103 }
104 }
105 else
106 {
107 pVM->patm.s.pfnSysEnterPatchGC = 0;
108 pVM->patm.s.pfnSysEnterGC = 0;
109 }
110# endif /* PATM_EMULATE_SYSENTER */
111#endif
112}
113
114
/**
 * Restores virtualized flags.
 *
 * This function is called from CPUMRawLeave(). It will update the eflags register.
 *
 ** @note Only here we are allowed to switch back to guest code (without a special reason such as a trap in patch code)!!
 *
 * @param pVM Pointer to the VM.
 * @param pCtx The cpu context.
 * @param rawRC Raw mode return code
 * @see @ref pg_raw
 */
VMM_INT_DECL(void) PATMRawLeave(PVM pVM, PCPUMCTX pCtx, int rawRC)
{
    Assert(!HMIsEnabled(pVM));
    bool fPatchCode = PATMIsPatchGCAddr(pVM, pCtx->eip);

    /*
     * We will only be called if PATMRawEnter was previously called.
     */
    /* Merge the virtualized flag bits (saved by PATMRawEnter) back into eflags,
       then reset the GC state copy to its neutral value (IF only). */
    uint32_t efl = pCtx->eflags.u32;
    efl = (efl & ~PATM_VIRTUAL_FLAGS_MASK) | (CTXSUFF(pVM->patm.s.pGCState)->uVMFlags & PATM_VIRTUAL_FLAGS_MASK);
    pCtx->eflags.u32 = efl;
    CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = X86_EFL_IF;

    AssertReleaseMsg((efl & X86_EFL_IF) || fPatchCode || rawRC == VINF_PATM_PENDING_IRQ_AFTER_IRET || RT_FAILURE(rawRC), ("Inconsistent state at %RRv rc=%Rrc\n", pCtx->eip, rawRC));
    AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || fPatchCode || RT_FAILURE(rawRC), ("fPIF=%d eip=%RRv rc=%Rrc\n", CTXSUFF(pVM->patm.s.pGCState)->fPIF, pCtx->eip, rawRC));

#ifdef IN_RING3
    /* Try to switch EIP from patch code back to the original guest instruction,
       but only when interrupts are enabled and we're not leaving for one of the
       dedicated PATM leave reasons (VINF_PATM_LEAVE_RC_FIRST..LAST). */
    if (   (efl & X86_EFL_IF)
        && fPatchCode)
    {
        if (   rawRC < VINF_PATM_LEAVE_RC_FIRST
            || rawRC > VINF_PATM_LEAVE_RC_LAST)
        {
            /*
             * Golden rules:
             * - Don't interrupt special patch streams that replace special instructions
             * - Don't break instruction fusing (sti, pop ss, mov ss)
             * - Don't go back to an instruction that has been overwritten by a patch jump
             * - Don't interrupt an idt handler on entry (1st instruction); technically incorrect
             *
             */
            if (CTXSUFF(pVM->patm.s.pGCState)->fPIF == 1) /* consistent patch instruction state */
            {
                PATMTRANSSTATE enmState;
                /* Translate the patch-code EIP back to the corresponding guest address. */
                RTRCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtx->eip, &enmState);

                AssertRelease(pOrgInstrGC);

                Assert(enmState != PATMTRANS_OVERWRITTEN);
                if (enmState == PATMTRANS_SAFE)
                {
                    Assert(!patmFindActivePatchByEntrypoint(pVM, pOrgInstrGC));
                    Log(("Switchback from %RRv to %RRv (Psp=%x)\n", pCtx->eip, pOrgInstrGC, CTXSUFF(pVM->patm.s.pGCState)->Psp));
                    STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBack);
                    pCtx->eip = pOrgInstrGC;
                    fPatchCode = false; /* to reset the stack ptr */

                    CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0; /* reset this pointer; safe otherwise the state would be PATMTRANS_INHIBITIRQ */
                }
                else
                {
                    /* Unsafe translation state; stay on the patch instruction. */
                    LogFlow(("Patch address %RRv can't be interrupted (state=%d)!\n", pCtx->eip, enmState));
                    STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBackFail);
                }
            }
            else
            {
                /* Patch instruction state is inconsistent (fPIF == 0); switching back now would corrupt it. */
                LogFlow(("Patch address %RRv can't be interrupted (fPIF=%d)!\n", pCtx->eip, CTXSUFF(pVM->patm.s.pGCState)->fPIF));
                STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBackFail);
            }
        }
    }
#else /* !IN_RING3 */
    /*
     * When leaving raw-mode state while IN_RC, it's generally for interpreting
     * a single original guest instruction.
     */
    AssertMsg(!fPatchCode, ("eip=%RRv\n", pCtx->eip));
#endif /* !IN_RING3 */

    if (!fPatchCode)
    {
        /* EIP is back in guest code: propagate any pending interrupt-inhibition
           (sti / mov ss fusing) recorded by patch code to EM, then clear it. */
        if (CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts == (RTRCPTR)pCtx->eip)
        {
            EMSetInhibitInterruptsPC(VMMGetCpu0(pVM), pCtx->eip);
        }
        CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0;

        /* Reset the stack pointer to the top of the stack. */
#ifdef DEBUG
        if (CTXSUFF(pVM->patm.s.pGCState)->Psp != PATM_STACK_SIZE)
        {
            LogFlow(("PATMRawLeave: Reset PATM stack (Psp = %x)\n", CTXSUFF(pVM->patm.s.pGCState)->Psp));
        }
#endif
        CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
    }
}
215
216/**
217 * Get the EFLAGS.
218 * This is a worker for CPUMRawGetEFlags().
219 *
220 * @returns The eflags.
221 * @param pVM Pointer to the VM.
222 * @param pCtx The guest cpu context.
223 */
224VMM_INT_DECL(uint32_t) PATMRawGetEFlags(PVM pVM, PCCPUMCTX pCtx)
225{
226 Assert(!HMIsEnabled(pVM));
227 uint32_t efl = pCtx->eflags.u32;
228 efl &= ~PATM_VIRTUAL_FLAGS_MASK;
229 efl |= pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & PATM_VIRTUAL_FLAGS_MASK;
230 return efl;
231}
232
233/**
234 * Updates the EFLAGS.
235 * This is a worker for CPUMRawSetEFlags().
236 *
237 * @param pVM Pointer to the VM.
238 * @param pCtx The guest cpu context.
239 * @param efl The new EFLAGS value.
240 */
241VMM_INT_DECL(void) PATMRawSetEFlags(PVM pVM, PCPUMCTX pCtx, uint32_t efl)
242{
243 Assert(!HMIsEnabled(pVM));
244 pVM->patm.s.CTXSUFF(pGCState)->uVMFlags = efl & PATM_VIRTUAL_FLAGS_MASK;
245 efl &= ~PATM_VIRTUAL_FLAGS_MASK;
246 efl |= X86_EFL_IF;
247 pCtx->eflags.u32 = efl;
248}
249
250/**
251 * Check if we must use raw mode (patch code being executed)
252 *
253 * @param pVM Pointer to the VM.
254 * @param pAddrGC Guest context address
255 */
256VMM_INT_DECL(bool) PATMShouldUseRawMode(PVM pVM, RTRCPTR pAddrGC)
257{
258 return PATMIsEnabled(pVM)
259 && ( (RTRCUINTPTR)pAddrGC - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC < pVM->patm.s.cbPatchMem
260 || (RTRCUINTPTR)pAddrGC - (RTRCUINTPTR)pVM->patm.s.pbPatchHelpersRC < pVM->patm.s.cbPatchHelpers);
261}
262
263/**
264 * Returns the guest context pointer and size of the GC context structure
265 *
266 * @returns VBox status code.
267 * @param pVM Pointer to the VM.
268 */
269VMM_INT_DECL(RCPTRTYPE(PPATMGCSTATE)) PATMGetGCState(PVM pVM)
270{
271 AssertReturn(!HMIsEnabled(pVM), NIL_RTRCPTR);
272 return pVM->patm.s.pGCStateGC;
273}
274
275/**
276 * Checks whether the GC address is part of our patch or helper regions.
277 *
278 * @returns VBox status code.
279 * @param pVM Pointer to the VM.
280 * @param uGCAddr Guest context address.
281 * @internal
282 */
283VMMDECL(bool) PATMIsPatchGCAddr(PVM pVM, RTRCUINTPTR uGCAddr)
284{
285 return PATMIsEnabled(pVM)
286 && ( uGCAddr - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC < pVM->patm.s.cbPatchMem
287 || uGCAddr - (RTRCUINTPTR)pVM->patm.s.pbPatchHelpersRC < pVM->patm.s.cbPatchHelpers);
288}
289
290/**
291 * Checks whether the GC address is part of our patch region.
292 *
293 * @returns VBox status code.
294 * @param pVM Pointer to the VM.
295 * @param uGCAddr Guest context address.
296 * @internal
297 */
298VMMDECL(bool) PATMIsPatchGCAddrExclHelpers(PVM pVM, RTRCUINTPTR uGCAddr)
299{
300 return PATMIsEnabled(pVM)
301 && uGCAddr - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC < pVM->patm.s.cbPatchMem;
302}
303
/**
 * Reads patch code.
 *
 * @retval VINF_SUCCESS on success.
 * @retval VERR_PATCH_NOT_FOUND if the request is entirely outside the patch
 *         code.
 *
 * @param pVM The cross context VM structure.
 * @param GCPtrPatchCode The patch address to start reading at.
 * @param pvDst Where to return the patch code.
 * @param cbToRead Number of bytes to read.
 * @param pcbRead Where to return the actual number of bytes we've
 *        read. Optional.
 */
VMM_INT_DECL(int) PATMReadPatchCode(PVM pVM, RTGCPTR GCPtrPatchCode, void *pvDst, size_t cbToRead, size_t *pcbRead)
{
    /* Shortcut. */
    if (!PATMIsEnabled(pVM))
        return VERR_PATCH_NOT_FOUND;
    Assert(!HMIsEnabled(pVM));

    /*
     * Check patch code and patch helper code. We assume the requested bytes
     * are not in either.
     */
    /* Unsigned subtraction: addresses below the region wrap to huge offsets,
       so a single >= compare rejects both below- and above-range addresses. */
    RTGCPTR offPatchCode = GCPtrPatchCode - (RTGCPTR32)pVM->patm.s.pPatchMemGC;
    if (offPatchCode >= pVM->patm.s.cbPatchMem)
    {
        offPatchCode = GCPtrPatchCode - (RTGCPTR32)pVM->patm.s.pbPatchHelpersRC;
        if (offPatchCode >= pVM->patm.s.cbPatchHelpers)
            return VERR_PATCH_NOT_FOUND;

        /*
         * Patch helper memory.
         */
        /* Clip the read to the end of the helper region. */
        uint32_t cbMaxRead = pVM->patm.s.cbPatchHelpers - (uint32_t)offPatchCode;
        if (cbToRead > cbMaxRead)
            cbToRead = cbMaxRead;
#ifdef IN_RC
        /* In RC the guest-context mapping is directly addressable. */
        memcpy(pvDst, pVM->patm.s.pbPatchHelpersRC + (uint32_t)offPatchCode, cbToRead);
#else
        memcpy(pvDst, pVM->patm.s.pbPatchHelpersR3 + (uint32_t)offPatchCode, cbToRead);
#endif
    }
    else
    {
        /*
         * Patch memory.
         */
        /* Clip the read to the end of the patch memory region. */
        uint32_t cbMaxRead = pVM->patm.s.cbPatchMem - (uint32_t)offPatchCode;
        if (cbToRead > cbMaxRead)
            cbToRead = cbMaxRead;
#ifdef IN_RC
        memcpy(pvDst, pVM->patm.s.pPatchMemGC + (uint32_t)offPatchCode, cbToRead);
#else
        memcpy(pvDst, pVM->patm.s.pPatchMemHC + (uint32_t)offPatchCode, cbToRead);
#endif
    }

    if (pcbRead)
        *pcbRead = cbToRead;
    return VINF_SUCCESS;
}
367
/**
 * Set parameters for pending MMIO patch operation
 *
 * @returns VBox status code (always VINF_SUCCESS).
 * @param pVM Pointer to the VM.
 * @param GCPhys MMIO physical address
 * @param pCachedData GC pointer to cached data
 */
VMM_INT_DECL(int) PATMSetMMIOPatchInfo(PVM pVM, RTGCPHYS GCPhys, RTRCPTR pCachedData)
{
    /* Silently ignored when HM is active: PATM is a raw-mode only facility. */
    if (!HMIsEnabled(pVM))
    {
        pVM->patm.s.mmio.GCPhys = GCPhys;
        pVM->patm.s.mmio.pCachedData = (RTRCPTR)pCachedData;
    }

    return VINF_SUCCESS;
}
386
387/**
388 * Checks if the interrupt flag is enabled or not.
389 *
390 * @returns true if it's enabled.
391 * @returns false if it's disabled.
392 *
393 * @param pVM Pointer to the VM.
394 * @todo CPUM should wrap this, EM.cpp shouldn't call us.
395 */
396VMM_INT_DECL(bool) PATMAreInterruptsEnabled(PVM pVM)
397{
398 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));
399
400 return PATMAreInterruptsEnabledByCtx(pVM, pCtx);
401}
402
403/**
404 * Checks if the interrupt flag is enabled or not.
405 *
406 * @returns true if it's enabled.
407 * @returns false if it's disabled.
408 *
409 * @param pVM Pointer to the VM.
410 * @param pCtx The guest CPU context.
411 * @todo CPUM should wrap this, EM.cpp shouldn't call us.
412 */
413VMM_INT_DECL(bool) PATMAreInterruptsEnabledByCtx(PVM pVM, PCPUMCTX pCtx)
414{
415 if (PATMIsEnabled(pVM))
416 {
417 Assert(!HMIsEnabled(pVM));
418 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
419 return false;
420 }
421 return !!(pCtx->eflags.u32 & X86_EFL_IF);
422}
423
424/**
425 * Check if the instruction is patched as a duplicated function
426 *
427 * @returns patch record
428 * @param pVM Pointer to the VM.
429 * @param pInstrGC Guest context point to the instruction
430 *
431 */
432PPATMPATCHREC patmQueryFunctionPatch(PVM pVM, RTRCPTR pInstrGC)
433{
434 PPATMPATCHREC pRec;
435
436 AssertCompile(sizeof(AVLOU32KEY) == sizeof(pInstrGC));
437 pRec = (PPATMPATCHREC)RTAvloU32Get(&CTXSUFF(pVM->patm.s.PatchLookupTree)->PatchTree, (AVLOU32KEY)pInstrGC);
438 if ( pRec
439 && (pRec->patch.uState == PATCH_ENABLED)
440 && (pRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CALLABLE_AS_FUNCTION))
441 )
442 return pRec;
443 return 0;
444}
445
446/**
447 * Checks if the int 3 was caused by a patched instruction
448 *
449 * @returns VBox status
450 *
451 * @param pVM Pointer to the VM.
452 * @param pInstrGC Instruction pointer
453 * @param pOpcode Original instruction opcode (out, optional)
454 * @param pSize Original instruction size (out, optional)
455 */
456VMM_INT_DECL(bool) PATMIsInt3Patch(PVM pVM, RTRCPTR pInstrGC, uint32_t *pOpcode, uint32_t *pSize)
457{
458 PPATMPATCHREC pRec;
459 Assert(!HMIsEnabled(pVM));
460
461 pRec = (PPATMPATCHREC)RTAvloU32Get(&CTXSUFF(pVM->patm.s.PatchLookupTree)->PatchTree, (AVLOU32KEY)pInstrGC);
462 if ( pRec
463 && (pRec->patch.uState == PATCH_ENABLED)
464 && (pRec->patch.flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
465 )
466 {
467 if (pOpcode) *pOpcode = pRec->patch.opcode;
468 if (pSize) *pSize = pRec->patch.cbPrivInstr;
469 return true;
470 }
471 return false;
472}
473
/**
 * Emulate sysenter, sysexit and syscall instructions.
 *
 * @retval VINF_SUCCESS when the instruction was handled by jumping into a
 *         PATM sysenter patch or emulating sysexit.
 * @retval VINF_EM_RAW_RING_SWITCH otherwise (let the caller handle it).
 *
 * @param pVM Pointer to the VM.
 * @param pCtx The relevant guest cpu context.
 * @param pCpu Disassembly state.
 */
VMMDECL(int) PATMSysCall(PVM pVM, PCPUMCTX pCtx, PDISCPUSTATE pCpu)
{
    Assert(CPUMQueryGuestCtxPtr(VMMGetCpu0(pVM)) == pCtx);
    AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);

    if (pCpu->pCurInstr->uOpcode == OP_SYSENTER)
    {
        /* Only take the patch path for a ring-3, non-V86 sysenter whose MSR
           target matches the installed patch and with (virtual) IF set. */
        if (    pCtx->SysEnter.cs == 0
            ||  pCtx->eflags.Bits.u1VM
            ||  (pCtx->cs.Sel & X86_SEL_RPL) != 3
            ||  pVM->patm.s.pfnSysEnterPatchGC == 0
            ||  pVM->patm.s.pfnSysEnterGC != (RTRCPTR)(RTRCUINTPTR)pCtx->SysEnter.eip
            ||  !(PATMRawGetEFlags(pVM, pCtx) & X86_EFL_IF))
            goto end;

        Log2(("PATMSysCall: sysenter from %RRv to %RRv\n", pCtx->eip, pVM->patm.s.pfnSysEnterPatchGC));
        /** @todo the base and limit are forced to 0 & 4G-1 resp. We assume the selector is wide open here. */
        /** @note The Intel manual suggests that the OS is responsible for this. */
        /* Enter the patch at RPL 1 (raw-mode runs "ring 0" guest code at ring 1). */
        pCtx->cs.Sel = (pCtx->SysEnter.cs & ~X86_SEL_RPL) | 1;
        pCtx->eip = /** @todo ugly conversion! */(uint32_t)pVM->patm.s.pfnSysEnterPatchGC;
        pCtx->ss.Sel = pCtx->cs.Sel + 8; /* SysEnter.cs + 8 */
        pCtx->esp = pCtx->SysEnter.esp;
        pCtx->eflags.u32 &= ~(X86_EFL_VM | X86_EFL_RF);
        pCtx->eflags.u32 |= X86_EFL_IF;

        /* Turn off interrupts. */
        pVM->patm.s.CTXSUFF(pGCState)->uVMFlags &= ~X86_EFL_IF;

        STAM_COUNTER_INC(&pVM->patm.s.StatSysEnter);

        return VINF_SUCCESS;
    }
    if (pCpu->pCurInstr->uOpcode == OP_SYSEXIT)
    {
        /* sysexit must come from (raw-mode) ring 1, not V86 mode, with IF set. */
        if (    pCtx->SysEnter.cs == 0
            ||  (pCtx->cs.Sel & X86_SEL_RPL) != 1
            ||  pCtx->eflags.Bits.u1VM
            ||  !(PATMRawGetEFlags(pVM, pCtx) & X86_EFL_IF))
            goto end;

        Log2(("PATMSysCall: sysexit from %RRv to %RRv\n", pCtx->eip, pCtx->edx));

        /* Return to ring 3; target EIP in edx, ESP in ecx per the sysexit contract. */
        pCtx->cs.Sel = ((pCtx->SysEnter.cs + 16) & ~X86_SEL_RPL) | 3;
        pCtx->eip = pCtx->edx;
        pCtx->ss.Sel = pCtx->cs.Sel + 8; /* SysEnter.cs + 24 */
        pCtx->esp = pCtx->ecx;

        STAM_COUNTER_INC(&pVM->patm.s.StatSysExit);

        return VINF_SUCCESS;
    }
    if (pCpu->pCurInstr->uOpcode == OP_SYSCALL)
    {
        /** @todo implement syscall */
    }
    else
    if (pCpu->pCurInstr->uOpcode == OP_SYSRET)
    {
        /** @todo implement sysret */
    }

end:
    return VINF_EM_RAW_RING_SWITCH;
}
547
/**
 * Adds branch pair to the lookup cache of the particular branch instruction.
 *
 * @returns VBox status code (VINF_SUCCESS, VERR_INVALID_PARAMETER or
 *          VERR_INTERNAL_ERROR).
 * @param pVM Pointer to the VM.
 * @param pJumpTableGC Pointer to branch instruction lookup cache.
 * @param pBranchTarget Original branch target.
 * @param pRelBranchPatch Relative duplicated function address.
 */
int patmAddBranchToLookupCache(PVM pVM, RTRCPTR pJumpTableGC, RTRCPTR pBranchTarget, RTRCUINTPTR pRelBranchPatch)
{
    PPATCHJUMPTABLE pJumpTable;

    Log(("PATMAddBranchToLookupCache: Adding (%RRv->%RRv (%RRv)) to table %RRv\n", pBranchTarget, pRelBranchPatch + pVM->patm.s.pPatchMemGC, pRelBranchPatch, pJumpTableGC));

    AssertReturn(PATMIsPatchGCAddr(pVM, (RTRCUINTPTR)pJumpTableGC), VERR_INVALID_PARAMETER);

    /* Map the GC table address to something we can dereference in this context. */
#ifdef IN_RC
    pJumpTable = (PPATCHJUMPTABLE) pJumpTableGC;
#else
    pJumpTable = (PPATCHJUMPTABLE) (pJumpTableGC - pVM->patm.s.pPatchMemGC + pVM->patm.s.pPatchMemHC);
#endif
    Log(("Nr addresses = %d, insert pos = %d\n", pJumpTable->cAddresses, pJumpTable->ulInsertPos));
    if (pJumpTable->cAddresses < pJumpTable->nrSlots)
    {
        /* Free slots left: find the first empty one (pInstrGC == 0). */
        uint32_t i;

        for (i=0;i<pJumpTable->nrSlots;i++)
        {
            if (pJumpTable->Slot[i].pInstrGC == 0)
            {
                pJumpTable->Slot[i].pInstrGC = pBranchTarget;
                /* Relative address - eases relocation */
                pJumpTable->Slot[i].pRelPatchGC = pRelBranchPatch;
                pJumpTable->cAddresses++;
                break;
            }
        }
        AssertReturn(i < pJumpTable->nrSlots, VERR_INTERNAL_ERROR);
#ifdef VBOX_WITH_STATISTICS
        STAM_COUNTER_INC(&pVM->patm.s.StatFunctionLookupInsert);
        if (pVM->patm.s.StatU32FunctionMaxSlotsUsed < i)
            pVM->patm.s.StatU32FunctionMaxSlotsUsed = i + 1;
#endif
    }
    else
    {
        /* Replace an old entry. */
        /** @todo replacement strategy isn't really bright. change to something better if required. */
        Assert(pJumpTable->ulInsertPos < pJumpTable->nrSlots);
        Assert((pJumpTable->nrSlots & 1) == 0);

        /* NOTE(review): the & (nrSlots-1) masking assumes nrSlots is a power of
           two, but only evenness is asserted above — verify table allocation. */
        pJumpTable->ulInsertPos &= (pJumpTable->nrSlots-1);
        pJumpTable->Slot[pJumpTable->ulInsertPos].pInstrGC = pBranchTarget;
        /* Relative address - eases relocation */
        pJumpTable->Slot[pJumpTable->ulInsertPos].pRelPatchGC = pRelBranchPatch;

        /* Round-robin advance of the insertion cursor. */
        pJumpTable->ulInsertPos = (pJumpTable->ulInsertPos+1) & (pJumpTable->nrSlots-1);

        STAM_COUNTER_INC(&pVM->patm.s.StatFunctionLookupReplace);
    }

    return VINF_SUCCESS;
}
612
613
614#if defined(VBOX_WITH_STATISTICS) || defined(LOG_ENABLED)
615/**
616 * Return the name of the patched instruction
617 *
618 * @returns instruction name
619 *
620 * @param opcode DIS instruction opcode
621 * @param fPatchFlags Patch flags
622 */
623const char *patmGetInstructionString(uint32_t opcode, uint32_t fPatchFlags)
624{
625 const char *pszInstr = NULL;
626
627 switch (opcode)
628 {
629 case OP_CLI:
630 pszInstr = "cli";
631 break;
632 case OP_PUSHF:
633 pszInstr = "pushf";
634 break;
635 case OP_POPF:
636 pszInstr = "popf";
637 break;
638 case OP_STR:
639 pszInstr = "str";
640 break;
641 case OP_LSL:
642 pszInstr = "lsl";
643 break;
644 case OP_LAR:
645 pszInstr = "lar";
646 break;
647 case OP_SGDT:
648 pszInstr = "sgdt";
649 break;
650 case OP_SLDT:
651 pszInstr = "sldt";
652 break;
653 case OP_SIDT:
654 pszInstr = "sidt";
655 break;
656 case OP_SMSW:
657 pszInstr = "smsw";
658 break;
659 case OP_VERW:
660 pszInstr = "verw";
661 break;
662 case OP_VERR:
663 pszInstr = "verr";
664 break;
665 case OP_CPUID:
666 pszInstr = "cpuid";
667 break;
668 case OP_JMP:
669 pszInstr = "jmp";
670 break;
671 case OP_JO:
672 pszInstr = "jo";
673 break;
674 case OP_JNO:
675 pszInstr = "jno";
676 break;
677 case OP_JC:
678 pszInstr = "jc";
679 break;
680 case OP_JNC:
681 pszInstr = "jnc";
682 break;
683 case OP_JE:
684 pszInstr = "je";
685 break;
686 case OP_JNE:
687 pszInstr = "jne";
688 break;
689 case OP_JBE:
690 pszInstr = "jbe";
691 break;
692 case OP_JNBE:
693 pszInstr = "jnbe";
694 break;
695 case OP_JS:
696 pszInstr = "js";
697 break;
698 case OP_JNS:
699 pszInstr = "jns";
700 break;
701 case OP_JP:
702 pszInstr = "jp";
703 break;
704 case OP_JNP:
705 pszInstr = "jnp";
706 break;
707 case OP_JL:
708 pszInstr = "jl";
709 break;
710 case OP_JNL:
711 pszInstr = "jnl";
712 break;
713 case OP_JLE:
714 pszInstr = "jle";
715 break;
716 case OP_JNLE:
717 pszInstr = "jnle";
718 break;
719 case OP_JECXZ:
720 pszInstr = "jecxz";
721 break;
722 case OP_LOOP:
723 pszInstr = "loop";
724 break;
725 case OP_LOOPNE:
726 pszInstr = "loopne";
727 break;
728 case OP_LOOPE:
729 pszInstr = "loope";
730 break;
731 case OP_MOV:
732 if (fPatchFlags & PATMFL_IDTHANDLER)
733 pszInstr = "mov (Int/Trap Handler)";
734 else
735 pszInstr = "mov (cs)";
736 break;
737 case OP_SYSENTER:
738 pszInstr = "sysenter";
739 break;
740 case OP_PUSH:
741 pszInstr = "push (cs)";
742 break;
743 case OP_CALL:
744 pszInstr = "call";
745 break;
746 case OP_IRET:
747 pszInstr = "iret";
748 break;
749 }
750 return pszInstr;
751}
752#endif
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette