VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PATMAll.cpp@ 56225

Last change on this file since 56225 was 56043, checked in by vboxsync, 10 years ago

PATMAll.cpp: docs

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 24.3 KB
Line 
1/* $Id: PATMAll.cpp 56043 2015-05-22 21:03:49Z vboxsync $ */
2/** @file
3 * PATM - The Patch Manager, all contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_PATM
22#include <VBox/vmm/patm.h>
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/em.h>
25#include <VBox/vmm/hm.h>
26#include <VBox/vmm/selm.h>
27#include <VBox/vmm/mm.h>
28#include "PATMInternal.h"
29#include <VBox/vmm/vm.h>
30#include <VBox/vmm/vmm.h>
31#include "PATMA.h"
32
33#include <VBox/dis.h>
34#include <VBox/disopcode.h>
35#include <VBox/err.h>
36#include <VBox/log.h>
37#include <iprt/assert.h>
38#include <iprt/string.h>
39
40
/**
 * @callback_method_impl{FNPGMPHYSHANDLER, PATM all access handler callback.}
 *
 * Fires when the guest writes to a page PATM monitors for patched code.
 *
 * @remarks The @a pvUser argument is the base address of the page being
 *          monitored.
 */
PGM_ALL_CB2_DECL(VBOXSTRICTRC) patmVirtPageHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
                                                   PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
{
    Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
    NOREF(pvPtr); NOREF(pvBuf); NOREF(cbBuf); NOREF(enmOrigin); NOREF(pvUser);
    /* pvUser must be a page-aligned monitored-page base address. */
    Assert(pvUser); Assert(!((uintptr_t)pvUser & PAGE_OFFSET_MASK));

    /* Record the exact faulting address (page base + offset) for the handler. */
    pVM->patm.s.pvFaultMonitor = (RTRCPTR)((uintptr_t)pvUser + (GCPtr & PAGE_OFFSET_MASK));
#ifdef IN_RING3
    /* Ring-3: deal with the modified monitored page right away, then let PGM
       perform the write as usual. */
    PATMR3HandleMonitoredPage(pVM);
    return VINF_PGM_HANDLER_DO_DEFAULT;
#else
    /* RC: Go handle this in ring-3. */
    return VINF_PATM_CHECK_PATCH_PAGE;
#endif
}
63
64
65/**
66 * Load virtualized flags.
67 *
68 * This function is called from CPUMRawEnter(). It doesn't have to update the
69 * IF and IOPL eflags bits, the caller will enforce those to set and 0 respectively.
70 *
71 * @param pVM Pointer to the VM.
72 * @param pCtx The cpu context.
73 * @see pg_raw
74 */
75VMM_INT_DECL(void) PATMRawEnter(PVM pVM, PCPUMCTX pCtx)
76{
77 Assert(!HMIsEnabled(pVM));
78
79 /*
80 * Currently we don't bother to check whether PATM is enabled or not.
81 * For all cases where it isn't, IOPL will be safe and IF will be set.
82 */
83 uint32_t efl = pCtx->eflags.u32;
84 CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = efl & PATM_VIRTUAL_FLAGS_MASK;
85
86 AssertMsg((efl & X86_EFL_IF) || PATMShouldUseRawMode(pVM, (RTRCPTR)pCtx->eip),
87 ("X86_EFL_IF is clear and PATM is disabled! (eip=%RRv eflags=%08x fPATM=%d pPATMGC=%RRv-%RRv\n",
88 pCtx->eip, pCtx->eflags.u32, PATMIsEnabled(pVM), pVM->patm.s.pPatchMemGC,
89 pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem));
90
91 AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || PATMIsPatchGCAddr(pVM, pCtx->eip),
92 ("fPIF=%d eip=%RRv\n", pVM->patm.s.CTXSUFF(pGCState)->fPIF, pCtx->eip));
93
94 efl &= ~PATM_VIRTUAL_FLAGS_MASK;
95 efl |= X86_EFL_IF;
96 pCtx->eflags.u32 = efl;
97
98#ifdef IN_RING3
99# ifdef PATM_EMULATE_SYSENTER
100 PCPUMCTX pCtx;
101
102 /* Check if the sysenter handler has changed. */
103 pCtx = CPUMQueryGuestCtxPtr(pVM);
104 if ( pCtx->SysEnter.cs != 0
105 && pCtx->SysEnter.eip != 0
106 )
107 {
108 if (pVM->patm.s.pfnSysEnterGC != (RTRCPTR)pCtx->SysEnter.eip)
109 {
110 pVM->patm.s.pfnSysEnterPatchGC = 0;
111 pVM->patm.s.pfnSysEnterGC = 0;
112
113 Log2(("PATMRawEnter: installing sysenter patch for %RRv\n", pCtx->SysEnter.eip));
114 pVM->patm.s.pfnSysEnterPatchGC = PATMR3QueryPatchGCPtr(pVM, pCtx->SysEnter.eip);
115 if (pVM->patm.s.pfnSysEnterPatchGC == 0)
116 {
117 rc = PATMR3InstallPatch(pVM, pCtx->SysEnter.eip, PATMFL_SYSENTER | PATMFL_CODE32);
118 if (rc == VINF_SUCCESS)
119 {
120 pVM->patm.s.pfnSysEnterPatchGC = PATMR3QueryPatchGCPtr(pVM, pCtx->SysEnter.eip);
121 pVM->patm.s.pfnSysEnterGC = (RTRCPTR)pCtx->SysEnter.eip;
122 Assert(pVM->patm.s.pfnSysEnterPatchGC);
123 }
124 }
125 else
126 pVM->patm.s.pfnSysEnterGC = (RTRCPTR)pCtx->SysEnter.eip;
127 }
128 }
129 else
130 {
131 pVM->patm.s.pfnSysEnterPatchGC = 0;
132 pVM->patm.s.pfnSysEnterGC = 0;
133 }
134# endif /* PATM_EMULATE_SYSENTER */
135#endif
136}
137
138
/**
 * Restores virtualized flags.
 *
 * This function is called from CPUMRawLeave(). It will update the eflags register.
 *
 * @note Only here we are allowed to switch back to guest code (without a
 *       special reason such as a trap in patch code)!!
 *
 * @param   pVM     Pointer to the VM.
 * @param   pCtx    The cpu context.
 * @param   rawRC   Raw mode return code.
 * @see     @ref pg_raw
 */
VMM_INT_DECL(void) PATMRawLeave(PVM pVM, PCPUMCTX pCtx, int rawRC)
{
    Assert(!HMIsEnabled(pVM));
    bool fPatchCode = PATMIsPatchGCAddr(pVM, pCtx->eip);

    /*
     * We will only be called if PATMRawEnter was previously called.
     */
    /* Merge the virtualized flag bits (saved by PATMRawEnter) back into the
       guest eflags, then park the GC state with just IF set. */
    uint32_t efl = pCtx->eflags.u32;
    efl = (efl & ~PATM_VIRTUAL_FLAGS_MASK) | (CTXSUFF(pVM->patm.s.pGCState)->uVMFlags & PATM_VIRTUAL_FLAGS_MASK);
    pCtx->eflags.u32 = efl;
    CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = X86_EFL_IF;

    AssertReleaseMsg((efl & X86_EFL_IF) || fPatchCode || rawRC == VINF_PATM_PENDING_IRQ_AFTER_IRET || RT_FAILURE(rawRC), ("Inconsistent state at %RRv rc=%Rrc\n", pCtx->eip, rawRC));
    AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || fPatchCode || RT_FAILURE(rawRC), ("fPIF=%d eip=%RRv rc=%Rrc\n", CTXSUFF(pVM->patm.s.pGCState)->fPIF, pCtx->eip, rawRC));

#ifdef IN_RING3
    /* If interrupts are enabled and we're stopped inside patch code, try to
       switch EIP back to the corresponding original guest instruction. */
    if (   (efl & X86_EFL_IF)
        && fPatchCode)
    {
        /* Special PATM leave codes mean the patch itself asked to exit; don't
           switch back for those. */
        if (   rawRC < VINF_PATM_LEAVE_RC_FIRST
            || rawRC > VINF_PATM_LEAVE_RC_LAST)
        {
            /*
             * Golden rules:
             * - Don't interrupt special patch streams that replace special instructions
             * - Don't break instruction fusing (sti, pop ss, mov ss)
             * - Don't go back to an instruction that has been overwritten by a patch jump
             * - Don't interrupt an idt handler on entry (1st instruction); technically incorrect
             *
             */
            if (CTXSUFF(pVM->patm.s.pGCState)->fPIF == 1) /* consistent patch instruction state */
            {
                PATMTRANSSTATE enmState;
                RTRCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtx->eip, &enmState);

                AssertRelease(pOrgInstrGC);

                Assert(enmState != PATMTRANS_OVERWRITTEN);
                if (enmState == PATMTRANS_SAFE)
                {
                    Assert(!patmFindActivePatchByEntrypoint(pVM, pOrgInstrGC));
                    Log(("Switchback from %RRv to %RRv (Psp=%x)\n", pCtx->eip, pOrgInstrGC, CTXSUFF(pVM->patm.s.pGCState)->Psp));
                    STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBack);
                    pCtx->eip = pOrgInstrGC;
                    fPatchCode = false; /* to reset the stack ptr */

                    CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0;  /* reset this pointer; safe otherwise the state would be PATMTRANS_INHIBITIRQ */
                }
                else
                {
                    LogFlow(("Patch address %RRv can't be interrupted (state=%d)!\n", pCtx->eip, enmState));
                    STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBackFail);
                }
            }
            else
            {
                LogFlow(("Patch address %RRv can't be interrupted (fPIF=%d)!\n", pCtx->eip, CTXSUFF(pVM->patm.s.pGCState)->fPIF));
                STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBackFail);
            }
        }
    }
#else /* !IN_RING3 */
    /*
     * When leaving raw-mode state while IN_RC, it's generally for interpreting
     * a single original guest instruction.
     */
    AssertMsg(!fPatchCode, ("eip=%RRv\n", pCtx->eip));
#endif /* !IN_RING3 */

    if (!fPatchCode)
    {
        /* Propagate the interrupt-inhibition window (sti / mov ss) to EM if we
           left off exactly at the inhibited instruction. */
        if (CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts == (RTRCPTR)pCtx->eip)
        {
            EMSetInhibitInterruptsPC(VMMGetCpu0(pVM), pCtx->eip);
        }
        CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0;

        /* Reset the stack pointer to the top of the stack. */
#ifdef DEBUG
        if (CTXSUFF(pVM->patm.s.pGCState)->Psp != PATM_STACK_SIZE)
        {
            LogFlow(("PATMRawLeave: Reset PATM stack (Psp = %x)\n", CTXSUFF(pVM->patm.s.pGCState)->Psp));
        }
#endif
        CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
    }
}
239
240/**
241 * Get the EFLAGS.
242 * This is a worker for CPUMRawGetEFlags().
243 *
244 * @returns The eflags.
245 * @param pVM Pointer to the VM.
246 * @param pCtx The guest cpu context.
247 */
248VMM_INT_DECL(uint32_t) PATMRawGetEFlags(PVM pVM, PCCPUMCTX pCtx)
249{
250 Assert(!HMIsEnabled(pVM));
251 uint32_t efl = pCtx->eflags.u32;
252 efl &= ~PATM_VIRTUAL_FLAGS_MASK;
253 efl |= pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & PATM_VIRTUAL_FLAGS_MASK;
254 return efl;
255}
256
257/**
258 * Updates the EFLAGS.
259 * This is a worker for CPUMRawSetEFlags().
260 *
261 * @param pVM Pointer to the VM.
262 * @param pCtx The guest cpu context.
263 * @param efl The new EFLAGS value.
264 */
265VMM_INT_DECL(void) PATMRawSetEFlags(PVM pVM, PCPUMCTX pCtx, uint32_t efl)
266{
267 Assert(!HMIsEnabled(pVM));
268 pVM->patm.s.CTXSUFF(pGCState)->uVMFlags = efl & PATM_VIRTUAL_FLAGS_MASK;
269 efl &= ~PATM_VIRTUAL_FLAGS_MASK;
270 efl |= X86_EFL_IF;
271 pCtx->eflags.u32 = efl;
272}
273
274/**
275 * Check if we must use raw mode (patch code being executed)
276 *
277 * @param pVM Pointer to the VM.
278 * @param pAddrGC Guest context address
279 */
280VMM_INT_DECL(bool) PATMShouldUseRawMode(PVM pVM, RTRCPTR pAddrGC)
281{
282 return PATMIsEnabled(pVM)
283 && ( (RTRCUINTPTR)pAddrGC - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC < pVM->patm.s.cbPatchMem
284 || (RTRCUINTPTR)pAddrGC - (RTRCUINTPTR)pVM->patm.s.pbPatchHelpersRC < pVM->patm.s.cbPatchHelpers);
285}
286
/**
 * Returns the guest context pointer of the GC state structure.
 *
 * @returns Raw-mode context address of the PATM GC state, or NIL_RTRCPTR if
 *          HM is enabled (PATM only operates in raw mode).
 * @param   pVM     Pointer to the VM.
 */
VMM_INT_DECL(RCPTRTYPE(PPATMGCSTATE)) PATMGetGCState(PVM pVM)
{
    AssertReturn(!HMIsEnabled(pVM), NIL_RTRCPTR);
    return pVM->patm.s.pGCStateGC;
}
298
299/**
300 * Checks whether the GC address is part of our patch or helper regions.
301 *
302 * @returns VBox status code.
303 * @param pVM Pointer to the VM.
304 * @param uGCAddr Guest context address.
305 * @internal
306 */
307VMMDECL(bool) PATMIsPatchGCAddr(PVM pVM, RTRCUINTPTR uGCAddr)
308{
309 return PATMIsEnabled(pVM)
310 && ( uGCAddr - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC < pVM->patm.s.cbPatchMem
311 || uGCAddr - (RTRCUINTPTR)pVM->patm.s.pbPatchHelpersRC < pVM->patm.s.cbPatchHelpers);
312}
313
314/**
315 * Checks whether the GC address is part of our patch region.
316 *
317 * @returns VBox status code.
318 * @param pVM Pointer to the VM.
319 * @param uGCAddr Guest context address.
320 * @internal
321 */
322VMMDECL(bool) PATMIsPatchGCAddrExclHelpers(PVM pVM, RTRCUINTPTR uGCAddr)
323{
324 return PATMIsEnabled(pVM)
325 && uGCAddr - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC < pVM->patm.s.cbPatchMem;
326}
327
/**
 * Reads patch code.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PATCH_NOT_FOUND if the request is entirely outside the patch
 *          code.
 *
 * @param   pVM             The cross context VM structure.
 * @param   GCPtrPatchCode  The patch address to start reading at.
 * @param   pvDst           Where to return the patch code.
 * @param   cbToRead        Number of bytes to read.
 * @param   pcbRead         Where to return the actual number of bytes we've
 *                          read. Optional.
 */
VMM_INT_DECL(int) PATMReadPatchCode(PVM pVM, RTGCPTR GCPtrPatchCode, void *pvDst, size_t cbToRead, size_t *pcbRead)
{
    /* Shortcut. */
    if (!PATMIsEnabled(pVM))
        return VERR_PATCH_NOT_FOUND;
    Assert(!HMIsEnabled(pVM));

    /*
     * Check patch code and patch helper code. We assume the requested bytes
     * are not in either.
     */
    /* offPatchCode is unsigned: addresses below the region base wrap around
       and fail the size comparison, so one compare checks both bounds. */
    RTGCPTR offPatchCode = GCPtrPatchCode - (RTGCPTR32)pVM->patm.s.pPatchMemGC;
    if (offPatchCode >= pVM->patm.s.cbPatchMem)
    {
        offPatchCode = GCPtrPatchCode - (RTGCPTR32)pVM->patm.s.pbPatchHelpersRC;
        if (offPatchCode >= pVM->patm.s.cbPatchHelpers)
            return VERR_PATCH_NOT_FOUND;

        /*
         * Patch helper memory.
         */
        /* Clip the read to the end of the helper region. */
        uint32_t cbMaxRead = pVM->patm.s.cbPatchHelpers - (uint32_t)offPatchCode;
        if (cbToRead > cbMaxRead)
            cbToRead = cbMaxRead;
#ifdef IN_RC
        memcpy(pvDst, pVM->patm.s.pbPatchHelpersRC + (uint32_t)offPatchCode, cbToRead);
#else
        memcpy(pvDst, pVM->patm.s.pbPatchHelpersR3 + (uint32_t)offPatchCode, cbToRead);
#endif
    }
    else
    {
        /*
         * Patch memory.
         */
        /* Clip the read to the end of patch memory. */
        uint32_t cbMaxRead = pVM->patm.s.cbPatchMem - (uint32_t)offPatchCode;
        if (cbToRead > cbMaxRead)
            cbToRead = cbMaxRead;
#ifdef IN_RC
        memcpy(pvDst, pVM->patm.s.pPatchMemGC + (uint32_t)offPatchCode, cbToRead);
#else
        memcpy(pvDst, pVM->patm.s.pPatchMemHC + (uint32_t)offPatchCode, cbToRead);
#endif
    }

    if (pcbRead)
        *pcbRead = cbToRead;
    return VINF_SUCCESS;
}
391
392/**
393 * Set parameters for pending MMIO patch operation
394 *
395 * @returns VBox status code.
396 * @param pDevIns Device instance.
397 * @param GCPhys MMIO physical address
398 * @param pCachedData GC pointer to cached data
399 */
400VMM_INT_DECL(int) PATMSetMMIOPatchInfo(PVM pVM, RTGCPHYS GCPhys, RTRCPTR pCachedData)
401{
402 if (!HMIsEnabled(pVM))
403 {
404 pVM->patm.s.mmio.GCPhys = GCPhys;
405 pVM->patm.s.mmio.pCachedData = (RTRCPTR)pCachedData;
406 }
407
408 return VINF_SUCCESS;
409}
410
411/**
412 * Checks if the interrupt flag is enabled or not.
413 *
414 * @returns true if it's enabled.
415 * @returns false if it's disabled.
416 *
417 * @param pVM Pointer to the VM.
418 * @todo CPUM should wrap this, EM.cpp shouldn't call us.
419 */
420VMM_INT_DECL(bool) PATMAreInterruptsEnabled(PVM pVM)
421{
422 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));
423
424 return PATMAreInterruptsEnabledByCtx(pVM, pCtx);
425}
426
427/**
428 * Checks if the interrupt flag is enabled or not.
429 *
430 * @returns true if it's enabled.
431 * @returns false if it's disabled.
432 *
433 * @param pVM Pointer to the VM.
434 * @param pCtx The guest CPU context.
435 * @todo CPUM should wrap this, EM.cpp shouldn't call us.
436 */
437VMM_INT_DECL(bool) PATMAreInterruptsEnabledByCtx(PVM pVM, PCPUMCTX pCtx)
438{
439 if (PATMIsEnabled(pVM))
440 {
441 Assert(!HMIsEnabled(pVM));
442 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
443 return false;
444 }
445 return !!(pCtx->eflags.u32 & X86_EFL_IF);
446}
447
448/**
449 * Check if the instruction is patched as a duplicated function
450 *
451 * @returns patch record
452 * @param pVM Pointer to the VM.
453 * @param pInstrGC Guest context point to the instruction
454 *
455 */
456PPATMPATCHREC patmQueryFunctionPatch(PVM pVM, RTRCPTR pInstrGC)
457{
458 PPATMPATCHREC pRec;
459
460 AssertCompile(sizeof(AVLOU32KEY) == sizeof(pInstrGC));
461 pRec = (PPATMPATCHREC)RTAvloU32Get(&CTXSUFF(pVM->patm.s.PatchLookupTree)->PatchTree, (AVLOU32KEY)pInstrGC);
462 if ( pRec
463 && (pRec->patch.uState == PATCH_ENABLED)
464 && (pRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CALLABLE_AS_FUNCTION))
465 )
466 return pRec;
467 return 0;
468}
469
470/**
471 * Checks if the int 3 was caused by a patched instruction
472 *
473 * @returns VBox status
474 *
475 * @param pVM Pointer to the VM.
476 * @param pInstrGC Instruction pointer
477 * @param pOpcode Original instruction opcode (out, optional)
478 * @param pSize Original instruction size (out, optional)
479 */
480VMM_INT_DECL(bool) PATMIsInt3Patch(PVM pVM, RTRCPTR pInstrGC, uint32_t *pOpcode, uint32_t *pSize)
481{
482 PPATMPATCHREC pRec;
483 Assert(!HMIsEnabled(pVM));
484
485 pRec = (PPATMPATCHREC)RTAvloU32Get(&CTXSUFF(pVM->patm.s.PatchLookupTree)->PatchTree, (AVLOU32KEY)pInstrGC);
486 if ( pRec
487 && (pRec->patch.uState == PATCH_ENABLED)
488 && (pRec->patch.flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
489 )
490 {
491 if (pOpcode) *pOpcode = pRec->patch.opcode;
492 if (pSize) *pSize = pRec->patch.cbPrivInstr;
493 return true;
494 }
495 return false;
496}
497
/**
 * Emulate sysenter, sysexit and syscall instructions
 *
 * @returns VINF_SUCCESS when the instruction was handled via a patch,
 *          VINF_EM_RAW_RING_SWITCH otherwise (caller falls back to the
 *          ordinary ring-switch path).
 *
 * @param   pVM     Pointer to the VM.
 * @param   pCtx    The relevant guest cpu context.
 * @param   pCpu    Disassembly state.
 */
VMMDECL(int) PATMSysCall(PVM pVM, PCPUMCTX pCtx, PDISCPUSTATE pCpu)
{
    Assert(CPUMQueryGuestCtxPtr(VMMGetCpu0(pVM)) == pCtx);
    AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);

    if (pCpu->pCurInstr->uOpcode == OP_SYSENTER)
    {
        /* Only take the patched fast path when sysenter is configured, the
           caller is a plain ring-3 context, the installed patch matches the
           current MSR target, and virtualized interrupts are enabled. */
        if (    pCtx->SysEnter.cs == 0
            ||  pCtx->eflags.Bits.u1VM
            ||  (pCtx->cs.Sel & X86_SEL_RPL) != 3
            ||  pVM->patm.s.pfnSysEnterPatchGC == 0
            ||  pVM->patm.s.pfnSysEnterGC != (RTRCPTR)(RTRCUINTPTR)pCtx->SysEnter.eip
            ||  !(PATMRawGetEFlags(pVM, pCtx) & X86_EFL_IF))
            goto end;

        Log2(("PATMSysCall: sysenter from %RRv to %RRv\n", pCtx->eip, pVM->patm.s.pfnSysEnterPatchGC));
        /** @todo the base and limit are forced to 0 & 4G-1 resp. We assume the selector is wide open here. */
        /** @note The Intel manual suggests that the OS is responsible for this. */
        pCtx->cs.Sel = (pCtx->SysEnter.cs & ~X86_SEL_RPL) | 1;
        pCtx->eip  = /** @todo ugly conversion! */(uint32_t)pVM->patm.s.pfnSysEnterPatchGC;
        pCtx->ss.Sel = pCtx->cs.Sel + 8;     /* SysEnter.cs + 8 */
        pCtx->esp  = pCtx->SysEnter.esp;
        pCtx->eflags.u32 &= ~(X86_EFL_VM | X86_EFL_RF);
        pCtx->eflags.u32 |= X86_EFL_IF;

        /* Turn off interrupts. */
        pVM->patm.s.CTXSUFF(pGCState)->uVMFlags &= ~X86_EFL_IF;

        STAM_COUNTER_INC(&pVM->patm.s.StatSysEnter);

        return VINF_SUCCESS;
    }
    if (pCpu->pCurInstr->uOpcode == OP_SYSEXIT)
    {
        /* Must be executed from ring 1 (patch code) with sysenter configured
           and virtualized interrupts enabled. */
        if (    pCtx->SysEnter.cs == 0
            ||  (pCtx->cs.Sel & X86_SEL_RPL) != 1
            ||  pCtx->eflags.Bits.u1VM
            ||  !(PATMRawGetEFlags(pVM, pCtx) & X86_EFL_IF))
            goto end;

        Log2(("PATMSysCall: sysexit from %RRv to %RRv\n", pCtx->eip, pCtx->edx));

        /* Return to ring 3: edx holds the return EIP, ecx the stack pointer. */
        pCtx->cs.Sel = ((pCtx->SysEnter.cs + 16) & ~X86_SEL_RPL) | 3;
        pCtx->eip  = pCtx->edx;
        pCtx->ss.Sel = pCtx->cs.Sel + 8;     /* SysEnter.cs + 24 */
        pCtx->esp  = pCtx->ecx;

        STAM_COUNTER_INC(&pVM->patm.s.StatSysExit);

        return VINF_SUCCESS;
    }
    if (pCpu->pCurInstr->uOpcode == OP_SYSCALL)
    {
        /** @todo implement syscall */
    }
    else
    if (pCpu->pCurInstr->uOpcode == OP_SYSRET)
    {
        /** @todo implement sysret */
    }

end:
    return VINF_EM_RAW_RING_SWITCH;
}
571
/**
 * Adds branch pair to the lookup cache of the particular branch instruction
 *
 * @returns VBox status code (VINF_SUCCESS, or VERR_INVALID_PARAMETER /
 *          VERR_INTERNAL_ERROR on inconsistent input).
 * @param   pVM             Pointer to the VM.
 * @param   pJumpTableGC    Pointer to branch instruction lookup cache.
 * @param   pBranchTarget   Original branch target.
 * @param   pRelBranchPatch Relative duplicated function address.
 */
int patmAddBranchToLookupCache(PVM pVM, RTRCPTR pJumpTableGC, RTRCPTR pBranchTarget, RTRCUINTPTR pRelBranchPatch)
{
    PPATCHJUMPTABLE pJumpTable;

    Log(("PATMAddBranchToLookupCache: Adding (%RRv->%RRv (%RRv)) to table %RRv\n", pBranchTarget, pRelBranchPatch + pVM->patm.s.pPatchMemGC, pRelBranchPatch, pJumpTableGC));

    AssertReturn(PATMIsPatchGCAddr(pVM, (RTRCUINTPTR)pJumpTableGC), VERR_INVALID_PARAMETER);

    /* The table lives in patch memory: usable directly in RC, translated to
       the ring-3 mapping of patch memory otherwise. */
#ifdef IN_RC
    pJumpTable = (PPATCHJUMPTABLE) pJumpTableGC;
#else
    pJumpTable = (PPATCHJUMPTABLE) (pJumpTableGC - pVM->patm.s.pPatchMemGC + pVM->patm.s.pPatchMemHC);
#endif
    Log(("Nr addresses = %d, insert pos = %d\n", pJumpTable->cAddresses, pJumpTable->ulInsertPos));
    if (pJumpTable->cAddresses < pJumpTable->nrSlots)
    {
        /* Free slot available: place the pair in the first empty slot. */
        uint32_t i;

        for (i=0;i<pJumpTable->nrSlots;i++)
        {
            if (pJumpTable->Slot[i].pInstrGC == 0)
            {
                pJumpTable->Slot[i].pInstrGC    = pBranchTarget;
                /* Relative address - eases relocation */
                pJumpTable->Slot[i].pRelPatchGC = pRelBranchPatch;
                pJumpTable->cAddresses++;
                break;
            }
        }
        /* cAddresses < nrSlots guarantees an empty slot was found. */
        AssertReturn(i < pJumpTable->nrSlots, VERR_INTERNAL_ERROR);
#ifdef VBOX_WITH_STATISTICS
        STAM_COUNTER_INC(&pVM->patm.s.StatFunctionLookupInsert);
        if (pVM->patm.s.StatU32FunctionMaxSlotsUsed < i)
            pVM->patm.s.StatU32FunctionMaxSlotsUsed = i + 1;
#endif
    }
    else
    {
        /* Replace an old entry. */
        /** @todo replacement strategy isn't really bright. change to something better if required. */
        Assert(pJumpTable->ulInsertPos < pJumpTable->nrSlots);
        /* nrSlots must be a power of two for the '& (nrSlots-1)' wrap below. */
        Assert((pJumpTable->nrSlots & 1) == 0);

        pJumpTable->ulInsertPos &= (pJumpTable->nrSlots-1);
        pJumpTable->Slot[pJumpTable->ulInsertPos].pInstrGC    = pBranchTarget;
        /* Relative address - eases relocation */
        pJumpTable->Slot[pJumpTable->ulInsertPos].pRelPatchGC = pRelBranchPatch;

        pJumpTable->ulInsertPos = (pJumpTable->ulInsertPos+1) & (pJumpTable->nrSlots-1);

        STAM_COUNTER_INC(&pVM->patm.s.StatFunctionLookupReplace);
    }

    return VINF_SUCCESS;
}
636
637
#if defined(VBOX_WITH_STATISTICS) || defined(LOG_ENABLED)
/**
 * Return the name of the patched instruction
 *
 * @returns Instruction name, or NULL for opcodes without a mapping.
 *
 * @param   opcode      DIS instruction opcode.
 * @param   fPatchFlags Patch flags.
 */
const char *patmGetInstructionString(uint32_t opcode, uint32_t fPatchFlags)
{
    switch (opcode)
    {
        case OP_CLI:        return "cli";
        case OP_PUSHF:      return "pushf";
        case OP_POPF:       return "popf";
        case OP_STR:        return "str";
        case OP_LSL:        return "lsl";
        case OP_LAR:        return "lar";
        case OP_SGDT:       return "sgdt";
        case OP_SLDT:       return "sldt";
        case OP_SIDT:       return "sidt";
        case OP_SMSW:       return "smsw";
        case OP_VERW:       return "verw";
        case OP_VERR:       return "verr";
        case OP_CPUID:      return "cpuid";
        case OP_JMP:        return "jmp";
        case OP_JO:         return "jo";
        case OP_JNO:        return "jno";
        case OP_JC:         return "jc";
        case OP_JNC:        return "jnc";
        case OP_JE:         return "je";
        case OP_JNE:        return "jne";
        case OP_JBE:        return "jbe";
        case OP_JNBE:       return "jnbe";
        case OP_JS:         return "js";
        case OP_JNS:        return "jns";
        case OP_JP:         return "jp";
        case OP_JNP:        return "jnp";
        case OP_JL:         return "jl";
        case OP_JNL:        return "jnl";
        case OP_JLE:        return "jle";
        case OP_JNLE:       return "jnle";
        case OP_JECXZ:      return "jecxz";
        case OP_LOOP:       return "loop";
        case OP_LOOPNE:     return "loopne";
        case OP_LOOPE:      return "loope";
        case OP_MOV:
            /* A mov is patched either as an IDT handler entry or a cs load. */
            return fPatchFlags & PATMFL_IDTHANDLER ? "mov (Int/Trap Handler)" : "mov (cs)";
        case OP_SYSENTER:   return "sysenter";
        case OP_PUSH:       return "push (cs)";
        case OP_CALL:       return "call";
        case OP_IRET:       return "iret";
        default:            return NULL;
    }
}
#endif
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette