VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PATMAll.cpp@55979

Last change on this file since 55979 was 55937, checked in by vboxsync, 10 years ago

CSAM,PATM: Changed csamRCCodePageWritePfHandler to store the pvFault address in pvDirtyFaultPage, and changed csamR3FlushDirtyPages to mark it (rather than pvDirtyBasePage) read-only (+ tell REM about it). Preparing ring-3 access handlers for raw-mode.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 25.1 KB
/* $Id: PATMAll.cpp 55937 2015-05-19 14:27:00Z vboxsync $ */
/** @file
 * PATM - The Patch Manager, all contexts.
 */

/*
 * Copyright (C) 2006-2015 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PATM
#include <VBox/vmm/patm.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/mm.h>
#include "PATMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/vmm.h>
#include "PATMA.h"

#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/string.h>

/**
 * Access handler callback for virtual access handler ranges.
 *
 * Important to realize that a physical page in a range can have aliases, and
 * for ALL and WRITE handlers these will also trigger.
 *
 * @returns VINF_SUCCESS if the handler has carried out the operation.
 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
 * @param   pVM             Pointer to the VM.
 * @param   pVCpu           Pointer to the cross context CPU context for the
 *                          calling EMT.
 * @param   GCPtr           The virtual address the guest is writing to. (Not correct if it's an alias!)
 * @param   pvPtr           The HC mapping of that address.
 * @param   pvBuf           What the guest is reading/writing.
 * @param   cbBuf           How much it's reading/writing.
 * @param   enmAccessType   The access type.
 * @param   enmOrigin       Who is making this write.
 * @param   pvUser          The address of the guest page we're monitoring.
 */
PGM_ALL_CB2_DECL(int) patmVirtPageHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
                                          PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
{
    Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
    NOREF(pvPtr); NOREF(pvBuf); NOREF(cbBuf); NOREF(enmOrigin); NOREF(pvUser);
    Assert(pvUser); Assert(!((uintptr_t)pvUser & PAGE_OFFSET_MASK));

    pVM->patm.s.pvFaultMonitor = (RTRCPTR)((uintptr_t)pvUser + (GCPtr & PAGE_OFFSET_MASK));
#ifdef IN_RING3
    PATMR3HandleMonitoredPage(pVM);
    return VINF_PGM_HANDLER_DO_DEFAULT;
#else
    /* RC: Go handle this in ring-3. */
    return VINF_PATM_CHECK_PATCH_PAGE;
#endif
}

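/*
 * Illustrative sketch (not part of the original file): how the handler above
 * reconstructs the monitored guest address.  pvUser holds the page-aligned
 * guest page registered with the handler; GCPtr may arrive through an alias
 * mapping, so only its page-offset bits are trusted and combined with the
 * registered page address.  The helper name is made up for the example.
 */
#if 0 /* example only */
static RTRCPTR patmExampleMonitoredAddr(void *pvUser, RTGCPTR GCPtr)
{
    /* Page-aligned base captured at registration time... */
    uintptr_t uPage   = (uintptr_t)pvUser & ~(uintptr_t)PAGE_OFFSET_MASK;
    /* ...plus the offset within the page taken from the (possibly aliased) access. */
    uintptr_t offPage = GCPtr & PAGE_OFFSET_MASK;
    return (RTRCPTR)(uPage + offPage);
}
#endif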

/**
 * Load virtualized flags.
 *
 * This function is called from CPUMRawEnter(). It doesn't have to update the
 * IF and IOPL EFLAGS bits; the caller will force them to be set and cleared (0), respectively.
 *
 * @param   pVM     Pointer to the VM.
 * @param   pCtx    The CPU context.
 * @see     @ref pg_raw
 */
VMM_INT_DECL(void) PATMRawEnter(PVM pVM, PCPUMCTX pCtx)
{
    Assert(!HMIsEnabled(pVM));

    /*
     * Currently we don't bother to check whether PATM is enabled or not.
     * For all cases where it isn't, IOPL will be safe and IF will be set.
     */
    uint32_t efl = pCtx->eflags.u32;
    CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = efl & PATM_VIRTUAL_FLAGS_MASK;

    AssertMsg((efl & X86_EFL_IF) || PATMShouldUseRawMode(pVM, (RTRCPTR)pCtx->eip),
              ("X86_EFL_IF is clear and PATM is disabled! (eip=%RRv eflags=%08x fPATM=%d pPATMGC=%RRv-%RRv)\n",
               pCtx->eip, pCtx->eflags.u32, PATMIsEnabled(pVM), pVM->patm.s.pPatchMemGC,
               pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem));

    AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || PATMIsPatchGCAddr(pVM, pCtx->eip),
                     ("fPIF=%d eip=%RRv\n", pVM->patm.s.CTXSUFF(pGCState)->fPIF, pCtx->eip));

    efl &= ~PATM_VIRTUAL_FLAGS_MASK;
    efl |= X86_EFL_IF;
    pCtx->eflags.u32 = efl;

#ifdef IN_RING3
# ifdef PATM_EMULATE_SYSENTER
    /* Check if the sysenter handler has changed. (pCtx is already the guest context here.) */
    if (    pCtx->SysEnter.cs != 0
        &&  pCtx->SysEnter.eip != 0
       )
    {
        if (pVM->patm.s.pfnSysEnterGC != (RTRCPTR)pCtx->SysEnter.eip)
        {
            pVM->patm.s.pfnSysEnterPatchGC = 0;
            pVM->patm.s.pfnSysEnterGC = 0;

            Log2(("PATMRawEnter: installing sysenter patch for %RRv\n", pCtx->SysEnter.eip));
            pVM->patm.s.pfnSysEnterPatchGC = PATMR3QueryPatchGCPtr(pVM, pCtx->SysEnter.eip);
            if (pVM->patm.s.pfnSysEnterPatchGC == 0)
            {
                int rc = PATMR3InstallPatch(pVM, pCtx->SysEnter.eip, PATMFL_SYSENTER | PATMFL_CODE32);
                if (rc == VINF_SUCCESS)
                {
                    pVM->patm.s.pfnSysEnterPatchGC = PATMR3QueryPatchGCPtr(pVM, pCtx->SysEnter.eip);
                    pVM->patm.s.pfnSysEnterGC = (RTRCPTR)pCtx->SysEnter.eip;
                    Assert(pVM->patm.s.pfnSysEnterPatchGC);
                }
            }
            else
                pVM->patm.s.pfnSysEnterGC = (RTRCPTR)pCtx->SysEnter.eip;
        }
    }
    else
    {
        pVM->patm.s.pfnSysEnterPatchGC = 0;
        pVM->patm.s.pfnSysEnterGC = 0;
    }
# endif /* PATM_EMULATE_SYSENTER */
#endif
}

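/*
 * Illustrative sketch (not part of the original file): the flag split that
 * PATMRawEnter performs.  The guest-visible bits covered by
 * PATM_VIRTUAL_FLAGS_MASK (notably IF) are parked in the GC state structure,
 * while the flags actually loaded on the CPU get IF forced to 1 so guest code
 * running raw cannot disable host interrupts.  The helper name is made up.
 */
#if 0 /* example only */
static void patmExampleSplitFlags(uint32_t fEflGuest, uint32_t *pfVirtual, uint32_t *pfReal)
{
    *pfVirtual = fEflGuest & PATM_VIRTUAL_FLAGS_MASK;                   /* shadowed for the guest */
    *pfReal    = (fEflGuest & ~PATM_VIRTUAL_FLAGS_MASK) | X86_EFL_IF;   /* what the CPU runs with */
}
#endif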

/**
 * Restores virtualized flags.
 *
 * This function is called from CPUMRawLeave(). It will update the EFLAGS register.
 *
 * @note    Only here are we allowed to switch back to guest code (without a special reason such as a trap in patch code)!!
 *
 * @param   pVM     Pointer to the VM.
 * @param   pCtx    The CPU context.
 * @param   rawRC   Raw-mode return code.
 * @see     @ref pg_raw
 */
VMM_INT_DECL(void) PATMRawLeave(PVM pVM, PCPUMCTX pCtx, int rawRC)
{
    Assert(!HMIsEnabled(pVM));
    bool fPatchCode = PATMIsPatchGCAddr(pVM, pCtx->eip);

    /*
     * We will only be called if PATMRawEnter was previously called.
     */
    uint32_t efl = pCtx->eflags.u32;
    efl = (efl & ~PATM_VIRTUAL_FLAGS_MASK) | (CTXSUFF(pVM->patm.s.pGCState)->uVMFlags & PATM_VIRTUAL_FLAGS_MASK);
    pCtx->eflags.u32 = efl;
    CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = X86_EFL_IF;

    AssertReleaseMsg((efl & X86_EFL_IF) || fPatchCode || rawRC == VINF_PATM_PENDING_IRQ_AFTER_IRET || RT_FAILURE(rawRC),
                     ("Inconsistent state at %RRv rc=%Rrc\n", pCtx->eip, rawRC));
    AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || fPatchCode || RT_FAILURE(rawRC),
                     ("fPIF=%d eip=%RRv rc=%Rrc\n", CTXSUFF(pVM->patm.s.pGCState)->fPIF, pCtx->eip, rawRC));

#ifdef IN_RING3
    if (   (efl & X86_EFL_IF)
        && fPatchCode)
    {
        if (   rawRC < VINF_PATM_LEAVE_RC_FIRST
            || rawRC > VINF_PATM_LEAVE_RC_LAST)
        {
            /*
             * Golden rules:
             * - Don't interrupt special patch streams that replace special instructions
             * - Don't break instruction fusing (sti, pop ss, mov ss)
             * - Don't go back to an instruction that has been overwritten by a patch jump
             * - Don't interrupt an idt handler on entry (1st instruction); technically incorrect
             */
            if (CTXSUFF(pVM->patm.s.pGCState)->fPIF == 1) /* consistent patch instruction state */
            {
                PATMTRANSSTATE enmState;
                RTRCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtx->eip, &enmState);

                AssertRelease(pOrgInstrGC);

                Assert(enmState != PATMTRANS_OVERWRITTEN);
                if (enmState == PATMTRANS_SAFE)
                {
                    Assert(!patmFindActivePatchByEntrypoint(pVM, pOrgInstrGC));
                    Log(("Switchback from %RRv to %RRv (Psp=%x)\n", pCtx->eip, pOrgInstrGC, CTXSUFF(pVM->patm.s.pGCState)->Psp));
                    STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBack);
                    pCtx->eip = pOrgInstrGC;
                    fPatchCode = false; /* to reset the stack ptr */

                    CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0; /* Reset this pointer; safe otherwise the state would be PATMTRANS_INHIBITIRQ. */
                }
                else
                {
                    LogFlow(("Patch address %RRv can't be interrupted (state=%d)!\n", pCtx->eip, enmState));
                    STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBackFail);
                }
            }
            else
            {
                LogFlow(("Patch address %RRv can't be interrupted (fPIF=%d)!\n", pCtx->eip, CTXSUFF(pVM->patm.s.pGCState)->fPIF));
                STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBackFail);
            }
        }
    }
#else  /* !IN_RING3 */
    /*
     * When leaving raw-mode state while IN_RC, it's generally for interpreting
     * a single original guest instruction.
     */
    AssertMsg(!fPatchCode, ("eip=%RRv\n", pCtx->eip));
#endif /* !IN_RING3 */

    if (!fPatchCode)
    {
        if (CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts == (RTRCPTR)pCtx->eip)
        {
            EMSetInhibitInterruptsPC(VMMGetCpu0(pVM), pCtx->eip);
        }
        CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0;

        /* Reset the stack pointer to the top of the stack. */
#ifdef DEBUG
        if (CTXSUFF(pVM->patm.s.pGCState)->Psp != PATM_STACK_SIZE)
        {
            LogFlow(("PATMRawLeave: Reset PATM stack (Psp = %x)\n", CTXSUFF(pVM->patm.s.pGCState)->Psp));
        }
#endif
        CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
    }
}

/**
 * Get the EFLAGS.
 * This is a worker for CPUMRawGetEFlags().
 *
 * @returns The EFLAGS.
 * @param   pVM     Pointer to the VM.
 * @param   pCtx    The guest CPU context.
 */
VMM_INT_DECL(uint32_t) PATMRawGetEFlags(PVM pVM, PCCPUMCTX pCtx)
{
    Assert(!HMIsEnabled(pVM));
    uint32_t efl = pCtx->eflags.u32;
    efl &= ~PATM_VIRTUAL_FLAGS_MASK;
    efl |= pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & PATM_VIRTUAL_FLAGS_MASK;
    return efl;
}

/**
 * Updates the EFLAGS.
 * This is a worker for CPUMRawSetEFlags().
 *
 * @param   pVM     Pointer to the VM.
 * @param   pCtx    The guest CPU context.
 * @param   efl     The new EFLAGS value.
 */
VMM_INT_DECL(void) PATMRawSetEFlags(PVM pVM, PCPUMCTX pCtx, uint32_t efl)
{
    Assert(!HMIsEnabled(pVM));
    pVM->patm.s.CTXSUFF(pGCState)->uVMFlags = efl & PATM_VIRTUAL_FLAGS_MASK;
    efl &= ~PATM_VIRTUAL_FLAGS_MASK;
    efl |= X86_EFL_IF;
    pCtx->eflags.u32 = efl;
}

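/*
 * Illustrative sketch (not part of the original file): the two workers above
 * are inverses of each other over PATM_VIRTUAL_FLAGS_MASK.  Setting EFLAGS
 * with IF clear parks the cleared IF in uVMFlags while the CPU keeps IF=1;
 * reading the flags back substitutes the parked bits again.  The helper name
 * is made up for the example.
 */
#if 0 /* example only */
static void patmExampleRoundTrip(PVM pVM, PCPUMCTX pCtx)
{
    PATMRawSetEFlags(pVM, pCtx, pCtx->eflags.u32 & ~X86_EFL_IF); /* guest executed "cli" */
    Assert(pCtx->eflags.u32 & X86_EFL_IF);                       /* real IF stays set */
    Assert(!(PATMRawGetEFlags(pVM, pCtx) & X86_EFL_IF));         /* guest sees IF = 0 */
}
#endif
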
/**
 * Checks if we must use raw mode (patch code being executed).
 *
 * @returns true if raw mode must be used, false otherwise.
 * @param   pVM     Pointer to the VM.
 * @param   pAddrGC Guest context address.
 */
VMM_INT_DECL(bool) PATMShouldUseRawMode(PVM pVM, RTRCPTR pAddrGC)
{
    return PATMIsEnabled(pVM)
        && (   (RTRCUINTPTR)pAddrGC - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC      < pVM->patm.s.cbPatchMem
            || (RTRCUINTPTR)pAddrGC - (RTRCUINTPTR)pVM->patm.s.pbPatchHelpersRC < pVM->patm.s.cbPatchHelpers);
}

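/*
 * Illustrative note (not part of the original file): the single comparison in
 * PATMShouldUseRawMode relies on unsigned wrap-around.  For an address below
 * the region base the subtraction wraps to a huge value, so one compare
 * covers both "addr >= base" and "addr < base + size".  The helper name is
 * made up for the example.
 */
#if 0 /* example only */
static bool patmExampleInRange(RTRCUINTPTR uAddr, RTRCUINTPTR uBase, uint32_t cbRange)
{
    return uAddr - uBase < cbRange; /* unsigned: wraps for uAddr < uBase */
}
#endif
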
/**
 * Returns the guest context pointer to the GC state structure.
 *
 * @returns Guest context pointer to the GC state structure, or NIL_RTRCPTR
 *          when raw-mode is not in use.
 * @param   pVM     Pointer to the VM.
 */
VMM_INT_DECL(RCPTRTYPE(PPATMGCSTATE)) PATMGetGCState(PVM pVM)
{
    AssertReturn(!HMIsEnabled(pVM), NIL_RTRCPTR);
    return pVM->patm.s.pGCStateGC;
}

/**
 * Checks whether the GC address is part of our patch or helper regions.
 *
 * @returns true if the address lies within the patch or helper memory, false otherwise.
 * @param   pVM     Pointer to the VM.
 * @param   uGCAddr Guest context address.
 * @internal
 */
VMMDECL(bool) PATMIsPatchGCAddr(PVM pVM, RTRCUINTPTR uGCAddr)
{
    return PATMIsEnabled(pVM)
        && (   uGCAddr - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC      < pVM->patm.s.cbPatchMem
            || uGCAddr - (RTRCUINTPTR)pVM->patm.s.pbPatchHelpersRC < pVM->patm.s.cbPatchHelpers);
}

/**
 * Checks whether the GC address is part of our patch region (excluding the helpers).
 *
 * @returns true if the address lies within the patch memory, false otherwise.
 * @param   pVM     Pointer to the VM.
 * @param   uGCAddr Guest context address.
 * @internal
 */
VMMDECL(bool) PATMIsPatchGCAddrExclHelpers(PVM pVM, RTRCUINTPTR uGCAddr)
{
    return PATMIsEnabled(pVM)
        && uGCAddr - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC < pVM->patm.s.cbPatchMem;
}

/**
 * Reads patch code.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PATCH_NOT_FOUND if the request is entirely outside the patch
 *          code.
 *
 * @param   pVM             The cross context VM structure.
 * @param   GCPtrPatchCode  The patch address to start reading at.
 * @param   pvDst           Where to return the patch code.
 * @param   cbToRead        Number of bytes to read.
 * @param   pcbRead         Where to return the actual number of bytes we've
 *                          read. Optional.
 */
VMM_INT_DECL(int) PATMReadPatchCode(PVM pVM, RTGCPTR GCPtrPatchCode, void *pvDst, size_t cbToRead, size_t *pcbRead)
{
    /* Shortcut. */
    if (!PATMIsEnabled(pVM))
        return VERR_PATCH_NOT_FOUND;
    Assert(!HMIsEnabled(pVM));

    /*
     * Check patch code and patch helper code.  We assume the requested range
     * lies entirely within one of them; reads are truncated at the end of the
     * region the start address falls into.
     */
    RTGCPTR offPatchCode = GCPtrPatchCode - (RTGCPTR32)pVM->patm.s.pPatchMemGC;
    if (offPatchCode >= pVM->patm.s.cbPatchMem)
    {
        offPatchCode = GCPtrPatchCode - (RTGCPTR32)pVM->patm.s.pbPatchHelpersRC;
        if (offPatchCode >= pVM->patm.s.cbPatchHelpers)
            return VERR_PATCH_NOT_FOUND;

        /*
         * Patch helper memory.
         */
        uint32_t cbMaxRead = pVM->patm.s.cbPatchHelpers - (uint32_t)offPatchCode;
        if (cbToRead > cbMaxRead)
            cbToRead = cbMaxRead;
#ifdef IN_RC
        memcpy(pvDst, pVM->patm.s.pbPatchHelpersRC + (uint32_t)offPatchCode, cbToRead);
#else
        memcpy(pvDst, pVM->patm.s.pbPatchHelpersR3 + (uint32_t)offPatchCode, cbToRead);
#endif
    }
    else
    {
        /*
         * Patch memory.
         */
        uint32_t cbMaxRead = pVM->patm.s.cbPatchMem - (uint32_t)offPatchCode;
        if (cbToRead > cbMaxRead)
            cbToRead = cbMaxRead;
#ifdef IN_RC
        memcpy(pvDst, pVM->patm.s.pPatchMemGC + (uint32_t)offPatchCode, cbToRead);
#else
        memcpy(pvDst, pVM->patm.s.pPatchMemHC + (uint32_t)offPatchCode, cbToRead);
#endif
    }

    if (pcbRead)
        *pcbRead = cbToRead;
    return VINF_SUCCESS;
}

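/*
 * Illustrative usage sketch (not part of the original file): reading a few
 * bytes of patch code and honouring the truncation reported through pcbRead.
 * The helper name is made up for the example.
 */
#if 0 /* example only */
static int patmExampleReadOpcodeBytes(PVM pVM, RTGCPTR GCPtrPatch)
{
    uint8_t abInstr[16];
    size_t  cbRead = 0;
    int rc = PATMReadPatchCode(pVM, GCPtrPatch, abInstr, sizeof(abInstr), &cbRead);
    if (RT_SUCCESS(rc))
        Log(("Read %u patch byte(s) at %RGv\n", (unsigned)cbRead, GCPtrPatch)); /* cbRead may be < 16 near a region end */
    return rc;
}
#endif
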
/**
 * Sets the parameters for a pending MMIO patch operation.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   GCPhys      MMIO physical address.
 * @param   pCachedData GC pointer to the cached data.
 */
VMM_INT_DECL(int) PATMSetMMIOPatchInfo(PVM pVM, RTGCPHYS GCPhys, RTRCPTR pCachedData)
{
    if (!HMIsEnabled(pVM))
    {
        pVM->patm.s.mmio.GCPhys = GCPhys;
        pVM->patm.s.mmio.pCachedData = (RTRCPTR)pCachedData;
    }

    return VINF_SUCCESS;
}

/**
 * Checks if the interrupt flag is enabled or not.
 *
 * @returns true if it's enabled.
 * @returns false if it's disabled.
 *
 * @param   pVM     Pointer to the VM.
 * @todo    CPUM should wrap this, EM.cpp shouldn't call us.
 */
VMM_INT_DECL(bool) PATMAreInterruptsEnabled(PVM pVM)
{
    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));

    return PATMAreInterruptsEnabledByCtx(pVM, pCtx);
}

/**
 * Checks if the interrupt flag is enabled or not.
 *
 * @returns true if it's enabled.
 * @returns false if it's disabled.
 *
 * @param   pVM     Pointer to the VM.
 * @param   pCtx    The guest CPU context.
 * @todo    CPUM should wrap this, EM.cpp shouldn't call us.
 */
VMM_INT_DECL(bool) PATMAreInterruptsEnabledByCtx(PVM pVM, PCPUMCTX pCtx)
{
    if (PATMIsEnabled(pVM))
    {
        Assert(!HMIsEnabled(pVM));
        if (PATMIsPatchGCAddr(pVM, pCtx->eip))
            return false;
    }
    return !!(pCtx->eflags.u32 & X86_EFL_IF);
}

/**
 * Checks if the instruction is patched as a duplicated function.
 *
 * @returns Patch record if found, NULL otherwise.
 * @param   pVM         Pointer to the VM.
 * @param   pInstrGC    Guest context pointer to the instruction.
 */
PPATMPATCHREC patmQueryFunctionPatch(PVM pVM, RTRCPTR pInstrGC)
{
    PPATMPATCHREC pRec;

    AssertCompile(sizeof(AVLOU32KEY) == sizeof(pInstrGC));
    pRec = (PPATMPATCHREC)RTAvloU32Get(&CTXSUFF(pVM->patm.s.PatchLookupTree)->PatchTree, (AVLOU32KEY)pInstrGC);
    if (    pRec
        && (pRec->patch.uState == PATCH_ENABLED)
        && (pRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CALLABLE_AS_FUNCTION))
       )
        return pRec;
    return NULL;
}

/**
 * Checks if the int 3 was caused by a patched instruction.
 *
 * @returns true if it's a patch-generated int 3, false otherwise.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pInstrGC    Instruction pointer.
 * @param   pOpcode     Original instruction opcode (out, optional).
 * @param   pSize       Original instruction size (out, optional).
 */
VMM_INT_DECL(bool) PATMIsInt3Patch(PVM pVM, RTRCPTR pInstrGC, uint32_t *pOpcode, uint32_t *pSize)
{
    PPATMPATCHREC pRec;
    Assert(!HMIsEnabled(pVM));

    pRec = (PPATMPATCHREC)RTAvloU32Get(&CTXSUFF(pVM->patm.s.PatchLookupTree)->PatchTree, (AVLOU32KEY)pInstrGC);
    if (    pRec
        && (pRec->patch.uState == PATCH_ENABLED)
        && (pRec->patch.flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
       )
    {
        if (pOpcode) *pOpcode = pRec->patch.opcode;
        if (pSize)   *pSize   = pRec->patch.cbPrivInstr;
        return true;
    }
    return false;
}

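/*
 * Illustrative usage sketch (not part of the original file): how a #BP (int 3)
 * trap handler might consult PATMIsInt3Patch to distinguish a PATM breakpoint
 * replacement from a genuine guest breakpoint.  The helper name is made up
 * for the example.
 */
#if 0 /* example only */
static bool patmExampleIsPatmBreakpoint(PVM pVM, RTRCPTR pInstrGC)
{
    uint32_t uOpcode = 0;
    uint32_t cbInstr = 0;
    if (PATMIsInt3Patch(pVM, pInstrGC, &uOpcode, &cbInstr))
    {
        Log(("int 3 at %RRv replaces opcode %#x (%u bytes)\n", pInstrGC, uOpcode, cbInstr));
        return true;    /* emulate the original instruction instead of reflecting #BP */
    }
    return false;       /* genuine guest breakpoint */
}
#endif
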
/**
 * Emulates the sysenter, sysexit and syscall instructions.
 *
 * @returns VBox status code.
 *
 * @param   pVM     Pointer to the VM.
 * @param   pCtx    The relevant guest CPU context.
 * @param   pCpu    Disassembly state.
 */
VMMDECL(int) PATMSysCall(PVM pVM, PCPUMCTX pCtx, PDISCPUSTATE pCpu)
{
    Assert(CPUMQueryGuestCtxPtr(VMMGetCpu0(pVM)) == pCtx);
    AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);

    if (pCpu->pCurInstr->uOpcode == OP_SYSENTER)
    {
        if (    pCtx->SysEnter.cs == 0
            ||  pCtx->eflags.Bits.u1VM
            ||  (pCtx->cs.Sel & X86_SEL_RPL) != 3
            ||  pVM->patm.s.pfnSysEnterPatchGC == 0
            ||  pVM->patm.s.pfnSysEnterGC != (RTRCPTR)(RTRCUINTPTR)pCtx->SysEnter.eip
            ||  !(PATMRawGetEFlags(pVM, pCtx) & X86_EFL_IF))
            goto end;

        Log2(("PATMSysCall: sysenter from %RRv to %RRv\n", pCtx->eip, pVM->patm.s.pfnSysEnterPatchGC));
        /** @todo The base and limit are forced to 0 and 4G-1, respectively. We assume the selector is wide open here. */
        /** @note The Intel manual suggests that the OS is responsible for this. */
        pCtx->cs.Sel = (pCtx->SysEnter.cs & ~X86_SEL_RPL) | 1;
        pCtx->eip    = /** @todo ugly conversion! */ (uint32_t)pVM->patm.s.pfnSysEnterPatchGC;
        pCtx->ss.Sel = pCtx->cs.Sel + 8;    /* SysEnter.cs + 8 */
        pCtx->esp    = pCtx->SysEnter.esp;
        pCtx->eflags.u32 &= ~(X86_EFL_VM | X86_EFL_RF);
        pCtx->eflags.u32 |= X86_EFL_IF;

        /* Turn off interrupts. */
        pVM->patm.s.CTXSUFF(pGCState)->uVMFlags &= ~X86_EFL_IF;

        STAM_COUNTER_INC(&pVM->patm.s.StatSysEnter);

        return VINF_SUCCESS;
    }
    if (pCpu->pCurInstr->uOpcode == OP_SYSEXIT)
    {
        if (    pCtx->SysEnter.cs == 0
            ||  (pCtx->cs.Sel & X86_SEL_RPL) != 1
            ||  pCtx->eflags.Bits.u1VM
            ||  !(PATMRawGetEFlags(pVM, pCtx) & X86_EFL_IF))
            goto end;

        Log2(("PATMSysCall: sysexit from %RRv to %RRv\n", pCtx->eip, pCtx->edx));

        pCtx->cs.Sel = ((pCtx->SysEnter.cs + 16) & ~X86_SEL_RPL) | 3;
        pCtx->eip    = pCtx->edx;
        pCtx->ss.Sel = pCtx->cs.Sel + 8;    /* SysEnter.cs + 24 */
        pCtx->esp    = pCtx->ecx;

        STAM_COUNTER_INC(&pVM->patm.s.StatSysExit);

        return VINF_SUCCESS;
    }
    if (pCpu->pCurInstr->uOpcode == OP_SYSCALL)
    {
        /** @todo implement syscall */
    }
    else if (pCpu->pCurInstr->uOpcode == OP_SYSRET)
    {
        /** @todo implement sysret */
    }

end:
    return VINF_EM_RAW_RING_SWITCH;
}

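/*
 * Illustrative worked example (not part of the original file): the flat
 * selector layout SYSENTER/SYSEXIT assume around IA32_SYSENTER_CS.  With
 * SysEnter.cs = 0x08 the handler above computes:
 *   sysenter:  CS = (0x08 & ~RPL) | 1 = 0x09,         SS = CS + 8 = 0x11
 *   sysexit:   CS = ((0x08 + 16) & ~RPL) | 3 = 0x1B,  SS = CS + 8 = 0x23
 * On real hardware sysenter targets ring 0; under raw-mode PATM redirects it
 * to its ring-1 patch code instead, hence the RPL of 1 on the sysenter path.
 */
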
/**
 * Adds a branch pair to the lookup cache of the particular branch instruction.
 *
 * @returns VBox status code.
 * @param   pVM             Pointer to the VM.
 * @param   pJumpTableGC    Pointer to the branch instruction lookup cache.
 * @param   pBranchTarget   Original branch target.
 * @param   pRelBranchPatch Relative duplicated function address.
 */
int patmAddBranchToLookupCache(PVM pVM, RTRCPTR pJumpTableGC, RTRCPTR pBranchTarget, RTRCUINTPTR pRelBranchPatch)
{
    PPATCHJUMPTABLE pJumpTable;

    Log(("PATMAddBranchToLookupCache: Adding (%RRv->%RRv (%RRv)) to table %RRv\n", pBranchTarget, pRelBranchPatch + pVM->patm.s.pPatchMemGC, pRelBranchPatch, pJumpTableGC));

    AssertReturn(PATMIsPatchGCAddr(pVM, (RTRCUINTPTR)pJumpTableGC), VERR_INVALID_PARAMETER);

#ifdef IN_RC
    pJumpTable = (PPATCHJUMPTABLE)pJumpTableGC;
#else
    pJumpTable = (PPATCHJUMPTABLE)(pJumpTableGC - pVM->patm.s.pPatchMemGC + pVM->patm.s.pPatchMemHC);
#endif
    Log(("Nr addresses = %d, insert pos = %d\n", pJumpTable->cAddresses, pJumpTable->ulInsertPos));
    if (pJumpTable->cAddresses < pJumpTable->nrSlots)
    {
        uint32_t i;

        for (i = 0; i < pJumpTable->nrSlots; i++)
        {
            if (pJumpTable->Slot[i].pInstrGC == 0)
            {
                pJumpTable->Slot[i].pInstrGC = pBranchTarget;
                /* Relative address - eases relocation. */
                pJumpTable->Slot[i].pRelPatchGC = pRelBranchPatch;
                pJumpTable->cAddresses++;
                break;
            }
        }
        AssertReturn(i < pJumpTable->nrSlots, VERR_INTERNAL_ERROR);
#ifdef VBOX_WITH_STATISTICS
        STAM_COUNTER_INC(&pVM->patm.s.StatFunctionLookupInsert);
        if (pVM->patm.s.StatU32FunctionMaxSlotsUsed < i)
            pVM->patm.s.StatU32FunctionMaxSlotsUsed = i + 1;
#endif
    }
    else
    {
        /* Replace an old entry. */
        /** @todo The replacement strategy isn't really bright. Change to something better if required. */
        Assert(pJumpTable->ulInsertPos < pJumpTable->nrSlots);
        Assert((pJumpTable->nrSlots & 1) == 0); /* Note: the mask arithmetic below actually assumes nrSlots is a power of two. */

        pJumpTable->ulInsertPos &= (pJumpTable->nrSlots - 1);
        pJumpTable->Slot[pJumpTable->ulInsertPos].pInstrGC = pBranchTarget;
        /* Relative address - eases relocation. */
        pJumpTable->Slot[pJumpTable->ulInsertPos].pRelPatchGC = pRelBranchPatch;

        pJumpTable->ulInsertPos = (pJumpTable->ulInsertPos + 1) & (pJumpTable->nrSlots - 1);

        STAM_COUNTER_INC(&pVM->patm.s.StatFunctionLookupReplace);
    }

    return VINF_SUCCESS;
}

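/*
 * Illustrative note (not part of the original file): the replacement path
 * above advances ulInsertPos with "& (nrSlots - 1)", which wraps correctly
 * only when nrSlots is a power of two - a stronger requirement than the
 * "(nrSlots & 1) == 0" assertion checks.  The helper name is made up.
 */
#if 0 /* example only */
static uint32_t patmExampleNextSlot(uint32_t uPos, uint32_t cSlots)
{
    /* Equivalent to (uPos + 1) % cSlots when cSlots is a power of two. */
    return (uPos + 1) & (cSlots - 1);
}
#endif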

#if defined(VBOX_WITH_STATISTICS) || defined(LOG_ENABLED)
/**
 * Returns the name of the patched instruction.
 *
 * @returns Instruction name, or NULL if the opcode isn't known to PATM.
 *
 * @param   opcode          DIS instruction opcode.
 * @param   fPatchFlags     Patch flags.
 */
const char *patmGetInstructionString(uint32_t opcode, uint32_t fPatchFlags)
{
    const char *pszInstr = NULL;

    switch (opcode)
    {
        case OP_CLI:
            pszInstr = "cli";
            break;
        case OP_PUSHF:
            pszInstr = "pushf";
            break;
        case OP_POPF:
            pszInstr = "popf";
            break;
        case OP_STR:
            pszInstr = "str";
            break;
        case OP_LSL:
            pszInstr = "lsl";
            break;
        case OP_LAR:
            pszInstr = "lar";
            break;
        case OP_SGDT:
            pszInstr = "sgdt";
            break;
        case OP_SLDT:
            pszInstr = "sldt";
            break;
        case OP_SIDT:
            pszInstr = "sidt";
            break;
        case OP_SMSW:
            pszInstr = "smsw";
            break;
        case OP_VERW:
            pszInstr = "verw";
            break;
        case OP_VERR:
            pszInstr = "verr";
            break;
        case OP_CPUID:
            pszInstr = "cpuid";
            break;
        case OP_JMP:
            pszInstr = "jmp";
            break;
        case OP_JO:
            pszInstr = "jo";
            break;
        case OP_JNO:
            pszInstr = "jno";
            break;
        case OP_JC:
            pszInstr = "jc";
            break;
        case OP_JNC:
            pszInstr = "jnc";
            break;
        case OP_JE:
            pszInstr = "je";
            break;
        case OP_JNE:
            pszInstr = "jne";
            break;
        case OP_JBE:
            pszInstr = "jbe";
            break;
        case OP_JNBE:
            pszInstr = "jnbe";
            break;
        case OP_JS:
            pszInstr = "js";
            break;
        case OP_JNS:
            pszInstr = "jns";
            break;
        case OP_JP:
            pszInstr = "jp";
            break;
        case OP_JNP:
            pszInstr = "jnp";
            break;
        case OP_JL:
            pszInstr = "jl";
            break;
        case OP_JNL:
            pszInstr = "jnl";
            break;
        case OP_JLE:
            pszInstr = "jle";
            break;
        case OP_JNLE:
            pszInstr = "jnle";
            break;
        case OP_JECXZ:
            pszInstr = "jecxz";
            break;
        case OP_LOOP:
            pszInstr = "loop";
            break;
        case OP_LOOPNE:
            pszInstr = "loopne";
            break;
        case OP_LOOPE:
            pszInstr = "loope";
            break;
        case OP_MOV:
            if (fPatchFlags & PATMFL_IDTHANDLER)
                pszInstr = "mov (Int/Trap Handler)";
            else
                pszInstr = "mov (cs)";
            break;
        case OP_SYSENTER:
            pszInstr = "sysenter";
            break;
        case OP_PUSH:
            pszInstr = "push (cs)";
            break;
        case OP_CALL:
            pszInstr = "call";
            break;
        case OP_IRET:
            pszInstr = "iret";
            break;
    }
    return pszInstr;
}
#endif /* VBOX_WITH_STATISTICS || LOG_ENABLED */