/* $Id: PATMAll.cpp 44362 2013-01-24 21:11:05Z vboxsync $ */
/** @file
 * PATM - The Patch Manager, all contexts.
 */

/*
 * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PATM
#include <VBox/vmm/patm.h>
#include <VBox/vmm/cpum.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include <VBox/vmm/em.h>
#include <VBox/err.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/mm.h>
#include "PATMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/vmm.h>
#include "PATMA.h"

#include <VBox/log.h>
#include <iprt/assert.h>


/**
 * Load virtualized flags.
 *
 * This function is called from CPUMRawEnter(). It doesn't have to update the
 * IF and IOPL eflags bits; the caller forces IF to be set and IOPL to 0.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pCtxCore    The cpu context core.
 * @see     @ref pg_raw
 */
VMM_INT_DECL(void) PATMRawEnter(PVM pVM, PCPUMCTXCORE pCtxCore)
{
    bool fPatchCode = PATMIsPatchGCAddr(pVM, pCtxCore->eip);

    /*
     * Currently we don't bother to check whether PATM is enabled or not.
     * For all cases where it isn't, IOPL will be safe and IF will be set.
     */
    register uint32_t efl = pCtxCore->eflags.u32;
    CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = efl & PATM_VIRTUAL_FLAGS_MASK;
    AssertMsg((efl & X86_EFL_IF) || PATMShouldUseRawMode(pVM, (RTRCPTR)pCtxCore->eip), ("X86_EFL_IF is clear and PATM is disabled! (eip=%RRv eflags=%08x fPATM=%d pPATMGC=%RRv-%RRv\n", pCtxCore->eip, pCtxCore->eflags.u32, PATMIsEnabled(pVM), pVM->patm.s.pPatchMemGC, pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem));

    AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || fPatchCode, ("fPIF=%d eip=%RRv\n", CTXSUFF(pVM->patm.s.pGCState)->fPIF, pCtxCore->eip));

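    /* The guest's view of IF/IOPL was stashed in the GC state above; run with
       IF forced on so hardware interrupts stay enabled while executing in
       raw mode. */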
    efl &= ~PATM_VIRTUAL_FLAGS_MASK;
    efl |= X86_EFL_IF;
    pCtxCore->eflags.u32 = efl;

#ifdef IN_RING3
#ifdef PATM_EMULATE_SYSENTER
    PCPUMCTX pCtx;

    /* Check if the sysenter handler has changed. */
    pCtx = CPUMQueryGuestCtxPtr(pVM);
    if (    pCtx->SysEnter.cs  != 0
        &&  pCtx->SysEnter.eip != 0
       )
    {
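        /* If the guest has moved its sysenter handler (SYSENTER_EIP changed),
           drop the stale patch pointers and look up or install a patch for the
           new handler address. */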
        if (pVM->patm.s.pfnSysEnterGC != (RTRCPTR)pCtx->SysEnter.eip)
        {
            pVM->patm.s.pfnSysEnterPatchGC = 0;
            pVM->patm.s.pfnSysEnterGC      = 0;

            Log2(("PATMRawEnter: installing sysenter patch for %RRv\n", pCtx->SysEnter.eip));
            pVM->patm.s.pfnSysEnterPatchGC = PATMR3QueryPatchGCPtr(pVM, pCtx->SysEnter.eip);
            if (pVM->patm.s.pfnSysEnterPatchGC == 0)
            {
                int rc = PATMR3InstallPatch(pVM, pCtx->SysEnter.eip, PATMFL_SYSENTER | PATMFL_CODE32);
                if (rc == VINF_SUCCESS)
                {
                    pVM->patm.s.pfnSysEnterPatchGC = PATMR3QueryPatchGCPtr(pVM, pCtx->SysEnter.eip);
                    pVM->patm.s.pfnSysEnterGC      = (RTRCPTR)pCtx->SysEnter.eip;
                    Assert(pVM->patm.s.pfnSysEnterPatchGC);
                }
            }
            else
                pVM->patm.s.pfnSysEnterGC = (RTRCPTR)pCtx->SysEnter.eip;
        }
    }
    else
    {
        pVM->patm.s.pfnSysEnterPatchGC = 0;
        pVM->patm.s.pfnSysEnterGC      = 0;
    }
#endif
#endif
}


/**
 * Restores virtualized flags.
 *
 * This function is called from CPUMRawLeave(). It will update the eflags register.
 *
 * @note This is the only place where we are allowed to switch back to guest code
 *       (unless there is a special reason, such as a trap in patch code)!
 *
 * @param   pVM         Pointer to the VM.
 * @param   pCtxCore    The cpu context core.
 * @param   rawRC       Raw mode return code.
 * @see     @ref pg_raw
 */
VMM_INT_DECL(void) PATMRawLeave(PVM pVM, PCPUMCTXCORE pCtxCore, int rawRC)
{
    bool fPatchCode = PATMIsPatchGCAddr(pVM, pCtxCore->eip);
    /*
     * We will only be called if PATMRawEnter was previously called.
     */
    register uint32_t efl = pCtxCore->eflags.u32;
    efl = (efl & ~PATM_VIRTUAL_FLAGS_MASK) | (CTXSUFF(pVM->patm.s.pGCState)->uVMFlags & PATM_VIRTUAL_FLAGS_MASK);
    pCtxCore->eflags.u32 = efl;
    CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = X86_EFL_IF;

    AssertReleaseMsg((efl & X86_EFL_IF) || fPatchCode || rawRC == VINF_PATM_PENDING_IRQ_AFTER_IRET || RT_FAILURE(rawRC), ("Inconsistent state at %RRv rc=%Rrc\n", pCtxCore->eip, rawRC));
    AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || fPatchCode || RT_FAILURE(rawRC), ("fPIF=%d eip=%RRv rc=%Rrc\n", CTXSUFF(pVM->patm.s.pGCState)->fPIF, pCtxCore->eip, rawRC));

#ifdef IN_RING3
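    /*
     * If we are leaving raw mode while still inside patch code with interrupts
     * (virtually) enabled, try to translate the patch address back to the
     * original guest instruction so execution can resume there.
     */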
    if (    (efl & X86_EFL_IF)
        &&  fPatchCode
       )
    {
        if (    rawRC < VINF_PATM_LEAVE_RC_FIRST
            ||  rawRC > VINF_PATM_LEAVE_RC_LAST)
        {
            /*
             * Golden rules:
             * - Don't interrupt special patch streams that replace special instructions
             * - Don't break instruction fusing (sti, pop ss, mov ss)
             * - Don't go back to an instruction that has been overwritten by a patch jump
             * - Don't interrupt an idt handler on entry (1st instruction); technically incorrect
             *
             */
            if (CTXSUFF(pVM->patm.s.pGCState)->fPIF == 1)   /* consistent patch instruction state */
            {
                PATMTRANSSTATE enmState;
                RTRCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtxCore->eip, &enmState);

                AssertRelease(pOrgInstrGC);

                Assert(enmState != PATMTRANS_OVERWRITTEN);
                if (enmState == PATMTRANS_SAFE)
                {
                    Assert(!patmFindActivePatchByEntrypoint(pVM, pOrgInstrGC));
                    Log(("Switchback from %RRv to %RRv (Psp=%x)\n", pCtxCore->eip, pOrgInstrGC, CTXSUFF(pVM->patm.s.pGCState)->Psp));
                    STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBack);
                    pCtxCore->eip = pOrgInstrGC;
                    fPatchCode = false; /* to reset the stack ptr */

                    CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0;  /* Safe to reset this pointer; otherwise the state would have been PATMTRANS_INHIBITIRQ. */
                }
                else
                {
                    LogFlow(("Patch address %RRv can't be interrupted (state=%d)!\n", pCtxCore->eip, enmState));
                    STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBackFail);
                }
            }
            else
            {
                LogFlow(("Patch address %RRv can't be interrupted (fPIF=%d)!\n", pCtxCore->eip, CTXSUFF(pVM->patm.s.pGCState)->fPIF));
                STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBackFail);
            }
        }
    }
#else  /* !IN_RING3 */
    AssertMsgFailed(("!IN_RING3"));
#endif /* !IN_RING3 */

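    /*
     * Back in guest code: carry any pending interrupt-inhibit state over to EM
     * and reset the PATM stack.
     */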
    if (!fPatchCode)
    {
        if (CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts == (RTRCPTR)pCtxCore->eip)
        {
            EMSetInhibitInterruptsPC(VMMGetCpu0(pVM), pCtxCore->eip);
        }
        CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0;

        /* Reset the stack pointer to the top of the stack. */
#ifdef DEBUG
        if (CTXSUFF(pVM->patm.s.pGCState)->Psp != PATM_STACK_SIZE)
        {
            LogFlow(("PATMRawLeave: Reset PATM stack (Psp = %x)\n", CTXSUFF(pVM->patm.s.pGCState)->Psp));
        }
#endif
        CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
    }
}

/**
 * Get the EFLAGS.
 * This is a worker for CPUMRawGetEFlags().
 *
 * @returns The eflags.
 * @param   pVM         Pointer to the VM.
 * @param   pCtxCore    The context core.
 */
VMM_INT_DECL(uint32_t) PATMRawGetEFlags(PVM pVM, PCCPUMCTXCORE pCtxCore)
{
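    /* Merge the virtualized IF/IOPL bits that PATMRawEnter stashed in the GC
       state back into the flags image handed to the caller. */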
    uint32_t efl = pCtxCore->eflags.u32;
    efl &= ~PATM_VIRTUAL_FLAGS_MASK;
    efl |= pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & PATM_VIRTUAL_FLAGS_MASK;
    return efl;
}

/**
 * Updates the EFLAGS.
 * This is a worker for CPUMRawSetEFlags().
 *
 * @param   pVM         Pointer to the VM.
 * @param   pCtxCore    The context core.
 * @param   efl         The new EFLAGS value.
 */
VMM_INT_DECL(void) PATMRawSetEFlags(PVM pVM, PCPUMCTXCORE pCtxCore, uint32_t efl)
{
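    /* Keep the guest-visible IF/IOPL in the PATM GC state; the real flags image
       always runs with IF set while in raw mode. */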
    pVM->patm.s.CTXSUFF(pGCState)->uVMFlags = efl & PATM_VIRTUAL_FLAGS_MASK;
    efl &= ~PATM_VIRTUAL_FLAGS_MASK;
    efl |= X86_EFL_IF;
    pCtxCore->eflags.u32 = efl;
}

/**
 * Check if we must use raw mode (patch code being executed).
 *
 * @returns true if raw mode must be used (the address lies in the patch memory region).
 * @param   pVM         Pointer to the VM.
 * @param   pAddrGC     Guest context address.
 */
VMM_INT_DECL(bool) PATMShouldUseRawMode(PVM pVM, RTRCPTR pAddrGC)
{
    return    PATMIsEnabled(pVM)
           && pAddrGC >= (RTRCPTR)pVM->patm.s.pPatchMemGC
           && pAddrGC <  (RTRCPTR)((RTRCUINTPTR)pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem);
}

/**
 * Returns the guest context pointer of the GC state structure.
 *
 * @returns GC pointer to the PATM GC state structure.
 * @param   pVM         Pointer to the VM.
 */
VMM_INT_DECL(RCPTRTYPE(PPATMGCSTATE)) PATMQueryGCState(PVM pVM)
{
    return pVM->patm.s.pGCStateGC;
}

/**
 * Checks whether the GC address is part of our patch region.
 *
 * @returns true if the address lies within the patch memory region.
 * @param   pVM         Pointer to the VM.
 * @param   pAddrGC     Guest context address.
 * @internal
 */
VMMDECL(bool) PATMIsPatchGCAddr(PVM pVM, RTRCUINTPTR pAddrGC)
{
    return PATMIsEnabled(pVM) && pAddrGC - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC < pVM->patm.s.cbPatchMem;
}

/**
 * Set parameters for pending MMIO patch operation
 *
 * @returns VBox status code.
 * @param   pVM             Pointer to the VM.
 * @param   GCPhys          MMIO physical address.
 * @param   pCachedData     GC pointer to cached data.
 */
VMM_INT_DECL(int) PATMSetMMIOPatchInfo(PVM pVM, RTGCPHYS GCPhys, RTRCPTR pCachedData)
{
    pVM->patm.s.mmio.GCPhys = GCPhys;
    pVM->patm.s.mmio.pCachedData = (RTRCPTR)pCachedData;

    return VINF_SUCCESS;
}

/**
 * Checks if the interrupt flag is enabled or not.
 *
 * @returns true if it's enabled.
 * @returns false if it's disabled.
 *
 * @param   pVM         Pointer to the VM.
 */
VMM_INT_DECL(bool) PATMAreInterruptsEnabled(PVM pVM)
{
    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));

    return PATMAreInterruptsEnabledByCtxCore(pVM, CPUMCTX2CORE(pCtx));
}

/**
 * Checks if the interrupt flag is enabled or not.
 *
 * @returns true if it's enabled.
 * @returns false if it's disabled.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pCtxCore    CPU context.
 */
VMM_INT_DECL(bool) PATMAreInterruptsEnabledByCtxCore(PVM pVM, PCPUMCTXCORE pCtxCore)
{
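    /* Code inside the patch memory region always runs with interrupts regarded
       as disabled, regardless of the flags image. */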
    if (PATMIsEnabled(pVM))
    {
        if (PATMIsPatchGCAddr(pVM, pCtxCore->eip))
            return false;
    }
    return !!(pCtxCore->eflags.u32 & X86_EFL_IF);
}

/**
 * Check if the instruction is patched as a duplicated function
 *
 * @returns patch record, or NULL if not found
 * @param   pVM         Pointer to the VM.
 * @param   pInstrGC    Guest context pointer to the instruction
 *
 */
PPATMPATCHREC patmQueryFunctionPatch(PVM pVM, RTRCPTR pInstrGC)
{
    PPATMPATCHREC pRec;

    AssertCompile(sizeof(AVLOU32KEY) == sizeof(pInstrGC));
    pRec = (PPATMPATCHREC)RTAvloU32Get(&CTXSUFF(pVM->patm.s.PatchLookupTree)->PatchTree, (AVLOU32KEY)pInstrGC);
    if (    pRec
        &&  (pRec->patch.uState == PATCH_ENABLED)
        &&  (pRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CALLABLE_AS_FUNCTION))
       )
        return pRec;
    return 0;
}

/**
 * Checks if the int 3 was caused by a patched instruction
 *
 * @returns true if the int 3 replaces a patched instruction, false otherwise.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pInstrGC    Instruction pointer.
 * @param   pOpcode     Original instruction opcode (out, optional).
 * @param   pSize       Original instruction size (out, optional).
 */
VMM_INT_DECL(bool) PATMIsInt3Patch(PVM pVM, RTRCPTR pInstrGC, uint32_t *pOpcode, uint32_t *pSize)
{
    PPATMPATCHREC pRec;

    pRec = (PPATMPATCHREC)RTAvloU32Get(&CTXSUFF(pVM->patm.s.PatchLookupTree)->PatchTree, (AVLOU32KEY)pInstrGC);
    if (    pRec
        &&  (pRec->patch.uState == PATCH_ENABLED)
        &&  (pRec->patch.flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
       )
    {
        if (pOpcode) *pOpcode = pRec->patch.opcode;
        if (pSize)   *pSize   = pRec->patch.cbPrivInstr;
        return true;
    }
    return false;
}

/**
 * Emulate sysenter, sysexit and syscall instructions
 *
 * @returns VBox status code.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pRegFrame   The register frame (CPU context core).
 * @param   pCpu        Disassembly context.
 */
VMMDECL(int) PATMSysCall(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
{
    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu0(pVM));

    if (pCpu->pCurInstr->uOpcode == OP_SYSENTER)
    {
        if (    pCtx->SysEnter.cs == 0
            ||  pRegFrame->eflags.Bits.u1VM
            ||  (pRegFrame->cs.Sel & X86_SEL_RPL) != 3
            ||  pVM->patm.s.pfnSysEnterPatchGC == 0
            ||  pVM->patm.s.pfnSysEnterGC != (RTRCPTR)(RTRCUINTPTR)pCtx->SysEnter.eip
            ||  !(PATMRawGetEFlags(pVM, pRegFrame) & X86_EFL_IF))
            goto end;

        Log2(("PATMSysCall: sysenter from %RRv to %RRv\n", pRegFrame->eip, pVM->patm.s.pfnSysEnterPatchGC));
        /** @todo The base and limit are forced to 0 & 4G-1 resp. We assume the selector is wide open here. */
        /** @note The Intel manual suggests that the OS is responsible for this. */
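        /* Enter the patched handler in ring 1: cs is derived from SYSENTER_CS
           with RPL 1 (raw mode runs guest ring-0 code in ring 1), ss = cs + 8
           per the sysenter convention, and eip/esp come from the patch address
           and SYSENTER_ESP. */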
        pRegFrame->cs.Sel      = (pCtx->SysEnter.cs & ~X86_SEL_RPL) | 1;
        pRegFrame->eip         = /** @todo ugly conversion! */(uint32_t)pVM->patm.s.pfnSysEnterPatchGC;
        pRegFrame->ss.Sel      = pRegFrame->cs.Sel + 8;     /* SysEnter.cs + 8 */
        pRegFrame->esp         = pCtx->SysEnter.esp;
        pRegFrame->eflags.u32 &= ~(X86_EFL_VM | X86_EFL_RF);
        pRegFrame->eflags.u32 |= X86_EFL_IF;

        /* Turn off interrupts. */
        pVM->patm.s.CTXSUFF(pGCState)->uVMFlags &= ~X86_EFL_IF;

        STAM_COUNTER_INC(&pVM->patm.s.StatSysEnter);

        return VINF_SUCCESS;
    }
    if (pCpu->pCurInstr->uOpcode == OP_SYSEXIT)
    {
        if (    pCtx->SysEnter.cs == 0
            ||  (pRegFrame->cs.Sel & X86_SEL_RPL) != 1
            ||  pRegFrame->eflags.Bits.u1VM
            ||  !(PATMRawGetEFlags(pVM, pRegFrame) & X86_EFL_IF))
            goto end;

        Log2(("PATMSysCall: sysexit from %RRv to %RRv\n", pRegFrame->eip, pRegFrame->edx));

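        /* Return to ring 3: cs comes from SYSENTER_CS + 16 with RPL 3, ss = cs + 8,
           and eip/esp are taken from edx/ecx as the sysexit convention dictates. */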
        pRegFrame->cs.Sel = ((pCtx->SysEnter.cs + 16) & ~X86_SEL_RPL) | 3;
        pRegFrame->eip    = pRegFrame->edx;
        pRegFrame->ss.Sel = pRegFrame->cs.Sel + 8;  /* SysEnter.cs + 24 */
        pRegFrame->esp    = pRegFrame->ecx;

        STAM_COUNTER_INC(&pVM->patm.s.StatSysExit);

        return VINF_SUCCESS;
    }
    if (pCpu->pCurInstr->uOpcode == OP_SYSCALL)
    {
        /** @todo implement syscall */
    }
    else
    if (pCpu->pCurInstr->uOpcode == OP_SYSRET)
    {
        /** @todo implement sysret */
    }

end:
    return VINF_EM_RAW_RING_SWITCH;
}

/**
 * Adds a branch target/patch address pair to the lookup cache of the given branch instruction.
 *
 * @returns VBox status code.
 * @param   pVM             Pointer to the VM.
 * @param   pJumpTableGC    Pointer to branch instruction lookup cache.
 * @param   pBranchTarget   Original branch target.
 * @param   pRelBranchPatch Relative duplicated function address.
 */
int patmAddBranchToLookupCache(PVM pVM, RTRCPTR pJumpTableGC, RTRCPTR pBranchTarget, RTRCUINTPTR pRelBranchPatch)
{
    PPATCHJUMPTABLE pJumpTable;

    Log(("PATMAddBranchToLookupCache: Adding (%RRv->%RRv (%RRv)) to table %RRv\n", pBranchTarget, pRelBranchPatch + pVM->patm.s.pPatchMemGC, pRelBranchPatch, pJumpTableGC));

    AssertReturn(PATMIsPatchGCAddr(pVM, (RTRCUINTPTR)pJumpTableGC), VERR_INVALID_PARAMETER);

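    /* In raw-mode context the table can be used at its GC address directly; in
       ring-3 it must be translated to the HC mapping of the patch memory. */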
#ifdef IN_RC
    pJumpTable = (PPATCHJUMPTABLE)pJumpTableGC;
#else
    pJumpTable = (PPATCHJUMPTABLE)(pJumpTableGC - pVM->patm.s.pPatchMemGC + pVM->patm.s.pPatchMemHC);
#endif
    Log(("Nr addresses = %d, insert pos = %d\n", pJumpTable->cAddresses, pJumpTable->ulInsertPos));
    if (pJumpTable->cAddresses < pJumpTable->nrSlots)
    {
        uint32_t i;

        for (i = 0; i < pJumpTable->nrSlots; i++)
        {
            if (pJumpTable->Slot[i].pInstrGC == 0)
            {
                pJumpTable->Slot[i].pInstrGC = pBranchTarget;
                /* Relative address - eases relocation */
                pJumpTable->Slot[i].pRelPatchGC = pRelBranchPatch;
                pJumpTable->cAddresses++;
                break;
            }
        }
        AssertReturn(i < pJumpTable->nrSlots, VERR_INTERNAL_ERROR);
#ifdef VBOX_WITH_STATISTICS
        STAM_COUNTER_INC(&pVM->patm.s.StatFunctionLookupInsert);
        if (pVM->patm.s.StatU32FunctionMaxSlotsUsed < i)
            pVM->patm.s.StatU32FunctionMaxSlotsUsed = i + 1;
#endif
    }
    else
    {
        /* Replace an old entry. */
        /** @todo replacement strategy isn't really bright. change to something better if required. */
        Assert(pJumpTable->ulInsertPos < pJumpTable->nrSlots);
        Assert((pJumpTable->nrSlots & 1) == 0);

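        /* Note: the wrap-around masking below assumes nrSlots is a power of two;
           the assertion above only checks that it is even. */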
        pJumpTable->ulInsertPos &= (pJumpTable->nrSlots - 1);
        pJumpTable->Slot[pJumpTable->ulInsertPos].pInstrGC = pBranchTarget;
        /* Relative address - eases relocation */
        pJumpTable->Slot[pJumpTable->ulInsertPos].pRelPatchGC = pRelBranchPatch;

        pJumpTable->ulInsertPos = (pJumpTable->ulInsertPos + 1) & (pJumpTable->nrSlots - 1);

        STAM_COUNTER_INC(&pVM->patm.s.StatFunctionLookupReplace);
    }

    return VINF_SUCCESS;
}


#if defined(VBOX_WITH_STATISTICS) || defined(LOG_ENABLED)
/**
 * Return the name of the patched instruction
 *
 * @returns instruction name (NULL if the opcode isn't handled)
 *
 * @param   opcode          DIS instruction opcode
 * @param   fPatchFlags     Patch flags
 */
const char *patmGetInstructionString(uint32_t opcode, uint32_t fPatchFlags)
{
    const char *pszInstr = NULL;

    switch (opcode)
    {
        case OP_CLI:        pszInstr = "cli";       break;
        case OP_PUSHF:      pszInstr = "pushf";     break;
        case OP_POPF:       pszInstr = "popf";      break;
        case OP_STR:        pszInstr = "str";       break;
        case OP_LSL:        pszInstr = "lsl";       break;
        case OP_LAR:        pszInstr = "lar";       break;
        case OP_SGDT:       pszInstr = "sgdt";      break;
        case OP_SLDT:       pszInstr = "sldt";      break;
        case OP_SIDT:       pszInstr = "sidt";      break;
        case OP_SMSW:       pszInstr = "smsw";      break;
        case OP_VERW:       pszInstr = "verw";      break;
        case OP_VERR:       pszInstr = "verr";      break;
        case OP_CPUID:      pszInstr = "cpuid";     break;
        case OP_JMP:        pszInstr = "jmp";       break;
        case OP_JO:         pszInstr = "jo";        break;
        case OP_JNO:        pszInstr = "jno";       break;
        case OP_JC:         pszInstr = "jc";        break;
        case OP_JNC:        pszInstr = "jnc";       break;
        case OP_JE:         pszInstr = "je";        break;
        case OP_JNE:        pszInstr = "jne";       break;
        case OP_JBE:        pszInstr = "jbe";       break;
        case OP_JNBE:       pszInstr = "jnbe";      break;
        case OP_JS:         pszInstr = "js";        break;
        case OP_JNS:        pszInstr = "jns";       break;
        case OP_JP:         pszInstr = "jp";        break;
        case OP_JNP:        pszInstr = "jnp";       break;
        case OP_JL:         pszInstr = "jl";        break;
        case OP_JNL:        pszInstr = "jnl";       break;
        case OP_JLE:        pszInstr = "jle";       break;
        case OP_JNLE:       pszInstr = "jnle";      break;
        case OP_JECXZ:      pszInstr = "jecxz";     break;
        case OP_LOOP:       pszInstr = "loop";      break;
        case OP_LOOPNE:     pszInstr = "loopne";    break;
        case OP_LOOPE:      pszInstr = "loope";     break;
        case OP_MOV:
            if (fPatchFlags & PATMFL_IDTHANDLER)
                pszInstr = "mov (Int/Trap Handler)";
            break;
        case OP_SYSENTER:   pszInstr = "sysenter";  break;
        case OP_PUSH:       pszInstr = "push (cs)"; break;
        case OP_CALL:       pszInstr = "call";      break;
        case OP_IRET:       pszInstr = "iret";      break;
    }
    return pszInstr;
}
#endif