VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PATM.cpp@ 36669

Last change on this file since 36669 was 36669, checked in by vboxsync, 14 years ago

PATM: Record trampoline patches in the target to update the displacement on a patch refresh. (see xTracker #5593 for further information)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 252.8 KB
Line 
1/* $Id: PATM.cpp 36669 2011-04-14 12:21:43Z vboxsync $ */
2/** @file
3 * PATM - Dynamic Guest OS Patching Manager
4 *
5 * NOTE: Never ever reuse patch memory!!
6 */
7
8/*
9 * Copyright (C) 2006-2007 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_PATM
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/stam.h>
26#include <VBox/vmm/pgm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/cpumdis.h>
29#include <VBox/vmm/iom.h>
30#include <VBox/vmm/mm.h>
31#include <VBox/vmm/em.h>
32#include <VBox/vmm/ssm.h>
33#include <VBox/vmm/trpm.h>
34#include <VBox/vmm/cfgm.h>
35#include <VBox/param.h>
36#include <VBox/vmm/selm.h>
37#include <iprt/avl.h>
38#include "PATMInternal.h"
39#include "PATMPatch.h"
40#include <VBox/vmm/vm.h>
41#include <VBox/vmm/csam.h>
42#include <VBox/dbg.h>
43#include <VBox/err.h>
44#include <VBox/log.h>
45#include <iprt/assert.h>
46#include <iprt/asm.h>
47#include <VBox/dis.h>
48#include <VBox/disopcode.h>
49#include "internal/pgm.h"
50
51#include <iprt/string.h>
52#include "PATMA.h"
53
54//#define PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
55//#define PATM_DISABLE_ALL
56
/**
 * Refresh trampoline patch state.
 *
 * Callback/user-data record used while refreshing a patch, carrying the
 * trampoline being updated and the replacement patch it must jump to
 * (presumably walked per trampoline during PATMR3PatchRefresh — confirm
 * against the caller, which is outside this chunk; see xTracker #5593).
 */
typedef struct PATMREFRESHPATCH
{
    /** Pointer to the VM structure. */
    PVM         pVM;
    /** The trampoline patch record. */
    PPATCHINFO  pPatchTrampoline;
    /** The new patch we want to jump to. */
    PPATCHINFO  pPatchRec;
} PATMREFRESHPATCH, *PPATMREFRESHPATCH;
69
70
/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/

static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pPatch);
static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch);

#ifdef LOG_ENABLED // keep gcc quiet
static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC);
#endif
#ifdef VBOX_WITH_STATISTICS
static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch);
static void patmResetStat(PVM pVM, void *pvSample);
static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf);
#endif

/* Convert between ring-3 (HC) and raw-mode (GC) addresses inside the patch
   memory block by applying the difference of the two base pointers.  Only
   valid for pointers that actually lie within the patch memory block. */
#define patmPatchHCPtr2PatchGCPtr(pVM, pHC) (pVM->patm.s.pPatchMemGC + (pHC - pVM->patm.s.pPatchMemHC))
#define patmPatchGCPtr2PatchHCPtr(pVM, pGC) (pVM->patm.s.pPatchMemHC + (pGC - pVM->patm.s.pPatchMemGC))

static int patmReinit(PVM pVM);
static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam);

#ifdef VBOX_WITH_DEBUGGER
static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM);
static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);

/** Command descriptors. */
static const DBGCCMD g_aCmds[] =
{
    /* pszCmd,   cArgsMin, cArgsMax, paArgDesc, cArgDescs, fFlags, pfnHandler,   pszSyntax, ....pszDescription */
    { "patmon",  0,        0,        NULL,      0,         0,      patmr3CmdOn,  "",        "Enable patching."  },
    { "patmoff", 0,        0,        NULL,      0,         0,      patmr3CmdOff, "",        "Disable patching." },
};
#endif

/* Don't want to break saved states, so put it here as a global variable. */
static unsigned int cIDTHandlersDisabled = 0;
110
/**
 * Initializes the PATM.
 *
 * Allocates the patch memory block, the PATM stack, the GC state page and the
 * statistics area from the hypervisor heap, runs patmReinit() to set up the
 * initial state, registers the saved-state handlers and (when enabled) the
 * debugger commands and STAM counters.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
VMMR3DECL(int) PATMR3Init(PVM pVM)
{
    int rc;

    Log(("PATMR3Init: Patch record size %d\n", sizeof(PATCHINFO)));

    /* These values can't change as they are hardcoded in patch code (old saved states!) */
    AssertCompile(VMCPU_FF_TIMER == RT_BIT_32(2));
    AssertCompile(VM_FF_REQUEST == VMCPU_FF_REQUEST);
    AssertCompile(VMCPU_FF_INTERRUPT_APIC == RT_BIT_32(0));
    AssertCompile(VMCPU_FF_INTERRUPT_PIC == RT_BIT_32(1));

    /* PATMInterruptFlag is defined on the assembly side; verify it agrees with the C view. */
    AssertReleaseMsg(PATMInterruptFlag == (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST),
                     ("Interrupt flags out of sync!! PATMInterruptFlag=%#x expected %#x. broken assembler?\n", PATMInterruptFlag, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST));

    /* Allocate patch memory and GC patch state memory. */
    pVM->patm.s.cbPatchMem = PATCH_MEMORY_SIZE;
    /* Add another page in case the generated code is much larger than expected. */
    /** @todo bad safety precaution */
    rc = MMR3HyperAllocOnceNoRel(pVM, PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE + PATM_STAT_MEMSIZE, PAGE_SIZE, MM_TAG_PATM, (void **)&pVM->patm.s.pPatchMemHC);
    if (RT_FAILURE(rc))
    {
        Log(("MMHyperAlloc failed with %Rrc\n", rc));
        return rc;
    }
    pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);

    /* PATM stack page for call instruction execution. (2 parts: one for our private stack and one to store the original return address */
    pVM->patm.s.pGCStackHC = (RTRCPTR *)(pVM->patm.s.pPatchMemHC + PATCH_MEMORY_SIZE + PAGE_SIZE);
    pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);

    /*
     * Hypervisor memory for GC status data (read/write)
     *
     * Note1: This is non-critical data; if trashed by the guest, then it will only cause problems for itself
     * Note2: This doesn't really belong here, but we need access to it for relocation purposes
     *
     */
    Assert(sizeof(PATMGCSTATE) < PAGE_SIZE);    /* Note: hardcoded dependencies on this exist. */
    pVM->patm.s.pGCStateHC = (PPATMGCSTATE)((uint8_t *)pVM->patm.s.pGCStackHC + PATM_STACK_TOTAL_SIZE);
    pVM->patm.s.pGCStateGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);

    /* Hypervisor memory for patch statistics */
    pVM->patm.s.pStatsHC = (PSTAMRATIOU32)((uint8_t *)pVM->patm.s.pGCStateHC + PAGE_SIZE);
    pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);

    /* Memory for patch lookup trees. */
    rc = MMHyperAlloc(pVM, sizeof(*pVM->patm.s.PatchLookupTreeHC), 0, MM_TAG_PATM, (void **)&pVM->patm.s.PatchLookupTreeHC);
    AssertRCReturn(rc, rc);
    pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);

#ifdef RT_ARCH_AMD64 /* see patmReinit(). */
    /* Check CFGM option. */
    rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "PATMEnabled", &pVM->fPATMEnabled);
    if (RT_FAILURE(rc))
# ifdef PATM_DISABLE_ALL
        pVM->fPATMEnabled = false;
# else
        pVM->fPATMEnabled = true;
# endif
#endif

    /* Set up the remaining state and generate the global helper functions. */
    rc = patmReinit(pVM);
    AssertRC(rc);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Register save and load state notifiers.
     */
    rc = SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SSM_VERSION, sizeof(pVM->patm.s) + PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE,
                               NULL, NULL, NULL,
                               NULL, patmR3Save, NULL,
                               NULL, patmR3Load, NULL);
    AssertRCReturn(rc, rc);

#ifdef VBOX_WITH_DEBUGGER
    /*
     * Debugger commands.  (Registered once per process, not per VM.)
     */
    static bool s_fRegisteredCmds = false;
    if (!s_fRegisteredCmds)
    {
        int rc2 = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc2))
            s_fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    STAM_REG(pVM, &pVM->patm.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/PATM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
    STAM_REG(pVM, &pVM->patm.s.StatPATMMemoryUsed,STAMTYPE_COUNTER, "/PATM/MemoryUsed", STAMUNIT_OCCURENCES, "The amount of hypervisor heap used for patches.");
    STAM_REG(pVM, &pVM->patm.s.StatDisabled, STAMTYPE_COUNTER, "/PATM/Patch/Disabled", STAMUNIT_OCCURENCES, "Number of times patches were disabled.");
    STAM_REG(pVM, &pVM->patm.s.StatEnabled, STAMTYPE_COUNTER, "/PATM/Patch/Enabled", STAMUNIT_OCCURENCES, "Number of times patches were enabled.");
    STAM_REG(pVM, &pVM->patm.s.StatDirty, STAMTYPE_COUNTER, "/PATM/Patch/Dirty", STAMUNIT_OCCURENCES, "Number of times patches were marked dirty.");
    STAM_REG(pVM, &pVM->patm.s.StatUnusable, STAMTYPE_COUNTER, "/PATM/Patch/Unusable", STAMUNIT_OCCURENCES, "Number of unusable patches (conflicts).");
    STAM_REG(pVM, &pVM->patm.s.StatInstalled, STAMTYPE_COUNTER, "/PATM/Patch/Installed", STAMUNIT_OCCURENCES, "Number of installed patches.");
    STAM_REG(pVM, &pVM->patm.s.StatInt3Callable, STAMTYPE_COUNTER, "/PATM/Patch/Int3Callable", STAMUNIT_OCCURENCES, "Number of cli patches turned into int3 patches.");

    STAM_REG(pVM, &pVM->patm.s.StatInt3BlockRun, STAMTYPE_COUNTER, "/PATM/Patch/Run/Int3", STAMUNIT_OCCURENCES, "Number of times an int3 block patch was executed.");
    STAMR3RegisterF(pVM, &pVM->patm.s.pGCStateHC->uPatchCalls, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Patch/Run/Normal");

    STAM_REG(pVM, &pVM->patm.s.StatInstalledFunctionPatches, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Function", STAMUNIT_OCCURENCES, "Number of installed function duplication patches.");
    STAM_REG(pVM, &pVM->patm.s.StatInstalledTrampoline, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Trampoline", STAMUNIT_OCCURENCES, "Number of installed trampoline patches.");
    STAM_REG(pVM, &pVM->patm.s.StatInstalledJump, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Jump", STAMUNIT_OCCURENCES, "Number of installed jump patches.");

    STAM_REG(pVM, &pVM->patm.s.StatOverwritten, STAMTYPE_COUNTER, "/PATM/Patch/Overwritten", STAMUNIT_OCCURENCES, "Number of overwritten patches.");
    STAM_REG(pVM, &pVM->patm.s.StatFixedConflicts,STAMTYPE_COUNTER, "/PATM/Patch/ConflictFixed", STAMUNIT_OCCURENCES, "Number of fixed conflicts.");
    STAM_REG(pVM, &pVM->patm.s.StatFlushed, STAMTYPE_COUNTER, "/PATM/Patch/Flushed", STAMUNIT_OCCURENCES, "Number of flushes of pages with patch jumps.");
    STAM_REG(pVM, &pVM->patm.s.StatMonitored, STAMTYPE_COUNTER, "/PATM/Patch/Monitored", STAMUNIT_OCCURENCES, "Number of patches in monitored patch pages.");
    STAM_REG(pVM, &pVM->patm.s.StatPageBoundaryCrossed, STAMTYPE_COUNTER, "/PATM/Patch/BoundaryCross", STAMUNIT_OCCURENCES, "Number of refused patches due to patch jump crossing page boundary.");

    STAM_REG(pVM, &pVM->patm.s.StatHandleTrap, STAMTYPE_PROFILE, "/PATM/HandleTrap", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3HandleTrap");
    STAM_REG(pVM, &pVM->patm.s.StatPushTrap, STAMTYPE_COUNTER, "/PATM/HandleTrap/PushWP", STAMUNIT_OCCURENCES, "Number of traps due to monitored stack pages.");

    STAM_REG(pVM, &pVM->patm.s.StatSwitchBack, STAMTYPE_COUNTER, "/PATM/SwitchBack", STAMUNIT_OCCURENCES, "Switch back to original guest code when IF=1 & executing PATM instructions");
    STAM_REG(pVM, &pVM->patm.s.StatSwitchBackFail,STAMTYPE_COUNTER, "/PATM/SwitchBackFail", STAMUNIT_OCCURENCES, "Failed switch back to original guest code when IF=1 & executing PATM instructions");

    STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQFailed, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Failed", STAMUNIT_OCCURENCES, "Nr of failed PATMR3DuplicateFunctionRequest calls");
    STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQSuccess, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Success", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls");
    STAM_REG(pVM, &pVM->patm.s.StatDuplicateUseExisting,STAMTYPE_COUNTER, "/PATM/Function/DupREQ/UseExist", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls when using an existing patch");

    STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupInsert, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Insert", STAMUNIT_OCCURENCES, "Nr of successful function address insertions");
    STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupReplace, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Replace", STAMUNIT_OCCURENCES, "Nr of successful function address replacements");
    STAM_REG(pVM, &pVM->patm.s.StatU32FunctionMaxSlotsUsed, STAMTYPE_U32_RESET,"/PATM/Function/Lookup/MaxSlots", STAMUNIT_OCCURENCES, "Maximum nr of lookup slots used in all call patches");

    STAM_REG(pVM, &pVM->patm.s.StatFunctionFound, STAMTYPE_COUNTER, "/PATM/Function/Found", STAMUNIT_OCCURENCES, "Nr of successful function patch lookups in GC");
    STAM_REG(pVM, &pVM->patm.s.StatFunctionNotFound, STAMTYPE_COUNTER, "/PATM/Function/NotFound", STAMUNIT_OCCURENCES, "Nr of failed function patch lookups in GC");

    STAM_REG(pVM, &pVM->patm.s.StatPatchWrite, STAMTYPE_PROFILE, "/PATM/Write/Handle", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3PatchWrite");
    STAM_REG(pVM, &pVM->patm.s.StatPatchWriteDetect, STAMTYPE_PROFILE, "/PATM/Write/Detect", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMIsWriteToPatchPage");
    STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpreted, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Success", STAMUNIT_OCCURENCES, "Nr of interpreted patch writes.");
    STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpretedFailed, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Failed", STAMUNIT_OCCURENCES, "Nr of failed interpreted patch writes.");

    STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshSuccess, STAMTYPE_COUNTER, "/PATM/Refresh/Success", STAMUNIT_OCCURENCES, "Successful patch refreshes");
    STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshFailed, STAMTYPE_COUNTER, "/PATM/Refresh/Failure", STAMUNIT_OCCURENCES, "Failed patch refreshes");

    STAM_REG(pVM, &pVM->patm.s.StatPatchPageInserted, STAMTYPE_COUNTER, "/PATM/Page/Inserted", STAMUNIT_OCCURENCES, "Nr of inserted guest pages that were patched");
    STAM_REG(pVM, &pVM->patm.s.StatPatchPageRemoved, STAMTYPE_COUNTER, "/PATM/Page/Removed", STAMUNIT_OCCURENCES, "Nr of removed guest pages that were patched");

    STAM_REG(pVM, &pVM->patm.s.StatInstrDirty, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Detected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty.");
    STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyGood, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Corrected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and corrected later on.");
    STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyBad, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Failed", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and we were not able to correct them.");

    STAM_REG(pVM, &pVM->patm.s.StatSysEnter, STAMTYPE_COUNTER, "/PATM/Emul/SysEnter", STAMUNIT_OCCURENCES, "Number of times sysenter was emulated.");
    STAM_REG(pVM, &pVM->patm.s.StatSysExit, STAMTYPE_COUNTER, "/PATM/Emul/SysExit" , STAMUNIT_OCCURENCES, "Number of times sysexit was emulated.");
    STAM_REG(pVM, &pVM->patm.s.StatEmulIret, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Success", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
    STAM_REG(pVM, &pVM->patm.s.StatEmulIretFailed, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Failed", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");

    STAM_REG(pVM, &pVM->patm.s.StatGenRet, STAMTYPE_COUNTER, "/PATM/Gen/Ret" , STAMUNIT_OCCURENCES, "Number of generated ret instructions.");
    STAM_REG(pVM, &pVM->patm.s.StatGenRetReused, STAMTYPE_COUNTER, "/PATM/Gen/RetReused" , STAMUNIT_OCCURENCES, "Number of reused ret instructions.");
    STAM_REG(pVM, &pVM->patm.s.StatGenCall, STAMTYPE_COUNTER, "/PATM/Gen/Call", STAMUNIT_OCCURENCES, "Number of generated call instructions.");
    STAM_REG(pVM, &pVM->patm.s.StatGenJump, STAMTYPE_COUNTER, "/PATM/Gen/Jmp" , STAMUNIT_OCCURENCES, "Number of generated indirect jump instructions.");
    STAM_REG(pVM, &pVM->patm.s.StatGenPopf, STAMTYPE_COUNTER, "/PATM/Gen/Popf" , STAMUNIT_OCCURENCES, "Number of generated popf instructions.");

    STAM_REG(pVM, &pVM->patm.s.StatCheckPendingIRQ, STAMTYPE_COUNTER, "/PATM/GC/CheckIRQ" , STAMUNIT_OCCURENCES, "Number of traps that ask to check for pending irqs.");
#endif /* VBOX_WITH_STATISTICS */

    /* Dump the sizes of the assembly patch templates (from PATMA.h/PATMA.asm). */
    Log(("PATMCallRecord.size %d\n", PATMCallRecord.size));
    Log(("PATMCallIndirectRecord.size %d\n", PATMCallIndirectRecord.size));
    Log(("PATMRetRecord.size %d\n", PATMRetRecord.size));
    Log(("PATMJumpIndirectRecord.size %d\n", PATMJumpIndirectRecord.size));
    Log(("PATMPopf32Record.size %d\n", PATMPopf32Record.size));
    Log(("PATMIretRecord.size %d\n", PATMIretRecord.size));
    Log(("PATMStiRecord.size %d\n", PATMStiRecord.size));
    Log(("PATMCheckIFRecord.size %d\n", PATMCheckIFRecord.size));

    return rc;
}
286
287/**
288 * Finalizes HMA page attributes.
289 *
290 * @returns VBox status code.
291 * @param pVM The VM handle.
292 */
293VMMR3DECL(int) PATMR3InitFinalize(PVM pVM)
294{
295 /* The GC state, stack and statistics must be read/write for the guest (supervisor only of course). */
296 int rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStateGC, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
297 if (RT_FAILURE(rc))
298 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
299
300 rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStackGC, PATM_STACK_TOTAL_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
301 if (RT_FAILURE(rc))
302 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
303
304 rc = PGMMapSetPage(pVM, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
305 if (RT_FAILURE(rc))
306 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
307
308 return rc;
309}
310
/**
 * (Re)initializes PATM
 *
 * Clears the GC state page, the PATM stack, the statistics area and the patch
 * memory, resets all bookkeeping fields and regenerates the global helper
 * functions as a fake patch at the start of patch memory.  Called from
 * PATMR3Init() and PATMR3Reset(); the memory itself must already have been
 * allocated by PATMR3Init().
 *
 * @returns VBox status code.
 * @param   pVM     The VM.
 */
static int patmReinit(PVM pVM)
{
    int rc;

    /*
     * Assert alignment and sizes.
     */
    AssertRelease(!(RT_OFFSETOF(VM, patm.s) & 31));
    AssertRelease(sizeof(pVM->patm.s) <= sizeof(pVM->patm.padding));

    /*
     * Setup any fixed pointers and offsets.
     */
    pVM->patm.s.offVM = RT_OFFSETOF(VM, patm);

#ifndef RT_ARCH_AMD64 /* would be nice if this was changed everywhere. was driving me crazy on AMD64. */
#ifndef PATM_DISABLE_ALL
    pVM->fPATMEnabled = true;
#endif
#endif

    Assert(pVM->patm.s.pGCStateHC);
    memset(pVM->patm.s.pGCStateHC, 0, PAGE_SIZE);
    AssertReleaseMsg(pVM->patm.s.pGCStateGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStateGC));

    Log(("Patch memory allocated at %p - %RRv\n", pVM->patm.s.pPatchMemHC, pVM->patm.s.pPatchMemGC));
    pVM->patm.s.pGCStateHC->uVMFlags = X86_EFL_IF;  /* virtual EFLAGS start out with IF set */

    Assert(pVM->patm.s.pGCStackHC);
    memset(pVM->patm.s.pGCStackHC, 0, PAGE_SIZE);
    AssertReleaseMsg(pVM->patm.s.pGCStackGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStackGC));
    pVM->patm.s.pGCStateHC->Psp = PATM_STACK_SIZE;  /* empty private stack (grows downwards) */
    pVM->patm.s.pGCStateHC->fPIF = 1; /* PATM Interrupt Flag */

    Assert(pVM->patm.s.pStatsHC);
    memset(pVM->patm.s.pStatsHC, 0, PATM_STAT_MEMSIZE);
    AssertReleaseMsg(pVM->patm.s.pStatsGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pStatsGC));

    Assert(pVM->patm.s.pPatchMemHC);
    Assert(pVM->patm.s.pPatchMemGC == MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC));
    memset(pVM->patm.s.pPatchMemHC, 0, PATCH_MEMORY_SIZE);
    AssertReleaseMsg(pVM->patm.s.pPatchMemGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pPatchMemHC));

    /* Needed for future patching of sldt/sgdt/sidt/str etc. */
    pVM->patm.s.pCPUMCtxGC = VM_RC_ADDR(pVM, CPUMQueryGuestCtxPtr(VMMGetCpu(pVM)));

    Assert(pVM->patm.s.PatchLookupTreeHC);
    Assert(pVM->patm.s.PatchLookupTreeGC == MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC));

    /*
     * (Re)Initialize PATM structure
     */
    Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
    Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr);
    Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
    pVM->patm.s.offPatchMem = 16;       /* don't start with zero here */
    pVM->patm.s.uCurrentPatchIdx = 1;   /* Index zero is a dummy */
    pVM->patm.s.pvFaultMonitor = 0;
    pVM->patm.s.deltaReloc = 0;

    /* Lowest and highest patched instruction */
    pVM->patm.s.pPatchedInstrGCLowest = ~0;
    pVM->patm.s.pPatchedInstrGCHighest = 0;

    pVM->patm.s.PatchLookupTreeHC->PatchTree = 0;
    pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
    pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;

    pVM->patm.s.pfnSysEnterPatchGC = 0;
    pVM->patm.s.pfnSysEnterGC = 0;

    pVM->patm.s.fOutOfMemory = false;

    pVM->patm.s.pfnHelperCallGC = 0;

    /* Generate all global functions to be used by future patches. */
    /* We generate a fake patch in order to use the existing code for relocation. */
    rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pVM->patm.s.pGlobalPatchRec);
    if (RT_FAILURE(rc))
    {
        Log(("Out of memory!!!!\n"));
        return VERR_NO_MEMORY;
    }
    pVM->patm.s.pGlobalPatchRec->patch.flags = PATMFL_GLOBAL_FUNCTIONS;
    pVM->patm.s.pGlobalPatchRec->patch.uState = PATCH_ENABLED;
    pVM->patm.s.pGlobalPatchRec->patch.pPatchBlockOffset = pVM->patm.s.offPatchMem;

    rc = patmPatchGenGlobalFunctions(pVM, &pVM->patm.s.pGlobalPatchRec->patch);
    AssertRC(rc);

    /* Update free pointer in patch memory. */
    pVM->patm.s.offPatchMem += pVM->patm.s.pGlobalPatchRec->patch.uCurPatchOffset;
    /* Round to next 8 byte boundary. */
    pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
    return rc;
}
412
413
/**
 * Applies relocations to data and code managed by this component.  This
 * function will be called at init and whenever the VMM needs to relocate
 * itself inside the GC.
 *
 * The delta is derived from the new vs. old GC address of the GC state page;
 * every cached GC pointer and every patch fixup is shifted by that amount
 * (see RelocatePatches for the per-patch work).
 *
 * @param   pVM     The VM.
 */
VMMR3DECL(void) PATMR3Relocate(PVM pVM)
{
    RTRCPTR     GCPtrNew = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
    RTRCINTPTR  delta = GCPtrNew - pVM->patm.s.pGCStateGC;

    Log(("PATMR3Relocate from %RRv to %RRv - delta %08X\n", pVM->patm.s.pGCStateGC, GCPtrNew, delta));
    if (delta)
    {
        PCPUMCTX pCtx;

        /* Update CPUMCTX guest context pointer. */
        pVM->patm.s.pCPUMCtxGC += delta;

        /* Record the shift; RelocatePatches reads it when fixing up each patch. */
        pVM->patm.s.deltaReloc = delta;

        RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, RelocatePatches, (void *)pVM);

        pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));

        /* If we are running patch code right now, then also adjust EIP. */
        if (PATMIsPatchGCAddr(pVM, pCtx->eip))
            pCtx->eip += delta;

        /* Recompute all cached GC addresses from their (unchanged) ring-3 counterparts. */
        pVM->patm.s.pGCStateGC = GCPtrNew;
        pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);

        pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);

        pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);

        pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);

        if (pVM->patm.s.pfnSysEnterPatchGC)
            pVM->patm.s.pfnSysEnterPatchGC += delta;

        /* Deal with the global patch functions. */
        pVM->patm.s.pfnHelperCallGC += delta;
        pVM->patm.s.pfnHelperRetGC  += delta;
        pVM->patm.s.pfnHelperIretGC += delta;
        pVM->patm.s.pfnHelperJumpGC += delta;

        /* The global (fake) patch record is relocated explicitly — presumably
           because it is not in the lookup tree walked above; confirm against
           patmReinit, which allocates it separately. */
        RelocatePatches(&pVM->patm.s.pGlobalPatchRec->Core, (void *)pVM);
    }
}
467
468
469/**
470 * Terminates the PATM.
471 *
472 * Termination means cleaning up and freeing all resources,
473 * the VM it self is at this point powered off or suspended.
474 *
475 * @returns VBox status code.
476 * @param pVM The VM to operate on.
477 */
478VMMR3DECL(int) PATMR3Term(PVM pVM)
479{
480 /* Memory was all allocated from the two MM heaps and requires no freeing. */
481 return VINF_SUCCESS;
482}
483
484
485/**
486 * PATM reset callback.
487 *
488 * @returns VBox status code.
489 * @param pVM The VM which is reset.
490 */
491VMMR3DECL(int) PATMR3Reset(PVM pVM)
492{
493 Log(("PATMR3Reset\n"));
494
495 /* Free all patches. */
496 while (true)
497 {
498 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32RemoveBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, 0, true);
499 if (pPatchRec)
500 {
501 PATMRemovePatch(pVM, pPatchRec, true);
502 }
503 else
504 break;
505 }
506 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
507 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
508 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
509 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
510
511 int rc = patmReinit(pVM);
512 if (RT_SUCCESS(rc))
513 rc = PATMR3InitFinalize(pVM); /* paranoia */
514
515 return rc;
516}
517
/**
 * Read callback for disassembly function; supports reading bytes that cross a
 * page boundary.
 *
 * Resolution order: (1) with PATMREAD_ORGCODE, the original (pre-patch)
 * opcode bytes recorded by PATM; (2) the cached instruction bytes supplied in
 * pDisInfo->pInstrHC when the read stays within the cached guest page;
 * (3) a plain guest-memory read through PGM.
 *
 * @returns VBox status code.
 * @param   pSrc        GC source pointer.
 * @param   pDest       HC destination pointer.
 * @param   size        Number of bytes to read.
 * @param   pvUserdata  Callback specific user data (pCpu).
 */
int patmReadBytes(RTUINTPTR pSrc, uint8_t *pDest, unsigned size, void *pvUserdata)
{
    DISCPUSTATE *pCpu = (DISCPUSTATE *)pvUserdata;
    PATMDISASM *pDisInfo = (PATMDISASM *)pCpu->apvUserData[0];
    int orgsize = size;     /* original request length; 'size' shrinks as bytes are satisfied below */

    Assert(size);
    if (size == 0)
        return VERR_INVALID_PARAMETER;

    /*
     * Trap/interrupt handler typically call common code on entry. Which might already have patches inserted.
     * As we currently don't support calling patch code from patch code, we'll let it read the original opcode bytes instead.
     */
    /** @todo could change in the future! */
    if (pDisInfo->fReadFlags & PATMREAD_ORGCODE)
    {
        /* Fetch the recorded original bytes one at a time, advancing pSrc and
           pDest; stop at the first address PATM has no original byte for and
           let the code below read the remainder. */
        for (int i=0;i<orgsize;i++)
        {
            int rc = PATMR3QueryOpcode(pDisInfo->pVM, (RTRCPTR)pSrc, pDest);
            if (RT_SUCCESS(rc))
            {
                pSrc++;
                pDest++;
                size--;
            }
            else break;
        }
        if (size == 0)
            return VINF_SUCCESS;
#ifdef VBOX_STRICT
        /* Sanity: unless explicitly exempted, the remaining bytes must not
           overlap a patch jump we installed in guest code. */
        if ( !(pDisInfo->pPatchInfo->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER))
            && !(pDisInfo->fReadFlags & PATMREAD_NOCHECK))
        {
            Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pSrc, NULL) == false);
            Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pSrc+size-1, NULL) == false);
        }
#endif
    }

    /* No cached bytes, or the (remaining) read would leave the cached guest
       page and isn't patch memory: read through PGM, which copes with
       non-present pages. */
    if ( !pDisInfo->pInstrHC
        || ( PAGE_ADDRESS(pDisInfo->pInstrGC) != PAGE_ADDRESS(pSrc + size - 1)
           && !PATMIsPatchGCAddr(pDisInfo->pVM, pSrc)))
    {
        Assert(!PATMIsPatchGCAddr(pDisInfo->pVM, pSrc));
        return PGMPhysSimpleReadGCPtr(&pDisInfo->pVM->aCpus[0], pDest, pSrc, size);
    }
    else
    {
        Assert(pDisInfo->pInstrHC);

        uint8_t *pInstrHC = pDisInfo->pInstrHC;

        Assert(pInstrHC);

        /* pInstrHC is the base address; adjust according to the GC pointer. */
        pInstrHC = pInstrHC + (pSrc - pDisInfo->pInstrGC);

        memcpy(pDest, (void *)pInstrHC, size);
    }

    return VINF_SUCCESS;
}
591
592/**
593 * Callback function for RTAvloU32DoWithAll
594 *
595 * Updates all fixups in the patches
596 *
597 * @returns VBox status code.
598 * @param pNode Current node
599 * @param pParam The VM to operate on.
600 */
601static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam)
602{
603 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
604 PVM pVM = (PVM)pParam;
605 RTRCINTPTR delta;
606#ifdef LOG_ENABLED
607 DISCPUSTATE cpu;
608 char szOutput[256];
609 uint32_t opsize;
610 bool disret;
611#endif
612 int rc;
613
614 /* Nothing to do if the patch is not active. */
615 if (pPatch->patch.uState == PATCH_REFUSED)
616 return 0;
617
618#ifdef LOG_ENABLED
619 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
620 {
621 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
622 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pPatch->patch.pPrivInstrGC, NULL, &opsize, szOutput, PATMREAD_RAWCODE);
623 Log(("Org patch jump: %s", szOutput));
624 }
625#endif
626
627 Log(("Nr of fixups %d\n", pPatch->patch.nrFixups));
628 delta = (RTRCINTPTR)pVM->patm.s.deltaReloc;
629
630 /*
631 * Apply fixups
632 */
633 PRELOCREC pRec = 0;
634 AVLPVKEY key = 0;
635
636 while (true)
637 {
638 /* Get the record that's closest from above */
639 pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true);
640 if (pRec == 0)
641 break;
642
643 key = (AVLPVKEY)(pRec->pRelocPos + 1); /* search for the next record during the next round. */
644
645 switch (pRec->uType)
646 {
647 case FIXUP_ABSOLUTE:
648 Log(("Absolute fixup at %RRv %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
649 if ( !pRec->pSource
650 || PATMIsPatchGCAddr(pVM, pRec->pSource))
651 {
652 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
653 }
654 else
655 {
656 uint8_t curInstr[15];
657 uint8_t oldInstr[15];
658 Assert(pRec->pSource && pPatch->patch.cbPrivInstr <= 15);
659
660 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
661
662 memcpy(oldInstr, pPatch->patch.aPrivInstr, pPatch->patch.cbPrivInstr);
663 *(RTRCPTR *)&oldInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
664
665 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), curInstr, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPrivInstr);
666 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
667
668 pRec->pDest = (RTRCPTR)((RTRCUINTPTR)pRec->pDest + delta);
669
670 if ( rc == VERR_PAGE_NOT_PRESENT
671 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
672 {
673 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
674
675 Log(("PATM: Patch page not present -> check later!\n"));
676 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
677 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
678 }
679 else
680 if (memcmp(curInstr, oldInstr, pPatch->patch.cbPrivInstr))
681 {
682 Log(("PATM: Patch was overwritten -> disabling patch!!\n"));
683 /*
684 * Disable patch; this is not a good solution
685 */
686 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
687 pPatch->patch.uState = PATCH_DISABLED;
688 }
689 else
690 if (RT_SUCCESS(rc))
691 {
692 *(RTRCPTR *)&curInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
693 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pRec->pSource, curInstr, pPatch->patch.cbPrivInstr);
694 AssertRC(rc);
695 }
696 }
697 break;
698
699 case FIXUP_REL_JMPTOPATCH:
700 {
701 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
702
703 if ( pPatch->patch.uState == PATCH_ENABLED
704 && (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE))
705 {
706 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
707 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
708 RTRCPTR pJumpOffGC;
709 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
710 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
711
712#if 0 /** @todo '*(int32_t*)pRec->pRelocPos' crashes on restore of an XP VM here. pRelocPos=0x8000dbe2180a (bird) */
713 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
714#else
715 Log(("Relative fixup (g2p) ???????? -> %08X at %08X (source=%08x, target=%08x)\n", displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
716#endif
717
718 Assert(pRec->pSource - pPatch->patch.cbPatchJump == pPatch->patch.pPrivInstrGC);
719#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
720 if (pPatch->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
721 {
722 Assert(pPatch->patch.flags & PATMFL_JUMP_CONFLICT);
723
724 pJumpOffGC = pPatch->patch.pPrivInstrGC + 2; //two byte opcode
725 oldJump[0] = pPatch->patch.aPrivInstr[0];
726 oldJump[1] = pPatch->patch.aPrivInstr[1];
727 *(RTRCUINTPTR *)&oldJump[2] = displOld;
728 }
729 else
730#endif
731 if (pPatch->patch.cbPatchJump == SIZEOF_NEARJUMP32)
732 {
733 pJumpOffGC = pPatch->patch.pPrivInstrGC + 1; //one byte opcode
734 oldJump[0] = 0xE9;
735 *(RTRCUINTPTR *)&oldJump[1] = displOld;
736 }
737 else
738 {
739 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->patch.cbPatchJump));
740 continue; //this should never happen!!
741 }
742 Assert(pPatch->patch.cbPatchJump <= sizeof(temp));
743
744 /*
745 * Read old patch jump and compare it to the one we previously installed
746 */
747 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPatchJump);
748 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
749
750 if ( rc == VERR_PAGE_NOT_PRESENT
751 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
752 {
753 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
754
755 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
756 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
757 }
758 else
759 if (memcmp(temp, oldJump, pPatch->patch.cbPatchJump))
760 {
761 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
762 /*
763 * Disable patch; this is not a good solution
764 */
765 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
766 pPatch->patch.uState = PATCH_DISABLED;
767 }
768 else
769 if (RT_SUCCESS(rc))
770 {
771 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
772 AssertRC(rc);
773 }
774 else
775 AssertMsgFailed(("Unexpected error %d from MMR3PhysReadGCVirt\n", rc));
776 }
777 else
778 Log(("Skip the guest jump to patch code for this disabled patch %RGv - %08X\n", pPatch->patch.pPrivInstrGC, pRec->pRelocPos));
779
780 pRec->pDest = pTarget;
781 break;
782 }
783
784 case FIXUP_REL_JMPTOGUEST:
785 {
786 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
787 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
788
789 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
790 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
791 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
792 pRec->pSource = pSource;
793 break;
794 }
795
796 default:
797 AssertMsg(0, ("Invalid fixup type!!\n"));
798 return VERR_INVALID_PARAMETER;
799 }
800 }
801
802#ifdef LOG_ENABLED
803 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
804 {
805 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
806 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pPatch->patch.pPrivInstrGC, NULL, &opsize, szOutput, PATMREAD_RAWCODE);
807 Log(("Rel patch jump: %s", szOutput));
808 }
809#endif
810 return 0;
811}
812
813/**
814 * \#PF Handler callback for virtual access handler ranges.
815 *
816 * Important to realize that a physical page in a range can have aliases, and
817 * for ALL and WRITE handlers these will also trigger.
818 *
819 * @returns VINF_SUCCESS if the handler have carried out the operation.
820 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
821 * @param pVM VM Handle.
822 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
823 * @param pvPtr The HC mapping of that address.
824 * @param pvBuf What the guest is reading/writing.
825 * @param cbBuf How much it's reading/writing.
826 * @param enmAccessType The access type.
827 * @param pvUser User argument.
828 */
829DECLCALLBACK(int) patmVirtPageHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
830{
831 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
832 /** @todo could be the wrong virtual address (alias) */
833 pVM->patm.s.pvFaultMonitor = GCPtr;
834 PATMR3HandleMonitoredPage(pVM);
835 return VINF_PGM_HANDLER_DO_DEFAULT;
836}
837
838
839#ifdef VBOX_WITH_DEBUGGER
840/**
841 * Callback function for RTAvloU32DoWithAll
842 *
843 * Enables the patch that's being enumerated
844 *
845 * @returns 0 (continue enumeration).
846 * @param pNode Current node
847 * @param pVM The VM to operate on.
848 */
849static DECLCALLBACK(int) EnableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
850{
851 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
852
853 PATMR3EnablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
854 return 0;
855}
856#endif /* VBOX_WITH_DEBUGGER */
857
858
859#ifdef VBOX_WITH_DEBUGGER
860/**
861 * Callback function for RTAvloU32DoWithAll
862 *
863 * Disables the patch that's being enumerated
864 *
865 * @returns 0 (continue enumeration).
866 * @param pNode Current node
867 * @param pVM The VM to operate on.
868 */
869static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
870{
871 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
872
873 PATMR3DisablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
874 return 0;
875}
876#endif
877
878/**
879 * Returns the host context pointer and size of the patch memory block
880 *
881 * @returns VBox status code.
882 * @param pVM The VM to operate on.
883 * @param pcb Size of the patch memory block
884 */
885VMMR3DECL(void *) PATMR3QueryPatchMemHC(PVM pVM, uint32_t *pcb)
886{
887 if (pcb)
888 *pcb = pVM->patm.s.cbPatchMem;
889
890 return pVM->patm.s.pPatchMemHC;
891}
892
893
894/**
895 * Returns the guest context pointer and size of the patch memory block
896 *
897 * @returns VBox status code.
898 * @param pVM The VM to operate on.
899 * @param pcb Size of the patch memory block
900 */
901VMMR3DECL(RTRCPTR) PATMR3QueryPatchMemGC(PVM pVM, uint32_t *pcb)
902{
903 if (pcb)
904 *pcb = pVM->patm.s.cbPatchMem;
905
906 return pVM->patm.s.pPatchMemGC;
907}
908
909
910/**
911 * Returns the host context pointer of the GC context structure
912 *
913 * @returns VBox status code.
914 * @param pVM The VM to operate on.
915 */
916VMMR3DECL(PPATMGCSTATE) PATMR3QueryGCStateHC(PVM pVM)
917{
918 return pVM->patm.s.pGCStateHC;
919}
920
921
922/**
923 * Checks whether the HC address is part of our patch region
924 *
925 * @returns VBox status code.
926 * @param pVM The VM to operate on.
927 * @param pAddrGC Guest context address
928 */
929VMMR3DECL(bool) PATMR3IsPatchHCAddr(PVM pVM, R3PTRTYPE(uint8_t *) pAddrHC)
930{
931 return (pAddrHC >= pVM->patm.s.pPatchMemHC && pAddrHC < pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem) ? true : false;
932}
933
934
935/**
936 * Allows or disallow patching of privileged instructions executed by the guest OS
937 *
938 * @returns VBox status code.
939 * @param pVM The VM to operate on.
940 * @param fAllowPatching Allow/disallow patching
941 */
942VMMR3DECL(int) PATMR3AllowPatching(PVM pVM, uint32_t fAllowPatching)
943{
944 pVM->fPATMEnabled = (fAllowPatching) ? true : false;
945 return VINF_SUCCESS;
946}
947
948/**
949 * Convert a GC patch block pointer to a HC patch pointer
950 *
951 * @returns HC pointer or NULL if it's not a GC patch pointer
952 * @param pVM The VM to operate on.
953 * @param pAddrGC GC pointer
954 */
955VMMR3DECL(R3PTRTYPE(void *)) PATMR3GCPtrToHCPtr(PVM pVM, RTRCPTR pAddrGC)
956{
957 if (pVM->patm.s.pPatchMemGC <= pAddrGC && pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem > pAddrGC)
958 return pVM->patm.s.pPatchMemHC + (pAddrGC - pVM->patm.s.pPatchMemGC);
959 else
960 return NULL;
961}
962
963/**
964 * Query PATM state (enabled/disabled)
965 *
966 * @returns 0 - disabled, 1 - enabled
967 * @param pVM The VM to operate on.
968 */
969VMMR3DECL(int) PATMR3IsEnabled(PVM pVM)
970{
971 return pVM->fPATMEnabled;
972}
973
974
975/**
976 * Convert guest context address to host context pointer
977 *
978 * @returns VBox status code.
979 * @param pVM The VM to operate on.
980 * @param pCacheRec Address conversion cache record
981 * @param pGCPtr Guest context pointer
982 *
983 * @returns Host context pointer or NULL in case of an error
984 *
985 */
986R3PTRTYPE(uint8_t *) PATMGCVirtToHCVirt(PVM pVM, PPATMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr)
987{
988 int rc;
989 R3PTRTYPE(uint8_t *) pHCPtr;
990 uint32_t offset;
991
992 if (PATMIsPatchGCAddr(pVM, pGCPtr))
993 {
994 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
995 Assert(pPatch);
996 return PATCHCODE_PTR_HC(pPatch) + (pGCPtr - PATCHCODE_PTR_GC(pPatch));
997 }
998
999 offset = pGCPtr & PAGE_OFFSET_MASK;
1000 if (pCacheRec->pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
1001 return pCacheRec->pPageLocStartHC + offset;
1002
1003 /* Release previous lock if any. */
1004 if (pCacheRec->Lock.pvMap)
1005 {
1006 PGMPhysReleasePageMappingLock(pVM, &pCacheRec->Lock);
1007 pCacheRec->Lock.pvMap = NULL;
1008 }
1009
1010 rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pGCPtr, (const void **)&pHCPtr, &pCacheRec->Lock);
1011 if (rc != VINF_SUCCESS)
1012 {
1013 AssertMsg(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("MMR3PhysGCVirt2HCVirtEx failed for %08X\n", pGCPtr));
1014 return NULL;
1015 }
1016 pCacheRec->pPageLocStartHC = (R3PTRTYPE(uint8_t*))((RTHCUINTPTR)pHCPtr & PAGE_BASE_HC_MASK);
1017 pCacheRec->pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
1018 return pHCPtr;
1019}
1020
1021
/**
 * Calculates and fills in all branch targets inside a freshly generated
 * patch block.
 *
 * Consumes (removes) all jump records of the patch as it goes; they are no
 * longer needed afterwards.
 *
 * @returns VBox status code.
 * @param pVM The VM to operate on.
 * @param pPatch Current patch block pointer
 */
static int patmr3SetBranchTargets(PVM pVM, PPATCHINFO pPatch)
{
    int32_t displ;

    PJUMPREC pRec = 0;
    unsigned nrJumpRecs = 0;

    /*
     * Set all branch targets inside the patch block.
     * We remove all jump records as they are no longer needed afterwards.
     */
    while (true)
    {
        RCPTRTYPE(uint8_t *) pInstrGC;
        RCPTRTYPE(uint8_t *) pBranchTargetGC = 0;

        pRec = (PJUMPREC)RTAvlPVRemoveBestFit(&pPatch->JumpTree, 0, true);
        if (pRec == 0)
            break;

        nrJumpRecs++;

        /* HC in patch block to GC in patch block. */
        pInstrGC = patmPatchHCPtr2PatchGCPtr(pVM, pRec->pJumpHC);

        if (pRec->opcode == OP_CALL)
        {
            /* Special case: call function replacement patch from this patch block.
             */
            PPATMPATCHREC pFunctionRec = PATMQueryFunctionPatch(pVM, pRec->pTargetGC);
            if (!pFunctionRec)
            {
                int rc;

                /* No function patch for the target yet; try to install one now. */
                if (PATMR3HasBeenPatched(pVM, pRec->pTargetGC) == false)
                    rc = PATMR3InstallPatch(pVM, pRec->pTargetGC, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
                else
                    rc = VERR_PATCHING_REFUSED; /* exists as a normal patch; can't use it */

                if (RT_FAILURE(rc))
                {
                    uint8_t *pPatchHC;
                    RTRCPTR pPatchGC;
                    RTRCPTR pOrgInstrGC;

                    pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pInstrGC, 0);
                    Assert(pOrgInstrGC);

                    /* Failure for some reason -> mark exit point with int 3. */
                    Log(("Failed to install function replacement patch (at %x) for reason %Rrc\n", pOrgInstrGC, rc));

                    pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pOrgInstrGC);
                    Assert(pPatchGC);

                    pPatchHC = pVM->patm.s.pPatchMemHC + (pPatchGC - pVM->patm.s.pPatchMemGC);

                    /* Set a breakpoint at the very beginning of the recompiled instruction */
                    *pPatchHC = 0xCC;

                    continue;
                }
            }
            else
            {
                Log(("Patch block %RRv called as function\n", pFunctionRec->patch.pPrivInstrGC));
                pFunctionRec->patch.flags |= PATMFL_CODE_REFERENCED;
            }

            pBranchTargetGC = PATMR3QueryPatchGCPtr(pVM, pRec->pTargetGC);
        }
        else
            pBranchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pRec->pTargetGC);

        if (pBranchTargetGC == 0)
        {
            AssertMsgFailed(("patmr3SetBranchTargets: patmGuestGCPtrToPatchGCPtr failed for %08X\n", pRec->pTargetGC));
            return VERR_PATCHING_REFUSED;
        }
        /* Our jumps *always* have a dword displacement (to make things easier). */
        Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
        displ = pBranchTargetGC - (pInstrGC + pRec->offDispl + sizeof(RTRCPTR));
        *(RTRCPTR *)(pRec->pJumpHC + pRec->offDispl) = displ;
        Log(("Set branch target %d to %08X : %08x - (%08x + %d + %d)\n", nrJumpRecs, displ, pBranchTargetGC, pInstrGC, pRec->offDispl, sizeof(RTRCPTR)));
    }
    /* All jump records must have been consumed and the tree must be empty now. */
    Assert(nrJumpRecs == pPatch->nrJumpRecs);
    Assert(pPatch->JumpTree == 0);
    return VINF_SUCCESS;
}
1117
1118/* Add an illegal instruction record
1119 *
1120 * @param pVM The VM to operate on.
1121 * @param pPatch Patch structure ptr
1122 * @param pInstrGC Guest context pointer to privileged instruction
1123 *
1124 */
1125static void patmAddIllegalInstrRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1126{
1127 PAVLPVNODECORE pRec;
1128
1129 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1130 Assert(pRec);
1131 pRec->Key = (AVLPVKEY)pInstrGC;
1132
1133 bool ret = RTAvlPVInsert(&pPatch->pTempInfo->IllegalInstrTree, pRec);
1134 Assert(ret); NOREF(ret);
1135 pPatch->pTempInfo->nrIllegalInstr++;
1136}
1137
1138static bool patmIsIllegalInstr(PPATCHINFO pPatch, RTRCPTR pInstrGC)
1139{
1140 PAVLPVNODECORE pRec;
1141
1142 pRec = RTAvlPVGet(&pPatch->pTempInfo->IllegalInstrTree, (AVLPVKEY)pInstrGC);
1143 if (pRec)
1144 return true;
1145 else
1146 return false;
1147}
1148
1149/**
1150 * Add a patch to guest lookup record
1151 *
1152 * @param pVM The VM to operate on.
1153 * @param pPatch Patch structure ptr
1154 * @param pPatchInstrHC Guest context pointer to patch block
1155 * @param pInstrGC Guest context pointer to privileged instruction
1156 * @param enmType Lookup type
1157 * @param fDirty Dirty flag
1158 *
1159 */
1160 /** @note Be extremely careful with this function. Make absolutely sure the guest address is correct! (to avoid executing instructions twice!) */
1161void patmr3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC, PATM_LOOKUP_TYPE enmType, bool fDirty)
1162{
1163 bool ret;
1164 PRECPATCHTOGUEST pPatchToGuestRec;
1165 PRECGUESTTOPATCH pGuestToPatchRec;
1166 uint32_t PatchOffset = pPatchInstrHC - pVM->patm.s.pPatchMemHC; /* Offset in memory reserved for PATM. */
1167
1168 LogFlowFunc(("pVM=%#p pPatch=%#p pPatchInstrHC=%#p pInstrGC=%#x enmType=%d fDirty=%RTbool\n",
1169 pVM, pPatch, pPatchInstrHC, pInstrGC, enmType, fDirty));
1170
1171 if (enmType == PATM_LOOKUP_PATCH2GUEST)
1172 {
1173 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1174 if (pPatchToGuestRec && pPatchToGuestRec->Core.Key == PatchOffset)
1175 return; /* already there */
1176
1177 Assert(!pPatchToGuestRec);
1178 }
1179#ifdef VBOX_STRICT
1180 else
1181 {
1182 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1183 Assert(!pPatchToGuestRec);
1184 }
1185#endif
1186
1187 pPatchToGuestRec = (PRECPATCHTOGUEST)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(RECPATCHTOGUEST) + sizeof(RECGUESTTOPATCH));
1188 Assert(pPatchToGuestRec);
1189 pPatchToGuestRec->Core.Key = PatchOffset;
1190 pPatchToGuestRec->pOrgInstrGC = pInstrGC;
1191 pPatchToGuestRec->enmType = enmType;
1192 pPatchToGuestRec->fDirty = fDirty;
1193
1194 ret = RTAvlU32Insert(&pPatch->Patch2GuestAddrTree, &pPatchToGuestRec->Core);
1195 Assert(ret);
1196
1197 /* GC to patch address */
1198 if (enmType == PATM_LOOKUP_BOTHDIR)
1199 {
1200 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
1201 if (!pGuestToPatchRec)
1202 {
1203 pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1204 pGuestToPatchRec->Core.Key = pInstrGC;
1205 pGuestToPatchRec->PatchOffset = PatchOffset;
1206
1207 ret = RTAvlU32Insert(&pPatch->Guest2PatchAddrTree, &pGuestToPatchRec->Core);
1208 Assert(ret);
1209 }
1210 }
1211
1212 pPatch->nrPatch2GuestRecs++;
1213}
1214
1215
1216/**
1217 * Removes a patch to guest lookup record
1218 *
1219 * @param pVM The VM to operate on.
1220 * @param pPatch Patch structure ptr
1221 * @param pPatchInstrGC Guest context pointer to patch block
1222 */
1223void patmr3RemoveP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pPatchInstrGC)
1224{
1225 PAVLU32NODECORE pNode;
1226 PAVLU32NODECORE pNode2;
1227 PRECPATCHTOGUEST pPatchToGuestRec;
1228 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
1229
1230 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1231 Assert(pPatchToGuestRec);
1232 if (pPatchToGuestRec)
1233 {
1234 if (pPatchToGuestRec->enmType == PATM_LOOKUP_BOTHDIR)
1235 {
1236 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1237
1238 Assert(pGuestToPatchRec->Core.Key);
1239 pNode2 = RTAvlU32Remove(&pPatch->Guest2PatchAddrTree, pGuestToPatchRec->Core.Key);
1240 Assert(pNode2);
1241 }
1242 pNode = RTAvlU32Remove(&pPatch->Patch2GuestAddrTree, pPatchToGuestRec->Core.Key);
1243 Assert(pNode);
1244
1245 MMR3HeapFree(pPatchToGuestRec);
1246 pPatch->nrPatch2GuestRecs--;
1247 }
1248}
1249
1250
1251/**
1252 * RTAvlPVDestroy callback.
1253 */
1254static DECLCALLBACK(int) patmEmptyTreePVCallback(PAVLPVNODECORE pNode, void *)
1255{
1256 MMR3HeapFree(pNode);
1257 return 0;
1258}
1259
1260/**
1261 * Empty the specified tree (PV tree, MMR3 heap)
1262 *
1263 * @param pVM The VM to operate on.
1264 * @param ppTree Tree to empty
1265 */
1266void patmEmptyTree(PVM pVM, PAVLPVNODECORE *ppTree)
1267{
1268 RTAvlPVDestroy(ppTree, patmEmptyTreePVCallback, NULL);
1269}
1270
1271
1272/**
1273 * RTAvlU32Destroy callback.
1274 */
1275static DECLCALLBACK(int) patmEmptyTreeU32Callback(PAVLU32NODECORE pNode, void *)
1276{
1277 MMR3HeapFree(pNode);
1278 return 0;
1279}
1280
1281/**
1282 * Empty the specified tree (U32 tree, MMR3 heap)
1283 *
1284 * @param pVM The VM to operate on.
1285 * @param ppTree Tree to empty
1286 */
1287void patmEmptyTreeU32(PVM pVM, PPAVLU32NODECORE ppTree)
1288{
1289 RTAvlU32Destroy(ppTree, patmEmptyTreeU32Callback, NULL);
1290}
1291
1292
1293/**
1294 * Analyses the instructions following the cli for compliance with our heuristics for cli & pushf
1295 *
1296 * @returns VBox status code.
1297 * @param pVM The VM to operate on.
1298 * @param pCpu CPU disassembly state
1299 * @param pInstrGC Guest context pointer to privileged instruction
1300 * @param pCurInstrGC Guest context pointer to the current instruction
1301 * @param pCacheRec Cache record ptr
1302 *
1303 */
1304static int patmAnalyseBlockCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1305{
1306 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1307 bool fIllegalInstr = false;
1308
1309 //Preliminary heuristics:
1310 //- no call instructions without a fixed displacement between cli and sti/popf
1311 //- no jumps in the instructions following cli (4+ bytes; enough for the replacement jump (5 bytes))
1312 //- no nested pushf/cli
1313 //- sti/popf should be the (eventual) target of all branches
1314 //- no near or far returns; no int xx, no into
1315 //
1316 // Note: Later on we can impose less stricter guidelines if the need arises
1317
1318 /* Bail out if the patch gets too big. */
1319 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1320 {
1321 Log(("Code block too big (%x) for patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1322 fIllegalInstr = true;
1323 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1324 }
1325 else
1326 {
1327 /* No unconditional jumps or calls without fixed displacements. */
1328 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1329 && (pCpu->pCurInstr->opcode == OP_JMP || pCpu->pCurInstr->opcode == OP_CALL)
1330 )
1331 {
1332 Assert(pCpu->param1.size <= 4 || pCpu->param1.size == 6);
1333 if ( pCpu->param1.size == 6 /* far call/jmp */
1334 || (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1335 || (OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1336 )
1337 {
1338 fIllegalInstr = true;
1339 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1340 }
1341 }
1342
1343 /* An unconditional (short) jump right after a cli is a potential problem; we will overwrite whichever function comes afterwards */
1344 if (pPatch->opcode == OP_CLI && pCpu->pCurInstr->opcode == OP_JMP)
1345 {
1346 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC + pCpu->opsize < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1347 {
1348 Log(("Dangerous unconditional jump ends in our generated patch jump!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1349 /* We turn this one into a int 3 callable patch. */
1350 pPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
1351 }
1352 }
1353 else
1354 /* no nested pushfs just yet; nested cli is allowed for cli patches though. */
1355 if (pPatch->opcode == OP_PUSHF)
1356 {
1357 if (pCurInstrGC != pInstrGC && pCpu->pCurInstr->opcode == OP_PUSHF)
1358 {
1359 fIllegalInstr = true;
1360 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1361 }
1362 }
1363
1364 // no far returns
1365 if (pCpu->pCurInstr->opcode == OP_RETF)
1366 {
1367 pPatch->pTempInfo->nrRetInstr++;
1368 fIllegalInstr = true;
1369 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1370 }
1371 else
1372 // no int xx or into either
1373 if (pCpu->pCurInstr->opcode == OP_INT3 || pCpu->pCurInstr->opcode == OP_INT || pCpu->pCurInstr->opcode == OP_INTO)
1374 {
1375 fIllegalInstr = true;
1376 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1377 }
1378 }
1379
1380 pPatch->cbPatchBlockSize += pCpu->opsize;
1381
1382 /* Illegal instruction -> end of analysis phase for this code block */
1383 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1384 return VINF_SUCCESS;
1385
1386 /* Check for exit points. */
1387 switch (pCpu->pCurInstr->opcode)
1388 {
1389 case OP_SYSEXIT:
1390 return VINF_SUCCESS; /* duplicate it; will fault or emulated in GC. */
1391
1392 case OP_SYSENTER:
1393 case OP_ILLUD2:
1394 //This appears to be some kind of kernel panic in Linux 2.4; no point to analyse more
1395 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1396 return VINF_SUCCESS;
1397
1398 case OP_STI:
1399 case OP_POPF:
1400 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION)));
1401 /* If out exit point lies within the generated patch jump, then we have to refuse!! */
1402 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1403 {
1404 Log(("Exit point within patch jump itself!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1405 return VERR_PATCHING_REFUSED;
1406 }
1407 if (pPatch->opcode == OP_PUSHF)
1408 {
1409 if (pCpu->pCurInstr->opcode == OP_POPF)
1410 {
1411 if (pPatch->cbPatchBlockSize >= SIZEOF_NEARJUMP32)
1412 return VINF_SUCCESS;
1413
1414 /* Or else we need to duplicate more instructions, because we can't jump back yet! */
1415 Log(("WARNING: End of block reached, but we need to duplicate some extra instruction to avoid a conflict with the patch jump\n"));
1416 pPatch->flags |= PATMFL_CHECK_SIZE;
1417 }
1418 break; //sti doesn't mark the end of a pushf block; only popf does
1419 }
1420 //else no break
1421 case OP_RETN: /* exit point for function replacement */
1422 return VINF_SUCCESS;
1423
1424 case OP_IRET:
1425 return VINF_SUCCESS; /* exitpoint */
1426
1427 case OP_CPUID:
1428 case OP_CALL:
1429 case OP_JMP:
1430 break;
1431
1432 default:
1433 if (pCpu->pCurInstr->optype & (OPTYPE_PRIVILEGED_NOTRAP))
1434 {
1435 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1436 return VINF_SUCCESS; /* exit point */
1437 }
1438 break;
1439 }
1440
1441 // If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
1442 if ((pPatch->flags & PATMFL_CHECK_SIZE) && pPatch->cbPatchBlockSize > SIZEOF_NEARJUMP32 && !(pCpu->pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW))
1443 {
1444 // The end marker for this kind of patch is any instruction at a location outside our patch jump
1445 Log(("End of block at %RRv size %d\n", pCurInstrGC, pCpu->opsize));
1446 return VINF_SUCCESS;
1447 }
1448
1449 return VWRN_CONTINUE_ANALYSIS;
1450}
1451
1452/**
1453 * Analyses the instructions inside a function for compliance
1454 *
1455 * @returns VBox status code.
1456 * @param pVM The VM to operate on.
1457 * @param pCpu CPU disassembly state
1458 * @param pInstrGC Guest context pointer to privileged instruction
1459 * @param pCurInstrGC Guest context pointer to the current instruction
1460 * @param pCacheRec Cache record ptr
1461 *
1462 */
1463static int patmAnalyseFunctionCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1464{
1465 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1466 bool fIllegalInstr = false;
1467
1468 //Preliminary heuristics:
1469 //- no call instructions
1470 //- ret ends a block
1471
1472 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
1473
1474 // bail out if the patch gets too big
1475 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1476 {
1477 Log(("Code block too big (%x) for function patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1478 fIllegalInstr = true;
1479 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1480 }
1481 else
1482 {
1483 // no unconditional jumps or calls without fixed displacements
1484 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1485 && (pCpu->pCurInstr->opcode == OP_JMP || pCpu->pCurInstr->opcode == OP_CALL)
1486 )
1487 {
1488 Assert(pCpu->param1.size <= 4 || pCpu->param1.size == 6);
1489 if ( pCpu->param1.size == 6 /* far call/jmp */
1490 || (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1491 || (OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1492 )
1493 {
1494 fIllegalInstr = true;
1495 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1496 }
1497 }
1498 else /* no far returns */
1499 if (pCpu->pCurInstr->opcode == OP_RETF)
1500 {
1501 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1502 fIllegalInstr = true;
1503 }
1504 else /* no int xx or into either */
1505 if (pCpu->pCurInstr->opcode == OP_INT3 || pCpu->pCurInstr->opcode == OP_INT || pCpu->pCurInstr->opcode == OP_INTO)
1506 {
1507 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1508 fIllegalInstr = true;
1509 }
1510
1511 #if 0
1512 ///@todo we can handle certain in/out and privileged instructions in the guest context
1513 if (pCpu->pCurInstr->optype & OPTYPE_PRIVILEGED && pCpu->pCurInstr->opcode != OP_STI)
1514 {
1515 Log(("Illegal instructions for function patch!!\n"));
1516 return VERR_PATCHING_REFUSED;
1517 }
1518 #endif
1519 }
1520
1521 pPatch->cbPatchBlockSize += pCpu->opsize;
1522
1523 /* Illegal instruction -> end of analysis phase for this code block */
1524 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1525 {
1526 return VINF_SUCCESS;
1527 }
1528
1529 // Check for exit points
1530 switch (pCpu->pCurInstr->opcode)
1531 {
1532 case OP_ILLUD2:
1533 //This appears to be some kind of kernel panic in Linux 2.4; no point to analyse more
1534 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1535 return VINF_SUCCESS;
1536
1537 case OP_IRET:
1538 case OP_SYSEXIT: /* will fault or emulated in GC */
1539 case OP_RETN:
1540 return VINF_SUCCESS;
1541
1542 case OP_POPF:
1543 case OP_STI:
1544 return VWRN_CONTINUE_ANALYSIS;
1545 default:
1546 if (pCpu->pCurInstr->optype & (OPTYPE_PRIVILEGED_NOTRAP))
1547 {
1548 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1549 return VINF_SUCCESS; /* exit point */
1550 }
1551 return VWRN_CONTINUE_ANALYSIS;
1552 }
1553
1554 return VWRN_CONTINUE_ANALYSIS;
1555}
1556
1557/**
1558 * Recompiles the instructions in a code block
1559 *
1560 * @returns VBox status code.
1561 * @param pVM The VM to operate on.
1562 * @param pCpu CPU disassembly state
1563 * @param pInstrGC Guest context pointer to privileged instruction
1564 * @param pCurInstrGC Guest context pointer to the current instruction
1565 * @param pCacheRec Cache record ptr
1566 *
1567 */
1568static int patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1569{
1570 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1571 int rc = VINF_SUCCESS;
1572 bool fInhibitIRQInstr = false; /* did the instruction cause PATMFL_INHIBITIRQS to be set? */
1573
1574 LogFlow(("patmRecompileCallback %RRv %RRv\n", pInstrGC, pCurInstrGC));
1575
1576 if ( patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pCurInstrGC) != 0
1577 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT)) /* do not do this when the next instruction *must* be executed! */
1578 {
1579 /*
1580 * Been there, done that; so insert a jump (we don't want to duplicate code)
1581 * no need to record this instruction as it's glue code that never crashes (it had better not!)
1582 */
1583 Log(("patmRecompileCallback: jump to code we've recompiled before %RRv!\n", pCurInstrGC));
1584 return patmPatchGenRelJump(pVM, pPatch, pCurInstrGC, OP_JMP, !!(pCpu->prefix & PREFIX_OPSIZE));
1585 }
1586
1587 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1588 {
1589 rc = patmAnalyseFunctionCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1590 }
1591 else
1592 rc = patmAnalyseBlockCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1593
1594 if (RT_FAILURE(rc))
1595 return rc;
1596
1597 /* Note: Never do a direct return unless a failure is encountered! */
1598
1599 /* Clear recompilation of next instruction flag; we are doing that right here. */
1600 if (pPatch->flags & PATMFL_RECOMPILE_NEXT)
1601 pPatch->flags &= ~PATMFL_RECOMPILE_NEXT;
1602
1603 /* Add lookup record for patch to guest address translation */
1604 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
1605
1606 /* Update lowest and highest instruction address for this patch */
1607 if (pCurInstrGC < pPatch->pInstrGCLowest)
1608 pPatch->pInstrGCLowest = pCurInstrGC;
1609 else
1610 if (pCurInstrGC > pPatch->pInstrGCHighest)
1611 pPatch->pInstrGCHighest = pCurInstrGC + pCpu->opsize;
1612
1613 /* Illegal instruction -> end of recompile phase for this code block. */
1614 if (patmIsIllegalInstr(pPatch, pCurInstrGC))
1615 {
1616 Log(("Illegal instruction at %RRv -> mark with int 3\n", pCurInstrGC));
1617 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1618 goto end;
1619 }
1620
1621 /* For our first attempt, we'll handle only simple relative jumps (immediate offset coded in instruction).
1622 * Indirect calls are handled below.
1623 */
1624 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1625 && (pCpu->pCurInstr->opcode != OP_CALL || (pPatch->flags & PATMFL_SUPPORT_CALLS))
1626 && (OP_PARM_VTYPE(pCpu->pCurInstr->param1) == OP_PARM_J))
1627 {
1628 RCPTRTYPE(uint8_t *) pTargetGC = PATMResolveBranch(pCpu, pCurInstrGC);
1629 if (pTargetGC == 0)
1630 {
1631 Log(("We don't support far jumps here!! (%08X)\n", pCpu->param1.flags));
1632 return VERR_PATCHING_REFUSED;
1633 }
1634
1635 if (pCpu->pCurInstr->opcode == OP_CALL)
1636 {
1637 Assert(!PATMIsPatchGCAddr(pVM, pTargetGC));
1638 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, pTargetGC, false);
1639 if (RT_FAILURE(rc))
1640 goto end;
1641 }
1642 else
1643 rc = patmPatchGenRelJump(pVM, pPatch, pTargetGC, pCpu->pCurInstr->opcode, !!(pCpu->prefix & PREFIX_OPSIZE));
1644
1645 if (RT_SUCCESS(rc))
1646 rc = VWRN_CONTINUE_RECOMPILE;
1647
1648 goto end;
1649 }
1650
1651 switch (pCpu->pCurInstr->opcode)
1652 {
1653 case OP_CLI:
1654 {
1655 /* If a cli is found while duplicating instructions for another patch, then it's of vital importance to continue
1656 * until we've found the proper exit point(s).
1657 */
1658 if ( pCurInstrGC != pInstrGC
1659 && !(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1660 )
1661 {
1662 Log(("cli instruction found in other instruction patch block; force it to continue & find an exit point\n"));
1663 pPatch->flags &= ~(PATMFL_CHECK_SIZE | PATMFL_SINGLE_INSTRUCTION);
1664 }
1665 /* Set by irq inhibition; no longer valid now. */
1666 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1667
1668 rc = patmPatchGenCli(pVM, pPatch);
1669 if (RT_SUCCESS(rc))
1670 rc = VWRN_CONTINUE_RECOMPILE;
1671 break;
1672 }
1673
1674 case OP_MOV:
1675 if (pCpu->pCurInstr->optype & OPTYPE_POTENTIALLY_DANGEROUS)
1676 {
1677 /* mov ss, src? */
1678 if ( (pCpu->param1.flags & USE_REG_SEG)
1679 && (pCpu->param1.base.reg_seg == DIS_SELREG_SS))
1680 {
1681 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1682 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1683 /** @todo this could cause a fault (ring 0 selector being loaded in ring 1) */
1684 }
1685#if 0 /* necessary for Haiku */
1686 else
1687 if ( (pCpu->param2.flags & USE_REG_SEG)
1688 && (pCpu->param2.base.reg_seg == USE_REG_SS)
1689 && (pCpu->param1.flags & (USE_REG_GEN32|USE_REG_GEN16))) /** @todo memory operand must in theory be handled too */
1690 {
1691 /* mov GPR, ss */
1692 rc = patmPatchGenMovFromSS(pVM, pPatch, pCpu, pCurInstrGC);
1693 if (RT_SUCCESS(rc))
1694 rc = VWRN_CONTINUE_RECOMPILE;
1695 break;
1696 }
1697#endif
1698 }
1699 goto duplicate_instr;
1700
1701 case OP_POP:
1702 if (pCpu->pCurInstr->param1 == OP_PARM_REG_SS)
1703 {
1704 Assert(pCpu->pCurInstr->optype & OPTYPE_INHIBIT_IRQS);
1705
1706 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1707 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1708 }
1709 goto duplicate_instr;
1710
1711 case OP_STI:
1712 {
1713 RTRCPTR pNextInstrGC = 0; /* by default no inhibit irq */
1714
1715 /* In a sequence of instructions that inhibit irqs, only the first one actually inhibits irqs. */
1716 if (!(pPatch->flags & PATMFL_INHIBIT_IRQS))
1717 {
1718 pPatch->flags |= PATMFL_INHIBIT_IRQS | PATMFL_GENERATE_JUMPTOGUEST;
1719 fInhibitIRQInstr = true;
1720 pNextInstrGC = pCurInstrGC + pCpu->opsize;
1721 Log(("Inhibit irqs for instruction OP_STI at %RRv\n", pCurInstrGC));
1722 }
1723 rc = patmPatchGenSti(pVM, pPatch, pCurInstrGC, pNextInstrGC);
1724
1725 if (RT_SUCCESS(rc))
1726 {
1727 DISCPUSTATE cpu = *pCpu;
1728 unsigned opsize;
1729 int disret;
1730 RCPTRTYPE(uint8_t *) pReturnInstrGC;
1731
1732 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1733
1734 pNextInstrGC = pCurInstrGC + pCpu->opsize;
1735 { /* Force pNextInstrHC out of scope after using it */
1736 uint8_t *pNextInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
1737 if (pNextInstrHC == NULL)
1738 {
1739 AssertFailed();
1740 return VERR_PATCHING_REFUSED;
1741 }
1742
1743 // Disassemble the next instruction
1744 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pNextInstrGC, pNextInstrHC, &opsize, NULL);
1745 }
1746 if (disret == false)
1747 {
1748 AssertMsgFailed(("STI: Disassembly failed (probably page not present) -> return to caller\n"));
1749 return VERR_PATCHING_REFUSED;
1750 }
1751 pReturnInstrGC = pNextInstrGC + opsize;
1752
1753 if ( (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1754 || pReturnInstrGC <= pInstrGC
1755 || pReturnInstrGC - pInstrGC >= SIZEOF_NEARJUMP32
1756 )
1757 {
1758 /* Not an exit point for function duplication patches */
1759 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
1760 && RT_SUCCESS(rc))
1761 {
1762 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST; /* Don't generate a jump back */
1763 rc = VWRN_CONTINUE_RECOMPILE;
1764 }
1765 else
1766 rc = VINF_SUCCESS; //exit point
1767 }
1768 else {
1769 Log(("PATM: sti occurred too soon; refusing patch!\n"));
1770 rc = VERR_PATCHING_REFUSED; //not allowed!!
1771 }
1772 }
1773 break;
1774 }
1775
1776 case OP_POPF:
1777 {
1778 bool fGenerateJmpBack = (pCurInstrGC + pCpu->opsize - pInstrGC >= SIZEOF_NEARJUMP32);
1779
1780 /* Not an exit point for IDT handler or function replacement patches */
1781 /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
1782 if (pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_DUPLICATE_FUNCTION))
1783 fGenerateJmpBack = false;
1784
1785 rc = patmPatchGenPopf(pVM, pPatch, pCurInstrGC + pCpu->opsize, !!(pCpu->prefix & PREFIX_OPSIZE), fGenerateJmpBack);
1786 if (RT_SUCCESS(rc))
1787 {
1788 if (fGenerateJmpBack == false)
1789 {
1790 /* Not an exit point for IDT handler or function replacement patches */
1791 rc = VWRN_CONTINUE_RECOMPILE;
1792 }
1793 else
1794 {
1795 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1796 rc = VINF_SUCCESS; /* exit point! */
1797 }
1798 }
1799 break;
1800 }
1801
1802 case OP_PUSHF:
1803 rc = patmPatchGenPushf(pVM, pPatch, !!(pCpu->prefix & PREFIX_OPSIZE));
1804 if (RT_SUCCESS(rc))
1805 rc = VWRN_CONTINUE_RECOMPILE;
1806 break;
1807
1808 case OP_PUSH:
1809 if (pCpu->pCurInstr->param1 == OP_PARM_REG_CS)
1810 {
1811 rc = patmPatchGenPushCS(pVM, pPatch);
1812 if (RT_SUCCESS(rc))
1813 rc = VWRN_CONTINUE_RECOMPILE;
1814 break;
1815 }
1816 goto duplicate_instr;
1817
1818 case OP_IRET:
1819 Log(("IRET at %RRv\n", pCurInstrGC));
1820 rc = patmPatchGenIret(pVM, pPatch, pCurInstrGC, !!(pCpu->prefix & PREFIX_OPSIZE));
1821 if (RT_SUCCESS(rc))
1822 {
1823 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1824 rc = VINF_SUCCESS; /* exit point by definition */
1825 }
1826 break;
1827
1828 case OP_ILLUD2:
1829 /* This appears to be some kind of kernel panic in Linux 2.4; no point to continue */
1830 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1831 if (RT_SUCCESS(rc))
1832 rc = VINF_SUCCESS; /* exit point by definition */
1833 Log(("Illegal opcode (0xf 0xb)\n"));
1834 break;
1835
1836 case OP_CPUID:
1837 rc = patmPatchGenCpuid(pVM, pPatch, pCurInstrGC);
1838 if (RT_SUCCESS(rc))
1839 rc = VWRN_CONTINUE_RECOMPILE;
1840 break;
1841
1842 case OP_STR:
1843 case OP_SLDT:
1844 rc = patmPatchGenSldtStr(pVM, pPatch, pCpu, pCurInstrGC);
1845 if (RT_SUCCESS(rc))
1846 rc = VWRN_CONTINUE_RECOMPILE;
1847 break;
1848
1849 case OP_SGDT:
1850 case OP_SIDT:
1851 rc = patmPatchGenSxDT(pVM, pPatch, pCpu, pCurInstrGC);
1852 if (RT_SUCCESS(rc))
1853 rc = VWRN_CONTINUE_RECOMPILE;
1854 break;
1855
1856 case OP_RETN:
1857 /* retn is an exit point for function patches */
1858 rc = patmPatchGenRet(pVM, pPatch, pCpu, pCurInstrGC);
1859 if (RT_SUCCESS(rc))
1860 rc = VINF_SUCCESS; /* exit point by definition */
1861 break;
1862
1863 case OP_SYSEXIT:
1864 /* Duplicate it, so it can be emulated in GC (or fault). */
1865 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
1866 if (RT_SUCCESS(rc))
1867 rc = VINF_SUCCESS; /* exit point by definition */
1868 break;
1869
1870 case OP_CALL:
1871 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
1872 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
1873 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
1874 */
1875 Assert(pCpu->param1.size == 4 || pCpu->param1.size == 6);
1876 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->param1.size == 4 /* no far calls! */)
1877 {
1878 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, (RTRCPTR)0xDEADBEEF, true);
1879 if (RT_SUCCESS(rc))
1880 {
1881 rc = VWRN_CONTINUE_RECOMPILE;
1882 }
1883 break;
1884 }
1885 goto gen_illegal_instr;
1886
1887 case OP_JMP:
1888 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
1889 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
1890 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
1891 */
1892 Assert(pCpu->param1.size == 4 || pCpu->param1.size == 6);
1893 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->param1.size == 4 /* no far jumps! */)
1894 {
1895 rc = patmPatchGenJump(pVM, pPatch, pCpu, pCurInstrGC);
1896 if (RT_SUCCESS(rc))
1897 rc = VINF_SUCCESS; /* end of branch */
1898 break;
1899 }
1900 goto gen_illegal_instr;
1901
1902 case OP_INT3:
1903 case OP_INT:
1904 case OP_INTO:
1905 goto gen_illegal_instr;
1906
1907 case OP_MOV_DR:
1908 /* Note: currently we let DRx writes cause a trap d; our trap handler will decide to interpret it or not. */
1909 if (pCpu->pCurInstr->param2 == OP_PARM_Dd)
1910 {
1911 rc = patmPatchGenMovDebug(pVM, pPatch, pCpu);
1912 if (RT_SUCCESS(rc))
1913 rc = VWRN_CONTINUE_RECOMPILE;
1914 break;
1915 }
1916 goto duplicate_instr;
1917
1918 case OP_MOV_CR:
1919 /* Note: currently we let CRx writes cause a trap d; our trap handler will decide to interpret it or not. */
1920 if (pCpu->pCurInstr->param2 == OP_PARM_Cd)
1921 {
1922 rc = patmPatchGenMovControl(pVM, pPatch, pCpu);
1923 if (RT_SUCCESS(rc))
1924 rc = VWRN_CONTINUE_RECOMPILE;
1925 break;
1926 }
1927 goto duplicate_instr;
1928
1929 default:
1930 if (pCpu->pCurInstr->optype & (OPTYPE_CONTROLFLOW | OPTYPE_PRIVILEGED_NOTRAP))
1931 {
1932gen_illegal_instr:
1933 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1934 if (RT_SUCCESS(rc))
1935 rc = VINF_SUCCESS; /* exit point by definition */
1936 }
1937 else
1938 {
1939duplicate_instr:
1940 Log(("patmPatchGenDuplicate\n"));
1941 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
1942 if (RT_SUCCESS(rc))
1943 rc = VWRN_CONTINUE_RECOMPILE;
1944 }
1945 break;
1946 }
1947
1948end:
1949
1950 if ( !fInhibitIRQInstr
1951 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
1952 {
1953 int rc2;
1954 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->opsize;
1955
1956 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
1957 Log(("Clear inhibit IRQ flag at %RRv\n", pCurInstrGC));
1958 if (pPatch->flags & PATMFL_GENERATE_JUMPTOGUEST)
1959 {
1960 Log(("patmRecompileCallback: generate jump back to guest (%RRv) after fused instruction\n", pNextInstrGC));
1961
1962 rc2 = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
1963 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1964 rc = VINF_SUCCESS; /* end of the line */
1965 }
1966 else
1967 {
1968 rc2 = patmPatchGenClearInhibitIRQ(pVM, pPatch, pNextInstrGC);
1969 }
1970 if (RT_FAILURE(rc2))
1971 rc = rc2;
1972 }
1973
1974 if (RT_SUCCESS(rc))
1975 {
1976 // If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
1977 if ( (pPatch->flags & PATMFL_CHECK_SIZE)
1978 && pCurInstrGC + pCpu->opsize - pInstrGC >= SIZEOF_NEARJUMP32
1979 && !(pCpu->pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW)
1980 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT) /* do not do this when the next instruction *must* be executed! */
1981 )
1982 {
1983 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->opsize;
1984
1985 // The end marker for this kind of patch is any instruction at a location outside our patch jump
1986 Log(("patmRecompileCallback: end found for single instruction patch at %RRv opsize %d\n", pNextInstrGC, pCpu->opsize));
1987
1988 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC);
1989 AssertRC(rc);
1990 }
1991 }
1992 return rc;
1993}
1994
1995
1996#ifdef LOG_ENABLED
1997
1998/* Add a disasm jump record (temporary for prevent duplicate analysis)
1999 *
2000 * @param pVM The VM to operate on.
2001 * @param pPatch Patch structure ptr
2002 * @param pInstrGC Guest context pointer to privileged instruction
2003 *
2004 */
2005static void patmPatchAddDisasmJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
2006{
2007 PAVLPVNODECORE pRec;
2008
2009 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
2010 Assert(pRec);
2011 pRec->Key = (AVLPVKEY)pInstrGC;
2012
2013 int ret = RTAvlPVInsert(&pPatch->pTempInfo->DisasmJumpTree, pRec);
2014 Assert(ret);
2015}
2016
2017/**
2018 * Checks if jump target has been analysed before.
2019 *
2020 * @returns VBox status code.
2021 * @param pPatch Patch struct
2022 * @param pInstrGC Jump target
2023 *
2024 */
2025static bool patmIsKnownDisasmJump(PPATCHINFO pPatch, RTRCPTR pInstrGC)
2026{
2027 PAVLPVNODECORE pRec;
2028
2029 pRec = RTAvlPVGet(&pPatch->pTempInfo->DisasmJumpTree, (AVLPVKEY)pInstrGC);
2030 if (pRec)
2031 return true;
2032 return false;
2033}
2034
2035/**
2036 * For proper disassembly of the final patch block
2037 *
2038 * @returns VBox status code.
2039 * @param pVM The VM to operate on.
2040 * @param pCpu CPU disassembly state
2041 * @param pInstrGC Guest context pointer to privileged instruction
2042 * @param pCurInstrGC Guest context pointer to the current instruction
2043 * @param pCacheRec Cache record ptr
2044 *
2045 */
2046int patmr3DisasmCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
2047{
2048 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2049
2050 if (pCpu->pCurInstr->opcode == OP_INT3)
2051 {
2052 /* Could be an int3 inserted in a call patch. Check to be sure */
2053 DISCPUSTATE cpu;
2054 RTRCPTR pOrgJumpGC;
2055 uint32_t dummy;
2056
2057 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2058 pOrgJumpGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2059
2060 { /* Force pOrgJumpHC out of scope after using it */
2061 uint8_t *pOrgJumpHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pOrgJumpGC);
2062
2063 bool disret = PATMR3DISInstr(pVM, pPatch, &cpu, pOrgJumpGC, pOrgJumpHC, &dummy, NULL);
2064 if (!disret || cpu.pCurInstr->opcode != OP_CALL || cpu.param1.size != 4 /* only near calls */)
2065 return VINF_SUCCESS;
2066 }
2067 return VWRN_CONTINUE_ANALYSIS;
2068 }
2069
2070 if ( pCpu->pCurInstr->opcode == OP_ILLUD2
2071 && PATMIsPatchGCAddr(pVM, pCurInstrGC))
2072 {
2073 /* the indirect call patch contains an 0xF/0xB illegal instr to call for assistance; check for this and continue */
2074 return VWRN_CONTINUE_ANALYSIS;
2075 }
2076
2077 if ( (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
2078 || pCpu->pCurInstr->opcode == OP_INT
2079 || pCpu->pCurInstr->opcode == OP_IRET
2080 || pCpu->pCurInstr->opcode == OP_RETN
2081 || pCpu->pCurInstr->opcode == OP_RETF
2082 )
2083 {
2084 return VINF_SUCCESS;
2085 }
2086
2087 if (pCpu->pCurInstr->opcode == OP_ILLUD2)
2088 return VINF_SUCCESS;
2089
2090 return VWRN_CONTINUE_ANALYSIS;
2091}
2092
2093
2094/**
2095 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2096 *
2097 * @returns VBox status code.
2098 * @param pVM The VM to operate on.
2099 * @param pInstrGC Guest context pointer to the initial privileged instruction
2100 * @param pCurInstrGC Guest context pointer to the current instruction
2101 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2102 * @param pCacheRec Cache record ptr
2103 *
2104 */
int patmr3DisasmCode(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
{
    DISCPUSTATE cpu;
    PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
    int rc = VWRN_CONTINUE_ANALYSIS;
    uint32_t opsize, delta;
    R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
    bool disret;
    char szOutput[256];

    /* The jump tree must be empty when starting a disassembly at the head of the patch code. */
    Assert(pCurInstrHC != PATCHCODE_PTR_HC(pPatch) || pPatch->pTempInfo->DisasmJumpTree == 0);

    /* We need this to determine branch targets (and for disassembling). */
    /* NOTE(review): delta is not referenced below in this function - confirm whether it is still needed. */
    delta = pVM->patm.s.pPatchMemGC - (uintptr_t)pVM->patm.s.pPatchMemHC;

    /* Walk the code stream one instruction at a time until the callback stops us. */
    while(rc == VWRN_CONTINUE_ANALYSIS)
    {
        cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;

        pCurInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
        if (pCurInstrHC == NULL)
        {
            rc = VERR_PATCHING_REFUSED;
            goto end;
        }

        /* Disassemble first; the result is logged below even before checking disret. */
        disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, szOutput, PATMREAD_RAWCODE);
        if (PATMIsPatchGCAddr(pVM, pCurInstrGC))
        {
            /* Inside patch memory: log with the corresponding guest address when it changes. */
            RTRCPTR pOrgInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);

            if (pOrgInstrGC != pPatch->pTempInfo->pLastDisasmInstrGC)
                Log(("DIS %RRv<-%s", pOrgInstrGC, szOutput));
            else
                Log(("DIS %s", szOutput));

            pPatch->pTempInfo->pLastDisasmInstrGC = pOrgInstrGC;
            /* An instruction previously marked illegal ends this code block. */
            if (patmIsIllegalInstr(pPatch, pOrgInstrGC))
            {
                rc = VINF_SUCCESS;
                goto end;
            }
        }
        else
            Log(("DIS: %s", szOutput));

        if (disret == false)
        {
            Log(("Disassembly failed (probably page not present) -> return to caller\n"));
            rc = VINF_SUCCESS;
            goto end;
        }

        /* Let the caller's callback inspect the instruction and decide how to continue. */
        rc = pfnPATMR3Disasm(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
        if (rc != VWRN_CONTINUE_ANALYSIS) {
            break; //done!
        }

        /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction) */
        if (   (cpu.pCurInstr->optype & OPTYPE_CONTROLFLOW)
            && (OP_PARM_VTYPE(cpu.pCurInstr->param1) == OP_PARM_J)
            && cpu.pCurInstr->opcode != OP_CALL /* complete functions are replaced; don't bother here. */
           )
        {
            RTRCPTR pTargetGC = PATMResolveBranch(&cpu, pCurInstrGC);
            RTRCPTR pOrgTargetGC;

            if (pTargetGC == 0)
            {
                Log(("We don't support far jumps here!! (%08X)\n", cpu.param1.flags));
                rc = VERR_PATCHING_REFUSED;
                break;
            }

            if (!PATMIsPatchGCAddr(pVM, pTargetGC))
            {
                //jump back to guest code
                rc = VINF_SUCCESS;
                goto end;
            }
            pOrgTargetGC = PATMR3PatchToGCPtr(pVM, pTargetGC, 0);

            /* Branching into the common IDT handler patch is a normal exit; stop here. */
            if (patmIsCommonIDTHandlerPatch(pVM, pOrgTargetGC))
            {
                rc = VINF_SUCCESS;
                goto end;
            }

            if (patmIsKnownDisasmJump(pPatch, pTargetGC) == false)
            {
                /* New jump, let's check it. */
                patmPatchAddDisasmJump(pVM, pPatch, pTargetGC);

                /* Recurse into the branch target; nrCalls tracks the call nesting depth. */
                if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
                rc = patmr3DisasmCode(pVM, pInstrGC, pTargetGC, pfnPATMR3Disasm, pCacheRec);
                if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls--;

                if (rc != VINF_SUCCESS) {
                    break; //done!
                }
            }
            if (cpu.pCurInstr->opcode == OP_JMP)
            {
                /* Unconditional jump; return to caller. */
                rc = VINF_SUCCESS;
                goto end;
            }

            rc = VWRN_CONTINUE_ANALYSIS;
        }
        /* Advance to the next sequential instruction. */
        pCurInstrGC += opsize;
    }
end:
    return rc;
}
2220
2221/**
2222 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2223 *
2224 * @returns VBox status code.
2225 * @param pVM The VM to operate on.
2226 * @param pInstrGC Guest context pointer to the initial privileged instruction
2227 * @param pCurInstrGC Guest context pointer to the current instruction
2228 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2229 * @param pCacheRec Cache record ptr
2230 *
2231 */
2232int patmr3DisasmCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2233{
2234 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2235
2236 int rc = patmr3DisasmCode(pVM, pInstrGC, pCurInstrGC, pfnPATMR3Disasm, pCacheRec);
2237 /* Free all disasm jump records. */
2238 patmEmptyTree(pVM, &pPatch->pTempInfo->DisasmJumpTree);
2239 return rc;
2240}
2241
2242#endif /* LOG_ENABLED */
2243
2244/**
2245 * Detects it the specified address falls within a 5 byte jump generated for an active patch.
2246 * If so, this patch is permanently disabled.
2247 *
2248 * @param pVM The VM to operate on.
2249 * @param pInstrGC Guest context pointer to instruction
2250 * @param pConflictGC Guest context pointer to check
2251 *
2252 * @note also checks for patch hints to make sure they can never be enabled if a conflict is present.
2253 *
2254 */
2255VMMR3DECL(int) PATMR3DetectConflict(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictGC)
2256{
2257 PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, pConflictGC, true /* include patch hints */);
2258 if (pTargetPatch)
2259 {
2260 return patmDisableUnusablePatch(pVM, pInstrGC, pConflictGC, pTargetPatch);
2261 }
2262 return VERR_PATCH_NO_CONFLICT;
2263}
2264
2265/**
2266 * Recompile the code stream until the callback function detects a failure or decides everything is acceptable
2267 *
2268 * @returns VBox status code.
2269 * @param pVM The VM to operate on.
2270 * @param pInstrGC Guest context pointer to privileged instruction
2271 * @param pCurInstrGC Guest context pointer to the current instruction
2272 * @param pfnPATMR3Recompile Callback for testing the disassembled instruction
2273 * @param pCacheRec Cache record ptr
2274 *
2275 */
2276static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Recompile, PPATMP2GLOOKUPREC pCacheRec)
2277{
2278 DISCPUSTATE cpu;
2279 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2280 int rc = VWRN_CONTINUE_ANALYSIS;
2281 uint32_t opsize;
2282 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2283 bool disret;
2284#ifdef LOG_ENABLED
2285 char szOutput[256];
2286#endif
2287
2288 while (rc == VWRN_CONTINUE_RECOMPILE)
2289 {
2290 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2291
2292 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2293 if (pCurInstrHC == NULL)
2294 {
2295 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2296 goto end;
2297 }
2298#ifdef LOG_ENABLED
2299 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, szOutput);
2300 Log(("Recompile: %s", szOutput));
2301#else
2302 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2303#endif
2304 if (disret == false)
2305 {
2306 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2307
2308 /* Add lookup record for patch to guest address translation */
2309 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
2310 patmPatchGenIllegalInstr(pVM, pPatch);
2311 rc = VINF_SUCCESS; /* Note: don't fail here; we might refuse an important patch!! */
2312 goto end;
2313 }
2314
2315 rc = pfnPATMR3Recompile(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2316 if (rc != VWRN_CONTINUE_RECOMPILE)
2317 {
2318 /* If irqs are inhibited because of the current instruction, then we must make sure the next one is executed! */
2319 if ( rc == VINF_SUCCESS
2320 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2321 {
2322 DISCPUSTATE cpunext;
2323 uint32_t opsizenext;
2324 uint8_t *pNextInstrHC;
2325 RTRCPTR pNextInstrGC = pCurInstrGC + opsize;
2326
2327 Log(("patmRecompileCodeStream: irqs inhibited by instruction %RRv\n", pNextInstrGC));
2328
2329 /* Certain instructions (e.g. sti) force the next instruction to be executed before any interrupts can occur.
2330 * Recompile the next instruction as well
2331 */
2332 pNextInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
2333 if (pNextInstrHC == NULL)
2334 {
2335 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2336 goto end;
2337 }
2338 cpunext.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2339 disret = PATMR3DISInstr(pVM, pPatch, &cpunext, pNextInstrGC, pNextInstrHC, &opsizenext, NULL);
2340 if (disret == false)
2341 {
2342 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2343 goto end;
2344 }
2345 switch(cpunext.pCurInstr->opcode)
2346 {
2347 case OP_IRET: /* inhibit cleared in generated code */
2348 case OP_SYSEXIT: /* faults; inhibit should be cleared in HC handling */
2349 case OP_HLT:
2350 break; /* recompile these */
2351
2352 default:
2353 if (cpunext.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2354 {
2355 Log(("Unexpected control flow instruction after inhibit irq instruction\n"));
2356
2357 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2358 AssertRC(rc);
2359 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2360 goto end; /** @todo should be ok to ignore instruction fusing in this case */
2361 }
2362 break;
2363 }
2364
2365 /* Note: after a cli we must continue to a proper exit point */
2366 if (cpunext.pCurInstr->opcode != OP_CLI)
2367 {
2368 rc = pfnPATMR3Recompile(pVM, &cpunext, pInstrGC, pNextInstrGC, pCacheRec);
2369 if (RT_SUCCESS(rc))
2370 {
2371 rc = VINF_SUCCESS;
2372 goto end;
2373 }
2374 break;
2375 }
2376 else
2377 rc = VWRN_CONTINUE_RECOMPILE;
2378 }
2379 else
2380 break; /* done! */
2381 }
2382
2383 /** @todo continue with the instructions following the jump and then recompile the jump target code */
2384
2385
2386 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction). */
2387 if ( (cpu.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2388 && (OP_PARM_VTYPE(cpu.pCurInstr->param1) == OP_PARM_J)
2389 && cpu.pCurInstr->opcode != OP_CALL /* complete functions are replaced; don't bother here. */
2390 )
2391 {
2392 RCPTRTYPE(uint8_t *) addr = PATMResolveBranch(&cpu, pCurInstrGC);
2393 if (addr == 0)
2394 {
2395 Log(("We don't support far jumps here!! (%08X)\n", cpu.param1.flags));
2396 rc = VERR_PATCHING_REFUSED;
2397 break;
2398 }
2399
2400 Log(("Jump encountered target %RRv\n", addr));
2401
2402 /* We don't check if the branch target lies in a valid page as we've already done that in the analysis phase. */
2403 if (!(cpu.pCurInstr->optype & OPTYPE_UNCOND_CONTROLFLOW))
2404 {
2405 Log(("patmRecompileCodeStream continue passed conditional jump\n"));
2406 /* First we need to finish this linear code stream until the next exit point. */
2407 rc = patmRecompileCodeStream(pVM, pInstrGC, pCurInstrGC+opsize, pfnPATMR3Recompile, pCacheRec);
2408 if (RT_FAILURE(rc))
2409 {
2410 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2411 break; //fatal error
2412 }
2413 }
2414
2415 if (patmGuestGCPtrToPatchGCPtr(pVM, pPatch, addr) == 0)
2416 {
2417 /* New code; let's recompile it. */
2418 Log(("patmRecompileCodeStream continue with jump\n"));
2419
2420 /*
2421 * If we are jumping to an existing patch (or within 5 bytes of the entrypoint), then we must temporarily disable
2422 * this patch so we can continue our analysis
2423 *
2424 * We rely on CSAM to detect and resolve conflicts
2425 */
2426 PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, addr);
2427 if(pTargetPatch)
2428 {
2429 Log(("Found active patch at target %RRv (%RRv) -> temporarily disabling it!!\n", addr, pTargetPatch->pPrivInstrGC));
2430 PATMR3DisablePatch(pVM, pTargetPatch->pPrivInstrGC);
2431 }
2432
2433 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2434 rc = patmRecompileCodeStream(pVM, pInstrGC, addr, pfnPATMR3Recompile, pCacheRec);
2435 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2436
2437 if(pTargetPatch)
2438 PATMR3EnablePatch(pVM, pTargetPatch->pPrivInstrGC);
2439
2440 if (RT_FAILURE(rc))
2441 {
2442 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2443 break; //done!
2444 }
2445 }
2446 /* Always return to caller here; we're done! */
2447 rc = VINF_SUCCESS;
2448 goto end;
2449 }
2450 else
2451 if (cpu.pCurInstr->optype & OPTYPE_UNCOND_CONTROLFLOW)
2452 {
2453 rc = VINF_SUCCESS;
2454 goto end;
2455 }
2456 pCurInstrGC += opsize;
2457 }
2458end:
2459 Assert(!(pPatch->flags & PATMFL_RECOMPILE_NEXT));
2460 return rc;
2461}
2462
2463
2464/**
2465 * Generate the jump from guest to patch code
2466 *
2467 * @returns VBox status code.
2468 * @param pVM The VM to operate on.
2469 * @param pPatch Patch record
2470 * @param pCacheRec Guest translation lookup cache record
2471 */
static int patmGenJumpToPatch(PVM pVM, PPATCHINFO pPatch, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
{
    uint8_t temp[8];     /* scratch buffer for the replacement jump bytes */
    uint8_t *pPB;
    int rc;

    Assert(pPatch->cbPatchJump <= sizeof(temp));
    Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));

    pPB = PATMGCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
    Assert(pPB);

#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
    /* Conflict patches jump to pPatchJumpDestGC instead of the patch code; the original
       branch opcode byte(s) are preserved and only the displacement is rewritten. */
    if (pPatch->flags & PATMFL_JUMP_CONFLICT)
    {
        Assert(pPatch->pPatchJumpDestGC);

        if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
        {
            // jmp [PatchCode]
            if (fAddFixup)
            {
                /* The 32-bit displacement lives at offset 1 (after the 1-byte opcode). */
                if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
                {
                    Log(("Relocation failed for the jump in the guest code!!\n"));
                    return VERR_PATCHING_REFUSED;
                }
            }

            temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
            *(uint32_t *)&temp[1] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
        }
        else
        if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
        {
            // jmp [PatchCode]
            if (fAddFixup)
            {
                /* Conditional near jumps have a 2-byte opcode (0x0F 0x8x); displacement is at offset 2. */
                if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
                {
                    Log(("Relocation failed for the jump in the guest code!!\n"));
                    return VERR_PATCHING_REFUSED;
                }
            }

            temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
            temp[1] = pPatch->aPrivInstr[1]; //jump opcode copied from original instruction
            *(uint32_t *)&temp[2] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
        }
        else
        {
            /* Unexpected patch jump size. */
            Assert(0);
            return VERR_PATCHING_REFUSED;
        }
    }
    else
#endif
    {
        /* Normal case: overwrite the privileged instruction with a 5-byte near jmp into the patch code. */
        Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);

        // jmp [PatchCode]
        if (fAddFixup)
        {
            if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
            {
                Log(("Relocation failed for the jump in the guest code!!\n"));
                return VERR_PATCHING_REFUSED;
            }
        }
        temp[0] = 0xE9; //jmp
        *(uint32_t *)&temp[1] = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
    }
    /* Write the jump into guest memory, marking the page dirty. */
    rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
    AssertRC(rc);

    if (rc == VINF_SUCCESS)
        pPatch->flags |= PATMFL_PATCHED_GUEST_CODE;

    return rc;
}
2552
2553/**
2554 * Remove the jump from guest to patch code
2555 *
2556 * @returns VBox status code.
2557 * @param pVM The VM to operate on.
2558 * @param pPatch Patch record
2559 */
static int patmRemoveJumpToPatch(PVM pVM, PPATCHINFO pPatch)
{
#ifdef DEBUG
    /* Debug builds: disassemble and log the patched guest bytes before restoring them. */
    DISCPUSTATE cpu;
    char szOutput[256];
    uint32_t opsize, i = 0;
    bool disret;

    /* NOTE(review): loop bound is cbPrivInstr while the restore below writes cbPatchJump bytes - confirm intended. */
    while (i < pPatch->cbPrivInstr)
    {
        cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
        disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, NULL, &opsize, szOutput);
        if (disret == false)
            break;

        Log(("Org patch jump: %s", szOutput));
        Assert(opsize);
        i += opsize;
    }
#endif

    /* Restore original code (privileged instruction + following instructions that were overwritten because of the 5/6 byte jmp). */
    int rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, pPatch->cbPatchJump);
#ifdef DEBUG
    /* Debug builds: log the restored instruction bytes for verification. */
    if (rc == VINF_SUCCESS)
    {
        i = 0;
        while(i < pPatch->cbPrivInstr)
        {
            cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
            disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, NULL, &opsize, szOutput);
            if (disret == false)
                break;

            Log(("Org instr: %s", szOutput));
            Assert(opsize);
            i += opsize;
        }
    }
#endif
    /* The guest code no longer contains our jump. */
    pPatch->flags &= ~PATMFL_PATCHED_GUEST_CODE;
    return rc;
}
2603
2604/**
2605 * Generate the call from guest to patch code
2606 *
2607 * @returns VBox status code.
2608 * @param pVM The VM to operate on.
2609 * @param pPatch Patch record
2610 * @param pInstrHC HC address where to insert the jump
2611 * @param pCacheRec Guest translation cache record
2612 */
2613static int patmGenCallToPatch(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTargetGC, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2614{
2615 uint8_t temp[8];
2616 uint8_t *pPB;
2617 int rc;
2618
2619 Assert(pPatch->cbPatchJump <= sizeof(temp));
2620
2621 pPB = PATMGCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2622 Assert(pPB);
2623
2624 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2625
2626 // jmp [PatchCode]
2627 if (fAddFixup)
2628 {
2629 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
2630 {
2631 Log(("Relocation failed for the jump in the guest code!!\n"));
2632 return VERR_PATCHING_REFUSED;
2633 }
2634 }
2635
2636 Assert(pPatch->aPrivInstr[0] == 0xE8 || pPatch->aPrivInstr[0] == 0xE9); /* call or jmp */
2637 temp[0] = pPatch->aPrivInstr[0];
2638 *(uint32_t *)&temp[1] = (uint32_t)pTargetGC - ((uint32_t)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
2639
2640 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2641 AssertRC(rc);
2642
2643 return rc;
2644}
2645
2646
2647/**
2648 * Patch cli/sti pushf/popf instruction block at specified location
2649 *
2650 * @returns VBox status code.
2651 * @param pVM The VM to operate on.
2652 * @param pInstrGC Guest context point to privileged instruction
2653 * @param pInstrHC Host context point to privileged instruction
2654 * @param uOpcode Instruction opcode
2655 * @param uOpSize Size of starting instruction
2656 * @param pPatchRec Patch record
2657 *
2658 * @note returns failure if patching is not allowed or possible
2659 *
2660 */
2661VMMR3DECL(int) PATMR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2662 uint32_t uOpcode, uint32_t uOpSize, PPATMPATCHREC pPatchRec)
2663{
2664 PPATCHINFO pPatch = &pPatchRec->patch;
2665 int rc = VERR_PATCHING_REFUSED;
2666 DISCPUSTATE cpu;
2667 uint32_t orgOffsetPatchMem = ~0;
2668 RTRCPTR pInstrStart;
2669#ifdef LOG_ENABLED
2670 uint32_t opsize;
2671 char szOutput[256];
2672 bool disret;
2673#endif
2674
2675 /* Save original offset (in case of failures later on) */
2676 /** @todo use the hypervisor heap (that has quite a few consequences for save/restore though) */
2677 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2678
2679 Assert(!(pPatch->flags & (PATMFL_GUEST_SPECIFIC|PATMFL_USER_MODE|PATMFL_TRAPHANDLER)));
2680 switch (uOpcode)
2681 {
2682 case OP_MOV:
2683 break;
2684
2685 case OP_CLI:
2686 case OP_PUSHF:
2687 /* We can 'call' a cli or pushf patch. It will either return to the original guest code when IF is set again, or fault. */
2688 /* Note: special precautions are taken when disabling and enabling such patches. */
2689 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
2690 break;
2691
2692 default:
2693 if (!(pPatch->flags & PATMFL_IDTHANDLER))
2694 {
2695 AssertMsg(0, ("PATMR3PatchBlock: Invalid opcode %x\n", uOpcode));
2696 return VERR_INVALID_PARAMETER;
2697 }
2698 }
2699
2700 if (!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)))
2701 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
2702
2703 /* If we're going to insert a patch jump, then the jump itself is not allowed to cross a page boundary. */
2704 if ( (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2705 && PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + SIZEOF_NEARJUMP32)
2706 )
2707 {
2708 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
2709 Log(("Patch jump would cross page boundary -> refuse!!\n"));
2710 rc = VERR_PATCHING_REFUSED;
2711 goto failure;
2712 }
2713
2714 pPatch->nrPatch2GuestRecs = 0;
2715 pInstrStart = pInstrGC;
2716
2717#ifdef PATM_ENABLE_CALL
2718 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
2719#endif
2720
2721 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2722 pPatch->uCurPatchOffset = 0;
2723
2724 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2725
2726 if ((pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER)) == PATMFL_IDTHANDLER)
2727 {
2728 Assert(pPatch->flags & PATMFL_INTHANDLER);
2729
2730 /* Install fake cli patch (to clear the virtual IF and check int xx parameters) */
2731 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2732 if (RT_FAILURE(rc))
2733 goto failure;
2734 }
2735
2736 /***************************************************************************************************************************/
2737 /* Note: We can't insert *any* code before a sysenter handler; some linux guests have an invalid stack at this point!!!!! */
2738 /***************************************************************************************************************************/
2739#ifdef VBOX_WITH_STATISTICS
2740 if (!(pPatch->flags & PATMFL_SYSENTER))
2741 {
2742 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2743 if (RT_FAILURE(rc))
2744 goto failure;
2745 }
2746#endif
2747
2748 PATMP2GLOOKUPREC cacheRec;
2749 RT_ZERO(cacheRec);
2750 cacheRec.pPatch = pPatch;
2751
2752 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, &cacheRec);
2753 /* Free leftover lock if any. */
2754 if (cacheRec.Lock.pvMap)
2755 {
2756 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2757 cacheRec.Lock.pvMap = NULL;
2758 }
2759 if (rc != VINF_SUCCESS)
2760 {
2761 Log(("PATMR3PatchCli: patmRecompileCodeStream failed with %d\n", rc));
2762 goto failure;
2763 }
2764
2765 /* Calculated during analysis. */
2766 if (pPatch->cbPatchBlockSize < SIZEOF_NEARJUMP32)
2767 {
2768 /* Most likely cause: we encountered an illegal instruction very early on. */
2769 /** @todo could turn it into an int3 callable patch. */
2770 Log(("PATMR3PatchBlock: patch block too small -> refuse\n"));
2771 rc = VERR_PATCHING_REFUSED;
2772 goto failure;
2773 }
2774
2775 /* size of patch block */
2776 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2777
2778
2779 /* Update free pointer in patch memory. */
2780 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2781 /* Round to next 8 byte boundary. */
2782 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2783
2784 /*
2785 * Insert into patch to guest lookup tree
2786 */
2787 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2788 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2789 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2790 AssertMsg(rc, ("RTAvlULInsert failed for %x\n", pPatchRec->CoreOffset.Key));
2791 if (!rc)
2792 {
2793 rc = VERR_PATCHING_REFUSED;
2794 goto failure;
2795 }
2796
2797 /* Note that patmr3SetBranchTargets can install additional patches!! */
2798 rc = patmr3SetBranchTargets(pVM, pPatch);
2799 if (rc != VINF_SUCCESS)
2800 {
2801 Log(("PATMR3PatchCli: patmr3SetBranchTargets failed with %d\n", rc));
2802 goto failure;
2803 }
2804
2805#ifdef LOG_ENABLED
2806 Log(("Patch code ----------------------------------------------------------\n"));
2807 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, &cacheRec);
2808 /* Free leftover lock if any. */
2809 if (cacheRec.Lock.pvMap)
2810 {
2811 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2812 cacheRec.Lock.pvMap = NULL;
2813 }
2814 Log(("Patch code ends -----------------------------------------------------\n"));
2815#endif
2816
2817 /* make a copy of the guest code bytes that will be overwritten */
2818 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
2819
2820 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
2821 AssertRC(rc);
2822
2823 if (pPatch->flags & PATMFL_INT3_REPLACEMENT_BLOCK)
2824 {
2825 /*uint8_t ASMInt3 = 0xCC; - unused */
2826
2827 Log(("PATMR3PatchBlock %RRv -> int 3 callable patch.\n", pPatch->pPrivInstrGC));
2828 /* Replace first opcode byte with 'int 3'. */
2829 rc = patmActivateInt3Patch(pVM, pPatch);
2830 if (RT_FAILURE(rc))
2831 goto failure;
2832
2833 /* normal patch can be turned into an int3 patch -> clear patch jump installation flag. */
2834 pPatch->flags &= ~PATMFL_MUST_INSTALL_PATCHJMP;
2835
2836 pPatch->flags &= ~PATMFL_INSTR_HINT;
2837 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
2838 }
2839 else
2840 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2841 {
2842 Assert(!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)));
2843 /* now insert a jump in the guest code */
2844 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
2845 AssertRC(rc);
2846 if (RT_FAILURE(rc))
2847 goto failure;
2848
2849 }
2850
2851#ifdef LOG_ENABLED
2852 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2853 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput, PATMREAD_RAWCODE);
2854 Log(("%s patch: %s", patmGetInstructionString(pPatch->opcode, pPatch->flags), szOutput));
2855#endif
2856
2857 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2858 pPatch->pTempInfo->nrIllegalInstr = 0;
2859
2860 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
2861
2862 pPatch->uState = PATCH_ENABLED;
2863 return VINF_SUCCESS;
2864
2865failure:
2866 if (pPatchRec->CoreOffset.Key)
2867 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
2868
2869 patmEmptyTree(pVM, &pPatch->FixupTree);
2870 pPatch->nrFixups = 0;
2871
2872 patmEmptyTree(pVM, &pPatch->JumpTree);
2873 pPatch->nrJumpRecs = 0;
2874
2875 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2876 pPatch->pTempInfo->nrIllegalInstr = 0;
2877
2878 /* Turn this cli patch into a dummy. */
2879 pPatch->uState = PATCH_REFUSED;
2880 pPatch->pPatchBlockOffset = 0;
2881
2882 // Give back the patch memory we no longer need
2883 Assert(orgOffsetPatchMem != (uint32_t)~0);
2884 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
2885
2886 return rc;
2887}
2888
2889/**
2890 * Patch IDT handler
2891 *
2892 * @returns VBox status code.
2893 * @param pVM The VM to operate on.
2894 * @param pInstrGC Guest context point to privileged instruction
2895 * @param uOpSize Size of starting instruction
2896 * @param pPatchRec Patch record
2897 * @param pCacheRec Cache record ptr
2898 *
2899 * @note returns failure if patching is not allowed or possible
2900 *
2901 */
2902static int patmIdtHandler(PVM pVM, RTRCPTR pInstrGC, uint32_t uOpSize, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
2903{
2904 PPATCHINFO pPatch = &pPatchRec->patch;
2905 bool disret;
2906 DISCPUSTATE cpuPush, cpuJmp;
2907 uint32_t opsize;
2908 RTRCPTR pCurInstrGC = pInstrGC;
2909 uint8_t *pCurInstrHC, *pInstrHC;
2910 uint32_t orgOffsetPatchMem = ~0;
2911
2912 pInstrHC = pCurInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2913 AssertReturn(pCurInstrHC, VERR_PAGE_NOT_PRESENT);
2914
2915 /*
2916 * In Linux it's often the case that many interrupt handlers push a predefined value onto the stack
2917 * and then jump to a common entrypoint. In order not to waste a lot of memory, we will check for this
2918 * condition here and only patch the common entypoint once.
2919 */
2920 cpuPush.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2921 disret = PATMR3DISInstr(pVM, pPatch, &cpuPush, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2922 Assert(disret);
2923 if (disret && cpuPush.pCurInstr->opcode == OP_PUSH)
2924 {
2925 RTRCPTR pJmpInstrGC;
2926 int rc;
2927 pCurInstrGC += opsize;
2928
2929 cpuJmp.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2930 disret = PATMR3DISInstr(pVM, pPatch, &cpuJmp, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2931 if ( disret
2932 && cpuJmp.pCurInstr->opcode == OP_JMP
2933 && (pJmpInstrGC = PATMResolveBranch(&cpuJmp, pCurInstrGC))
2934 )
2935 {
2936 PPATMPATCHREC pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
2937 if (pJmpPatch == 0)
2938 {
2939 /* Patch it first! */
2940 rc = PATMR3InstallPatch(pVM, pJmpInstrGC, pPatch->flags | PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT);
2941 if (rc != VINF_SUCCESS)
2942 goto failure;
2943 pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
2944 Assert(pJmpPatch);
2945 }
2946 if (pJmpPatch->patch.uState != PATCH_ENABLED)
2947 goto failure;
2948
2949 /* save original offset (in case of failures later on) */
2950 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2951
2952 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2953 pPatch->uCurPatchOffset = 0;
2954 pPatch->nrPatch2GuestRecs = 0;
2955
2956#ifdef VBOX_WITH_STATISTICS
2957 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2958 if (RT_FAILURE(rc))
2959 goto failure;
2960#endif
2961
2962 /* Install fake cli patch (to clear the virtual IF) */
2963 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2964 if (RT_FAILURE(rc))
2965 goto failure;
2966
2967 /* Add lookup record for patch to guest address translation (for the push) */
2968 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pInstrGC, PATM_LOOKUP_BOTHDIR);
2969
2970 /* Duplicate push. */
2971 rc = patmPatchGenDuplicate(pVM, pPatch, &cpuPush, pInstrGC);
2972 if (RT_FAILURE(rc))
2973 goto failure;
2974
2975 /* Generate jump to common entrypoint. */
2976 rc = patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, PATCHCODE_PTR_GC(&pJmpPatch->patch));
2977 if (RT_FAILURE(rc))
2978 goto failure;
2979
2980 /* size of patch block */
2981 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2982
2983 /* Update free pointer in patch memory. */
2984 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2985 /* Round to next 8 byte boundary */
2986 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2987
2988 /* There's no jump from guest to patch code. */
2989 pPatch->cbPatchJump = 0;
2990
2991
2992#ifdef LOG_ENABLED
2993 Log(("Patch code ----------------------------------------------------------\n"));
2994 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
2995 Log(("Patch code ends -----------------------------------------------------\n"));
2996#endif
2997 Log(("Successfully installed IDT handler patch at %RRv\n", pInstrGC));
2998
2999 /*
3000 * Insert into patch to guest lookup tree
3001 */
3002 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3003 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3004 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3005 AssertMsg(rc, ("RTAvlULInsert failed for %x\n", pPatchRec->CoreOffset.Key));
3006
3007 pPatch->uState = PATCH_ENABLED;
3008
3009 return VINF_SUCCESS;
3010 }
3011 }
3012failure:
3013 /* Give back the patch memory we no longer need */
3014 if (orgOffsetPatchMem != (uint32_t)~0)
3015 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3016
3017 return PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, OP_CLI, uOpSize, pPatchRec);
3018}
3019
3020/**
3021 * Install a trampoline to call a guest trap handler directly
3022 *
3023 * @returns VBox status code.
3024 * @param pVM The VM to operate on.
3025 * @param pInstrGC Guest context point to privileged instruction
3026 * @param pPatchRec Patch record
3027 * @param pCacheRec Cache record ptr
3028 *
3029 */
3030static int patmInstallTrapTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3031{
3032 PPATCHINFO pPatch = &pPatchRec->patch;
3033 int rc = VERR_PATCHING_REFUSED;
3034 uint32_t orgOffsetPatchMem = ~0;
3035#ifdef LOG_ENABLED
3036 bool disret;
3037 DISCPUSTATE cpu;
3038 uint32_t opsize;
3039 char szOutput[256];
3040#endif
3041
3042 // save original offset (in case of failures later on)
3043 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3044
3045 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3046 pPatch->uCurPatchOffset = 0;
3047 pPatch->nrPatch2GuestRecs = 0;
3048
3049#ifdef VBOX_WITH_STATISTICS
3050 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3051 if (RT_FAILURE(rc))
3052 goto failure;
3053#endif
3054
3055 rc = patmPatchGenTrapEntry(pVM, pPatch, pInstrGC);
3056 if (RT_FAILURE(rc))
3057 goto failure;
3058
3059 /* size of patch block */
3060 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3061
3062 /* Update free pointer in patch memory. */
3063 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3064 /* Round to next 8 byte boundary */
3065 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3066
3067 /* There's no jump from guest to patch code. */
3068 pPatch->cbPatchJump = 0;
3069
3070#ifdef LOG_ENABLED
3071 Log(("Patch code ----------------------------------------------------------\n"));
3072 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3073 Log(("Patch code ends -----------------------------------------------------\n"));
3074#endif
3075
3076#ifdef LOG_ENABLED
3077 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3078 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput);
3079 Log(("TRAP handler patch: %s", szOutput));
3080#endif
3081 Log(("Successfully installed Trap Trampoline patch at %RRv\n", pInstrGC));
3082
3083 /*
3084 * Insert into patch to guest lookup tree
3085 */
3086 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3087 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3088 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3089 AssertMsg(rc, ("RTAvlULInsert failed for %x\n", pPatchRec->CoreOffset.Key));
3090
3091 pPatch->uState = PATCH_ENABLED;
3092 return VINF_SUCCESS;
3093
3094failure:
3095 AssertMsgFailed(("Failed to install trap handler trampoline!!\n"));
3096
3097 /* Turn this cli patch into a dummy. */
3098 pPatch->uState = PATCH_REFUSED;
3099 pPatch->pPatchBlockOffset = 0;
3100
3101 /* Give back the patch memory we no longer need */
3102 Assert(orgOffsetPatchMem != (uint32_t)~0);
3103 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3104
3105 return rc;
3106}
3107
3108
#ifdef LOG_ENABLED
/**
 * Check if the instruction is patched as a common idt handler
 *
 * Looks the address up in the patch tree and tests for the
 * PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT flag.
 *
 * @returns true or false
 * @param   pVM       The VM to operate on.
 * @param   pInstrGC  Guest context point to the instruction
 *
 */
static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC)
{
    PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
    return pPatchRec
        && (pPatchRec->patch.flags & PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT);
}
#endif /* LOG_ENABLED */
3128
3129
3130/**
3131 * Duplicates a complete function
3132 *
3133 * @returns VBox status code.
3134 * @param pVM The VM to operate on.
3135 * @param pInstrGC Guest context point to privileged instruction
3136 * @param pPatchRec Patch record
3137 * @param pCacheRec Cache record ptr
3138 *
3139 */
3140static int patmDuplicateFunction(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3141{
3142 PPATCHINFO pPatch = &pPatchRec->patch;
3143 int rc = VERR_PATCHING_REFUSED;
3144 DISCPUSTATE cpu;
3145 uint32_t orgOffsetPatchMem = ~0;
3146
3147 Log(("patmDuplicateFunction %RRv\n", pInstrGC));
3148 /* Save original offset (in case of failures later on). */
3149 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3150
3151 /* We will not go on indefinitely with call instruction handling. */
3152 if (pVM->patm.s.ulCallDepth > PATM_MAX_CALL_DEPTH)
3153 {
3154 Log(("patmDuplicateFunction: maximum callback depth reached!!\n"));
3155 return VERR_PATCHING_REFUSED;
3156 }
3157
3158 pVM->patm.s.ulCallDepth++;
3159
3160#ifdef PATM_ENABLE_CALL
3161 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
3162#endif
3163
3164 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
3165
3166 pPatch->nrPatch2GuestRecs = 0;
3167 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3168 pPatch->uCurPatchOffset = 0;
3169
3170 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3171
3172 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3173 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3174 if (RT_FAILURE(rc))
3175 goto failure;
3176
3177#ifdef VBOX_WITH_STATISTICS
3178 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3179 if (RT_FAILURE(rc))
3180 goto failure;
3181#endif
3182
3183 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pCacheRec);
3184 if (rc != VINF_SUCCESS)
3185 {
3186 Log(("PATMR3PatchCli: patmRecompileCodeStream failed with %d\n", rc));
3187 goto failure;
3188 }
3189
3190 //size of patch block
3191 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3192
3193 //update free pointer in patch memory
3194 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3195 /* Round to next 8 byte boundary. */
3196 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3197
3198 pPatch->uState = PATCH_ENABLED;
3199
3200 /*
3201 * Insert into patch to guest lookup tree
3202 */
3203 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3204 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3205 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3206 AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3207 if (!rc)
3208 {
3209 rc = VERR_PATCHING_REFUSED;
3210 goto failure;
3211 }
3212
3213 /* Note that patmr3SetBranchTargets can install additional patches!! */
3214 rc = patmr3SetBranchTargets(pVM, pPatch);
3215 if (rc != VINF_SUCCESS)
3216 {
3217 Log(("PATMR3PatchCli: patmr3SetBranchTargets failed with %d\n", rc));
3218 goto failure;
3219 }
3220
3221#ifdef LOG_ENABLED
3222 Log(("Patch code ----------------------------------------------------------\n"));
3223 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3224 Log(("Patch code ends -----------------------------------------------------\n"));
3225#endif
3226
3227 Log(("Successfully installed function duplication patch at %RRv\n", pInstrGC));
3228
3229 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3230 pPatch->pTempInfo->nrIllegalInstr = 0;
3231
3232 pVM->patm.s.ulCallDepth--;
3233 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledFunctionPatches);
3234 return VINF_SUCCESS;
3235
3236failure:
3237 if (pPatchRec->CoreOffset.Key)
3238 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3239
3240 patmEmptyTree(pVM, &pPatch->FixupTree);
3241 pPatch->nrFixups = 0;
3242
3243 patmEmptyTree(pVM, &pPatch->JumpTree);
3244 pPatch->nrJumpRecs = 0;
3245
3246 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3247 pPatch->pTempInfo->nrIllegalInstr = 0;
3248
3249 /* Turn this cli patch into a dummy. */
3250 pPatch->uState = PATCH_REFUSED;
3251 pPatch->pPatchBlockOffset = 0;
3252
3253 // Give back the patch memory we no longer need
3254 Assert(orgOffsetPatchMem != (uint32_t)~0);
3255 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3256
3257 pVM->patm.s.ulCallDepth--;
3258 Log(("patmDupicateFunction %RRv failed!!\n", pInstrGC));
3259 return rc;
3260}
3261
3262/**
3263 * Creates trampoline code to jump inside an existing patch
3264 *
3265 * @returns VBox status code.
3266 * @param pVM The VM to operate on.
3267 * @param pInstrGC Guest context point to privileged instruction
3268 * @param pPatchRec Patch record
3269 *
3270 */
3271static int patmCreateTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3272{
3273 PPATCHINFO pPatch = &pPatchRec->patch;
3274 RTRCPTR pPage, pPatchTargetGC = 0;
3275 uint32_t orgOffsetPatchMem = ~0;
3276 int rc = VERR_PATCHING_REFUSED;
3277 PPATCHINFO pPatchToJmp = NULL; /**< Patch the trampoline jumps to. */
3278 PTRAMPREC pTrampRec = NULL; /**< Trampoline record used to find the patch. */
3279 bool fInserted = false;
3280
3281 Log(("patmCreateTrampoline %RRv\n", pInstrGC));
3282 /* Save original offset (in case of failures later on). */
3283 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3284
3285 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3286 /** @todo we already checked this before */
3287 pPage = pInstrGC & PAGE_BASE_GC_MASK;
3288
3289 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3290 if (pPatchPage)
3291 {
3292 uint32_t i;
3293
3294 for (i=0;i<pPatchPage->cCount;i++)
3295 {
3296 if (pPatchPage->aPatch[i])
3297 {
3298 pPatchToJmp = pPatchPage->aPatch[i];
3299
3300 if ( (pPatchToJmp->flags & PATMFL_DUPLICATE_FUNCTION)
3301 && pPatchToJmp->uState == PATCH_ENABLED)
3302 {
3303 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatchToJmp, pInstrGC);
3304 if (pPatchTargetGC)
3305 {
3306 uint32_t offsetPatch = pPatchTargetGC - pVM->patm.s.pPatchMemGC;
3307 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatchToJmp->Patch2GuestAddrTree, offsetPatch, false);
3308 Assert(pPatchToGuestRec);
3309
3310 pPatchToGuestRec->fJumpTarget = true;
3311 Assert(pPatchTargetGC != pPatchToJmp->pPrivInstrGC);
3312 Log(("patmCreateTrampoline: generating jump to code inside patch at %RRv (patch target %RRv)\n", pPatchToJmp->pPrivInstrGC, pPatchTargetGC));
3313 break;
3314 }
3315 }
3316 }
3317 }
3318 }
3319 AssertReturn(pPatchPage && pPatchTargetGC && pPatchToJmp, VERR_PATCHING_REFUSED);
3320
3321 /*
3322 * Only record the trampoline patch if this is the first patch to the target
3323 * or we recorded other patches already.
3324 * The goal is to refuse refreshing function duplicates if the guest
3325 * modifies code after a saved state was loaded because it is not possible
3326 * to save the relation between trampoline and target without changing the
3327 * saved satte version.
3328 */
3329 if ( !(pPatchToJmp->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
3330 || pPatchToJmp->pTrampolinePatchesHead)
3331 {
3332 pPatchToJmp->flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
3333 pTrampRec = (PTRAMPREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pTrampRec));
3334 if (!pTrampRec)
3335 return VERR_NO_MEMORY; /* or better return VERR_PATCHING_REFUSED to let the VM continue? */
3336
3337 pTrampRec->pPatchTrampoline = pPatchRec;
3338 }
3339
3340 pPatch->nrPatch2GuestRecs = 0;
3341 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3342 pPatch->uCurPatchOffset = 0;
3343
3344 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3345 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3346 if (RT_FAILURE(rc))
3347 goto failure;
3348
3349#ifdef VBOX_WITH_STATISTICS
3350 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3351 if (RT_FAILURE(rc))
3352 goto failure;
3353#endif
3354
3355 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC);
3356 if (RT_FAILURE(rc))
3357 goto failure;
3358
3359 /*
3360 * Insert into patch to guest lookup tree
3361 */
3362 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3363 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3364 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3365 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3366 if (!fInserted)
3367 {
3368 rc = VERR_PATCHING_REFUSED;
3369 goto failure;
3370 }
3371
3372 /* size of patch block */
3373 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3374
3375 /* Update free pointer in patch memory. */
3376 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3377 /* Round to next 8 byte boundary */
3378 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3379
3380 /* There's no jump from guest to patch code. */
3381 pPatch->cbPatchJump = 0;
3382
3383 /* Enable the patch. */
3384 pPatch->uState = PATCH_ENABLED;
3385 /* We allow this patch to be called as a function. */
3386 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
3387
3388 if (pTrampRec)
3389 {
3390 pTrampRec->pNext = pPatchToJmp->pTrampolinePatchesHead;
3391 pPatchToJmp->pTrampolinePatchesHead = pTrampRec;
3392 }
3393 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledTrampoline);
3394 return VINF_SUCCESS;
3395
3396failure:
3397 if (pPatchRec->CoreOffset.Key)
3398 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3399
3400 patmEmptyTree(pVM, &pPatch->FixupTree);
3401 pPatch->nrFixups = 0;
3402
3403 patmEmptyTree(pVM, &pPatch->JumpTree);
3404 pPatch->nrJumpRecs = 0;
3405
3406 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3407 pPatch->pTempInfo->nrIllegalInstr = 0;
3408
3409 /* Turn this cli patch into a dummy. */
3410 pPatch->uState = PATCH_REFUSED;
3411 pPatch->pPatchBlockOffset = 0;
3412
3413 // Give back the patch memory we no longer need
3414 Assert(orgOffsetPatchMem != (uint32_t)~0);
3415 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3416
3417 if (pTrampRec)
3418 MMR3HeapFree(pTrampRec);
3419
3420 return rc;
3421}
3422
3423
3424/**
3425 * Patch branch target function for call/jump at specified location.
3426 * (in responds to a VINF_PATM_DUPLICATE_FUNCTION GC exit reason)
3427 *
3428 * @returns VBox status code.
3429 * @param pVM The VM to operate on.
3430 * @param pCtx Guest context
3431 *
3432 */
3433VMMR3DECL(int) PATMR3DuplicateFunctionRequest(PVM pVM, PCPUMCTX pCtx)
3434{
3435 RTRCPTR pBranchTarget, pPage;
3436 int rc;
3437 RTRCPTR pPatchTargetGC = 0;
3438
3439 pBranchTarget = pCtx->edx;
3440 pBranchTarget = SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pBranchTarget);
3441
3442 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3443 pPage = pBranchTarget & PAGE_BASE_GC_MASK;
3444
3445 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3446 if (pPatchPage)
3447 {
3448 uint32_t i;
3449
3450 for (i=0;i<pPatchPage->cCount;i++)
3451 {
3452 if (pPatchPage->aPatch[i])
3453 {
3454 PPATCHINFO pPatch = pPatchPage->aPatch[i];
3455
3456 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
3457 && pPatch->uState == PATCH_ENABLED)
3458 {
3459 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pBranchTarget);
3460 if (pPatchTargetGC)
3461 {
3462 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateUseExisting);
3463 break;
3464 }
3465 }
3466 }
3467 }
3468 }
3469
3470 if (pPatchTargetGC)
3471 {
3472 /* Create a trampoline that also sets PATM_INTERRUPTFLAG. */
3473 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_TRAMPOLINE);
3474 }
3475 else
3476 {
3477 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
3478 }
3479
3480 if (rc == VINF_SUCCESS)
3481 {
3482 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pBranchTarget);
3483 Assert(pPatchTargetGC);
3484 }
3485
3486 if (pPatchTargetGC)
3487 {
3488 pCtx->eax = pPatchTargetGC;
3489 pCtx->eax = pCtx->eax - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC; /* make it relative */
3490 }
3491 else
3492 {
3493 /* We add a dummy entry into the lookup cache so we won't get bombarded with the same requests over and over again. */
3494 pCtx->eax = 0;
3495 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQFailed);
3496 }
3497 Assert(PATMIsPatchGCAddr(pVM, pCtx->edi));
3498 rc = PATMAddBranchToLookupCache(pVM, pCtx->edi, pBranchTarget, pCtx->eax);
3499 AssertRC(rc);
3500
3501 pCtx->eip += PATM_ILLEGAL_INSTR_SIZE;
3502 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQSuccess);
3503 return VINF_SUCCESS;
3504}
3505
/**
 * Replaces a function call by a call to an existing function duplicate (or jmp -> jmp)
 *
 * @returns VBox status code.
 * @param pVM The VM to operate on.
 * @param pCpu Disassembly CPU structure ptr
 * @param pInstrGC Guest context pointer to the privileged instruction
 * @param pCacheRec Cache record ptr
 *
 */
static int patmReplaceFunctionCall(PVM pVM, DISCPUSTATE *pCpu, RTRCPTR pInstrGC, PPATMP2GLOOKUPREC pCacheRec)
{
    PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
    int rc = VERR_PATCHING_REFUSED;
    DISCPUSTATE cpu;
    RTRCPTR pTargetGC;
    PPATMPATCHREC pPatchFunction;
    uint32_t opsize;
    bool disret;
#ifdef LOG_ENABLED
    char szOutput[256];
#endif

    Assert(pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL);
    Assert((pCpu->pCurInstr->opcode == OP_CALL || pCpu->pCurInstr->opcode == OP_JMP) && pCpu->opsize == SIZEOF_NEARJUMP32);

    /* Only a near call/jmp with a 32-bit relative displacement can be redirected in place. */
    if ((pCpu->pCurInstr->opcode != OP_CALL && pCpu->pCurInstr->opcode != OP_JMP) || pCpu->opsize != SIZEOF_NEARJUMP32)
    {
        rc = VERR_PATCHING_REFUSED;
        goto failure;
    }

    pTargetGC = PATMResolveBranch(pCpu, pInstrGC);
    if (pTargetGC == 0)
    {
        Log(("We don't support far jumps here!! (%08X)\n", pCpu->param1.flags));
        rc = VERR_PATCHING_REFUSED;
        goto failure;
    }

    /* Look up the already-installed function duplicate for the branch target. */
    pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
    if (pPatchFunction == NULL)
    {
        for(;;)
        {
            /* It could be an indirect call (call -> jmp dest).
             * Note that it's dangerous to assume the jump will never change...
             */
            uint8_t *pTmpInstrHC;

            pTmpInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pTargetGC);
            Assert(pTmpInstrHC);
            if (pTmpInstrHC == 0)
                break;

            cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
            disret = PATMR3DISInstr(pVM, pPatch, &cpu, pTargetGC, pTmpInstrHC, &opsize, NULL);
            if (disret == false || cpu.pCurInstr->opcode != OP_JMP)
                break;

            /* Follow exactly one level of jmp indirection and retry the lookup. */
            pTargetGC = PATMResolveBranch(&cpu, pTargetGC);
            if (pTargetGC == 0)
            {
                break;
            }

            pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
            break;
        }
        if (pPatchFunction == 0)
        {
            AssertMsgFailed(("Unable to find duplicate function %RRv\n", pTargetGC));
            rc = VERR_PATCHING_REFUSED;
            goto failure;
        }
    }

    // make a copy of the guest code bytes that will be overwritten
    pPatch->cbPatchJump = SIZEOF_NEARJUMP32;

    rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
    AssertRC(rc);

    /* Now replace the original call in the guest code */
    rc = patmGenCallToPatch(pVM, pPatch, PATCHCODE_PTR_GC(&pPatchFunction->patch), pCacheRec, true);
    AssertRC(rc);
    if (RT_FAILURE(rc))
        goto failure;

    /* Lowest and highest address for write monitoring. */
    pPatch->pInstrGCLowest = pInstrGC;
    pPatch->pInstrGCHighest = pInstrGC + pCpu->opsize;

#ifdef LOG_ENABLED
    cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
    disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput);
    Log(("Call patch: %s", szOutput));
#endif

    Log(("Successfully installed function replacement patch at %RRv\n", pInstrGC));

    pPatch->uState = PATCH_ENABLED;
    return VINF_SUCCESS;

failure:
    /* Turn this patch into a dummy. */
    pPatch->uState = PATCH_REFUSED;

    return rc;
}
3616
3617/**
3618 * Replace the address in an MMIO instruction with the cached version.
3619 *
3620 * @returns VBox status code.
3621 * @param pVM The VM to operate on.
3622 * @param pInstrGC Guest context point to privileged instruction
3623 * @param pCpu Disassembly CPU structure ptr
3624 * @param pCacheRec Cache record ptr
3625 *
3626 * @note returns failure if patching is not allowed or possible
3627 *
3628 */
static int patmPatchMMIOInstr(PVM pVM, RTRCPTR pInstrGC, DISCPUSTATE *pCpu, PPATMP2GLOOKUPREC pCacheRec)
{
    PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
    uint8_t *pPB;
    int rc = VERR_PATCHING_REFUSED;
#ifdef LOG_ENABLED
    DISCPUSTATE cpu;
    uint32_t opsize;
    bool disret;
    char szOutput[256];
#endif

    /* The caller must have primed the single-entry MMIO cache beforehand. */
    Assert(pVM->patm.s.mmio.pCachedData);
    if (!pVM->patm.s.mmio.pCachedData)
        goto failure;

    /* Only instructions whose second operand is a plain 32-bit displacement can be rewritten in place. */
    if (pCpu->param2.flags != USE_DISPLACEMENT32)
        goto failure;

    pPB = PATMGCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
    if (pPB == 0)
        goto failure;

    /* Add relocation record for cached data access. */
    if (patmPatchAddReloc32(pVM, pPatch, &pPB[pCpu->opsize - sizeof(RTRCPTR)], FIXUP_ABSOLUTE, pPatch->pPrivInstrGC, pVM->patm.s.mmio.pCachedData) != VINF_SUCCESS)
    {
        Log(("Relocation failed for cached mmio address!!\n"));
        return VERR_PATCHING_REFUSED;
    }
#ifdef LOG_ENABLED
    cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
    disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput);
    Log(("MMIO patch old instruction: %s", szOutput));
#endif

    /* Save original instruction. */
    rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
    AssertRC(rc);

    pPatch->cbPatchJump = pPatch->cbPrivInstr; /* bit of a misnomer in this case; size of replacement instruction. */

    /* Replace address with that of the cached item. (the displacement occupies the last 4 bytes of the instruction) */
    rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pInstrGC + pCpu->opsize - sizeof(RTRCPTR), &pVM->patm.s.mmio.pCachedData, sizeof(RTRCPTR));
    AssertRC(rc);
    if (RT_FAILURE(rc))
    {
        goto failure;
    }

#ifdef LOG_ENABLED
    cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
    disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput);
    Log(("MMIO patch: %s", szOutput));
#endif
    /* The cached entry is consumed; clear it so it cannot be reused by accident. */
    pVM->patm.s.mmio.pCachedData = 0;
    pVM->patm.s.mmio.GCPhys = 0;
    pPatch->uState = PATCH_ENABLED;
    return VINF_SUCCESS;

failure:
    /* Turn this patch into a dummy. */
    pPatch->uState = PATCH_REFUSED;

    return rc;
}
3694
3695
3696/**
3697 * Replace the address in an MMIO instruction with the cached version. (instruction is part of an existing patch)
3698 *
3699 * @returns VBox status code.
3700 * @param pVM The VM to operate on.
3701 * @param pInstrGC Guest context point to privileged instruction
3702 * @param pPatch Patch record
3703 *
3704 * @note returns failure if patching is not allowed or possible
3705 *
3706 */
static int patmPatchPATMMMIOInstr(PVM pVM, RTRCPTR pInstrGC, PPATCHINFO pPatch)
{
    DISCPUSTATE cpu;
    uint32_t opsize;
    bool disret;
    uint8_t *pInstrHC;
#ifdef LOG_ENABLED
    char szOutput[256];
#endif

    /* The caller must have primed the single-entry MMIO cache beforehand. */
    AssertReturn(pVM->patm.s.mmio.pCachedData, VERR_INVALID_PARAMETER);

    /* Convert GC to HC address. */
    pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pInstrGC);
    AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);

    /* Disassemble mmio instruction. */
    cpu.mode = pPatch->uOpMode;
    disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, NULL);
    if (disret == false)
    {
        Log(("Disassembly failed (probably page not present) -> return to caller\n"));
        return VERR_PATCHING_REFUSED;
    }

    AssertMsg(opsize <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", opsize));
    if (opsize > MAX_INSTR_SIZE)
        return VERR_PATCHING_REFUSED;
    /* Only instructions whose second operand is a plain 32-bit displacement can be rewritten in place. */
    if (cpu.param2.flags != USE_DISPLACEMENT32)
        return VERR_PATCHING_REFUSED;

    /* Add relocation record for cached data access. */
    if (patmPatchAddReloc32(pVM, pPatch, &pInstrHC[cpu.opsize - sizeof(RTRCPTR)], FIXUP_ABSOLUTE) != VINF_SUCCESS)
    {
        Log(("Relocation failed for cached mmio address!!\n"));
        return VERR_PATCHING_REFUSED;
    }
    /* Replace address with that of the cached item.  Patch memory is writable
     * from ring-3, so the displacement is patched directly through the HC mapping. */
    *(RTRCPTR *)&pInstrHC[cpu.opsize - sizeof(RTRCPTR)] = pVM->patm.s.mmio.pCachedData;

    /* Lowest and highest address for write monitoring. */
    pPatch->pInstrGCLowest = pInstrGC;
    pPatch->pInstrGCHighest = pInstrGC + cpu.opsize;

#ifdef LOG_ENABLED
    cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
    disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, szOutput);
    Log(("MMIO patch: %s", szOutput));
#endif

    /* The cached entry is consumed; clear it so it cannot be reused by accident. */
    pVM->patm.s.mmio.pCachedData = 0;
    pVM->patm.s.mmio.GCPhys = 0;
    return VINF_SUCCESS;
}
3761
3762/**
3763 * Activates an int3 patch
3764 *
3765 * @returns VBox status code.
3766 * @param pVM The VM to operate on.
3767 * @param pPatch Patch record
3768 */
3769static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3770{
3771 uint8_t ASMInt3 = 0xCC;
3772 int rc;
3773
3774 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3775 Assert(pPatch->uState != PATCH_ENABLED);
3776
3777 /* Replace first opcode byte with 'int 3'. */
3778 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, &ASMInt3, sizeof(ASMInt3));
3779 AssertRC(rc);
3780
3781 pPatch->cbPatchJump = sizeof(ASMInt3);
3782
3783 return rc;
3784}
3785
3786/**
3787 * Deactivates an int3 patch
3788 *
3789 * @returns VBox status code.
3790 * @param pVM The VM to operate on.
3791 * @param pPatch Patch record
3792 */
3793static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3794{
3795 uint8_t ASMInt3 = 0xCC;
3796 int rc;
3797
3798 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3799 Assert(pPatch->uState == PATCH_ENABLED || pPatch->uState == PATCH_DIRTY);
3800
3801 /* Restore first opcode byte. */
3802 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, sizeof(ASMInt3));
3803 AssertRC(rc);
3804 return rc;
3805}
3806
3807/**
3808 * Replace an instruction with a breakpoint (0xCC), that is handled dynamically in the guest context.
3809 *
3810 * @returns VBox status code.
3811 * @param pVM The VM to operate on.
3812 * @param pInstrGC Guest context point to privileged instruction
3813 * @param pInstrHC Host context point to privileged instruction
3814 * @param pCpu Disassembly CPU structure ptr
3815 * @param pPatch Patch record
3816 *
3817 * @note returns failure if patching is not allowed or possible
3818 *
3819 */
VMMR3DECL(int) PATMR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATCHINFO pPatch)
{
    uint8_t ASMInt3 = 0xCC;
    int rc;

    /* Note: Do not use patch memory here! It might be called during patch installation too. */

#ifdef LOG_ENABLED
    DISCPUSTATE cpu;
    char szOutput[256];
    uint32_t opsize;

    cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
    PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, szOutput);
    Log(("PATMR3PatchInstrInt3: %s", szOutput));
#endif

    /* Save the original instruction; patmDeactivateInt3Patch restores the first byte from this copy. */
    rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
    AssertRC(rc);
    pPatch->cbPatchJump = sizeof(ASMInt3); /* bit of a misnomer in this case; size of replacement instruction. */

    pPatch->flags |= PATMFL_INT3_REPLACEMENT;

    /* Replace first opcode byte with 'int 3'. */
    rc = patmActivateInt3Patch(pVM, pPatch);
    if (RT_FAILURE(rc))
        goto failure;

    /* Lowest and highest address for write monitoring. */
    pPatch->pInstrGCLowest = pInstrGC;
    pPatch->pInstrGCHighest = pInstrGC + pCpu->opsize;

    pPatch->uState = PATCH_ENABLED;
    return VINF_SUCCESS;

failure:
    /* Turn this patch into a dummy. */
    return VERR_PATCHING_REFUSED;
}
3860
3861#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
3862/**
3863 * Patch a jump instruction at specified location
3864 *
3865 * @returns VBox status code.
3866 * @param pVM The VM to operate on.
3867 * @param pInstrGC Guest context point to privileged instruction
3868 * @param pInstrHC Host context point to privileged instruction
3869 * @param pCpu Disassembly CPU structure ptr
3870 * @param pPatchRec Patch record
3871 *
3872 * @note returns failure if patching is not allowed or possible
3873 *
3874 */
3875int patmPatchJump(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATMPATCHREC pPatchRec)
3876{
3877 PPATCHINFO pPatch = &pPatchRec->patch;
3878 int rc = VERR_PATCHING_REFUSED;
3879#ifdef LOG_ENABLED
3880 bool disret;
3881 DISCPUSTATE cpu;
3882 uint32_t opsize;
3883 char szOutput[256];
3884#endif
3885
3886 pPatch->pPatchBlockOffset = 0; /* doesn't use patch memory */
3887 pPatch->uCurPatchOffset = 0;
3888 pPatch->cbPatchBlockSize = 0;
3889 pPatch->flags |= PATMFL_SINGLE_INSTRUCTION;
3890
3891 /*
3892 * Instruction replacements such as these should never be interrupted. I've added code to EM.cpp to
3893 * make sure this never happens. (unless a trap is triggered (intentionally or not))
3894 */
3895 switch (pCpu->pCurInstr->opcode)
3896 {
3897 case OP_JO:
3898 case OP_JNO:
3899 case OP_JC:
3900 case OP_JNC:
3901 case OP_JE:
3902 case OP_JNE:
3903 case OP_JBE:
3904 case OP_JNBE:
3905 case OP_JS:
3906 case OP_JNS:
3907 case OP_JP:
3908 case OP_JNP:
3909 case OP_JL:
3910 case OP_JNL:
3911 case OP_JLE:
3912 case OP_JNLE:
3913 case OP_JMP:
3914 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
3915 Assert(pCpu->param1.flags & USE_IMMEDIATE32_REL);
3916 if (!(pCpu->param1.flags & USE_IMMEDIATE32_REL))
3917 goto failure;
3918
3919 Assert(pCpu->opsize == SIZEOF_NEARJUMP32 || pCpu->opsize == SIZEOF_NEAR_COND_JUMP32);
3920 if (pCpu->opsize != SIZEOF_NEARJUMP32 && pCpu->opsize != SIZEOF_NEAR_COND_JUMP32)
3921 goto failure;
3922
3923 if (PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + pCpu->opsize))
3924 {
3925 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
3926 AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
3927 rc = VERR_PATCHING_REFUSED;
3928 goto failure;
3929 }
3930
3931 break;
3932
3933 default:
3934 goto failure;
3935 }
3936
3937 // make a copy of the guest code bytes that will be overwritten
3938 Assert(pCpu->opsize <= sizeof(pPatch->aPrivInstr));
3939 Assert(pCpu->opsize >= SIZEOF_NEARJUMP32);
3940 pPatch->cbPatchJump = pCpu->opsize;
3941
3942 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3943 AssertRC(rc);
3944
3945 /* Now insert a jump in the guest code. */
3946 /*
3947 * A conflict jump patch needs to be treated differently; we'll just replace the relative jump address with one that
3948 * references the target instruction in the conflict patch.
3949 */
3950 RTRCPTR pJmpDest = PATMR3GuestGCPtrToPatchGCPtr(pVM, pInstrGC + pCpu->opsize + (int32_t)pCpu->param1.parval);
3951
3952 AssertMsg(pJmpDest, ("PATMR3GuestGCPtrToPatchGCPtr failed for %RRv\n", pInstrGC + pCpu->opsize + (int32_t)pCpu->param1.parval));
3953 pPatch->pPatchJumpDestGC = pJmpDest;
3954
3955 PATMP2GLOOKUPREC cacheRec;
3956 RT_ZERO(cacheRec);
3957 cacheRec.pPatch = pPatch;
3958
3959 rc = patmGenJumpToPatch(pVM, pPatch, &cacherec, true);
3960 /* Free leftover lock if any. */
3961 if (cacheRec.Lock.pvMap)
3962 {
3963 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
3964 cacheRec.Lock.pvMap = NULL;
3965 }
3966 AssertRC(rc);
3967 if (RT_FAILURE(rc))
3968 goto failure;
3969
3970 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
3971
3972#ifdef LOG_ENABLED
3973 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3974 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput);
3975 Log(("%s patch: %s", patmGetInstructionString(pPatch->opcode, pPatch->flags), szOutput));
3976#endif
3977
3978 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
3979
3980 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledJump);
3981
3982 /* Lowest and highest address for write monitoring. */
3983 pPatch->pInstrGCLowest = pInstrGC;
3984 pPatch->pInstrGCHighest = pInstrGC + pPatch->cbPatchJump;
3985
3986 pPatch->uState = PATCH_ENABLED;
3987 return VINF_SUCCESS;
3988
3989failure:
3990 /* Turn this cli patch into a dummy. */
3991 pPatch->uState = PATCH_REFUSED;
3992
3993 return rc;
3994}
3995#endif /* PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES */
3996
3997
/**
 * Gives hint to PATM about supervisor guest instructions
 *
 * @returns VBox status code.
 * @param pVM The VM to operate on.
 * @param pInstrGC Guest context pointer to the privileged instruction
 * @param flags Patch flags; must be PATMFL_CODE32 (asserted below)
 */
VMMR3DECL(int) PATMR3AddHint(PVM pVM, RTRCPTR pInstrGC, uint32_t flags)
{
    Assert(pInstrGC);
    Assert(flags == PATMFL_CODE32);

    Log(("PATMR3AddHint %RRv\n", pInstrGC));
    /* Install as a hint; PATMR3InstallPatch disables hint patches until they are actually encountered. */
    return PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_INSTR_HINT);
}
4014
4015/**
4016 * Patch privileged instruction at specified location
4017 *
4018 * @returns VBox status code.
4019 * @param pVM The VM to operate on.
4020 * @param pInstr Guest context point to privileged instruction (0:32 flat address)
4021 * @param flags Patch flags
4022 *
4023 * @note returns failure if patching is not allowed or possible
4024 */
4025VMMR3DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
4026{
4027 DISCPUSTATE cpu;
4028 R3PTRTYPE(uint8_t *) pInstrHC;
4029 uint32_t opsize;
4030 PPATMPATCHREC pPatchRec;
4031 PCPUMCTX pCtx = 0;
4032 bool disret;
4033 int rc;
4034 PVMCPU pVCpu = VMMGetCpu0(pVM);
4035
4036 if ( !pVM
4037 || pInstrGC == 0
4038 || (flags & ~(PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_SYSENTER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_REPLACE_FUNCTION_CALL|PATMFL_GUEST_SPECIFIC|PATMFL_INT3_REPLACEMENT|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_MMIO_ACCESS|PATMFL_TRAMPOLINE|PATMFL_INSTR_HINT|PATMFL_JUMP_CONFLICT)))
4039 {
4040 AssertFailed();
4041 return VERR_INVALID_PARAMETER;
4042 }
4043
4044 if (PATMIsEnabled(pVM) == false)
4045 return VERR_PATCHING_REFUSED;
4046
4047 /* Test for patch conflict only with patches that actually change guest code. */
4048 if (!(flags & (PATMFL_GUEST_SPECIFIC|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAMPOLINE)))
4049 {
4050 PPATCHINFO pConflictPatch = PATMFindActivePatchByEntrypoint(pVM, pInstrGC);
4051 AssertReleaseMsg(pConflictPatch == 0, ("Unable to patch overwritten instruction at %RRv (%RRv)\n", pInstrGC, pConflictPatch->pPrivInstrGC));
4052 if (pConflictPatch != 0)
4053 return VERR_PATCHING_REFUSED;
4054 }
4055
4056 if (!(flags & PATMFL_CODE32))
4057 {
4058 /** @todo Only 32 bits code right now */
4059 AssertMsgFailed(("PATMR3InstallPatch: We don't support 16 bits code at this moment!!\n"));
4060 return VERR_NOT_IMPLEMENTED;
4061 }
4062
4063 /* We ran out of patch memory; don't bother anymore. */
4064 if (pVM->patm.s.fOutOfMemory == true)
4065 return VERR_PATCHING_REFUSED;
4066
4067 /* Make sure the code selector is wide open; otherwise refuse. */
4068 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
4069 if (CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)) == 0)
4070 {
4071 RTRCPTR pInstrGCFlat = SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pInstrGC);
4072 if (pInstrGCFlat != pInstrGC)
4073 {
4074 Log(("PATMR3InstallPatch: code selector not wide open: %04x:%RRv != %RRv eflags=%08x\n", pCtx->cs, pInstrGCFlat, pInstrGC, pCtx->eflags.u32));
4075 return VERR_PATCHING_REFUSED;
4076 }
4077 }
4078
4079 /* Note: the OpenBSD specific check will break if we allow additional patches to be installed (int 3)) */
4080 if (!(flags & PATMFL_GUEST_SPECIFIC))
4081 {
4082 /* New code. Make sure CSAM has a go at it first. */
4083 CSAMR3CheckCode(pVM, pInstrGC);
4084 }
4085
4086 /* Note: obsolete */
4087 if ( PATMIsPatchGCAddr(pVM, pInstrGC)
4088 && (flags & PATMFL_MMIO_ACCESS))
4089 {
4090 RTRCUINTPTR offset;
4091 void *pvPatchCoreOffset;
4092
4093 /* Find the patch record. */
4094 offset = pInstrGC - pVM->patm.s.pPatchMemGC;
4095 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
4096 if (pvPatchCoreOffset == NULL)
4097 {
4098 AssertMsgFailed(("PATMR3InstallPatch: patch not found at address %RRv!!\n", pInstrGC));
4099 return VERR_PATCH_NOT_FOUND; //fatal error
4100 }
4101 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
4102
4103 return patmPatchPATMMMIOInstr(pVM, pInstrGC, &pPatchRec->patch);
4104 }
4105
4106 AssertReturn(!PATMIsPatchGCAddr(pVM, pInstrGC), VERR_PATCHING_REFUSED);
4107
4108 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4109 if (pPatchRec)
4110 {
4111 Assert(!(flags & PATMFL_TRAMPOLINE));
4112
4113 /* Hints about existing patches are ignored. */
4114 if (flags & PATMFL_INSTR_HINT)
4115 return VERR_PATCHING_REFUSED;
4116
4117 if (pPatchRec->patch.uState == PATCH_DISABLE_PENDING)
4118 {
4119 Log(("PATMR3InstallPatch: disable operation is pending for patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
4120 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
4121 Assert(pPatchRec->patch.uState == PATCH_DISABLED);
4122 }
4123
4124 if (pPatchRec->patch.uState == PATCH_DISABLED)
4125 {
4126 /* A patch, for which we previously received a hint, will be enabled and turned into a normal patch. */
4127 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4128 {
4129 Log(("Enabling HINTED patch %RRv\n", pInstrGC));
4130 pPatchRec->patch.flags &= ~PATMFL_INSTR_HINT;
4131 }
4132 else
4133 Log(("Enabling patch %RRv again\n", pInstrGC));
4134
4135 /** @todo we shouldn't disable and enable patches too often (it's relatively cheap, but pointless if it always happens) */
4136 rc = PATMR3EnablePatch(pVM, pInstrGC);
4137 if (RT_SUCCESS(rc))
4138 return VWRN_PATCH_ENABLED;
4139
4140 return rc;
4141 }
4142 if ( pPatchRec->patch.uState == PATCH_ENABLED
4143 || pPatchRec->patch.uState == PATCH_DIRTY)
4144 {
4145 /*
4146 * The patch might have been overwritten.
4147 */
4148 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
4149 if (pPatchRec->patch.uState != PATCH_REFUSED && pPatchRec->patch.uState != PATCH_UNUSABLE)
4150 {
4151 /* Patch must have been overwritten; remove it and pretend nothing happened. */
4152 Log(("Patch an existing patched instruction?!? (%RRv)\n", pInstrGC));
4153 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_MMIO_ACCESS|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
4154 {
4155 if (flags & PATMFL_IDTHANDLER)
4156 pPatchRec->patch.flags |= (flags & (PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER|PATMFL_INTHANDLER)); /* update the type */
4157
4158 return VERR_PATM_ALREADY_PATCHED; /* already done once */
4159 }
4160 }
4161 rc = PATMR3RemovePatch(pVM, pInstrGC);
4162 if (RT_FAILURE(rc))
4163 return VERR_PATCHING_REFUSED;
4164 }
4165 else
4166 {
4167 AssertMsg(pPatchRec->patch.uState == PATCH_REFUSED || pPatchRec->patch.uState == PATCH_UNUSABLE, ("Patch an existing patched instruction?!? (%RRv, state=%d)\n", pInstrGC, pPatchRec->patch.uState));
4168 /* already tried it once! */
4169 return VERR_PATCHING_REFUSED;
4170 }
4171 }
4172
4173 RTGCPHYS GCPhys;
4174 rc = PGMGstGetPage(pVCpu, pInstrGC, NULL, &GCPhys);
4175 if (rc != VINF_SUCCESS)
4176 {
4177 Log(("PGMGstGetPage failed with %Rrc\n", rc));
4178 return rc;
4179 }
4180 /* Disallow patching instructions inside ROM code; complete function duplication is allowed though. */
4181 if ( !(flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAMPOLINE))
4182 && !PGMPhysIsGCPhysNormal(pVM, GCPhys))
4183 {
4184 Log(("Code at %RGv (phys %RGp) is in a ROM, MMIO or invalid page - refused\n", pInstrGC, GCPhys));
4185 return VERR_PATCHING_REFUSED;
4186 }
4187
4188 /* Initialize cache record for guest address translations. */
4189 PATMP2GLOOKUPREC cacheRec;
4190 RT_ZERO(cacheRec);
4191
4192 pInstrHC = PATMGCVirtToHCVirt(pVM, &cacheRec, pInstrGC);
4193 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
4194
4195 /* Allocate patch record. */
4196 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
4197 if (RT_FAILURE(rc))
4198 {
4199 Log(("Out of memory!!!!\n"));
4200 return VERR_NO_MEMORY;
4201 }
4202 pPatchRec->Core.Key = pInstrGC;
4203 pPatchRec->patch.uState = PATCH_REFUSED; /* default value */
4204 /* Insert patch record into the lookup tree. */
4205 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
4206 Assert(rc);
4207
4208 pPatchRec->patch.pPrivInstrGC = pInstrGC;
4209 pPatchRec->patch.flags = flags;
4210 pPatchRec->patch.uOpMode = (flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
4211 pPatchRec->patch.pTrampolinePatchesHead = NULL;
4212
4213 pPatchRec->patch.pInstrGCLowest = pInstrGC;
4214 pPatchRec->patch.pInstrGCHighest = pInstrGC;
4215
4216 if (!(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION | PATMFL_IDTHANDLER | PATMFL_SYSENTER | PATMFL_TRAMPOLINE)))
4217 {
4218 /*
4219 * Close proximity to an unusable patch is a possible hint that this patch would turn out to be dangerous too!
4220 */
4221 PPATMPATCHREC pPatchNear = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, (pInstrGC + SIZEOF_NEARJUMP32 - 1), false);
4222 if (pPatchNear)
4223 {
4224 if (pPatchNear->patch.uState == PATCH_UNUSABLE && pInstrGC < pPatchNear->patch.pPrivInstrGC && pInstrGC + SIZEOF_NEARJUMP32 > pPatchNear->patch.pPrivInstrGC)
4225 {
4226 Log(("Dangerous patch; would overwrite the unusable patch at %RRv\n", pPatchNear->patch.pPrivInstrGC));
4227
4228 pPatchRec->patch.uState = PATCH_UNUSABLE;
4229 /*
4230 * Leave the new patch active as it's marked unusable; to prevent us from checking it over and over again
4231 */
4232 return VERR_PATCHING_REFUSED;
4233 }
4234 }
4235 }
4236
4237 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
4238 if (pPatchRec->patch.pTempInfo == 0)
4239 {
4240 Log(("Out of memory!!!!\n"));
4241 return VERR_NO_MEMORY;
4242 }
4243
4244 cpu.mode = pPatchRec->patch.uOpMode;
4245 disret = PATMR3DISInstr(pVM, &pPatchRec->patch, &cpu, pInstrGC, NULL, &opsize, NULL);
4246 if (disret == false)
4247 {
4248 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
4249 return VERR_PATCHING_REFUSED;
4250 }
4251
4252 AssertMsg(opsize <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", opsize));
4253 if (opsize > MAX_INSTR_SIZE)
4254 return VERR_PATCHING_REFUSED;
4255
4256 pPatchRec->patch.cbPrivInstr = opsize;
4257 pPatchRec->patch.opcode = cpu.pCurInstr->opcode;
4258
4259 /* Restricted hinting for now. */
4260 Assert(!(flags & PATMFL_INSTR_HINT) || cpu.pCurInstr->opcode == OP_CLI);
4261
4262 /* Initialize cache record patch pointer. */
4263 cacheRec.pPatch = &pPatchRec->patch;
4264
4265 /* Allocate statistics slot */
4266 if (pVM->patm.s.uCurrentPatchIdx < PATM_STAT_MAX_COUNTERS)
4267 {
4268 pPatchRec->patch.uPatchIdx = pVM->patm.s.uCurrentPatchIdx++;
4269 }
4270 else
4271 {
4272 Log(("WARNING: Patch index wrap around!!\n"));
4273 pPatchRec->patch.uPatchIdx = PATM_STAT_INDEX_DUMMY;
4274 }
4275
4276 if (pPatchRec->patch.flags & PATMFL_TRAPHANDLER)
4277 {
4278 rc = patmInstallTrapTrampoline(pVM, pInstrGC, pPatchRec, &cacheRec);
4279 }
4280 else
4281 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION ))
4282 {
4283 rc = patmDuplicateFunction(pVM, pInstrGC, pPatchRec, &cacheRec);
4284 }
4285 else
4286 if (pPatchRec->patch.flags & PATMFL_TRAMPOLINE)
4287 {
4288 rc = patmCreateTrampoline(pVM, pInstrGC, pPatchRec);
4289 }
4290 else
4291 if (pPatchRec->patch.flags & PATMFL_REPLACE_FUNCTION_CALL)
4292 {
4293 rc = patmReplaceFunctionCall(pVM, &cpu, pInstrGC, &cacheRec);
4294 }
4295 else
4296 if (pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT)
4297 {
4298 rc = PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4299 }
4300 else
4301 if (pPatchRec->patch.flags & PATMFL_MMIO_ACCESS)
4302 {
4303 rc = patmPatchMMIOInstr(pVM, pInstrGC, &cpu, &cacheRec);
4304 }
4305 else
4306 if (pPatchRec->patch.flags & (PATMFL_IDTHANDLER|PATMFL_SYSENTER))
4307 {
4308 if (pPatchRec->patch.flags & PATMFL_SYSENTER)
4309 pPatchRec->patch.flags |= PATMFL_IDTHANDLER; /* we treat a sysenter handler as an IDT handler */
4310
4311 rc = patmIdtHandler(pVM, pInstrGC, opsize, pPatchRec, &cacheRec);
4312#ifdef VBOX_WITH_STATISTICS
4313 if ( rc == VINF_SUCCESS
4314 && (pPatchRec->patch.flags & PATMFL_SYSENTER))
4315 {
4316 pVM->patm.s.uSysEnterPatchIdx = pPatchRec->patch.uPatchIdx;
4317 }
4318#endif
4319 }
4320 else
4321 if (pPatchRec->patch.flags & PATMFL_GUEST_SPECIFIC)
4322 {
4323 switch (cpu.pCurInstr->opcode)
4324 {
4325 case OP_SYSENTER:
4326 case OP_PUSH:
4327 rc = PATMInstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4328 if (rc == VINF_SUCCESS)
4329 {
4330 if (rc == VINF_SUCCESS)
4331 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4332 return rc;
4333 }
4334 break;
4335
4336 default:
4337 rc = VERR_NOT_IMPLEMENTED;
4338 break;
4339 }
4340 }
4341 else
4342 {
4343 switch (cpu.pCurInstr->opcode)
4344 {
4345 case OP_SYSENTER:
4346 rc = PATMInstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4347 if (rc == VINF_SUCCESS)
4348 {
4349 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4350 return VINF_SUCCESS;
4351 }
4352 break;
4353
4354#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
4355 case OP_JO:
4356 case OP_JNO:
4357 case OP_JC:
4358 case OP_JNC:
4359 case OP_JE:
4360 case OP_JNE:
4361 case OP_JBE:
4362 case OP_JNBE:
4363 case OP_JS:
4364 case OP_JNS:
4365 case OP_JP:
4366 case OP_JNP:
4367 case OP_JL:
4368 case OP_JNL:
4369 case OP_JLE:
4370 case OP_JNLE:
4371 case OP_JECXZ:
4372 case OP_LOOP:
4373 case OP_LOOPNE:
4374 case OP_LOOPE:
4375 case OP_JMP:
4376 if (pPatchRec->patch.flags & PATMFL_JUMP_CONFLICT)
4377 {
4378 rc = patmPatchJump(pVM, pInstrGC, pInstrHC, &cpu, pPatchRec);
4379 break;
4380 }
4381 return VERR_NOT_IMPLEMENTED;
4382#endif
4383
4384 case OP_PUSHF:
4385 case OP_CLI:
4386 Log(("PATMR3InstallPatch %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4387 rc = PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, cpu.pCurInstr->opcode, opsize, pPatchRec);
4388 break;
4389
4390 case OP_STR:
4391 case OP_SGDT:
4392 case OP_SLDT:
4393 case OP_SIDT:
4394 case OP_CPUID:
4395 case OP_LSL:
4396 case OP_LAR:
4397 case OP_SMSW:
4398 case OP_VERW:
4399 case OP_VERR:
4400 case OP_IRET:
4401 rc = PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4402 break;
4403
4404 default:
4405 return VERR_NOT_IMPLEMENTED;
4406 }
4407 }
4408
4409 if (rc != VINF_SUCCESS)
4410 {
4411 if (pPatchRec && pPatchRec->patch.nrPatch2GuestRecs)
4412 {
4413 patmEmptyTreeU32(pVM, &pPatchRec->patch.Patch2GuestAddrTree);
4414 pPatchRec->patch.nrPatch2GuestRecs = 0;
4415 }
4416 pVM->patm.s.uCurrentPatchIdx--;
4417 }
4418 else
4419 {
4420 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
4421 AssertRCReturn(rc, rc);
4422
4423 /* Keep track upper and lower boundaries of patched instructions */
4424 if (pPatchRec->patch.pInstrGCLowest < pVM->patm.s.pPatchedInstrGCLowest)
4425 pVM->patm.s.pPatchedInstrGCLowest = pPatchRec->patch.pInstrGCLowest;
4426 if (pPatchRec->patch.pInstrGCHighest > pVM->patm.s.pPatchedInstrGCHighest)
4427 pVM->patm.s.pPatchedInstrGCHighest = pPatchRec->patch.pInstrGCHighest;
4428
4429 Log(("Patch lowest %RRv highest %RRv\n", pPatchRec->patch.pInstrGCLowest, pPatchRec->patch.pInstrGCHighest));
4430 Log(("Global lowest %RRv highest %RRv\n", pVM->patm.s.pPatchedInstrGCLowest, pVM->patm.s.pPatchedInstrGCHighest));
4431
4432 STAM_COUNTER_ADD(&pVM->patm.s.StatInstalled, 1);
4433 STAM_COUNTER_ADD(&pVM->patm.s.StatPATMMemoryUsed, pPatchRec->patch.cbPatchBlockSize);
4434
4435 rc = VINF_SUCCESS;
4436
4437 /* Patch hints are not enabled by default. Only when the are actually encountered. */
4438 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4439 {
4440 rc = PATMR3DisablePatch(pVM, pInstrGC);
4441 AssertRCReturn(rc, rc);
4442 }
4443
4444#ifdef VBOX_WITH_STATISTICS
4445 /* Register statistics counter */
4446 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
4447 {
4448 STAMR3RegisterCallback(pVM, &pPatchRec->patch, STAMVISIBILITY_NOT_GUI, STAMUNIT_GOOD_BAD, patmResetStat, patmPrintStat, "Patch statistics",
4449 "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4450#ifndef DEBUG_sandervl
4451 /* Full breakdown for the GUI. */
4452 STAMR3RegisterF(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx], STAMTYPE_RATIO_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_GOOD_BAD, PATMPatchType(pVM, &pPatchRec->patch),
4453 "/PATM/Stats/PatchBD/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4454 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchBlockSize,STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchBlockSize", pPatchRec->patch.pPrivInstrGC);
4455 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchJump, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchJump", pPatchRec->patch.pPrivInstrGC);
4456 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPrivInstr, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPrivInstr", pPatchRec->patch.pPrivInstrGC);
4457 STAMR3RegisterF(pVM, &pPatchRec->patch.cCodeWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cCodeWrites", pPatchRec->patch.pPrivInstrGC);
4458 STAMR3RegisterF(pVM, &pPatchRec->patch.cInvalidWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cInvalidWrites", pPatchRec->patch.pPrivInstrGC);
4459 STAMR3RegisterF(pVM, &pPatchRec->patch.cTraps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cTraps", pPatchRec->patch.pPrivInstrGC);
4460 STAMR3RegisterF(pVM, &pPatchRec->patch.flags, STAMTYPE_X32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/flags", pPatchRec->patch.pPrivInstrGC);
4461 STAMR3RegisterF(pVM, &pPatchRec->patch.nrJumpRecs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrJumpRecs", pPatchRec->patch.pPrivInstrGC);
4462 STAMR3RegisterF(pVM, &pPatchRec->patch.nrFixups, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrFixups", pPatchRec->patch.pPrivInstrGC);
4463 STAMR3RegisterF(pVM, &pPatchRec->patch.opcode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/opcode", pPatchRec->patch.pPrivInstrGC);
4464 STAMR3RegisterF(pVM, &pPatchRec->patch.uOldState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOldState", pPatchRec->patch.pPrivInstrGC);
4465 STAMR3RegisterF(pVM, &pPatchRec->patch.uOpMode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOpMode", pPatchRec->patch.pPrivInstrGC);
4466 /// @todo change the state to be a callback so we can get a state mnemonic instead.
4467 STAMR3RegisterF(pVM, &pPatchRec->patch.uState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uState", pPatchRec->patch.pPrivInstrGC);
4468#endif
4469 }
4470#endif
4471 }
4472 /* Free leftover lock if any. */
4473 if (cacheRec.Lock.pvMap)
4474 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4475 return rc;
4476}
4477
4478/**
4479 * Query instruction size
4480 *
4481 * @returns VBox status code.
4482 * @param pVM The VM to operate on.
4483 * @param pPatch Patch record
4484 * @param pInstrGC Instruction address
4485 */
4486static uint32_t patmGetInstrSize(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
4487{
4488 uint8_t *pInstrHC;
4489 PGMPAGEMAPLOCK Lock;
4490
4491 int rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pInstrGC, (const void **)&pInstrHC, &Lock);
4492 if (rc == VINF_SUCCESS)
4493 {
4494 DISCPUSTATE cpu;
4495 bool disret;
4496 uint32_t opsize;
4497
4498 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
4499 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, NULL, PATMREAD_ORGCODE | PATMREAD_NOCHECK);
4500 PGMPhysReleasePageMappingLock(pVM, &Lock);
4501 if (disret)
4502 return opsize;
4503 }
4504 return 0;
4505}
4506
4507/**
4508 * Add patch to page record
4509 *
4510 * @returns VBox status code.
4511 * @param pVM The VM to operate on.
4512 * @param pPage Page address
4513 * @param pPatch Patch record
4514 */
4515int patmAddPatchToPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4516{
4517 PPATMPATCHPAGE pPatchPage;
4518 int rc;
4519
4520 Log(("patmAddPatchToPage: insert patch %RHv to page %RRv\n", pPatch, pPage));
4521
4522 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4523 if (pPatchPage)
4524 {
4525 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4526 if (pPatchPage->cCount == pPatchPage->cMaxPatches)
4527 {
4528 uint32_t cMaxPatchesOld = pPatchPage->cMaxPatches;
4529 PPATCHINFO *paPatchOld = pPatchPage->aPatch;
4530
4531 pPatchPage->cMaxPatches += PATMPATCHPAGE_PREALLOC_INCREMENT;
4532 rc = MMHyperAlloc(pVM, sizeof(PPATCHINFO)*pPatchPage->cMaxPatches, 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage->aPatch);
4533 if (RT_FAILURE(rc))
4534 {
4535 Log(("Out of memory!!!!\n"));
4536 return VERR_NO_MEMORY;
4537 }
4538 memcpy(pPatchPage->aPatch, paPatchOld, cMaxPatchesOld*sizeof(PPATCHINFO));
4539 MMHyperFree(pVM, paPatchOld);
4540 }
4541 pPatchPage->aPatch[pPatchPage->cCount] = pPatch;
4542 pPatchPage->cCount++;
4543 }
4544 else
4545 {
4546 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHPAGE), 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage);
4547 if (RT_FAILURE(rc))
4548 {
4549 Log(("Out of memory!!!!\n"));
4550 return VERR_NO_MEMORY;
4551 }
4552 pPatchPage->Core.Key = pPage;
4553 pPatchPage->cCount = 1;
4554 pPatchPage->cMaxPatches = PATMPATCHPAGE_PREALLOC_INCREMENT;
4555
4556 rc = MMHyperAlloc(pVM, sizeof(PPATCHINFO)*PATMPATCHPAGE_PREALLOC_INCREMENT, 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage->aPatch);
4557 if (RT_FAILURE(rc))
4558 {
4559 Log(("Out of memory!!!!\n"));
4560 MMHyperFree(pVM, pPatchPage);
4561 return VERR_NO_MEMORY;
4562 }
4563 pPatchPage->aPatch[0] = pPatch;
4564
4565 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, &pPatchPage->Core);
4566 Assert(rc);
4567 pVM->patm.s.cPageRecords++;
4568
4569 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageInserted);
4570 }
4571 CSAMR3MonitorPage(pVM, pPage, CSAM_TAG_PATM);
4572
4573 /* Get the closest guest instruction (from below) */
4574 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4575 Assert(pGuestToPatchRec);
4576 if (pGuestToPatchRec)
4577 {
4578 LogFlow(("patmAddPatchToPage: lowest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pLowestAddrGC));
4579 if ( pPatchPage->pLowestAddrGC == 0
4580 || pPatchPage->pLowestAddrGC > (RTRCPTR)pGuestToPatchRec->Core.Key)
4581 {
4582 RTRCUINTPTR offset;
4583
4584 pPatchPage->pLowestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4585
4586 offset = pPatchPage->pLowestAddrGC & PAGE_OFFSET_MASK;
4587 /* If we're too close to the page boundary, then make sure an instruction from the previous page doesn't cross the boundary itself. */
4588 if (offset && offset < MAX_INSTR_SIZE)
4589 {
4590 /* Get the closest guest instruction (from above) */
4591 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage-1, false);
4592
4593 if (pGuestToPatchRec)
4594 {
4595 uint32_t size = patmGetInstrSize(pVM, pPatch, (RTRCPTR)pGuestToPatchRec->Core.Key);
4596 if ((RTRCUINTPTR)pGuestToPatchRec->Core.Key + size > pPage)
4597 {
4598 pPatchPage->pLowestAddrGC = pPage;
4599 LogFlow(("patmAddPatchToPage: new lowest %RRv\n", pPatchPage->pLowestAddrGC));
4600 }
4601 }
4602 }
4603 }
4604 }
4605
4606 /* Get the closest guest instruction (from above) */
4607 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage+PAGE_SIZE-1, false);
4608 Assert(pGuestToPatchRec);
4609 if (pGuestToPatchRec)
4610 {
4611 LogFlow(("patmAddPatchToPage: highest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pHighestAddrGC));
4612 if ( pPatchPage->pHighestAddrGC == 0
4613 || pPatchPage->pHighestAddrGC <= (RTRCPTR)pGuestToPatchRec->Core.Key)
4614 {
4615 pPatchPage->pHighestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4616 /* Increase by instruction size. */
4617 uint32_t size = patmGetInstrSize(pVM, pPatch, pPatchPage->pHighestAddrGC);
4618//// Assert(size);
4619 pPatchPage->pHighestAddrGC += size;
4620 LogFlow(("patmAddPatchToPage: new highest %RRv\n", pPatchPage->pHighestAddrGC));
4621 }
4622 }
4623
4624 return VINF_SUCCESS;
4625}
4626
4627/**
4628 * Remove patch from page record
4629 *
4630 * @returns VBox status code.
4631 * @param pVM The VM to operate on.
4632 * @param pPage Page address
4633 * @param pPatch Patch record
4634 */
4635int patmRemovePatchFromPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4636{
4637 PPATMPATCHPAGE pPatchPage;
4638 int rc;
4639
4640 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4641 Assert(pPatchPage);
4642
4643 if (!pPatchPage)
4644 return VERR_INVALID_PARAMETER;
4645
4646 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4647
4648 Log(("patmRemovePatchPage: remove patch %RHv from page %RRv\n", pPatch, pPage));
4649 if (pPatchPage->cCount > 1)
4650 {
4651 uint32_t i;
4652
4653 /* Used by multiple patches */
4654 for (i=0;i<pPatchPage->cCount;i++)
4655 {
4656 if (pPatchPage->aPatch[i] == pPatch)
4657 {
4658 pPatchPage->aPatch[i] = 0;
4659 break;
4660 }
4661 }
4662 /* close the gap between the remaining pointers. */
4663 if (i < pPatchPage->cCount - 1)
4664 {
4665 memcpy(&pPatchPage->aPatch[i], &pPatchPage->aPatch[i+1], sizeof(PPATCHINFO)*(pPatchPage->cCount - (i+1)));
4666 }
4667 AssertMsg(i < pPatchPage->cCount, ("Unable to find patch %RHv in page %RRv\n", pPatch, pPage));
4668
4669 pPatchPage->cCount--;
4670 }
4671 else
4672 {
4673 PPATMPATCHPAGE pPatchNode;
4674
4675 Log(("patmRemovePatchFromPage %RRv\n", pPage));
4676
4677 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageRemoved);
4678 pPatchNode = (PPATMPATCHPAGE)RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4679 Assert(pPatchNode && pPatchNode == pPatchPage);
4680
4681 Assert(pPatchPage->aPatch);
4682 rc = MMHyperFree(pVM, pPatchPage->aPatch);
4683 AssertRC(rc);
4684 rc = MMHyperFree(pVM, pPatchPage);
4685 AssertRC(rc);
4686 pVM->patm.s.cPageRecords--;
4687 }
4688 return VINF_SUCCESS;
4689}
4690
4691/**
4692 * Insert page records for all guest pages that contain instructions that were recompiled for this patch
4693 *
4694 * @returns VBox status code.
4695 * @param pVM The VM to operate on.
4696 * @param pPatch Patch record
4697 */
4698int patmInsertPatchPages(PVM pVM, PPATCHINFO pPatch)
4699{
4700 int rc;
4701 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4702
4703 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4704 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4705 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4706
4707 /** @todo optimize better (large gaps between current and next used page) */
4708 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4709 {
4710 /* Get the closest guest instruction (from above) */
4711 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4712 if ( pGuestToPatchRec
4713 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage)
4714 )
4715 {
4716 /* Code in page really patched -> add record */
4717 rc = patmAddPatchToPage(pVM, pPage, pPatch);
4718 AssertRC(rc);
4719 }
4720 }
4721 pPatch->flags |= PATMFL_CODE_MONITORED;
4722 return VINF_SUCCESS;
4723}
4724
4725/**
4726 * Remove page records for all guest pages that contain instructions that were recompiled for this patch
4727 *
4728 * @returns VBox status code.
4729 * @param pVM The VM to operate on.
4730 * @param pPatch Patch record
4731 */
4732int patmRemovePatchPages(PVM pVM, PPATCHINFO pPatch)
4733{
4734 int rc;
4735 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4736
4737 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4738 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4739 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4740
4741 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4742 {
4743 /* Get the closest guest instruction (from above) */
4744 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4745 if ( pGuestToPatchRec
4746 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage) /** @todo bird: PAGE_ADDRESS is for the current context really. check out these. */
4747 )
4748 {
4749 /* Code in page really patched -> remove record */
4750 rc = patmRemovePatchFromPage(pVM, pPage, pPatch);
4751 AssertRC(rc);
4752 }
4753 }
4754 pPatch->flags &= ~PATMFL_CODE_MONITORED;
4755 return VINF_SUCCESS;
4756}
4757
4758/**
4759 * Notifies PATM about a (potential) write to code that has been patched.
4760 *
4761 * @returns VBox status code.
4762 * @param pVM The VM to operate on.
4763 * @param GCPtr GC pointer to write address
4764 * @param cbWrite Nr of bytes to write
4765 *
4766 */
4767VMMR3DECL(int) PATMR3PatchWrite(PVM pVM, RTRCPTR GCPtr, uint32_t cbWrite)
4768{
4769 RTRCUINTPTR pWritePageStart, pWritePageEnd, pPage;
4770
4771 Log(("PATMR3PatchWrite %RRv %x\n", GCPtr, cbWrite));
4772
4773 Assert(VM_IS_EMT(pVM));
4774
4775 /* Quick boundary check */
4776 if ( GCPtr < pVM->patm.s.pPatchedInstrGCLowest
4777 || GCPtr > pVM->patm.s.pPatchedInstrGCHighest
4778 )
4779 return VINF_SUCCESS;
4780
4781 STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWrite, a);
4782
4783 pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
4784 pWritePageEnd = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;
4785
4786 for (pPage = pWritePageStart; pPage <= pWritePageEnd; pPage += PAGE_SIZE)
4787 {
4788loop_start:
4789 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4790 if (pPatchPage)
4791 {
4792 uint32_t i;
4793 bool fValidPatchWrite = false;
4794
4795 /* Quick check to see if the write is in the patched part of the page */
4796 if ( pPatchPage->pLowestAddrGC > (RTRCPTR)((RTRCUINTPTR)GCPtr + cbWrite - 1)
4797 || pPatchPage->pHighestAddrGC < GCPtr)
4798 {
4799 break;
4800 }
4801
4802 for (i=0;i<pPatchPage->cCount;i++)
4803 {
4804 if (pPatchPage->aPatch[i])
4805 {
4806 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4807 RTRCPTR pPatchInstrGC;
4808 //unused: bool fForceBreak = false;
4809
4810 Assert(pPatchPage->aPatch[i]->flags & PATMFL_CODE_MONITORED);
4811 /** @todo inefficient and includes redundant checks for multiple pages. */
4812 for (uint32_t j=0; j<cbWrite; j++)
4813 {
4814 RTRCPTR pGuestPtrGC = (RTRCPTR)((RTRCUINTPTR)GCPtr + j);
4815
4816 if ( pPatch->cbPatchJump
4817 && pGuestPtrGC >= pPatch->pPrivInstrGC
4818 && pGuestPtrGC < pPatch->pPrivInstrGC + pPatch->cbPatchJump)
4819 {
4820 /* The guest is about to overwrite the 5 byte jump to patch code. Remove the patch. */
4821 Log(("PATMR3PatchWrite: overwriting jump to patch code -> remove patch.\n"));
4822 int rc = PATMR3RemovePatch(pVM, pPatch->pPrivInstrGC);
4823 if (rc == VINF_SUCCESS)
4824 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4825 goto loop_start;
4826
4827 continue;
4828 }
4829
4830 /* Find the closest instruction from below; the above quick check ensured that we are indeed in patched code */
4831 pPatchInstrGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4832 if (!pPatchInstrGC)
4833 {
4834 RTRCPTR pClosestInstrGC;
4835 uint32_t size;
4836
4837 pPatchInstrGC = patmGuestGCPtrToClosestPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4838 if (pPatchInstrGC)
4839 {
4840 pClosestInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchInstrGC);
4841 Assert(pClosestInstrGC <= pGuestPtrGC);
4842 size = patmGetInstrSize(pVM, pPatch, pClosestInstrGC);
4843 /* Check if this is not a write into a gap between two patches */
4844 if (pClosestInstrGC + size - 1 < pGuestPtrGC)
4845 pPatchInstrGC = 0;
4846 }
4847 }
4848 if (pPatchInstrGC)
4849 {
4850 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
4851
4852 fValidPatchWrite = true;
4853
4854 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
4855 Assert(pPatchToGuestRec);
4856 if (pPatchToGuestRec && !pPatchToGuestRec->fDirty)
4857 {
4858 Log(("PATMR3PatchWrite: Found patched instruction %RRv -> %RRv\n", pGuestPtrGC, pPatchInstrGC));
4859
4860 if (++pPatch->cCodeWrites > PATM_MAX_CODE_WRITES)
4861 {
4862 LogRel(("PATM: Disable block at %RRv - write %RRv-%RRv\n", pPatch->pPrivInstrGC, pGuestPtrGC, pGuestPtrGC+cbWrite));
4863
4864 PATMR3MarkDirtyPatch(pVM, pPatch);
4865
4866 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4867 goto loop_start;
4868 }
4869 else
4870 {
4871 /* Replace the patch instruction with a breakpoint; when it's hit, then we'll attempt to recompile the instruction again. */
4872 uint8_t *pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pPatchInstrGC);
4873
4874 pPatchToGuestRec->u8DirtyOpcode = *pInstrHC;
4875 pPatchToGuestRec->fDirty = true;
4876
4877 *pInstrHC = 0xCC;
4878
4879 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirty);
4880 }
4881 }
4882 /* else already marked dirty */
4883 }
4884 }
4885 }
4886 } /* for each patch */
4887
4888 if (fValidPatchWrite == false)
4889 {
4890 /* Write to a part of the page that either:
4891 * - doesn't contain any code (shared code/data); rather unlikely
4892 * - old code page that's no longer in active use.
4893 */
4894invalid_write_loop_start:
4895 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4896
4897 if (pPatchPage)
4898 {
4899 for (i=0;i<pPatchPage->cCount;i++)
4900 {
4901 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4902
4903 if (pPatch->cInvalidWrites > PATM_MAX_INVALID_WRITES)
4904 {
4905 /* Note: possibly dangerous assumption that all future writes will be harmless. */
4906 if (pPatch->flags & PATMFL_IDTHANDLER)
4907 {
4908 LogRel(("PATM: Stop monitoring IDT handler pages at %RRv - invalid write %RRv-%RRv (this is not a fatal error)\n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
4909
4910 Assert(pPatch->flags & PATMFL_CODE_MONITORED);
4911 int rc = patmRemovePatchPages(pVM, pPatch);
4912 AssertRC(rc);
4913 }
4914 else
4915 {
4916 LogRel(("PATM: Disable block at %RRv - invalid write %RRv-%RRv \n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
4917 PATMR3MarkDirtyPatch(pVM, pPatch);
4918 }
4919 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4920 goto invalid_write_loop_start;
4921 }
4922 } /* for */
4923 }
4924 }
4925 }
4926 }
4927 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWrite, a);
4928 return VINF_SUCCESS;
4929
4930}
4931
4932/**
4933 * Disable all patches in a flushed page
4934 *
4935 * @returns VBox status code
4936 * @param pVM The VM to operate on.
4937 * @param addr GC address of the page to flush
4938 */
4939/** @note Currently only called by CSAMR3FlushPage; optimization to avoid having to double check if the physical address has changed
4940 */
4941VMMR3DECL(int) PATMR3FlushPage(PVM pVM, RTRCPTR addr)
4942{
4943 addr &= PAGE_BASE_GC_MASK;
4944
4945 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, addr);
4946 if (pPatchPage)
4947 {
4948 int i;
4949
4950 /* From top to bottom as the array is modified by PATMR3MarkDirtyPatch. */
4951 for (i=(int)pPatchPage->cCount-1;i>=0;i--)
4952 {
4953 if (pPatchPage->aPatch[i])
4954 {
4955 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4956
4957 Log(("PATMR3FlushPage %RRv remove patch at %RRv\n", addr, pPatch->pPrivInstrGC));
4958 PATMR3MarkDirtyPatch(pVM, pPatch);
4959 }
4960 }
4961 STAM_COUNTER_INC(&pVM->patm.s.StatFlushed);
4962 }
4963 return VINF_SUCCESS;
4964}
4965
4966/**
4967 * Checks if the instructions at the specified address has been patched already.
4968 *
4969 * @returns boolean, patched or not
4970 * @param pVM The VM to operate on.
4971 * @param pInstrGC Guest context pointer to instruction
4972 */
4973VMMR3DECL(bool) PATMR3HasBeenPatched(PVM pVM, RTRCPTR pInstrGC)
4974{
4975 PPATMPATCHREC pPatchRec;
4976 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4977 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
4978 return true;
4979 return false;
4980}
4981
4982/**
4983 * Query the opcode of the original code that was overwritten by the 5 bytes patch jump
4984 *
4985 * @returns VBox status code.
4986 * @param pVM The VM to operate on.
4987 * @param pInstrGC GC address of instr
4988 * @param pByte opcode byte pointer (OUT)
4989 *
4990 */
4991VMMR3DECL(int) PATMR3QueryOpcode(PVM pVM, RTRCPTR pInstrGC, uint8_t *pByte)
4992{
4993 PPATMPATCHREC pPatchRec;
4994
4995 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
4996
4997 /* Shortcut. */
4998 if ( !PATMIsEnabled(pVM)
4999 || pInstrGC < pVM->patm.s.pPatchedInstrGCLowest
5000 || pInstrGC > pVM->patm.s.pPatchedInstrGCHighest)
5001 {
5002 return VERR_PATCH_NOT_FOUND;
5003 }
5004
5005 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5006 // if the patch is enabled and the pointer lies within 5 bytes of this priv instr ptr, then we've got a hit!
5007 if ( pPatchRec
5008 && pPatchRec->patch.uState == PATCH_ENABLED
5009 && pInstrGC >= pPatchRec->patch.pPrivInstrGC
5010 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5011 {
5012 RTRCPTR offset = pInstrGC - pPatchRec->patch.pPrivInstrGC;
5013 *pByte = pPatchRec->patch.aPrivInstr[offset];
5014
5015 if (pPatchRec->patch.cbPatchJump == 1)
5016 {
5017 Log(("PATMR3QueryOpcode: returning opcode %2X for instruction at %RRv\n", *pByte, pInstrGC));
5018 }
5019 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5020 return VINF_SUCCESS;
5021 }
5022 return VERR_PATCH_NOT_FOUND;
5023}
5024
5025/**
5026 * Disable patch for privileged instruction at specified location
5027 *
5028 * @returns VBox status code.
5029 * @param pVM The VM to operate on.
5030 * @param pInstr Guest context point to privileged instruction
5031 *
5032 * @note returns failure if patching is not allowed or possible
5033 *
5034 */
5035VMMR3DECL(int) PATMR3DisablePatch(PVM pVM, RTRCPTR pInstrGC)
5036{
5037 PPATMPATCHREC pPatchRec;
5038 PPATCHINFO pPatch;
5039
5040 Log(("PATMR3DisablePatch: %RRv\n", pInstrGC));
5041 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5042 if (pPatchRec)
5043 {
5044 int rc = VINF_SUCCESS;
5045
5046 pPatch = &pPatchRec->patch;
5047
5048 /* Already disabled? */
5049 if (pPatch->uState == PATCH_DISABLED)
5050 return VINF_SUCCESS;
5051
5052 /* Clear the IDT entries for the patch we're disabling. */
5053 /* Note: very important as we clear IF in the patch itself */
5054 /** @todo this needs to be changed */
5055 if (pPatch->flags & PATMFL_IDTHANDLER)
5056 {
5057 uint32_t iGate;
5058
5059 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
5060 if (iGate != (uint32_t)~0)
5061 {
5062 TRPMR3SetGuestTrapHandler(pVM, iGate, TRPM_INVALID_HANDLER);
5063 if (++cIDTHandlersDisabled < 256)
5064 LogRel(("PATM: Disabling IDT %x patch handler %RRv\n", iGate, pInstrGC));
5065 }
5066 }
5067
5068 /* Mark the entry with a breakpoint in case somebody else calls it later on (cli patch used as a function, function, trampoline or idt patches) */
5069 if ( pPatch->pPatchBlockOffset
5070 && pPatch->uState == PATCH_ENABLED)
5071 {
5072 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5073 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5074 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5075 }
5076
5077 /* IDT or function patches haven't changed any guest code. */
5078 if (pPatch->flags & PATMFL_PATCHED_GUEST_CODE)
5079 {
5080 Assert(pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP);
5081 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK)));
5082
5083 if (pPatch->uState != PATCH_REFUSED)
5084 {
5085 uint8_t temp[16];
5086
5087 Assert(pPatch->cbPatchJump < sizeof(temp));
5088
5089 /* Let's first check if the guest code is still the same. */
5090 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5091 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5092 if (rc == VINF_SUCCESS)
5093 {
5094 RTRCINTPTR displ = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);
5095
5096 if ( temp[0] != 0xE9 /* jmp opcode */
5097 || *(RTRCINTPTR *)(&temp[1]) != displ
5098 )
5099 {
5100 Log(("PATMR3DisablePatch: Can't disable a patch who's guest code has changed!!\n"));
5101 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5102 /* Remove it completely */
5103 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5104 rc = PATMR3RemovePatch(pVM, pInstrGC);
5105 AssertRC(rc);
5106 return VWRN_PATCH_REMOVED;
5107 }
5108 patmRemoveJumpToPatch(pVM, pPatch);
5109 }
5110 else
5111 {
5112 Log(("PATMR3DisablePatch: unable to disable patch -> mark PATCH_DISABLE_PENDING\n"));
5113 pPatch->uState = PATCH_DISABLE_PENDING;
5114 }
5115 }
5116 else
5117 {
5118 AssertMsgFailed(("Patch was refused!\n"));
5119 return VERR_PATCH_ALREADY_DISABLED;
5120 }
5121 }
5122 else
5123 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5124 {
5125 uint8_t temp[16];
5126
5127 Assert(pPatch->cbPatchJump < sizeof(temp));
5128
5129 /* Let's first check if the guest code is still the same. */
5130 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5131 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5132 if (rc == VINF_SUCCESS)
5133 {
5134 if (temp[0] != 0xCC)
5135 {
5136 Log(("PATMR3DisablePatch: Can't disable a patch who's guest code has changed!!\n"));
5137 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5138 /* Remove it completely */
5139 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5140 rc = PATMR3RemovePatch(pVM, pInstrGC);
5141 AssertRC(rc);
5142 return VWRN_PATCH_REMOVED;
5143 }
5144 patmDeactivateInt3Patch(pVM, pPatch);
5145 }
5146 }
5147
5148 if (rc == VINF_SUCCESS)
5149 {
5150 /* Save old state and mark this one as disabled (so it can be enabled later on). */
5151 if (pPatch->uState == PATCH_DISABLE_PENDING)
5152 {
5153 /* Just to be safe, let's make sure this one can never be reused; the patch might be marked dirty already (int3 at start) */
5154 pPatch->uState = PATCH_UNUSABLE;
5155 }
5156 else
5157 if (pPatch->uState != PATCH_DIRTY)
5158 {
5159 pPatch->uOldState = pPatch->uState;
5160 pPatch->uState = PATCH_DISABLED;
5161 }
5162 STAM_COUNTER_ADD(&pVM->patm.s.StatDisabled, 1);
5163 }
5164
5165 Log(("PATMR3DisablePatch: disabled patch at %RRv\n", pInstrGC));
5166 return VINF_SUCCESS;
5167 }
5168 Log(("Patch not found!\n"));
5169 return VERR_PATCH_NOT_FOUND;
5170}
5171
5172/**
5173 * Permanently disable patch for privileged instruction at specified location
5174 *
5175 * @returns VBox status code.
5176 * @param pVM The VM to operate on.
5177 * @param pInstr Guest context instruction pointer
5178 * @param pConflictAddr Guest context pointer which conflicts with specified patch
5179 * @param pConflictPatch Conflicting patch
5180 *
5181 */
5182static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pConflictPatch)
5183{
5184#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
5185 PATCHINFO patch;
5186 DISCPUSTATE cpu;
5187 R3PTRTYPE(uint8_t *) pInstrHC;
5188 uint32_t opsize;
5189 bool disret;
5190 int rc;
5191
5192 RT_ZERO(patch);
5193 pInstrHC = PATMGCVirtToHCVirt(pVM, &patch, pInstrGC);
5194 cpu.mode = (pConflictPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
5195 disret = PATMR3DISInstr(pVM, &patch, &cpu, pInstrGC, pInstrHC, &opsize, NULL);
5196 /*
5197 * If it's a 5 byte relative jump, then we can work around the problem by replacing the 32 bits relative offset
5198 * with one that jumps right into the conflict patch.
5199 * Otherwise we must disable the conflicting patch to avoid serious problems.
5200 */
5201 if ( disret == true
5202 && (pConflictPatch->flags & PATMFL_CODE32)
5203 && (cpu.pCurInstr->opcode == OP_JMP || (cpu.pCurInstr->optype & OPTYPE_COND_CONTROLFLOW))
5204 && (cpu.param1.flags & USE_IMMEDIATE32_REL))
5205 {
5206 /* Hint patches must be enabled first. */
5207 if (pConflictPatch->flags & PATMFL_INSTR_HINT)
5208 {
5209 Log(("Enabling HINTED patch %RRv\n", pConflictPatch->pPrivInstrGC));
5210 pConflictPatch->flags &= ~PATMFL_INSTR_HINT;
5211 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5212 Assert(rc == VINF_SUCCESS || rc == VERR_PATCH_NOT_FOUND);
5213 /* Enabling might fail if the patched code has changed in the meantime. */
5214 if (rc != VINF_SUCCESS)
5215 return rc;
5216 }
5217
5218 rc = PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_JUMP_CONFLICT);
5219 if (RT_SUCCESS(rc))
5220 {
5221 Log(("PATM -> CONFLICT: Installed JMP patch for patch conflict at %RRv\n", pInstrGC));
5222 STAM_COUNTER_INC(&pVM->patm.s.StatFixedConflicts);
5223 return VINF_SUCCESS;
5224 }
5225 }
5226#endif
5227
5228 if (pConflictPatch->opcode == OP_CLI)
5229 {
5230 /* Turn it into an int3 patch; our GC trap handler will call the generated code manually. */
5231 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> turn into int 3 patch!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5232 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5233 if (rc == VWRN_PATCH_REMOVED)
5234 return VINF_SUCCESS;
5235 if (RT_SUCCESS(rc))
5236 {
5237 pConflictPatch->flags &= ~(PATMFL_MUST_INSTALL_PATCHJMP|PATMFL_INSTR_HINT);
5238 pConflictPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
5239 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5240 if (rc == VERR_PATCH_NOT_FOUND)
5241 return VINF_SUCCESS; /* removed already */
5242
5243 AssertRC(rc);
5244 if (RT_SUCCESS(rc))
5245 {
5246 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
5247 return VINF_SUCCESS;
5248 }
5249 }
5250 /* else turned into unusable patch (see below) */
5251 }
5252 else
5253 {
5254 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> DISABLING it!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5255 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5256 if (rc == VWRN_PATCH_REMOVED)
5257 return VINF_SUCCESS;
5258 }
5259
5260 /* No need to monitor the code anymore. */
5261 if (pConflictPatch->flags & PATMFL_CODE_MONITORED)
5262 {
5263 int rc = patmRemovePatchPages(pVM, pConflictPatch);
5264 AssertRC(rc);
5265 }
5266 pConflictPatch->uState = PATCH_UNUSABLE;
5267 STAM_COUNTER_INC(&pVM->patm.s.StatUnusable);
5268 return VERR_PATCH_DISABLED;
5269}
5270
/**
 * Enable patch for privileged instruction at specified location
 *
 * A disabled patch is re-armed by re-writing either the patch jump or the
 * int3 replacement into the guest code (after verifying the guest code has
 * not changed), then restoring the saved patch state.
 *
 * @returns VBox status code.
 * @param pVM The VM to operate on.
 * @param pInstrGC Guest context pointer to the privileged instruction.
 *
 * @note returns failure if patching is not allowed or possible
 *
 */
VMMR3DECL(int) PATMR3EnablePatch(PVM pVM, RTRCPTR pInstrGC)
{
    PPATMPATCHREC pPatchRec;
    PPATCHINFO pPatch;

    Log(("PATMR3EnablePatch %RRv\n", pInstrGC));
    /* Exact lookup by the original guest instruction address. */
    pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
    if (pPatchRec)
    {
        int rc = VINF_SUCCESS;

        pPatch = &pPatchRec->patch;

        /* Only disabled patches can be (re)enabled; anything else is just logged. */
        if (pPatch->uState == PATCH_DISABLED)
        {
            if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
            {
                /* Case 1: the patch is activated by writing a jump into the guest code. */
                Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
                uint8_t temp[16];

                Assert(pPatch->cbPatchJump < sizeof(temp));

                /* Let's first check if the guest code is still the same. */
                int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
                AssertRC(rc2);
                if (rc2 == VINF_SUCCESS)
                {
                    if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
                    {
                        /* Guest overwrote the instruction; the patch is useless now. */
                        Log(("PATMR3EnablePatch: Can't enable a patch who's guest code has changed!!\n"));
                        STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
                        /* Remove it completely */
                        rc = PATMR3RemovePatch(pVM, pInstrGC);
                        AssertRC(rc);
                        return VERR_PATCH_NOT_FOUND;
                    }

                    PATMP2GLOOKUPREC cacheRec;
                    RT_ZERO(cacheRec);
                    cacheRec.pPatch = pPatch;

                    /* Re-write the jump to the patch block into the guest code. */
                    rc2 = patmGenJumpToPatch(pVM, pPatch, &cacheRec, false);
                    /* Free leftover lock if any. */
                    if (cacheRec.Lock.pvMap)
                    {
                        PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
                        cacheRec.Lock.pvMap = NULL;
                    }
                    AssertRC(rc2);
                    if (RT_FAILURE(rc2))
                        return rc2;

#ifdef DEBUG
                    {
                        /* Disassemble and log the freshly written patch jump. */
                        DISCPUSTATE cpu;
                        char szOutput[256];
                        uint32_t opsize, i = 0;
                        bool disret;
                        i = 0;
                        while(i < pPatch->cbPatchJump)
                        {
                            cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
                            disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, NULL, &opsize, szOutput);
                            Log(("Renewed patch instr: %s", szOutput));
                            i += opsize;
                        }
                    }
#endif
                }
            }
            else
            if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
            {
                /* Case 2: the patch is activated by writing an int3 over the guest instruction. */
                uint8_t temp[16];

                Assert(pPatch->cbPatchJump < sizeof(temp));

                /* Let's first check if the guest code is still the same. */
                int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
                AssertRC(rc2);
                /* NOTE(review): unlike the patch-jump case above, a failing read here
                   (rc2 != VINF_SUCCESS) still falls through to the memcmp against the
                   uninitialized 'temp' buffer - confirm this is intended. */

                if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
                {
                    Log(("PATMR3EnablePatch: Can't enable a patch who's guest code has changed!!\n"));
                    STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
                    rc = PATMR3RemovePatch(pVM, pInstrGC);
                    AssertRC(rc);
                    return VERR_PATCH_NOT_FOUND;
                }

                rc2 = patmActivateInt3Patch(pVM, pPatch);
                if (RT_FAILURE(rc2))
                    return rc2;
            }

            pPatch->uState = pPatch->uOldState; //restore state

            /* Restore the entry breakpoint with the original opcode (see PATMR3DisablePatch). */
            if (pPatch->pPatchBlockOffset)
                *PATCHCODE_PTR_HC(pPatch) = pPatch->bDirtyOpcode;

            STAM_COUNTER_ADD(&pVM->patm.s.StatEnabled, 1);
        }
        else
            Log(("PATMR3EnablePatch: Unable to enable patch %RRv with state %d\n", pInstrGC, pPatch->uState));

        return rc;
    }
    return VERR_PATCH_NOT_FOUND;
}
5391
/**
 * Remove patch for privileged instruction at specified location
 *
 * Tears down all bookkeeping for the patch (patch-address tree entry, page
 * monitoring, statistics, lookup/fixup trees) and frees the patch record.
 * The patch memory itself is never reclaimed (see file header note).
 *
 * @returns VBox status code.
 * @param pVM The VM to operate on.
 * @param pPatchRec Patch record
 * @param fForceRemove Remove *all* patches (also referenced/duplicated ones)
 */
int PATMRemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove)
{
    PPATCHINFO pPatch;

    pPatch = &pPatchRec->patch;

    /* Strictly forbidden to remove such patches. There can be dependencies!! */
    if (!fForceRemove && (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED)))
    {
        Log(("PATMRemovePatch %RRv REFUSED!\n", pPatch->pPrivInstrGC));
        return VERR_ACCESS_DENIED;
    }
    Log(("PATMRemovePatch %RRv\n", pPatch->pPrivInstrGC));

    /* Note: NEVER EVER REUSE PATCH MEMORY */
    /* Note: PATMR3DisablePatch puts a breakpoint (0xCC) at the entry of this patch */

    /* Drop the patch-address -> record mapping if a patch block was allocated. */
    if (pPatchRec->patch.pPatchBlockOffset)
    {
        PAVLOU32NODECORE pNode;

        pNode = RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->patch.pPatchBlockOffset);
        Assert(pNode);
    }

    /* Stop monitoring the patched pages for (guest) code writes. */
    if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
    {
        int rc = patmRemovePatchPages(pVM, &pPatchRec->patch);
        AssertRC(rc);
    }

#ifdef VBOX_WITH_STATISTICS
    /* Deregister all per-patch statistics samples. */
    if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
    {
        STAMR3Deregister(pVM, &pPatchRec->patch);
#ifndef DEBUG_sandervl
        STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
        STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
        STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
        STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
        STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
        STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
        STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
        STAMR3Deregister(pVM, &pPatchRec->patch.flags);
        STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
        STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
        STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
        STAMR3Deregister(pVM, &pPatchRec->patch.uState);
        STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
        STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
#endif
    }
#endif

    /* Note: no need to free Guest2PatchAddrTree as those records share memory with Patch2GuestAddrTree records. */
    patmEmptyTreeU32(pVM, &pPatch->Patch2GuestAddrTree);
    pPatch->nrPatch2GuestRecs = 0;
    Assert(pPatch->Patch2GuestAddrTree == 0);

    patmEmptyTree(pVM, &pPatch->FixupTree);
    pPatch->nrFixups = 0;
    Assert(pPatch->FixupTree == 0);

    if (pPatchRec->patch.pTempInfo)
        MMR3HeapFree(pVM, pPatchRec->patch.pTempInfo);

    /* Note: might fail, because it has already been removed (e.g. during reset). */
    RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);

    /* Free the patch record */
    MMHyperFree(pVM, pPatchRec);
    return VINF_SUCCESS;
}
5473
5474/**
5475 * RTAvlU32DoWithAll() worker.
5476 * Checks whether the current trampoline instruction is the jump to the target patch
5477 * and updates the displacement to jump to the new target.
5478 *
5479 * @returns VBox status code.
5480 * @retval VERR_ALREADY_EXISTS if the jump was found.
5481 * @param pNode The current patch to guest record to check.
5482 * @param pvUser The refresh state.
5483 */
5484static int patmR3PatchRefreshFindTrampolinePatch(PAVLU32NODECORE pNode, void *pvUser)
5485{
5486 PRECPATCHTOGUEST pPatch2GuestRec = (PRECPATCHTOGUEST)pNode;
5487 PPATMREFRESHPATCH pRefreshPatchState = (PPATMREFRESHPATCH)pvUser;
5488 PVM pVM = pRefreshPatchState->pVM;
5489
5490 uint8_t *pPatchInstr = (uint8_t *)(pVM->patm.s.pPatchMemHC + pPatch2GuestRec->Core.Key);
5491
5492 /*
5493 * Check if the patch instruction starts with a jump.
5494 * ASSUMES that there is no other patch to guest record that starts
5495 * with a jump.
5496 */
5497 if (*pPatchInstr == 0xE9)
5498 {
5499 /* Jump found, update the displacement. */
5500 RTRCPTR pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pRefreshPatchState->pPatchRec,
5501 pRefreshPatchState->pPatchTrampoline->pPrivInstrGC);
5502 int32_t displ = pPatchTargetGC - (pVM->patm.s.pPatchMemGC + pPatch2GuestRec->Core.Key + SIZEOF_NEARJUMP32);
5503
5504 LogFlow(("Updating trampoline patch new patch target %RRv, new displacment %d (old was %d)\n",
5505 pPatchTargetGC, displ, *(uint32_t *)&pPatchInstr[1]));
5506
5507 *(uint32_t *)&pPatchInstr[1] = displ;
5508 return VERR_ALREADY_EXISTS; /** @todo better return code */
5509 }
5510
5511 return VINF_SUCCESS;
5512}
5513
/**
 * Attempt to refresh the patch by recompiling its entire code block
 *
 * Disables the old patch, installs a freshly recompiled one for the same
 * guest instruction, redirects the old patch block (and any recorded
 * trampoline patches) to the new block, and finally removes the old record.
 * On failure the old patch is re-inserted and re-enabled.
 *
 * @returns VBox status code.
 * @param pVM The VM to operate on.
 * @param pPatchRec Patch record
 */
int patmR3RefreshPatch(PVM pVM, PPATMPATCHREC pPatchRec)
{
    PPATCHINFO pPatch;
    int rc;
    RTRCPTR pInstrGC = pPatchRec->patch.pPrivInstrGC;
    PTRAMPREC pTrampolinePatchesHead = NULL;

    Log(("patmR3RefreshPatch: attempt to refresh patch at %RRv\n", pInstrGC));

    pPatch = &pPatchRec->patch;
    /* Only these patch types may be refreshed. */
    AssertReturn(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER), VERR_PATCHING_REFUSED);
    if (pPatch->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
    {
        if (!pPatch->pTrampolinePatchesHead)
        {
            /*
             * It is sometimes possible that there are trampoline patches to this patch
             * but they are not recorded (after a saved state load for example).
             * Refuse to refresh those patches.
             * Can hurt performance in theory if the patched code is modified by the guest
             * and is executed often. However most of the time states are saved after the guest
             * code was modified and is not updated anymore afterwards so this shouldn't be a
             * big problem.
             */
            Log(("patmR3RefreshPatch: refused because external jumps to this patch exist but the jumps are not recorded\n"));
            return VERR_PATCHING_REFUSED;
        }
        Log(("patmR3RefreshPatch: external jumps to this patch exist, updating\n"));
        pTrampolinePatchesHead = pPatch->pTrampolinePatchesHead;
    }

    /** Note: quite ugly to enable/disable/remove/insert old and new patches, but there's no easy way around it. */

    rc = PATMR3DisablePatch(pVM, pInstrGC);
    AssertRC(rc);

    /** Kick it out of the lookup tree to make sure PATMR3InstallPatch doesn't fail (hack alert) */
    RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
#ifdef VBOX_WITH_STATISTICS
    /* Drop the old patch's statistics samples; the new patch registers its own. */
    if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
    {
        STAMR3Deregister(pVM, &pPatchRec->patch);
#ifndef DEBUG_sandervl
        STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
        STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
        STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
        STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
        STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
        STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
        STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
        STAMR3Deregister(pVM, &pPatchRec->patch.flags);
        STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
        STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
        STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
        STAMR3Deregister(pVM, &pPatchRec->patch.uState);
        STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
        STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
#endif
    }
#endif

    /** Note: We don't attempt to reuse patch memory here as it's quite common that the new code block requires more memory. */

    /* Attempt to install a new patch. */
    rc = PATMR3InstallPatch(pVM, pInstrGC, pPatch->flags & (PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT));
    if (RT_SUCCESS(rc))
    {
        RTRCPTR pPatchTargetGC;
        PPATMPATCHREC pNewPatchRec;

        /* Determine target address in new patch */
        pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pInstrGC);
        Assert(pPatchTargetGC);
        if (!pPatchTargetGC)
        {
            rc = VERR_PATCHING_REFUSED;
            goto failure;
        }

        /* Reset offset into patch memory to put the next code blocks right at the beginning. */
        pPatch->uCurPatchOffset = 0;

        /* insert jump to new patch in old patch block */
        rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC, false /* no lookup record */);
        if (RT_FAILURE(rc))
            goto failure;

        pNewPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
        Assert(pNewPatchRec); /* can't fail */

        /* Remove old patch (only do that when everything is finished) */
        int rc2 = PATMRemovePatch(pVM, pPatchRec, true /* force removal */);
        AssertRC(rc2);

        /* Put the new patch back into the tree, because removing the old one kicked this one out. (hack alert) */
        RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pNewPatchRec->Core);

        LogRel(("PATM: patmR3RefreshPatch: succeeded to refresh patch at %RRv \n", pInstrGC));
        STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshSuccess);

        /* Used by another patch, so don't remove it! */
        pNewPatchRec->patch.flags |= PATMFL_CODE_REFERENCED;

        if (pTrampolinePatchesHead)
        {
            /* Update all trampoline patches to jump to the new patch. */
            PTRAMPREC pTrampRec = NULL;
            PATMREFRESHPATCH RefreshPatch;

            RefreshPatch.pVM = pVM;
            RefreshPatch.pPatchRec = &pNewPatchRec->patch;

            pTrampRec = pTrampolinePatchesHead;

            while (pTrampRec)
            {
                PPATCHINFO pPatchTrampoline = &pTrampRec->pPatchTrampoline->patch;

                RefreshPatch.pPatchTrampoline = pPatchTrampoline;
                /*
                 * We have to find the right patch2guest record because there might be others
                 * for statistics.
                 */
                rc = RTAvlU32DoWithAll(&pPatchTrampoline->Patch2GuestAddrTree, true,
                                       patmR3PatchRefreshFindTrampolinePatch, &RefreshPatch);
                Assert(rc == VERR_ALREADY_EXISTS);
                rc = VINF_SUCCESS;
                pTrampRec = pTrampRec->pNext;
            }
            /* Transfer the trampoline list to the new patch record. */
            pNewPatchRec->patch.pTrampolinePatchesHead = pTrampolinePatchesHead;
            pNewPatchRec->patch.flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
            /* Clear the list of trampoline patches for the old patch (safety precaution). */
            pPatchRec->patch.pTrampolinePatchesHead = NULL;
        }
    }

failure:
    if (RT_FAILURE(rc))
    {
        LogRel(("PATM: patmR3RefreshPatch: failed to refresh patch at %RRv. Reactiving old one. \n", pInstrGC));

        /* Remove the new inactive patch */
        rc = PATMR3RemovePatch(pVM, pInstrGC);
        AssertRC(rc);

        /* Put the old patch back into the tree (or else it won't be saved) (hack alert) */
        RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);

        /* Enable again in case the dirty instruction is near the end and there are safe code paths. */
        int rc2 = PATMR3EnablePatch(pVM, pInstrGC);
        AssertRC(rc2);

        STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshFailed);
    }
    return rc;
}
5677
5678/**
5679 * Find patch for privileged instruction at specified location
5680 *
5681 * @returns Patch structure pointer if found; else NULL
5682 * @param pVM The VM to operate on.
5683 * @param pInstr Guest context point to instruction that might lie within 5 bytes of an existing patch jump
5684 * @param fIncludeHints Include hinted patches or not
5685 *
5686 */
5687PPATCHINFO PATMFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncludeHints)
5688{
5689 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5690 /* if the patch is enabled, the pointer is not identical to the privileged patch ptr and it lies within 5 bytes of this priv instr ptr, then we've got a hit! */
5691 if (pPatchRec)
5692 {
5693 if ( pPatchRec->patch.uState == PATCH_ENABLED
5694 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE)
5695 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5696 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5697 {
5698 Log(("Found active patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5699 return &pPatchRec->patch;
5700 }
5701 else
5702 if ( fIncludeHints
5703 && pPatchRec->patch.uState == PATCH_DISABLED
5704 && (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
5705 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5706 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5707 {
5708 Log(("Found HINT patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5709 return &pPatchRec->patch;
5710 }
5711 }
5712 return NULL;
5713}
5714
5715/**
5716 * Checks whether the GC address is inside a generated patch jump
5717 *
5718 * @returns true -> yes, false -> no
5719 * @param pVM The VM to operate on.
5720 * @param pAddr Guest context address
5721 * @param pPatchAddr Guest context patch address (if true)
5722 */
5723VMMR3DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatchAddr)
5724{
5725 RTRCPTR addr;
5726 PPATCHINFO pPatch;
5727
5728 if (PATMIsEnabled(pVM) == false)
5729 return false;
5730
5731 if (pPatchAddr == NULL)
5732 pPatchAddr = &addr;
5733
5734 *pPatchAddr = 0;
5735
5736 pPatch = PATMFindActivePatchByEntrypoint(pVM, pAddr);
5737 if (pPatch)
5738 *pPatchAddr = pPatch->pPrivInstrGC;
5739
5740 return *pPatchAddr == 0 ? false : true;
5741}
5742
5743/**
5744 * Remove patch for privileged instruction at specified location
5745 *
5746 * @returns VBox status code.
5747 * @param pVM The VM to operate on.
5748 * @param pInstr Guest context point to privileged instruction
5749 *
5750 * @note returns failure if patching is not allowed or possible
5751 *
5752 */
5753VMMR3DECL(int) PATMR3RemovePatch(PVM pVM, RTRCPTR pInstrGC)
5754{
5755 PPATMPATCHREC pPatchRec;
5756
5757 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5758 if (pPatchRec)
5759 {
5760 int rc = PATMR3DisablePatch(pVM, pInstrGC);
5761 if (rc == VWRN_PATCH_REMOVED)
5762 return VINF_SUCCESS;
5763
5764 return PATMRemovePatch(pVM, pPatchRec, false);
5765 }
5766 AssertFailed();
5767 return VERR_PATCH_NOT_FOUND;
5768}
5769
/**
 * Mark patch as dirty
 *
 * Invalidates the patch by placing an int3 at the patch block entry and
 * restoring the original guest instruction, leaving the patch in
 * PATCH_DIRTY state (re-armed later via PATMR3EnablePatch).
 *
 * @returns VBox status code.
 * @param pVM The VM to operate on.
 * @param pPatch Patch record
 *
 * @note returns failure if patching is not allowed or possible
 *
 */
VMMR3DECL(int) PATMR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch)
{
    if (pPatch->pPatchBlockOffset)
    {
        Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
        /* Save the first opcode byte before overwriting it with int3 so
           PATMR3EnablePatch can restore it later. */
        pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
        *PATCHCODE_PTR_HC(pPatch) = 0xCC;
    }

    STAM_COUNTER_INC(&pVM->patm.s.StatDirty);
    /* Put back the replaced instruction. */
    int rc = PATMR3DisablePatch(pVM, pPatch->pPrivInstrGC);
    if (rc == VWRN_PATCH_REMOVED)
        return VINF_SUCCESS;

    /* Note: we don't restore patch pages for patches that are not enabled! */
    /* Note: be careful when changing this behaviour!! */

    /* The patch pages are no longer marked for self-modifying code detection */
    if (pPatch->flags & PATMFL_CODE_MONITORED)
    {
        rc = patmRemovePatchPages(pVM, pPatch);
        AssertRCReturn(rc, rc);
    }
    pPatch->uState = PATCH_DIRTY;

    /* Paranoia; make sure this patch is not somewhere in the callchain, so prevent ret instructions from succeeding. */
    CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;

    return VINF_SUCCESS;
}
5811
5812/**
5813 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5814 *
5815 * @returns VBox status code.
5816 * @param pVM The VM to operate on.
5817 * @param pPatch Patch block structure pointer
5818 * @param pPatchGC GC address in patch block
5819 */
5820RTRCPTR patmPatchGCPtr2GuestGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pPatchGC)
5821{
5822 Assert(pPatch->Patch2GuestAddrTree);
5823 /* Get the closest record from below. */
5824 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->Patch2GuestAddrTree, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5825 if (pPatchToGuestRec)
5826 return pPatchToGuestRec->pOrgInstrGC;
5827
5828 return 0;
5829}
5830
5831/* Converts Guest code GC ptr to Patch code GC ptr (if found)
5832 *
5833 * @returns corresponding GC pointer in patch block
5834 * @param pVM The VM to operate on.
5835 * @param pPatch Current patch block pointer
5836 * @param pInstrGC Guest context pointer to privileged instruction
5837 *
5838 */
5839RTRCPTR patmGuestGCPtrToPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5840{
5841 if (pPatch->Guest2PatchAddrTree)
5842 {
5843 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
5844 if (pGuestToPatchRec)
5845 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
5846 }
5847
5848 return 0;
5849}
5850
5851/* Converts Guest code GC ptr to Patch code GC ptr (or nearest from below if no identical match)
5852 *
5853 * @returns corresponding GC pointer in patch block
5854 * @param pVM The VM to operate on.
5855 * @param pPatch Current patch block pointer
5856 * @param pInstrGC Guest context pointer to privileged instruction
5857 *
5858 */
5859RTRCPTR patmGuestGCPtrToClosestPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5860{
5861 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pInstrGC, false);
5862 if (pGuestToPatchRec)
5863 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
5864
5865 return 0;
5866}
5867
5868/* Converts Guest code GC ptr to Patch code GC ptr (if found)
5869 *
5870 * @returns corresponding GC pointer in patch block
5871 * @param pVM The VM to operate on.
5872 * @param pInstrGC Guest context pointer to privileged instruction
5873 *
5874 */
5875VMMR3DECL(RTRCPTR) PATMR3GuestGCPtrToPatchGCPtr(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC)
5876{
5877 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5878 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && pInstrGC >= pPatchRec->patch.pPrivInstrGC)
5879 return patmGuestGCPtrToPatchGCPtr(pVM, &pPatchRec->patch, pInstrGC);
5880 else
5881 return 0;
5882}
5883
/**
 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
 *
 * @returns original GC instruction pointer or 0 if not found
 * @param pVM The VM to operate on.
 * @param pPatchGC GC address in patch block
 * @param pEnmState State of the translated address (out)
 *
 */
VMMR3DECL(RTRCPTR) PATMR3PatchToGCPtr(PVM pVM, RTRCPTR pPatchGC, PATMTRANSSTATE *pEnmState)
{
    PPATMPATCHREC pPatchRec;
    void *pvPatchCoreOffset;
    RTRCPTR pPrivInstrGC;

    Assert(PATMIsPatchGCAddr(pVM, pPatchGC));
    /* Find the patch record owning this patch-memory address (closest record from below). */
    pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchGC - pVM->patm.s.pPatchMemGC, false);
    if (pvPatchCoreOffset == 0)
    {
        Log(("PATMR3PatchToGCPtr failed for %RRv offset %x\n", pPatchGC, pPatchGC - pVM->patm.s.pPatchMemGC));
        return 0;
    }
    pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
    /* Translate patch address back to the original guest instruction. */
    pPrivInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, &pPatchRec->patch, pPatchGC);
    if (pEnmState)
    {
        /* NOTE(review): PATCH_REFUSED is handled below but not listed in this
           assertion - confirm whether that state can actually occur here. */
        AssertMsg(pPrivInstrGC && (   pPatchRec->patch.uState == PATCH_ENABLED
                                   || pPatchRec->patch.uState == PATCH_DIRTY
                                   || pPatchRec->patch.uState == PATCH_DISABLE_PENDING
                                   || pPatchRec->patch.uState == PATCH_UNUSABLE),
                  ("pPrivInstrGC=%RRv uState=%d\n", pPrivInstrGC, pPatchRec->patch.uState));

        /* Classify the translation for the caller. */
        if (    !pPrivInstrGC
            ||  pPatchRec->patch.uState == PATCH_UNUSABLE
            ||  pPatchRec->patch.uState == PATCH_REFUSED)
        {
            pPrivInstrGC = 0;
            *pEnmState = PATMTRANS_FAILED;
        }
        else
        if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pPrivInstrGC)
        {
            /* Resuming here requires the interrupt-inhibit handling to be redone. */
            *pEnmState = PATMTRANS_INHIBITIRQ;
        }
        else
        if (   pPatchRec->patch.uState == PATCH_ENABLED
            && !(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE))
            && pPrivInstrGC > pPatchRec->patch.pPrivInstrGC
            && pPrivInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
        {
            /* Guest address lies inside this patch's own 5-byte patch jump. */
            *pEnmState = PATMTRANS_OVERWRITTEN;
        }
        else
        if (PATMFindActivePatchByEntrypoint(pVM, pPrivInstrGC))
        {
            /* Guest address lies inside some other patch's patch jump. */
            *pEnmState = PATMTRANS_OVERWRITTEN;
        }
        else
        if (pPrivInstrGC == pPatchRec->patch.pPrivInstrGC)
        {
            *pEnmState = PATMTRANS_PATCHSTART;
        }
        else
            *pEnmState = PATMTRANS_SAFE;
    }
    return pPrivInstrGC;
}
5951
5952/**
5953 * Returns the GC pointer of the patch for the specified GC address
5954 *
5955 * @returns VBox status code.
5956 * @param pVM The VM to operate on.
5957 * @param pAddrGC Guest context address
5958 */
5959VMMR3DECL(RTRCPTR) PATMR3QueryPatchGCPtr(PVM pVM, RTRCPTR pAddrGC)
5960{
5961 PPATMPATCHREC pPatchRec;
5962
5963 /* Find the patch record. */
5964 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pAddrGC);
5965 /** @todo we should only use patches that are enabled! always did this, but it's incorrect! */
5966 if (pPatchRec && (pPatchRec->patch.uState == PATCH_ENABLED || pPatchRec->patch.uState == PATCH_DIRTY))
5967 return PATCHCODE_PTR_GC(&pPatchRec->patch);
5968 else
5969 return 0;
5970}
5971
/**
 * Attempt to recover dirty instructions
 *
 * Works in two phases: first all adjacent dirty patch instructions are
 * collected and their original opcode bytes restored; then the current guest
 * instructions are re-disassembled and, if still harmless, copied back into
 * the patch block (padding any leftover room with a jump or NOPs). If
 * recovery fails the whole dirty range is filled with int3 and a patch
 * refresh is attempted for eligible patch types.
 *
 * @returns VBox status code.
 * @param pVM The VM to operate on.
 * @param pCtx CPU context
 * @param pPatch Patch record
 * @param pPatchToGuestRec Patch to guest address record
 * @param pEip GC pointer of trapping instruction
 */
static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch, PRECPATCHTOGUEST pPatchToGuestRec, RTRCPTR pEip)
{
    DISCPUSTATE CpuOld, CpuNew;
    uint8_t *pPatchInstrHC, *pCurPatchInstrHC;
    int rc;
    RTRCPTR pCurInstrGC, pCurPatchInstrGC;
    uint32_t cbDirty;
    PRECPATCHTOGUEST pRec;
    RTRCPTR const pOrgInstrGC = pPatchToGuestRec->pOrgInstrGC;
    PVMCPU pVCpu = VMMGetCpu0(pVM);
    Log(("patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv)\n", pEip, pOrgInstrGC));

    pRec = pPatchToGuestRec;
    pCurInstrGC = pOrgInstrGC;
    pCurPatchInstrGC = pEip;
    cbDirty = 0;
    pPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);

    /* Find all adjacent dirty instructions */
    while (true)
    {
        /* Instructions reused as jump targets cannot be safely rewritten. */
        if (pRec->fJumpTarget)
        {
            LogRel(("PATM: patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv) ignored, because instruction in function was reused as target of jump\n", pEip, pOrgInstrGC));
            pRec->fDirty = false;
            return VERR_PATCHING_REFUSED;
        }

        /* Restore original instruction opcode byte so we can check if the write was indeed safe. */
        pCurPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
        *pCurPatchInstrHC = pRec->u8DirtyOpcode;

        /* Only harmless instructions are acceptable. */
        rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurPatchInstrGC, &CpuOld, 0);
        if (   RT_FAILURE(rc)
            || !(CpuOld.pCurInstr->optype & OPTYPE_HARMLESS))
        {
            /* Still account for the size of this last (rejected) instruction. */
            if (RT_SUCCESS(rc))
                cbDirty += CpuOld.opsize;
            else
            if (!cbDirty)
                cbDirty = 1;
            break;
        }

#ifdef DEBUG
        char szBuf[256];
        DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
                           szBuf, sizeof(szBuf), NULL);
        Log(("DIRTY: %s\n", szBuf));
#endif
        /* Mark as clean; if we fail we'll let it always fault. */
        pRec->fDirty = false;

        /* Remove old lookup record. */
        patmr3RemoveP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrGC);
        pPatchToGuestRec = NULL;

        pCurPatchInstrGC += CpuOld.opsize;
        cbDirty += CpuOld.opsize;

        /* Let's see if there's another dirty instruction right after. */
        pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
        if (!pRec || !pRec->fDirty)
            break;  /* no more dirty instructions */

        /* In case of complex instructions the next guest instruction could be quite far off. */
        pCurPatchInstrGC = pRec->Core.Key + pVM->patm.s.pPatchMemGC;
    }

    /* Phase 2: re-copy the current guest instructions into the dirty range. */
    if (   RT_SUCCESS(rc)
        && (CpuOld.pCurInstr->optype & OPTYPE_HARMLESS)
       )
    {
        uint32_t cbLeft;

        pCurPatchInstrHC = pPatchInstrHC;
        pCurPatchInstrGC = pEip;
        cbLeft = cbDirty;

        while (cbLeft && RT_SUCCESS(rc))
        {
            bool fValidInstr;

            rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurInstrGC, &CpuNew, 0);

            fValidInstr = !!(CpuNew.pCurInstr->optype & OPTYPE_HARMLESS);
            if (    !fValidInstr
                && (CpuNew.pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW)
               )
            {
                RTRCPTR pTargetGC = PATMResolveBranch(&CpuNew, pCurInstrGC);

                if (   pTargetGC >= pOrgInstrGC
                    && pTargetGC <= pOrgInstrGC + cbDirty
                   )
                {
                    /* A relative jump to an instruction inside or to the end of the dirty block is acceptable. */
                    fValidInstr = true;
                }
            }

            /* If the instruction is completely harmless (which implies a 1:1 patch copy). */
            if (   rc == VINF_SUCCESS
                && CpuNew.opsize <= cbLeft /* must still fit */
                && fValidInstr
               )
            {
#ifdef DEBUG
                char szBuf[256];
                DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
                                   szBuf, sizeof(szBuf), NULL);
                Log(("NEW: %s\n", szBuf));
#endif

                /* Copy the new instruction. */
                rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pCurPatchInstrHC, pCurInstrGC, CpuNew.opsize);
                AssertRC(rc);

                /* Add a new lookup record for the duplicated instruction. */
                patmr3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
            }
            else
            {
#ifdef DEBUG
                char szBuf[256];
                DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
                                   szBuf, sizeof(szBuf), NULL);
                Log(("NEW: %s (FAILED)\n", szBuf));
#endif
                /* Restore the old lookup record for the duplicated instruction. */
                patmr3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);

                /** @todo in theory we need to restore the lookup records for the remaining dirty instructions too! */
                rc = VERR_PATCHING_REFUSED;
                break;
            }
            pCurInstrGC += CpuNew.opsize;
            pCurPatchInstrHC += CpuNew.opsize;
            pCurPatchInstrGC += CpuNew.opsize;
            cbLeft -= CpuNew.opsize;

            /* Check if we expanded a complex guest instruction into a patch stream (e.g. call) */
            if (!cbLeft)
            {
                /* If the next patch instruction doesn't correspond to the next guest instruction, then we have some extra room to fill. */
                if (RTAvlU32Get(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC) == NULL)
                {
                    pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
                    if (pRec)
                    {
                        unsigned cbFiller = pRec->Core.Key + pVM->patm.s.pPatchMemGC - pCurPatchInstrGC;
                        uint8_t *pPatchFillHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);

                        Assert(!pRec->fDirty);

                        Log(("Room left in patched instruction stream (%d bytes)\n", cbFiller));
                        if (cbFiller >= SIZEOF_NEARJUMP32)
                        {
                            /* Bridge the gap with a relative near jump to the next instruction. */
                            pPatchFillHC[0] = 0xE9;
                            *(uint32_t *)&pPatchFillHC[1] = cbFiller - SIZEOF_NEARJUMP32;
#ifdef DEBUG
                            char szBuf[256];
                            DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
                                               szBuf, sizeof(szBuf), NULL);
                            Log(("FILL: %s\n", szBuf));
#endif
                        }
                        else
                        {
                            /* Too small for a near jump; pad with NOPs instead. */
                            for (unsigned i = 0; i < cbFiller; i++)
                            {
                                pPatchFillHC[i] = 0x90; /* NOP */
#ifdef DEBUG
                                char szBuf[256];
                                DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC + i,
                                                   DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
                                Log(("FILL: %s\n", szBuf));
#endif
                            }
                        }
                    }
                }
            }
        }
    }
    else
        rc = VERR_PATCHING_REFUSED;

    if (RT_SUCCESS(rc))
    {
        STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyGood);
    }
    else
    {
        STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyBad);
        Assert(cbDirty);

        /* Mark the whole instruction stream with breakpoints. */
        if (cbDirty)
            memset(pPatchInstrHC, 0xCC, cbDirty);

        /* Last resort: recompile the whole patch block (eligible types only). */
        if (   pVM->patm.s.fOutOfMemory == false
            && (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER)))
        {
            rc = patmR3RefreshPatch(pVM, pPatch);
            if (RT_FAILURE(rc))
            {
                LogRel(("PATM: Failed to refresh dirty patch at %RRv. Disabling it.\n", pPatch->patch.pPrivInstrGC));
            }
            /* Even if we succeed, we must go back to the original instruction as the patched one could be invalid. */
            rc = VERR_PATCHING_REFUSED;
        }
    }
    return rc;
}
6198
6199/**
6200 * Handle trap inside patch code
6201 *
6202 * @returns VBox status code.
6203 * @param pVM The VM to operate on.
6204 * @param pCtx CPU context
6205 * @param pEip GC pointer of trapping instruction
6206 * @param ppNewEip GC pointer to new instruction
6207 */
6208VMMR3DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *ppNewEip)
6209{
6210 PPATMPATCHREC pPatch = 0;
6211 void *pvPatchCoreOffset;
6212 RTRCUINTPTR offset;
6213 RTRCPTR pNewEip;
6214 int rc ;
6215 PRECPATCHTOGUEST pPatchToGuestRec = 0;
6216 PVMCPU pVCpu = VMMGetCpu0(pVM);
6217
6218 Assert(pVM->cCpus == 1);
6219
6220 pNewEip = 0;
6221 *ppNewEip = 0;
6222
6223 STAM_PROFILE_ADV_START(&pVM->patm.s.StatHandleTrap, a);
6224
6225 /* Find the patch record. */
6226 /* Note: there might not be a patch to guest translation record (global function) */
6227 offset = pEip - pVM->patm.s.pPatchMemGC;
6228 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
6229 if (pvPatchCoreOffset)
6230 {
6231 pPatch = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6232
6233 Assert(offset >= pPatch->patch.pPatchBlockOffset && offset < pPatch->patch.pPatchBlockOffset + pPatch->patch.cbPatchBlockSize);
6234
6235 if (pPatch->patch.uState == PATCH_DIRTY)
6236 {
6237 Log(("PATMR3HandleTrap: trap in dirty patch at %RRv\n", pEip));
6238 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6239 {
6240 /* Function duplication patches set fPIF to 1 on entry */
6241 pVM->patm.s.pGCStateHC->fPIF = 1;
6242 }
6243 }
6244 else
6245 if (pPatch->patch.uState == PATCH_DISABLED)
6246 {
6247 Log(("PATMR3HandleTrap: trap in disabled patch at %RRv\n", pEip));
6248 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6249 {
6250 /* Function duplication patches set fPIF to 1 on entry */
6251 pVM->patm.s.pGCStateHC->fPIF = 1;
6252 }
6253 }
6254 else
6255 if (pPatch->patch.uState == PATCH_DISABLE_PENDING)
6256 {
6257 RTRCPTR pPrivInstrGC = pPatch->patch.pPrivInstrGC;
6258
6259 Log(("PATMR3HandleTrap: disable operation is pending for patch at %RRv\n", pPatch->patch.pPrivInstrGC));
6260 rc = PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6261 AssertReleaseMsg(rc != VWRN_PATCH_REMOVED, ("PATMR3DisablePatch removed patch at %RRv\n", pPrivInstrGC));
6262 AssertMsg(pPatch->patch.uState == PATCH_DISABLED || pPatch->patch.uState == PATCH_UNUSABLE, ("Unexpected failure to disable patch state=%d rc=%Rrc\n", pPatch->patch.uState, rc));
6263 }
6264
6265 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, offset, false);
6266 AssertReleaseMsg(pPatchToGuestRec, ("PATMR3HandleTrap: Unable to find corresponding guest address for %RRv (offset %x)\n", pEip, offset));
6267
6268 pNewEip = pPatchToGuestRec->pOrgInstrGC;
6269 pPatch->patch.cTraps++;
6270 PATM_STAT_FAULT_INC(&pPatch->patch);
6271 }
6272 else
6273 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 0, ("PATMR3HandleTrap: Unable to find translation record for %RRv (PIF=0)\n", pEip));
6274
6275 /* Check if we were interrupted in PATM generated instruction code. */
6276 if (pVM->patm.s.pGCStateHC->fPIF == 0)
6277 {
6278 DISCPUSTATE Cpu;
6279 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pEip, &Cpu, "PIF Trap: ");
6280 AssertRC(rc);
6281
6282 if ( rc == VINF_SUCCESS
6283 && ( Cpu.pCurInstr->opcode == OP_PUSHF
6284 || Cpu.pCurInstr->opcode == OP_PUSH
6285 || Cpu.pCurInstr->opcode == OP_CALL)
6286 )
6287 {
6288 uint64_t fFlags;
6289
6290 STAM_COUNTER_INC(&pVM->patm.s.StatPushTrap);
6291
6292 if (Cpu.pCurInstr->opcode == OP_PUSH)
6293 {
6294 rc = PGMShwGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6295 if ( rc == VINF_SUCCESS
6296 && ((fFlags & (X86_PTE_P|X86_PTE_RW)) == (X86_PTE_P|X86_PTE_RW)) )
6297 {
6298 /* The stack address is fine, so the push argument is a pointer -> emulate this instruction */
6299
6300 /* Reset the PATM stack. */
6301 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6302
6303 pVM->patm.s.pGCStateHC->fPIF = 1;
6304
6305 Log(("Faulting push -> go back to the original instruction\n"));
6306
6307 /* continue at the original instruction */
6308 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6309 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6310 return VINF_SUCCESS;
6311 }
6312 }
6313
6314 /* Typical pushf (most patches)/push (call patch) trap because of a monitored page. */
6315 rc = PGMShwMakePageWritable(pVCpu, pCtx->esp, 0 /*fFlags*/);
6316 AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
6317 if (rc == VINF_SUCCESS)
6318 {
6319 /* The guest page *must* be present. */
6320 rc = PGMGstGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6321 if ( rc == VINF_SUCCESS
6322 && (fFlags & X86_PTE_P))
6323 {
6324 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6325 return VINF_PATCH_CONTINUE;
6326 }
6327 }
6328 }
6329 else
6330 if (pPatch->patch.pPrivInstrGC == pNewEip)
6331 {
6332 /* Invalidated patch or first instruction overwritten.
6333 * We can ignore the fPIF state in this case.
6334 */
6335 /* Reset the PATM stack. */
6336 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6337
6338 Log(("Call to invalidated patch -> go back to the original instruction\n"));
6339
6340 pVM->patm.s.pGCStateHC->fPIF = 1;
6341
6342 /* continue at the original instruction */
6343 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6344 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6345 return VINF_SUCCESS;
6346 }
6347
6348 char szBuf[256];
6349 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pEip, DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6350
6351 /* Very bad. We crashed in emitted code. Probably stack? */
6352 if (pPatch)
6353 {
6354 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6355 ("Crash in patch code %RRv (%RRv) esp=%RX32\nPatch state=%x flags=%RX64 fDirty=%d\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), pPatch->patch.uState, pPatch->patch.flags, pPatchToGuestRec->fDirty, szBuf));
6356 }
6357 else
6358 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6359 ("Crash in patch code %RRv (%RRv) esp=%RX32\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), szBuf));
6360 EMR3FatalError(pVCpu, VERR_INTERNAL_ERROR);
6361 }
6362
6363 /* From here on, we must have a valid patch to guest translation. */
6364 if (pvPatchCoreOffset == 0)
6365 {
6366 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6367 AssertMsgFailed(("PATMR3HandleTrap: patch not found at address %RRv!!\n", pEip));
6368 return VERR_PATCH_NOT_FOUND;
6369 }
6370
6371 /* Take care of dirty/changed instructions. */
6372 if (pPatchToGuestRec->fDirty)
6373 {
6374 Assert(pPatchToGuestRec->Core.Key == offset);
6375 Assert(pVM->patm.s.pGCStateHC->fPIF == 1);
6376
6377 rc = patmR3HandleDirtyInstr(pVM, pCtx, pPatch, pPatchToGuestRec, pEip);
6378 if (RT_SUCCESS(rc))
6379 {
6380 /* Retry the current instruction. */
6381 pNewEip = pEip;
6382 rc = VINF_PATCH_CONTINUE; /* Continue at current patch instruction. */
6383 }
6384 else
6385 {
6386 /* Reset the PATM stack. */
6387 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6388
6389 rc = VINF_SUCCESS; /* Continue at original instruction. */
6390 }
6391
6392 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6393 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6394 return rc;
6395 }
6396
6397#ifdef VBOX_STRICT
6398 if (pPatch->patch.flags & PATMFL_DUPLICATE_FUNCTION)
6399 {
6400 DISCPUSTATE cpu;
6401 bool disret;
6402 uint32_t opsize;
6403 PATMP2GLOOKUPREC cacheRec;
6404 RT_ZERO(cacheRec);
6405 cacheRec.pPatch = &pPatch->patch;
6406
6407 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6408 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), &opsize, NULL, PATMREAD_RAWCODE);
6409 if (cacheRec.Lock.pvMap)
6410 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6411
6412 if (disret && cpu.pCurInstr->opcode == OP_RETN)
6413 {
6414 RTRCPTR retaddr;
6415 PCPUMCTX pCtx2;
6416
6417 pCtx2 = CPUMQueryGuestCtxPtr(pVCpu);
6418
6419 rc = PGMPhysSimpleReadGCPtr(pVCpu, &retaddr, pCtx2->esp, sizeof(retaddr));
6420 AssertRC(rc);
6421
6422 Log(("Return failed at %RRv (%RRv)\n", pEip, pNewEip));
6423 Log(("Expected return address %RRv found address %RRv Psp=%x\n", pVM->patm.s.pGCStackHC[(pVM->patm.s.pGCStateHC->Psp+PATM_STACK_SIZE)/sizeof(RTRCPTR)], retaddr, pVM->patm.s.pGCStateHC->Psp));
6424 }
6425 }
6426#endif
6427
6428 /* Return original address, correct by subtracting the CS base address. */
6429 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6430
6431 /* Reset the PATM stack. */
6432 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6433
6434 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pNewEip)
6435 {
6436 /* Must be a faulting instruction after sti; currently only sysexit, hlt or iret */
6437 Log(("PATMR3HandleTrap %RRv -> inhibit irqs set!\n", pEip));
6438#ifdef VBOX_STRICT
6439 DISCPUSTATE cpu;
6440 bool disret;
6441 uint32_t opsize;
6442 PATMP2GLOOKUPREC cacheRec;
6443 RT_ZERO(cacheRec);
6444 cacheRec.pPatch = &pPatch->patch;
6445
6446 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6447 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), &opsize, NULL, PATMREAD_ORGCODE);
6448 if (cacheRec.Lock.pvMap)
6449 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6450
6451 if (disret && (cpu.pCurInstr->opcode == OP_SYSEXIT || cpu.pCurInstr->opcode == OP_HLT || cpu.pCurInstr->opcode == OP_INT3))
6452 {
6453 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6454 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), &opsize, NULL, PATMREAD_RAWCODE);
6455 if (cacheRec.Lock.pvMap)
6456 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6457
6458 Assert(cpu.pCurInstr->opcode == OP_SYSEXIT || cpu.pCurInstr->opcode == OP_HLT || cpu.pCurInstr->opcode == OP_IRET);
6459 }
6460#endif
6461 EMSetInhibitInterruptsPC(pVCpu, pNewEip);
6462 pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts = 0;
6463 }
6464
6465 Log2(("pPatchBlockGC %RRv - pEip %RRv corresponding GC address %RRv\n", PATCHCODE_PTR_GC(&pPatch->patch), pEip, pNewEip));
6466#ifdef LOG_ENABLED
6467 CPUMR3DisasmInstr(pVM, pVCpu, pCtx, pNewEip, "PATCHRET: ");
6468#endif
6469 if (pNewEip >= pPatch->patch.pPrivInstrGC && pNewEip < pPatch->patch.pPrivInstrGC + pPatch->patch.cbPatchJump)
6470 {
6471 /* We can't jump back to code that we've overwritten with a 5 byte jump! */
6472 Log(("Disabling patch at location %RRv due to trap too close to the privileged instruction \n", pPatch->patch.pPrivInstrGC));
6473 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6474 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6475 return VERR_PATCH_DISABLED;
6476 }
6477
6478#ifdef PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
6479 /** @todo compare to nr of successful runs. add some aging algorithm and determine the best time to disable the patch */
6480 if (pPatch->patch.cTraps > MAX_PATCH_TRAPS)
6481 {
6482 Log(("Disabling patch at location %RRv due to too many traps inside patch code\n", pPatch->patch.pPrivInstrGC));
6483 //we are only wasting time, back out the patch
6484 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6485 pTrapRec->pNextPatchInstr = 0;
6486 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6487 return VERR_PATCH_DISABLED;
6488 }
6489#endif
6490
6491 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6492 return VINF_SUCCESS;
6493}
6494
6495
6496/**
6497 * Handle page-fault in monitored page
6498 *
6499 * @returns VBox status code.
6500 * @param pVM The VM to operate on.
6501 */
6502VMMR3DECL(int) PATMR3HandleMonitoredPage(PVM pVM)
6503{
6504 RTRCPTR addr = pVM->patm.s.pvFaultMonitor;
6505
6506 addr &= PAGE_BASE_GC_MASK;
6507
6508 int rc = PGMHandlerVirtualDeregister(pVM, addr);
6509 AssertRC(rc); NOREF(rc);
6510
6511 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, false);
6512 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) == PAGE_ADDRESS(addr))
6513 {
6514 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6515 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6516 rc = PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6517 if (rc == VWRN_PATCH_REMOVED)
6518 return VINF_SUCCESS;
6519
6520 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6521
6522 if (addr == pPatchRec->patch.pPrivInstrGC)
6523 addr++;
6524 }
6525
6526 for(;;)
6527 {
6528 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, true);
6529
6530 if (!pPatchRec || PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) != PAGE_ADDRESS(addr))
6531 break;
6532
6533 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
6534 {
6535 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6536 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6537 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6538 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6539 }
6540 addr = pPatchRec->patch.pPrivInstrGC + 1;
6541 }
6542
6543 pVM->patm.s.pvFaultMonitor = 0;
6544 return VINF_SUCCESS;
6545}
6546
6547
6548#ifdef VBOX_WITH_STATISTICS
6549
6550static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch)
6551{
6552 if (pPatch->flags & PATMFL_SYSENTER)
6553 {
6554 return "SYSENT";
6555 }
6556 else
6557 if (pPatch->flags & (PATMFL_TRAPHANDLER|PATMFL_INTHANDLER))
6558 {
6559 static char szTrap[16];
6560 uint32_t iGate;
6561
6562 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
6563 if (iGate < 256)
6564 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-%2X" : "TRAP-%2X", iGate);
6565 else
6566 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-??" : "TRAP-??");
6567 return szTrap;
6568 }
6569 else
6570 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
6571 return "DUPFUNC";
6572 else
6573 if (pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL)
6574 return "FUNCCALL";
6575 else
6576 if (pPatch->flags & PATMFL_TRAMPOLINE)
6577 return "TRAMP";
6578 else
6579 return patmGetInstructionString(pPatch->opcode, pPatch->flags);
6580}
6581
6582static const char *PATMPatchState(PVM pVM, PPATCHINFO pPatch)
6583{
6584 switch(pPatch->uState)
6585 {
6586 case PATCH_ENABLED:
6587 return "ENA";
6588 case PATCH_DISABLED:
6589 return "DIS";
6590 case PATCH_DIRTY:
6591 return "DIR";
6592 case PATCH_UNUSABLE:
6593 return "UNU";
6594 case PATCH_REFUSED:
6595 return "REF";
6596 case PATCH_DISABLE_PENDING:
6597 return "DIP";
6598 default:
6599 AssertFailed();
6600 return " ";
6601 }
6602}
6603
6604/**
6605 * Resets the sample.
6606 * @param pVM The VM handle.
6607 * @param pvSample The sample registered using STAMR3RegisterCallback.
6608 */
6609static void patmResetStat(PVM pVM, void *pvSample)
6610{
6611 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6612 Assert(pPatch);
6613
6614 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A = 0;
6615 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B = 0;
6616}
6617
6618/**
6619 * Prints the sample into the buffer.
6620 *
6621 * @param pVM The VM handle.
6622 * @param pvSample The sample registered using STAMR3RegisterCallback.
6623 * @param pszBuf The buffer to print into.
6624 * @param cchBuf The size of the buffer.
6625 */
6626static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf)
6627{
6628 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6629 Assert(pPatch);
6630
6631 Assert(pPatch->uState != PATCH_REFUSED);
6632 Assert(!(pPatch->flags & (PATMFL_REPLACE_FUNCTION_CALL|PATMFL_MMIO_ACCESS)));
6633
6634 RTStrPrintf(pszBuf, cchBuf, "size %04x ->%3s %8s - %08d - %08d",
6635 pPatch->cbPatchBlockSize, PATMPatchState(pVM, pPatch), PATMPatchType(pVM, pPatch),
6636 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A, pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B);
6637}
6638
6639/**
6640 * Returns the GC address of the corresponding patch statistics counter
6641 *
6642 * @returns Stat address
6643 * @param pVM The VM to operate on.
6644 * @param pPatch Patch structure
6645 */
6646RTRCPTR patmPatchQueryStatAddress(PVM pVM, PPATCHINFO pPatch)
6647{
6648 Assert(pPatch->uPatchIdx != PATM_STAT_INDEX_NONE);
6649 return pVM->patm.s.pStatsGC + sizeof(STAMRATIOU32) * pPatch->uPatchIdx + RT_OFFSETOF(STAMRATIOU32, u32A);
6650}
6651
6652#endif /* VBOX_WITH_STATISTICS */
6653
6654#ifdef VBOX_WITH_DEBUGGER
6655/**
6656 * The '.patmoff' command.
6657 *
6658 * @returns VBox status.
6659 * @param pCmd Pointer to the command descriptor (as registered).
6660 * @param pCmdHlp Pointer to command helper functions.
6661 * @param pVM Pointer to the current VM (if any).
6662 * @param paArgs Pointer to (readonly) array of arguments.
6663 * @param cArgs Number of arguments in the array.
6664 */
6665static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
6666{
6667 /*
6668 * Validate input.
6669 */
6670 if (!pVM)
6671 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires VM to be selected.\n");
6672
6673 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, DisableAllPatches, pVM);
6674 PATMR3AllowPatching(pVM, false);
6675 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching disabled\n");
6676}
6677
6678/**
6679 * The '.patmon' command.
6680 *
6681 * @returns VBox status.
6682 * @param pCmd Pointer to the command descriptor (as registered).
6683 * @param pCmdHlp Pointer to command helper functions.
6684 * @param pVM Pointer to the current VM (if any).
6685 * @param paArgs Pointer to (readonly) array of arguments.
6686 * @param cArgs Number of arguments in the array.
6687 */
6688static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
6689{
6690 /*
6691 * Validate input.
6692 */
6693 if (!pVM)
6694 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires VM to be selected.\n");
6695
6696 PATMR3AllowPatching(pVM, true);
6697 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, EnableAllPatches, pVM);
6698 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching enabled\n");
6699}
6700#endif
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette