source: vbox/trunk/src/VBox/VMM/PATM/PATM.cpp@ 28134

Last change on this file since 28134 was 26263, checked in by vboxsync, 15 years ago ("VMM: More warnings.")

1/* $Id: PATM.cpp 26263 2010-02-05 02:24:13Z vboxsync $ */
2/** @file
3 * PATM - Dynamic Guest OS Patching Manager
4 *
5 * NOTE: Never ever reuse patch memory!!
6 */
7
8/*
9 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
20 * Clara, CA 95054 USA or visit http://www.sun.com if you need
21 * additional information or have any questions.
22 */
23
24/*******************************************************************************
25* Header Files *
26*******************************************************************************/
27#define LOG_GROUP LOG_GROUP_PATM
28#include <VBox/patm.h>
29#include <VBox/stam.h>
30#include <VBox/pgm.h>
31#include <VBox/cpum.h>
32#include <VBox/cpumdis.h>
33#include <VBox/iom.h>
34#include <VBox/mm.h>
35#include <VBox/ssm.h>
36#include <VBox/trpm.h>
37#include <VBox/cfgm.h>
38#include <VBox/param.h>
39#include <VBox/selm.h>
40#include <iprt/avl.h>
41#include "PATMInternal.h"
42#include "PATMPatch.h"
43#include <VBox/vm.h>
44#include <VBox/csam.h>
45
46#include <VBox/dbg.h>
47#include <VBox/err.h>
48#include <VBox/log.h>
49#include <iprt/assert.h>
50#include <iprt/asm.h>
51#include <VBox/dis.h>
52#include <VBox/disopcode.h>
53
54#include <iprt/string.h>
55#include "PATMA.h"
56
57//#define PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
58//#define PATM_DISABLE_ALL
59
60/*******************************************************************************
61* Internal Functions *
62*******************************************************************************/
63
64static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pPatch);
65static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
66static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
67
68#ifdef LOG_ENABLED // keep gcc quiet
69static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC);
70#endif
71#ifdef VBOX_WITH_STATISTICS
72static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch);
73static void patmResetStat(PVM pVM, void *pvSample);
74static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf);
75#endif
76
77#define patmPatchHCPtr2PatchGCPtr(pVM, pHC) (pVM->patm.s.pPatchMemGC + (pHC - pVM->patm.s.pPatchMemHC))
78#define patmPatchGCPtr2PatchHCPtr(pVM, pGC) (pVM->patm.s.pPatchMemHC + (pGC - pVM->patm.s.pPatchMemGC))
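/* Illustrative usage sketch for the two macros above (addresses and offsets invented, not from a
 * real run): both sides map the same patch memory block, so converting is just re-basing the offset.
 *
 *    uint8_t *pbInstrHC = pVM->patm.s.pPatchMemHC + 0x100;                      // some byte in patch memory
 *    RCPTRTYPE(uint8_t *) pInstrGC = patmPatchHCPtr2PatchGCPtr(pVM, pbInstrHC); // == pPatchMemGC + 0x100
 *    Assert(patmPatchGCPtr2PatchHCPtr(pVM, pInstrGC) == pbInstrHC);             // round-trips back
 */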
79
80static int patmReinit(PVM pVM);
81static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam);
82
83#ifdef VBOX_WITH_DEBUGGER
84static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM);
85static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
86static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
87
88/** Command descriptors. */
89static const DBGCCMD g_aCmds[] =
90{
91 /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, pResultDesc, fFlags, pfnHandler, pszSyntax, pszDescription */
92 { "patmon", 0, 0, NULL, 0, NULL, 0, patmr3CmdOn, "", "Enable patching." },
93 { "patmoff", 0, 0, NULL, 0, NULL, 0, patmr3CmdOff, "", "Disable patching." },
94};
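/* Once these descriptors are registered with DBGC (see PATMR3Init below), the commands should be
 * usable from the VirtualBox debugger console simply as "patmon" and "patmoff"; per the table above
 * both take no arguments. */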
95#endif
96
97/* Don't want to break saved states, so put it here as a global variable. */
98static unsigned int cIDTHandlersDisabled = 0;
99
100/**
101 * Initializes the PATM.
102 *
103 * @returns VBox status code.
104 * @param pVM The VM to operate on.
105 */
106VMMR3DECL(int) PATMR3Init(PVM pVM)
107{
108 int rc;
109
110 Log(("PATMR3Init: Patch record size %d\n", sizeof(PATCHINFO)));
111
112 /* These values can't change as they are hardcoded in patch code (old saved states!) */
113 AssertCompile(VMCPU_FF_TIMER == RT_BIT_32(2));
114 AssertCompile(VM_FF_REQUEST == VMCPU_FF_REQUEST);
115 AssertCompile(VMCPU_FF_INTERRUPT_APIC == RT_BIT_32(0));
116 AssertCompile(VMCPU_FF_INTERRUPT_PIC == RT_BIT_32(1));
117
118 AssertReleaseMsg(PATMInterruptFlag == (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST),
119 ("Interrupt flags out of sync!! PATMInterruptFlag=%#x expected %#x. broken assembler?\n", PATMInterruptFlag, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST));
120
121 /* Allocate patch memory and GC patch state memory. */
122 pVM->patm.s.cbPatchMem = PATCH_MEMORY_SIZE;
123 /* Add another page in case the generated code is much larger than expected. */
124 /** @todo bad safety precaution */
125 rc = MMR3HyperAllocOnceNoRel(pVM, PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE + PATM_STAT_MEMSIZE, PAGE_SIZE, MM_TAG_PATM, (void **)&pVM->patm.s.pPatchMemHC);
126 if (RT_FAILURE(rc))
127 {
128 Log(("MMHyperAlloc failed with %Rrc\n", rc));
129 return rc;
130 }
131 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
132
133 /* PATM stack page for call instruction execution. (2 parts: one for our private stack and one to store the original return address) */
134 pVM->patm.s.pGCStackHC = (RTRCPTR *)(pVM->patm.s.pPatchMemHC + PATCH_MEMORY_SIZE + PAGE_SIZE);
135 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
136
137 /*
138 * Hypervisor memory for GC status data (read/write)
139 *
140 * Note1: This is non-critical data; if trashed by the guest, it will only cause problems for the guest itself
141 * Note2: This doesn't really belong here, but we need access to it for relocation purposes
142 *
143 */
144 Assert(sizeof(PATMGCSTATE) < PAGE_SIZE); /** @note hardcoded dependencies on this exist. */
145 pVM->patm.s.pGCStateHC = (PPATMGCSTATE)((uint8_t *)pVM->patm.s.pGCStackHC + PATM_STACK_TOTAL_SIZE);
146 pVM->patm.s.pGCStateGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
147
148 /* Hypervisor memory for patch statistics */
149 pVM->patm.s.pStatsHC = (PSTAMRATIOU32)((uint8_t *)pVM->patm.s.pGCStateHC + PAGE_SIZE);
150 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
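    /* Informal layout sketch derived from the assignments above (the slack page is the safety
     * margin mentioned in the @todo earlier); the whole range comes from one MMR3HyperAllocOnceNoRel call:
     *
     *   pPatchMemHC/GC   +0                                             patch code (PATCH_MEMORY_SIZE)
     *                    +PATCH_MEMORY_SIZE                             slack page (PAGE_SIZE)
     *   pGCStackHC/GC    +PATCH_MEMORY_SIZE + PAGE_SIZE                 PATM stack (PATM_STACK_TOTAL_SIZE)
     *   pGCStateHC/GC    + ... + PATM_STACK_TOTAL_SIZE                  GC state (one page)
     *   pStatsHC/GC      + ... + PAGE_SIZE                              patch statistics (PATM_STAT_MEMSIZE)
     */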
151
152 /* Memory for patch lookup trees. */
153 rc = MMHyperAlloc(pVM, sizeof(*pVM->patm.s.PatchLookupTreeHC), 0, MM_TAG_PATM, (void **)&pVM->patm.s.PatchLookupTreeHC);
154 AssertRCReturn(rc, rc);
155 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
156
157#ifdef RT_ARCH_AMD64 /* see patmReinit(). */
158 /* Check CFGM option. */
159 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "PATMEnabled", &pVM->fPATMEnabled);
160 if (RT_FAILURE(rc))
161# ifdef PATM_DISABLE_ALL
162 pVM->fPATMEnabled = false;
163# else
164 pVM->fPATMEnabled = true;
165# endif
166#endif
167
168 rc = patmReinit(pVM);
169 AssertRC(rc);
170 if (RT_FAILURE(rc))
171 return rc;
172
173 /*
174 * Register save and load state notificators.
175 */
176 rc = SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SSM_VERSION, sizeof(pVM->patm.s) + PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE,
177 NULL, NULL, NULL,
178 NULL, patmR3Save, NULL,
179 NULL, patmR3Load, NULL);
180 AssertRCReturn(rc, rc);
181
182#ifdef VBOX_WITH_DEBUGGER
183 /*
184 * Debugger commands.
185 */
186 static bool s_fRegisteredCmds = false;
187 if (!s_fRegisteredCmds)
188 {
189 int rc2 = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
190 if (RT_SUCCESS(rc2))
191 s_fRegisteredCmds = true;
192 }
193#endif
194
195#ifdef VBOX_WITH_STATISTICS
196 STAM_REG(pVM, &pVM->patm.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/PATM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
197 STAM_REG(pVM, &pVM->patm.s.StatPATMMemoryUsed,STAMTYPE_COUNTER, "/PATM/MemoryUsed", STAMUNIT_OCCURENCES, "The amount of hypervisor heap used for patches.");
198 STAM_REG(pVM, &pVM->patm.s.StatDisabled, STAMTYPE_COUNTER, "/PATM/Patch/Disabled", STAMUNIT_OCCURENCES, "Number of times patches were disabled.");
199 STAM_REG(pVM, &pVM->patm.s.StatEnabled, STAMTYPE_COUNTER, "/PATM/Patch/Enabled", STAMUNIT_OCCURENCES, "Number of times patches were enabled.");
200 STAM_REG(pVM, &pVM->patm.s.StatDirty, STAMTYPE_COUNTER, "/PATM/Patch/Dirty", STAMUNIT_OCCURENCES, "Number of times patches were marked dirty.");
201 STAM_REG(pVM, &pVM->patm.s.StatUnusable, STAMTYPE_COUNTER, "/PATM/Patch/Unusable", STAMUNIT_OCCURENCES, "Number of unusable patches (conflicts).");
202 STAM_REG(pVM, &pVM->patm.s.StatInstalled, STAMTYPE_COUNTER, "/PATM/Patch/Installed", STAMUNIT_OCCURENCES, "Number of installed patches.");
203 STAM_REG(pVM, &pVM->patm.s.StatInt3Callable, STAMTYPE_COUNTER, "/PATM/Patch/Int3Callable", STAMUNIT_OCCURENCES, "Number of cli patches turned into int3 patches.");
204
205 STAM_REG(pVM, &pVM->patm.s.StatInt3BlockRun, STAMTYPE_COUNTER, "/PATM/Patch/Run/Int3", STAMUNIT_OCCURENCES, "Number of times an int3 block patch was executed.");
206 STAMR3RegisterF(pVM, &pVM->patm.s.pGCStateHC->uPatchCalls, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Patch/Run/Normal");
207
208 STAM_REG(pVM, &pVM->patm.s.StatInstalledFunctionPatches, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Function", STAMUNIT_OCCURENCES, "Number of installed function duplication patches.");
209 STAM_REG(pVM, &pVM->patm.s.StatInstalledTrampoline, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Trampoline", STAMUNIT_OCCURENCES, "Number of installed trampoline patches.");
210 STAM_REG(pVM, &pVM->patm.s.StatInstalledJump, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Jump", STAMUNIT_OCCURENCES, "Number of installed jump patches.");
211
212 STAM_REG(pVM, &pVM->patm.s.StatOverwritten, STAMTYPE_COUNTER, "/PATM/Patch/Overwritten", STAMUNIT_OCCURENCES, "Number of overwritten patches.");
213 STAM_REG(pVM, &pVM->patm.s.StatFixedConflicts,STAMTYPE_COUNTER, "/PATM/Patch/ConflictFixed", STAMUNIT_OCCURENCES, "Number of fixed conflicts.");
214 STAM_REG(pVM, &pVM->patm.s.StatFlushed, STAMTYPE_COUNTER, "/PATM/Patch/Flushed", STAMUNIT_OCCURENCES, "Number of flushes of pages with patch jumps.");
215 STAM_REG(pVM, &pVM->patm.s.StatMonitored, STAMTYPE_COUNTER, "/PATM/Patch/Monitored", STAMUNIT_OCCURENCES, "Number of patches in monitored patch pages.");
216 STAM_REG(pVM, &pVM->patm.s.StatPageBoundaryCrossed, STAMTYPE_COUNTER, "/PATM/Patch/BoundaryCross", STAMUNIT_OCCURENCES, "Number of refused patches due to patch jump crossing page boundary.");
217
218 STAM_REG(pVM, &pVM->patm.s.StatHandleTrap, STAMTYPE_PROFILE, "/PATM/HandleTrap", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3HandleTrap");
219 STAM_REG(pVM, &pVM->patm.s.StatPushTrap, STAMTYPE_COUNTER, "/PATM/HandleTrap/PushWP", STAMUNIT_OCCURENCES, "Number of traps due to monitored stack pages.");
220
221 STAM_REG(pVM, &pVM->patm.s.StatSwitchBack, STAMTYPE_COUNTER, "/PATM/SwitchBack", STAMUNIT_OCCURENCES, "Switch back to original guest code when IF=1 & executing PATM instructions");
222 STAM_REG(pVM, &pVM->patm.s.StatSwitchBackFail,STAMTYPE_COUNTER, "/PATM/SwitchBackFail", STAMUNIT_OCCURENCES, "Failed switch back to original guest code when IF=1 & executing PATM instructions");
223
224 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQFailed, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Failed", STAMUNIT_OCCURENCES, "Nr of failed PATMR3DuplicateFunctionRequest calls");
225 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQSuccess, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Success", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls");
226 STAM_REG(pVM, &pVM->patm.s.StatDuplicateUseExisting,STAMTYPE_COUNTER, "/PATM/Function/DupREQ/UseExist", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls when using an existing patch");
227
228 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupInsert, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Insert", STAMUNIT_OCCURENCES, "Nr of successful function address insertions");
229 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupReplace, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Replace", STAMUNIT_OCCURENCES, "Nr of successful function address replacements");
230 STAM_REG(pVM, &pVM->patm.s.StatU32FunctionMaxSlotsUsed, STAMTYPE_U32_RESET,"/PATM/Function/Lookup/MaxSlots", STAMUNIT_OCCURENCES, "Maximum nr of lookup slots used in all call patches");
231
232 STAM_REG(pVM, &pVM->patm.s.StatFunctionFound, STAMTYPE_COUNTER, "/PATM/Function/Found", STAMUNIT_OCCURENCES, "Nr of successful function patch lookups in GC");
233 STAM_REG(pVM, &pVM->patm.s.StatFunctionNotFound, STAMTYPE_COUNTER, "/PATM/Function/NotFound", STAMUNIT_OCCURENCES, "Nr of failed function patch lookups in GC");
234
235 STAM_REG(pVM, &pVM->patm.s.StatPatchWrite, STAMTYPE_PROFILE, "/PATM/Write/Handle", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3PatchWrite");
236 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteDetect, STAMTYPE_PROFILE, "/PATM/Write/Detect", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMIsWriteToPatchPage");
237 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpreted, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Success", STAMUNIT_OCCURENCES, "Nr of interpreted patch writes.");
238 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpretedFailed, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Failed", STAMUNIT_OCCURENCES, "Nr of failed interpreted patch writes.");
239
240 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshSuccess, STAMTYPE_COUNTER, "/PATM/Refresh/Success", STAMUNIT_OCCURENCES, "Successful patch refreshes");
241 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshFailed, STAMTYPE_COUNTER, "/PATM/Refresh/Failure", STAMUNIT_OCCURENCES, "Failed patch refreshes");
242
243 STAM_REG(pVM, &pVM->patm.s.StatPatchPageInserted, STAMTYPE_COUNTER, "/PATM/Page/Inserted", STAMUNIT_OCCURENCES, "Nr of inserted guest pages that were patched");
244 STAM_REG(pVM, &pVM->patm.s.StatPatchPageRemoved, STAMTYPE_COUNTER, "/PATM/Page/Removed", STAMUNIT_OCCURENCES, "Nr of removed guest pages that were patched");
245
246 STAM_REG(pVM, &pVM->patm.s.StatInstrDirty, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Detected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty.");
247 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyGood, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Corrected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and corrected later on.");
248 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyBad, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Failed", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and we were not able to correct them.");
249
250 STAM_REG(pVM, &pVM->patm.s.StatSysEnter, STAMTYPE_COUNTER, "/PATM/Emul/SysEnter", STAMUNIT_OCCURENCES, "Number of times sysenter was emulated.");
251 STAM_REG(pVM, &pVM->patm.s.StatSysExit, STAMTYPE_COUNTER, "/PATM/Emul/SysExit" , STAMUNIT_OCCURENCES, "Number of times sysexit was emulated.");
252 STAM_REG(pVM, &pVM->patm.s.StatEmulIret, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Success", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
253 STAM_REG(pVM, &pVM->patm.s.StatEmulIretFailed, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Failed", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
254
255 STAM_REG(pVM, &pVM->patm.s.StatGenRet, STAMTYPE_COUNTER, "/PATM/Gen/Ret" , STAMUNIT_OCCURENCES, "Number of generated ret instructions.");
256 STAM_REG(pVM, &pVM->patm.s.StatGenRetReused, STAMTYPE_COUNTER, "/PATM/Gen/RetReused" , STAMUNIT_OCCURENCES, "Number of reused ret instructions.");
257 STAM_REG(pVM, &pVM->patm.s.StatGenCall, STAMTYPE_COUNTER, "/PATM/Gen/Call", STAMUNIT_OCCURENCES, "Number of generated call instructions.");
258 STAM_REG(pVM, &pVM->patm.s.StatGenJump, STAMTYPE_COUNTER, "/PATM/Gen/Jmp" , STAMUNIT_OCCURENCES, "Number of generated indirect jump instructions.");
259 STAM_REG(pVM, &pVM->patm.s.StatGenPopf, STAMTYPE_COUNTER, "/PATM/Gen/Popf" , STAMUNIT_OCCURENCES, "Number of generated popf instructions.");
260
261 STAM_REG(pVM, &pVM->patm.s.StatCheckPendingIRQ, STAMTYPE_COUNTER, "/PATM/GC/CheckIRQ" , STAMUNIT_OCCURENCES, "Number of traps that ask to check for pending irqs.");
262#endif /* VBOX_WITH_STATISTICS */
263
264 Log(("PATMCallRecord.size %d\n", PATMCallRecord.size));
265 Log(("PATMCallIndirectRecord.size %d\n", PATMCallIndirectRecord.size));
266 Log(("PATMRetRecord.size %d\n", PATMRetRecord.size));
267 Log(("PATMJumpIndirectRecord.size %d\n", PATMJumpIndirectRecord.size));
268 Log(("PATMPopf32Record.size %d\n", PATMPopf32Record.size));
269 Log(("PATMIretRecord.size %d\n", PATMIretRecord.size));
270 Log(("PATMStiRecord.size %d\n", PATMStiRecord.size));
271 Log(("PATMCheckIFRecord.size %d\n", PATMCheckIFRecord.size));
272
273 return rc;
274}
275
276/**
277 * Finalizes HMA page attributes.
278 *
279 * @returns VBox status code.
280 * @param pVM The VM handle.
281 */
282VMMR3DECL(int) PATMR3InitFinalize(PVM pVM)
283{
284 /* The GC state, stack and statistics must be read/write for the guest (supervisor only of course). */
285 int rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStateGC, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
286 if (RT_FAILURE(rc))
287 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
288
289 rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStackGC, PATM_STACK_TOTAL_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
290 if (RT_FAILURE(rc))
291 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
292
293 rc = PGMMapSetPage(pVM, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
294 if (RT_FAILURE(rc))
295 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
296
297 return rc;
298}
299
300/**
301 * (Re)initializes PATM
302 *
303 * @param pVM The VM.
304 */
305static int patmReinit(PVM pVM)
306{
307 int rc;
308
309 /*
310 * Assert alignment and sizes.
311 */
312 AssertRelease(!(RT_OFFSETOF(VM, patm.s) & 31));
313 AssertRelease(sizeof(pVM->patm.s) <= sizeof(pVM->patm.padding));
314
315 /*
316 * Setup any fixed pointers and offsets.
317 */
318 pVM->patm.s.offVM = RT_OFFSETOF(VM, patm);
319
320#ifndef RT_ARCH_AMD64 /* would be nice if this was changed everywhere. was driving me crazy on AMD64. */
321#ifndef PATM_DISABLE_ALL
322 pVM->fPATMEnabled = true;
323#endif
324#endif
325
326 Assert(pVM->patm.s.pGCStateHC);
327 memset(pVM->patm.s.pGCStateHC, 0, PAGE_SIZE);
328 AssertReleaseMsg(pVM->patm.s.pGCStateGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStateGC));
329
330 Log(("Patch memory allocated at %p - %RRv\n", pVM->patm.s.pPatchMemHC, pVM->patm.s.pPatchMemGC));
331 pVM->patm.s.pGCStateHC->uVMFlags = X86_EFL_IF;
332
333 Assert(pVM->patm.s.pGCStackHC);
334 memset(pVM->patm.s.pGCStackHC, 0, PAGE_SIZE);
335 AssertReleaseMsg(pVM->patm.s.pGCStackGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStackGC));
336 pVM->patm.s.pGCStateHC->Psp = PATM_STACK_SIZE;
337 pVM->patm.s.pGCStateHC->fPIF = 1; /* PATM Interrupt Flag */
338
339 Assert(pVM->patm.s.pStatsHC);
340 memset(pVM->patm.s.pStatsHC, 0, PATM_STAT_MEMSIZE);
341 AssertReleaseMsg(pVM->patm.s.pStatsGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pStatsGC));
342
343 Assert(pVM->patm.s.pPatchMemHC);
344 Assert(pVM->patm.s.pPatchMemGC == MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC));
345 memset(pVM->patm.s.pPatchMemHC, 0, PATCH_MEMORY_SIZE);
346 AssertReleaseMsg(pVM->patm.s.pPatchMemGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pPatchMemHC));
347
348 /* Needed for future patching of sldt/sgdt/sidt/str etc. */
349 pVM->patm.s.pCPUMCtxGC = VM_RC_ADDR(pVM, CPUMQueryGuestCtxPtr(VMMGetCpu(pVM)));
350
351 Assert(pVM->patm.s.PatchLookupTreeHC);
352 Assert(pVM->patm.s.PatchLookupTreeGC == MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC));
353
354 /*
355 * (Re)Initialize PATM structure
356 */
357 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
358 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr);
359 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
360 pVM->patm.s.offPatchMem = 16; /* don't start with zero here */
361 pVM->patm.s.uCurrentPatchIdx = 1; /* Index zero is a dummy */
362 pVM->patm.s.pvFaultMonitor = 0;
363 pVM->patm.s.deltaReloc = 0;
364
365 /* Lowest and highest patched instruction */
366 pVM->patm.s.pPatchedInstrGCLowest = ~0;
367 pVM->patm.s.pPatchedInstrGCHighest = 0;
368
369 pVM->patm.s.PatchLookupTreeHC->PatchTree = 0;
370 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
371 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
372
373 pVM->patm.s.pfnSysEnterPatchGC = 0;
374 pVM->patm.s.pfnSysEnterGC = 0;
375
376 pVM->patm.s.fOutOfMemory = false;
377
378 pVM->patm.s.pfnHelperCallGC = 0;
379
380 /* Generate all global functions to be used by future patches. */
381 /* We generate a fake patch in order to use the existing code for relocation. */
382 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pVM->patm.s.pGlobalPatchRec);
383 if (RT_FAILURE(rc))
384 {
385 Log(("Out of memory!!!!\n"));
386 return VERR_NO_MEMORY;
387 }
388 pVM->patm.s.pGlobalPatchRec->patch.flags = PATMFL_GLOBAL_FUNCTIONS;
389 pVM->patm.s.pGlobalPatchRec->patch.uState = PATCH_ENABLED;
390 pVM->patm.s.pGlobalPatchRec->patch.pPatchBlockOffset = pVM->patm.s.offPatchMem;
391
392 rc = patmPatchGenGlobalFunctions(pVM, &pVM->patm.s.pGlobalPatchRec->patch);
393 AssertRC(rc);
394
395 /* Update free pointer in patch memory. */
396 pVM->patm.s.offPatchMem += pVM->patm.s.pGlobalPatchRec->patch.uCurPatchOffset;
397 /* Round to next 8 byte boundary. */
398 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
399 return rc;
400}
401
402
403/**
404 * Applies relocations to data and code managed by this
405 * component. This function will be called at init and
406 * whenever the VMM need to relocate it self inside the GC.
407 *
408 * The PATM will update the addresses used by the switcher.
409 *
410 * @param pVM The VM.
411 */
412VMMR3DECL(void) PATMR3Relocate(PVM pVM)
413{
414 RTRCPTR GCPtrNew = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
415 RTRCINTPTR delta = GCPtrNew - pVM->patm.s.pGCStateGC;
416
417 Log(("PATMR3Relocate from %RRv to %RRv - delta %08X\n", pVM->patm.s.pGCStateGC, GCPtrNew, delta));
418 if (delta)
419 {
420 PCPUMCTX pCtx;
421
422 /* Update CPUMCTX guest context pointer. */
423 pVM->patm.s.pCPUMCtxGC += delta;
424
425 pVM->patm.s.deltaReloc = delta;
426
427 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, RelocatePatches, (void *)pVM);
428
429 pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));
430
431 /* If we are running patch code right now, then also adjust EIP. */
432 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
433 pCtx->eip += delta;
434
435 pVM->patm.s.pGCStateGC = GCPtrNew;
436 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
437
438 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
439
440 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
441
442 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
443
444 if (pVM->patm.s.pfnSysEnterPatchGC)
445 pVM->patm.s.pfnSysEnterPatchGC += delta;
446
447 /* Deal with the global patch functions. */
448 pVM->patm.s.pfnHelperCallGC += delta;
449 pVM->patm.s.pfnHelperRetGC += delta;
450 pVM->patm.s.pfnHelperIretGC += delta;
451 pVM->patm.s.pfnHelperJumpGC += delta;
452
453 RelocatePatches(&pVM->patm.s.pGlobalPatchRec->Core, (void *)pVM);
454 }
455}
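/* Worked example with invented addresses: if the hypervisor area moves so that MMHyperR3ToRC()
 * returns 0xa0405000 for pGCStateHC where it previously returned 0xa0401000, then delta = 0x4000
 * and every recorded GC address (fixups in the patches, the cached state/stack/stats pointers, and
 * a guest EIP that currently sits inside patch code) is shifted by that same 0x4000. */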
456
457
458/**
459 * Terminates the PATM.
460 *
461 * Termination means cleaning up and freeing all resources,
462 * the VM itself is at this point powered off or suspended.
463 *
464 * @returns VBox status code.
465 * @param pVM The VM to operate on.
466 */
467VMMR3DECL(int) PATMR3Term(PVM pVM)
468{
469 /* Memory was all allocated from the two MM heaps and requires no freeing. */
470 return VINF_SUCCESS;
471}
472
473
474/**
475 * PATM reset callback.
476 *
477 * @returns VBox status code.
478 * @param pVM The VM which is reset.
479 */
480VMMR3DECL(int) PATMR3Reset(PVM pVM)
481{
482 Log(("PATMR3Reset\n"));
483
484 /* Free all patches. */
485 while (true)
486 {
487 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32RemoveBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, 0, true);
488 if (pPatchRec)
489 {
490 PATMRemovePatch(pVM, pPatchRec, true);
491 }
492 else
493 break;
494 }
495 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
496 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
497 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
498 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
499
500 int rc = patmReinit(pVM);
501 if (RT_SUCCESS(rc))
502 rc = PATMR3InitFinalize(pVM); /* paranoia */
503
504 return rc;
505}
506
507/**
508 * Read callback for disassembly function; supports reading bytes that cross a page boundary
509 *
510 * @returns VBox status code.
511 * @param pSrc GC source pointer
512 * @param pDest HC destination pointer
513 * @param size Number of bytes to read
514 * @param pvUserdata Callback specific user data (pCpu)
515 *
516 */
517int patmReadBytes(RTUINTPTR pSrc, uint8_t *pDest, unsigned size, void *pvUserdata)
518{
519 DISCPUSTATE *pCpu = (DISCPUSTATE *)pvUserdata;
520 PATMDISASM *pDisInfo = (PATMDISASM *)pCpu->apvUserData[0];
521 int orgsize = size;
522
523 Assert(size);
524 if (size == 0)
525 return VERR_INVALID_PARAMETER;
526
527 /*
528 * Trap/interrupt handlers typically call common code on entry, which might already have patches inserted.
529 * As we currently don't support calling patch code from patch code, we'll let it read the original opcode bytes instead.
530 */
531 /** @todo could change in the future! */
532 if (pDisInfo->fReadFlags & PATMREAD_ORGCODE)
533 {
534 for (int i=0;i<orgsize;i++)
535 {
536 int rc = PATMR3QueryOpcode(pDisInfo->pVM, (RTRCPTR)pSrc, pDest);
537 if (RT_SUCCESS(rc))
538 {
539 pSrc++;
540 pDest++;
541 size--;
542 }
543 else break;
544 }
545 if (size == 0)
546 return VINF_SUCCESS;
547#ifdef VBOX_STRICT
548 if ( !(pDisInfo->pPatchInfo->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER))
549 && !(pDisInfo->fReadFlags & PATMREAD_NOCHECK))
550 {
551 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pSrc, NULL) == false);
552 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pSrc+size-1, NULL) == false);
553 }
554#endif
555 }
556
557
558 if (PAGE_ADDRESS(pDisInfo->pInstrGC) != PAGE_ADDRESS(pSrc + size - 1) && !PATMIsPatchGCAddr(pDisInfo->pVM, pSrc))
559 {
560 return PGMPhysSimpleReadGCPtr(&pDisInfo->pVM->aCpus[0], pDest, pSrc, size);
561 }
562 else
563 {
564 uint8_t *pInstrHC = pDisInfo->pInstrHC;
565
566 Assert(pInstrHC);
567
568 /* pInstrHC is the base address; adjust according to the GC pointer. */
569 pInstrHC = pInstrHC + (pSrc - pDisInfo->pInstrGC);
570
571 memcpy(pDest, (void *)pInstrHC, size);
572 }
573
574 return VINF_SUCCESS;
575}
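/* Example of the PATMREAD_ORGCODE path above (address invented): if the guest instruction at
 * 0xc0100345 was overwritten with a jump into patch memory, reading it with PATMREAD_ORGCODE set
 * returns the original opcode bytes recorded by PATM (via PATMR3QueryOpcode), whereas a plain
 * guest memory read would return the patch jump itself. */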
576
577/**
578 * Callback function for RTAvloU32DoWithAll
579 *
580 * Updates all fixups in the patches
581 *
582 * @returns VBox status code.
583 * @param pNode Current node
584 * @param pParam The VM to operate on.
585 */
586static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam)
587{
588 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
589 PVM pVM = (PVM)pParam;
590 RTRCINTPTR delta;
591#ifdef LOG_ENABLED
592 DISCPUSTATE cpu;
593 char szOutput[256];
594 uint32_t opsize;
595 bool disret;
596#endif
597 int rc;
598
599 /* Nothing to do if the patch is not active. */
600 if (pPatch->patch.uState == PATCH_REFUSED)
601 return 0;
602
603#ifdef LOG_ENABLED
604 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
605 {
606 /** @note pPrivInstrHC is probably not valid anymore */
607 rc = PGMPhysGCPtr2R3Ptr(VMMGetCpu0(pVM), pPatch->patch.pPrivInstrGC, (PRTR3PTR)&pPatch->patch.pPrivInstrHC);
608 if (rc == VINF_SUCCESS)
609 {
610 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
611 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pPatch->patch.pPrivInstrGC, pPatch->patch.pPrivInstrHC, &opsize, szOutput, PATMREAD_RAWCODE);
612 Log(("Org patch jump: %s", szOutput));
613 }
614 }
615#endif
616
617 Log(("Nr of fixups %d\n", pPatch->patch.nrFixups));
618 delta = (RTRCINTPTR)pVM->patm.s.deltaReloc;
619
620 /*
621 * Apply fixups
622 */
623 PRELOCREC pRec = 0;
624 AVLPVKEY key = 0;
625
626 while (true)
627 {
628 /* Get the record that's closest from above */
629 pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true);
630 if (pRec == 0)
631 break;
632
633 key = (AVLPVKEY)(pRec->pRelocPos + 1); /* search for the next record during the next round. */
634
635 switch (pRec->uType)
636 {
637 case FIXUP_ABSOLUTE:
638 Log(("Absolute fixup at %RRv %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
639 if (!pRec->pSource || PATMIsPatchGCAddr(pVM, pRec->pSource))
640 {
641 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
642 }
643 else
644 {
645 uint8_t curInstr[15];
646 uint8_t oldInstr[15];
647 Assert(pRec->pSource && pPatch->patch.cbPrivInstr <= 15);
648
649 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
650
651 memcpy(oldInstr, pPatch->patch.aPrivInstr, pPatch->patch.cbPrivInstr);
652 *(RTRCPTR *)&oldInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
653
654 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), curInstr, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPrivInstr);
655 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
656
657 pRec->pDest = (RTRCPTR)((RTRCUINTPTR)pRec->pDest + delta);
658
659 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
660 {
661 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
662
663 Log(("PATM: Patch page not present -> check later!\n"));
664 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
665 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
666 }
667 else
668 if (memcmp(curInstr, oldInstr, pPatch->patch.cbPrivInstr))
669 {
670 Log(("PATM: Patch was overwritten -> disabling patch!!\n"));
671 /*
672 * Disable patch; this is not a good solution
673 */
674 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
675 pPatch->patch.uState = PATCH_DISABLED;
676 }
677 else
678 if (RT_SUCCESS(rc))
679 {
680 *(RTRCPTR *)&curInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
681 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pRec->pSource, curInstr, pPatch->patch.cbPrivInstr);
682 AssertRC(rc);
683 }
684 }
685 break;
686
687 case FIXUP_REL_JMPTOPATCH:
688 {
689 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
690
691 if ( pPatch->patch.uState == PATCH_ENABLED
692 && (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE))
693 {
694 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
695 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
696 RTRCPTR pJumpOffGC;
697 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
698 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
699
700 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
701
702 Assert(pRec->pSource - pPatch->patch.cbPatchJump == pPatch->patch.pPrivInstrGC);
703#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
704 if (pPatch->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
705 {
706 Assert(pPatch->patch.flags & PATMFL_JUMP_CONFLICT);
707
708 pJumpOffGC = pPatch->patch.pPrivInstrGC + 2; //two byte opcode
709 oldJump[0] = pPatch->patch.aPrivInstr[0];
710 oldJump[1] = pPatch->patch.aPrivInstr[1];
711 *(RTRCUINTPTR *)&oldJump[2] = displOld;
712 }
713 else
714#endif
715 if (pPatch->patch.cbPatchJump == SIZEOF_NEARJUMP32)
716 {
717 pJumpOffGC = pPatch->patch.pPrivInstrGC + 1; //one byte opcode
718 oldJump[0] = 0xE9;
719 *(RTRCUINTPTR *)&oldJump[1] = displOld;
720 }
721 else
722 {
723 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->patch.cbPatchJump));
724 continue; //this should never happen!!
725 }
726 Assert(pPatch->patch.cbPatchJump <= sizeof(temp));
727
728 /*
729 * Read old patch jump and compare it to the one we previously installed
730 */
731 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPatchJump);
732 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
733
734 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
735 {
736 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
737
738 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
739 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
740 }
741 else
742 if (memcmp(temp, oldJump, pPatch->patch.cbPatchJump))
743 {
744 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
745 /*
746 * Disable patch; this is not a good solution
747 */
748 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
749 pPatch->patch.uState = PATCH_DISABLED;
750 }
751 else
752 if (RT_SUCCESS(rc))
753 {
754 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
755 AssertRC(rc);
756 }
757 else
758 {
759 AssertMsgFailed(("Unexpected error %d from MMR3PhysReadGCVirt\n", rc));
760 }
761 }
762 else
763 {
764 Log(("Skip the guest jump to patch code for this disabled patch %08X - %08X\n", pPatch->patch.pPrivInstrHC, pRec->pRelocPos));
765 }
766
767 pRec->pDest = pTarget;
768 break;
769 }
770
771 case FIXUP_REL_JMPTOGUEST:
772 {
773 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
774 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
775
776 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
777 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
778 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
779 pRec->pSource = pSource;
780 break;
781 }
782
783 default:
784 AssertMsg(0, ("Invalid fixup type!!\n"));
785 return VERR_INVALID_PARAMETER;
786 }
787 }
788
789#ifdef LOG_ENABLED
790 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
791 {
792 /** @note pPrivInstrHC is probably not valid anymore */
793 rc = PGMPhysGCPtr2R3Ptr(VMMGetCpu0(pVM), pPatch->patch.pPrivInstrGC, (PRTR3PTR)&pPatch->patch.pPrivInstrHC);
794 if (rc == VINF_SUCCESS)
795 {
796 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
797 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pPatch->patch.pPrivInstrGC, pPatch->patch.pPrivInstrHC, &opsize, szOutput, PATMREAD_RAWCODE);
798 Log(("Rel patch jump: %s", szOutput));
799 }
800 }
801#endif
802 return 0;
803}
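/* Quick reference for the fixup kinds handled above (informal summary based on this function only):
 *   FIXUP_ABSOLUTE        absolute GC address stored either in patch code or in guest code we
 *                         modified; shifted by the relocation delta (guest copies are re-read and
 *                         verified first, and the patch is disabled if they were overwritten).
 *   FIXUP_REL_JMPTOPATCH  relative jump in guest code targeting patch code; the displacement is
 *                         recomputed and written back if the original jump is still intact.
 *   FIXUP_REL_JMPTOGUEST  relative jump in patch code targeting guest code; only the patch side
 *                         moved, so the displacement is recomputed from the relocated source. */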
804
805/**
806 * \#PF Handler callback for virtual access handler ranges.
807 *
808 * Important to realize that a physical page in a range can have aliases, and
809 * for ALL and WRITE handlers these will also trigger.
810 *
811 * @returns VINF_SUCCESS if the handler have carried out the operation.
812 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
813 * @param pVM VM Handle.
814 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
815 * @param pvPtr The HC mapping of that address.
816 * @param pvBuf What the guest is reading/writing.
817 * @param cbBuf How much it's reading/writing.
818 * @param enmAccessType The access type.
819 * @param pvUser User argument.
820 */
821DECLCALLBACK(int) patmVirtPageHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
822{
823 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
824 /** @todo could be the wrong virtual address (alias) */
825 pVM->patm.s.pvFaultMonitor = GCPtr;
826 PATMR3HandleMonitoredPage(pVM);
827 return VINF_PGM_HANDLER_DO_DEFAULT;
828}
829
830
831#ifdef VBOX_WITH_DEBUGGER
832/**
833 * Callback function for RTAvloU32DoWithAll
834 *
835 * Enables the patch that's being enumerated
836 *
837 * @returns 0 (continue enumeration).
838 * @param pNode Current node
839 * @param pVM The VM to operate on.
840 */
841static DECLCALLBACK(int) EnableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
842{
843 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
844
845 PATMR3EnablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
846 return 0;
847}
848#endif /* VBOX_WITH_DEBUGGER */
849
850
851#ifdef VBOX_WITH_DEBUGGER
852/**
853 * Callback function for RTAvloU32DoWithAll
854 *
855 * Disables the patch that's being enumerated
856 *
857 * @returns 0 (continue enumeration).
858 * @param pNode Current node
859 * @param pVM The VM to operate on.
860 */
861static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
862{
863 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
864
865 PATMR3DisablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
866 return 0;
867}
868#endif
869
870/**
871 * Returns the host context pointer and size of the patch memory block
872 *
873 * @returns Host context pointer to the patch memory block.
874 * @param pVM The VM to operate on.
875 * @param pcb Where to store the size of the patch memory block (optional)
876 */
877VMMR3DECL(void *) PATMR3QueryPatchMemHC(PVM pVM, uint32_t *pcb)
878{
879 if (pcb)
880 {
881 *pcb = pVM->patm.s.cbPatchMem;
882 }
883 return pVM->patm.s.pPatchMemHC;
884}
885
886
887/**
888 * Returns the guest context pointer and size of the patch memory block
889 *
890 * @returns Guest context (RC) pointer to the patch memory block.
891 * @param pVM The VM to operate on.
892 * @param pcb Where to store the size of the patch memory block (optional)
893 */
894VMMR3DECL(RTRCPTR) PATMR3QueryPatchMemGC(PVM pVM, uint32_t *pcb)
895{
896 if (pcb)
897 {
898 *pcb = pVM->patm.s.cbPatchMem;
899 }
900 return pVM->patm.s.pPatchMemGC;
901}
902
903
904/**
905 * Returns the host context pointer of the GC context structure
906 *
907 * @returns Host context pointer to the PATM GC state structure.
908 * @param pVM The VM to operate on.
909 */
910VMMR3DECL(PPATMGCSTATE) PATMR3QueryGCStateHC(PVM pVM)
911{
912 return pVM->patm.s.pGCStateHC;
913}
914
915
916/**
917 * Checks whether the HC address is part of our patch region
918 *
919 * @returns true if the address lies within the patch memory block, false otherwise.
920 * @param pVM The VM to operate on.
921 * @param pAddrHC Host context address to check
922 */
923VMMR3DECL(bool) PATMR3IsPatchHCAddr(PVM pVM, R3PTRTYPE(uint8_t *) pAddrHC)
924{
925 return (pAddrHC >= pVM->patm.s.pPatchMemHC && pAddrHC < pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem) ? true : false;
926}
927
928
929/**
930 * Allows or disallows patching of privileged instructions executed by the guest OS
931 *
932 * @returns VBox status code.
933 * @param pVM The VM to operate on.
934 * @param fAllowPatching Allow/disallow patching
935 */
936VMMR3DECL(int) PATMR3AllowPatching(PVM pVM, uint32_t fAllowPatching)
937{
938 pVM->fPATMEnabled = (fAllowPatching) ? true : false;
939 return VINF_SUCCESS;
940}
941
942/**
943 * Convert a GC patch block pointer to a HC patch pointer
944 *
945 * @returns HC pointer or NULL if it's not a GC patch pointer
946 * @param pVM The VM to operate on.
947 * @param pAddrGC GC pointer
948 */
949VMMR3DECL(R3PTRTYPE(void *)) PATMR3GCPtrToHCPtr(PVM pVM, RTRCPTR pAddrGC)
950{
951 if (pVM->patm.s.pPatchMemGC <= pAddrGC && pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem > pAddrGC)
952 {
953 return pVM->patm.s.pPatchMemHC + (pAddrGC - pVM->patm.s.pPatchMemGC);
954 }
955 return NULL;
956}
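/* Usage sketch with an invented mapping: if patch memory is mapped at GC 0xa0100000, then
 *    uint8_t *pb = (uint8_t *)PATMR3GCPtrToHCPtr(pVM, 0xa0100230);   // == pPatchMemHC + 0x230
 * lets ring-3 code inspect the patch bytes; any address outside the patch block yields NULL. */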
957
958/**
959 * Query PATM state (enabled/disabled)
960 *
961 * @returns 0 - disabled, 1 - enabled
962 * @param pVM The VM to operate on.
963 */
964VMMR3DECL(int) PATMR3IsEnabled(PVM pVM)
965{
966 return pVM->fPATMEnabled;
967}
968
969
970/**
971 * Convert guest context address to host context pointer
972 *
973 *
974 * @param pVM The VM to operate on.
975 * @param pPatch Patch block structure pointer
976 * @param pGCPtr Guest context pointer
977 *
978 * @returns Host context pointer or NULL in case of an error
979 *
980 */
981R3PTRTYPE(uint8_t *) PATMGCVirtToHCVirt(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pGCPtr)
982{
983 int rc;
984 R3PTRTYPE(uint8_t *) pHCPtr;
985 uint32_t offset;
986
987 if (PATMIsPatchGCAddr(pVM, pGCPtr))
988 {
989 return PATCHCODE_PTR_HC(pPatch) + (pGCPtr - PATCHCODE_PTR_GC(pPatch));
990 }
991
992 offset = pGCPtr & PAGE_OFFSET_MASK;
993 if (pPatch->cacheRec.pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
994 {
995 return pPatch->cacheRec.pPatchLocStartHC + offset;
996 }
997
998 rc = PGMPhysGCPtr2R3Ptr(VMMGetCpu0(pVM), pGCPtr, (void **)&pHCPtr);
999 if (rc != VINF_SUCCESS)
1000 {
1001 AssertMsg(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("MMR3PhysGCVirt2HCVirtEx failed for %08X\n", pGCPtr));
1002 return NULL;
1003 }
1004////invalid? Assert(sizeof(R3PTRTYPE(uint8_t*)) == sizeof(uint32_t));
1005
1006 pPatch->cacheRec.pPatchLocStartHC = (R3PTRTYPE(uint8_t*))((RTHCUINTPTR)pHCPtr & PAGE_BASE_HC_MASK);
1007 pPatch->cacheRec.pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
1008 return pHCPtr;
1009}
1010
1011
1012/** Calculates and fills in all branch targets
1013 *
1014 * @returns VBox status code.
1015 * @param pVM The VM to operate on.
1016 * @param pPatch Current patch block pointer
1017 *
1018 */
1019static int patmr3SetBranchTargets(PVM pVM, PPATCHINFO pPatch)
1020{
1021 int32_t displ;
1022
1023 PJUMPREC pRec = 0;
1024 int nrJumpRecs = 0;
1025
1026 /*
1027 * Set all branch targets inside the patch block.
1028 * We remove all jump records as they are no longer needed afterwards.
1029 */
1030 while (true)
1031 {
1032 RCPTRTYPE(uint8_t *) pInstrGC;
1033 RCPTRTYPE(uint8_t *) pBranchTargetGC = 0;
1034
1035 pRec = (PJUMPREC)RTAvlPVRemoveBestFit(&pPatch->JumpTree, 0, true);
1036 if (pRec == 0)
1037 break;
1038
1039 nrJumpRecs++;
1040
1041 /* HC in patch block to GC in patch block. */
1042 pInstrGC = patmPatchHCPtr2PatchGCPtr(pVM, pRec->pJumpHC);
1043
1044 if (pRec->opcode == OP_CALL)
1045 {
1046 /* Special case: call function replacement patch from this patch block.
1047 */
1048 PPATMPATCHREC pFunctionRec = PATMQueryFunctionPatch(pVM, pRec->pTargetGC);
1049 if (!pFunctionRec)
1050 {
1051 int rc;
1052
1053 if (PATMR3HasBeenPatched(pVM, pRec->pTargetGC) == false)
1054 rc = PATMR3InstallPatch(pVM, pRec->pTargetGC, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
1055 else
1056 rc = VERR_PATCHING_REFUSED; /* exists as a normal patch; can't use it */
1057
1058 if (RT_FAILURE(rc))
1059 {
1060 uint8_t *pPatchHC;
1061 RTRCPTR pPatchGC;
1062 RTRCPTR pOrgInstrGC;
1063
1064 pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pInstrGC, 0);
1065 Assert(pOrgInstrGC);
1066
1067 /* Failure for some reason -> mark exit point with int 3. */
1068 Log(("Failed to install function replacement patch (at %x) for reason %Rrc\n", pOrgInstrGC, rc));
1069
1070 pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pOrgInstrGC);
1071 Assert(pPatchGC);
1072
1073 pPatchHC = pVM->patm.s.pPatchMemHC + (pPatchGC - pVM->patm.s.pPatchMemGC);
1074
1075 /* Set a breakpoint at the very beginning of the recompiled instruction */
1076 *pPatchHC = 0xCC;
1077
1078 continue;
1079 }
1080 }
1081 else
1082 {
1083 Log(("Patch block %RRv called as function\n", pFunctionRec->patch.pPrivInstrGC));
1084 pFunctionRec->patch.flags |= PATMFL_CODE_REFERENCED;
1085 }
1086
1087 pBranchTargetGC = PATMR3QueryPatchGCPtr(pVM, pRec->pTargetGC);
1088 }
1089 else
1090 {
1091 pBranchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pRec->pTargetGC);
1092 }
1093
1094 if (pBranchTargetGC == 0)
1095 {
1096 AssertMsgFailed(("patmr3SetBranchTargets: patmGuestGCPtrToPatchGCPtr failed for %08X\n", pRec->pTargetGC));
1097 return VERR_PATCHING_REFUSED;
1098 }
1099 /* Our jumps *always* have a dword displacement (to make things easier). */
1100 Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
1101 displ = pBranchTargetGC - (pInstrGC + pRec->offDispl + sizeof(RTRCPTR));
1102 *(RTRCPTR *)(pRec->pJumpHC + pRec->offDispl) = displ;
1103 Log(("Set branch target %d to %08X : %08x - (%08x + %d + %d)\n", nrJumpRecs, displ, pBranchTargetGC, pInstrGC, pRec->offDispl, sizeof(RTRCPTR)));
1104 }
1105 Assert(nrJumpRecs == pPatch->nrJumpRecs);
1106 Assert(pPatch->JumpTree == 0);
1107 return VINF_SUCCESS;
1108}
1109
1110/** Adds an illegal instruction record
1111 *
1112 * @param pVM The VM to operate on.
1113 * @param pPatch Patch structure ptr
1114 * @param pInstrGC Guest context pointer to privileged instruction
1115 *
1116 */
1117static void patmAddIllegalInstrRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1118{
1119 PAVLPVNODECORE pRec;
1120
1121 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1122 Assert(pRec);
1123 pRec->Key = (AVLPVKEY)pInstrGC;
1124
1125 bool ret = RTAvlPVInsert(&pPatch->pTempInfo->IllegalInstrTree, pRec);
1126 Assert(ret); NOREF(ret);
1127 pPatch->pTempInfo->nrIllegalInstr++;
1128}
1129
1130static bool patmIsIllegalInstr(PPATCHINFO pPatch, RTRCPTR pInstrGC)
1131{
1132 PAVLPVNODECORE pRec;
1133
1134 pRec = RTAvlPVGet(&pPatch->pTempInfo->IllegalInstrTree, (AVLPVKEY)pInstrGC);
1135 if (pRec)
1136 return true;
1137 return false;
1138}
1139
1140/**
1141 * Add a patch to guest lookup record
1142 *
1143 * @param pVM The VM to operate on.
1144 * @param pPatch Patch structure ptr
1145 * @param pPatchInstrHC Host context pointer to the patch instruction
1146 * @param pInstrGC Guest context pointer to privileged instruction
1147 * @param enmType Lookup type
1148 * @param fDirty Dirty flag
1149 *
1150 */
1151 /** @note Be extremely careful with this function. Make absolutely sure the guest address is correct! (to avoid executing instructions twice!) */
1152void patmr3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC, PATM_LOOKUP_TYPE enmType, bool fDirty)
1153{
1154 bool ret;
1155 PRECPATCHTOGUEST pPatchToGuestRec;
1156 PRECGUESTTOPATCH pGuestToPatchRec;
1157 uint32_t PatchOffset = pPatchInstrHC - pVM->patm.s.pPatchMemHC; /* Offset in memory reserved for PATM. */
1158
1159 if (enmType == PATM_LOOKUP_PATCH2GUEST)
1160 {
1161 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1162 if (pPatchToGuestRec && pPatchToGuestRec->Core.Key == PatchOffset)
1163 return; /* already there */
1164
1165 Assert(!pPatchToGuestRec);
1166 }
1167#ifdef VBOX_STRICT
1168 else
1169 {
1170 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1171 Assert(!pPatchToGuestRec);
1172 }
1173#endif
1174
1175 pPatchToGuestRec = (PRECPATCHTOGUEST)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(RECPATCHTOGUEST) + sizeof(RECGUESTTOPATCH));
1176 Assert(pPatchToGuestRec);
1177 pPatchToGuestRec->Core.Key = PatchOffset;
1178 pPatchToGuestRec->pOrgInstrGC = pInstrGC;
1179 pPatchToGuestRec->enmType = enmType;
1180 pPatchToGuestRec->fDirty = fDirty;
1181
1182 ret = RTAvlU32Insert(&pPatch->Patch2GuestAddrTree, &pPatchToGuestRec->Core);
1183 Assert(ret);
1184
1185 /* GC to patch address */
1186 if (enmType == PATM_LOOKUP_BOTHDIR)
1187 {
1188 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
1189 if (!pGuestToPatchRec)
1190 {
1191 pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1192 pGuestToPatchRec->Core.Key = pInstrGC;
1193 pGuestToPatchRec->PatchOffset = PatchOffset;
1194
1195 ret = RTAvlU32Insert(&pPatch->Guest2PatchAddrTree, &pGuestToPatchRec->Core);
1196 Assert(ret);
1197 }
1198 }
1199
1200 pPatch->nrPatch2GuestRecs++;
1201}
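/* Informal sketch of the bookkeeping above, with invented values: for a patched guest instruction
 * at GC 0xc0101234 whose recompiled counterpart lives at offset 0x1a0 into patch memory,
 *    Patch2GuestAddrTree:  key 0x000001a0 -> pOrgInstrGC 0xc0101234   (always inserted)
 *    Guest2PatchAddrTree:  key 0xc0101234 -> PatchOffset 0x000001a0   (PATM_LOOKUP_BOTHDIR only)
 * Both records come from a single MMR3HeapAllocZ allocation, which is why the reverse record is
 * addressed as (pPatchToGuestRec + 1) here and freed together in patmr3RemoveP2GLookupRecord. */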
1202
1203
1204/**
1205 * Removes a patch to guest lookup record
1206 *
1207 * @param pVM The VM to operate on.
1208 * @param pPatch Patch structure ptr
1209 * @param pPatchInstrGC Guest context pointer to patch block
1210 */
1211void patmr3RemoveP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pPatchInstrGC)
1212{
1213 PAVLU32NODECORE pNode;
1214 PAVLU32NODECORE pNode2;
1215 PRECPATCHTOGUEST pPatchToGuestRec;
1216 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
1217
1218 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1219 Assert(pPatchToGuestRec);
1220 if (pPatchToGuestRec)
1221 {
1222 if (pPatchToGuestRec->enmType == PATM_LOOKUP_BOTHDIR)
1223 {
1224 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1225
1226 Assert(pGuestToPatchRec->Core.Key);
1227 pNode2 = RTAvlU32Remove(&pPatch->Guest2PatchAddrTree, pGuestToPatchRec->Core.Key);
1228 Assert(pNode2);
1229 }
1230 pNode = RTAvlU32Remove(&pPatch->Patch2GuestAddrTree, pPatchToGuestRec->Core.Key);
1231 Assert(pNode);
1232
1233 MMR3HeapFree(pPatchToGuestRec);
1234 pPatch->nrPatch2GuestRecs--;
1235 }
1236}
1237
1238
1239/**
1240 * RTAvlPVDestroy callback.
1241 */
1242static DECLCALLBACK(int) patmEmptyTreePVCallback(PAVLPVNODECORE pNode, void *)
1243{
1244 MMR3HeapFree(pNode);
1245 return 0;
1246}
1247
1248/**
1249 * Empty the specified tree (PV tree, MMR3 heap)
1250 *
1251 * @param pVM The VM to operate on.
1252 * @param ppTree Tree to empty
1253 */
1254void patmEmptyTree(PVM pVM, PAVLPVNODECORE *ppTree)
1255{
1256 RTAvlPVDestroy(ppTree, patmEmptyTreePVCallback, NULL);
1257}
1258
1259
1260/**
1261 * RTAvlU32Destroy callback.
1262 */
1263static DECLCALLBACK(int) patmEmptyTreeU32Callback(PAVLU32NODECORE pNode, void *)
1264{
1265 MMR3HeapFree(pNode);
1266 return 0;
1267}
1268
1269/**
1270 * Empty the specified tree (U32 tree, MMR3 heap)
1271 *
1272 * @param pVM The VM to operate on.
1273 * @param ppTree Tree to empty
1274 */
1275void patmEmptyTreeU32(PVM pVM, PPAVLU32NODECORE ppTree)
1276{
1277 RTAvlU32Destroy(ppTree, patmEmptyTreeU32Callback, NULL);
1278}
1279
1280
1281/**
1282 * Analyses the instructions following the cli for compliance with our heuristics for cli & pushf
1283 *
1284 * @returns VBox status code.
1285 * @param pVM The VM to operate on.
1286 * @param pCpu CPU disassembly state
1287 * @param pInstrGC Guest context pointer to privileged instruction
1288 * @param pCurInstrGC Guest context pointer to the current instruction
1289 * @param pUserData User pointer (callback specific)
1290 *
1291 */
1292static int patmAnalyseBlockCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, void *pUserData)
1293{
1294 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
1295 bool fIllegalInstr = false;
1296
1297 //Preliminary heuristics:
1298 //- no call instructions without a fixed displacement between cli and sti/popf
1299 //- no jumps in the instructions following cli (4+ bytes; enough for the replacement jump (5 bytes))
1300 //- no nested pushf/cli
1301 //- sti/popf should be the (eventual) target of all branches
1302 //- no near or far returns; no int xx, no into
1303 //
1304 // Note: Later on we can impose less strict guidelines if the need arises (see the illustrative example below)
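    // Illustrative only (not taken from a real guest): a sequence like
    //     cli ; mov [ebx], eax ; sti
    // passes these checks, whereas
    //     cli ; call [edx] ; sti
    // is rejected because the call has no fixed displacement (unless PATMFL_SUPPORT_INDIRECT_CALLS is set).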
1305
1306 /* Bail out if the patch gets too big. */
1307 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1308 {
1309 Log(("Code block too big (%x) for patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1310 fIllegalInstr = true;
1311 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1312 }
1313 else
1314 {
1315 /* No unconditional jumps or calls without fixed displacements. */
1316 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1317 && (pCpu->pCurInstr->opcode == OP_JMP || pCpu->pCurInstr->opcode == OP_CALL)
1318 )
1319 {
1320 Assert(pCpu->param1.size <= 4 || pCpu->param1.size == 6);
1321 if ( pCpu->param1.size == 6 /* far call/jmp */
1322 || (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1323 || (OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1324 )
1325 {
1326 fIllegalInstr = true;
1327 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1328 }
1329 }
1330
1331 /* An unconditional (short) jump right after a cli is a potential problem; we will overwrite whichever function comes afterwards */
1332 if (pPatch->opcode == OP_CLI && pCpu->pCurInstr->opcode == OP_JMP)
1333 {
1334 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC + pCpu->opsize < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1335 {
1336 Log(("Dangerous unconditional jump ends in our generated patch jump!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1337 /* We turn this one into a int 3 callable patch. */
1338 pPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
1339 }
1340 }
1341 else
1342 /* no nested pushfs just yet; nested cli is allowed for cli patches though. */
1343 if (pPatch->opcode == OP_PUSHF)
1344 {
1345 if (pCurInstrGC != pInstrGC && pCpu->pCurInstr->opcode == OP_PUSHF)
1346 {
1347 fIllegalInstr = true;
1348 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1349 }
1350 }
1351
1352 // no far returns
1353 if (pCpu->pCurInstr->opcode == OP_RETF)
1354 {
1355 pPatch->pTempInfo->nrRetInstr++;
1356 fIllegalInstr = true;
1357 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1358 }
1359 else
1360 // no int xx or into either
1361 if (pCpu->pCurInstr->opcode == OP_INT3 || pCpu->pCurInstr->opcode == OP_INT || pCpu->pCurInstr->opcode == OP_INTO)
1362 {
1363 fIllegalInstr = true;
1364 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1365 }
1366 }
1367
1368 pPatch->cbPatchBlockSize += pCpu->opsize;
1369
1370 /* Illegal instruction -> end of analysis phase for this code block */
1371 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1372 return VINF_SUCCESS;
1373
1374 /* Check for exit points. */
1375 switch (pCpu->pCurInstr->opcode)
1376 {
1377 case OP_SYSEXIT:
1378 return VINF_SUCCESS; /* duplicate it; will fault or emulated in GC. */
1379
1380 case OP_SYSENTER:
1381 case OP_ILLUD2:
1382 // This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further
1383 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1384 return VINF_SUCCESS;
1385
1386 case OP_STI:
1387 case OP_POPF:
1388 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION)));
1389 /* If our exit point lies within the generated patch jump, then we have to refuse!! */
1390 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1391 {
1392 Log(("Exit point within patch jump itself!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1393 return VERR_PATCHING_REFUSED;
1394 }
1395 if (pPatch->opcode == OP_PUSHF)
1396 {
1397 if (pCpu->pCurInstr->opcode == OP_POPF)
1398 {
1399 if (pPatch->cbPatchBlockSize >= SIZEOF_NEARJUMP32)
1400 return VINF_SUCCESS;
1401
1402 /* Or else we need to duplicate more instructions, because we can't jump back yet! */
1403 Log(("WARNING: End of block reached, but we need to duplicate some extra instruction to avoid a conflict with the patch jump\n"));
1404 pPatch->flags |= PATMFL_CHECK_SIZE;
1405 }
1406 break; //sti doesn't mark the end of a pushf block; only popf does
1407 }
1408 //else no break
1409 case OP_RETN: /* exit point for function replacement */
1410 return VINF_SUCCESS;
1411
1412 case OP_IRET:
1413 return VINF_SUCCESS; /* exitpoint */
1414
1415 case OP_CPUID:
1416 case OP_CALL:
1417 case OP_JMP:
1418 break;
1419
1420 default:
1421 if (pCpu->pCurInstr->optype & (OPTYPE_PRIVILEGED_NOTRAP))
1422 {
1423 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1424 return VINF_SUCCESS; /* exit point */
1425 }
1426 break;
1427 }
1428
1429 // If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
1430 if ((pPatch->flags & PATMFL_CHECK_SIZE) && pPatch->cbPatchBlockSize > SIZEOF_NEARJUMP32 && !(pCpu->pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW))
1431 {
1432 // The end marker for this kind of patch is any instruction at a location outside our patch jump
1433 Log(("End of block at %RRv size %d\n", pCurInstrGC, pCpu->opsize));
1434 return VINF_SUCCESS;
1435 }
1436
1437 return VWRN_CONTINUE_ANALYSIS;
1438}
1439
1440/**
1441 * Analyses the instructions inside a function for compliance
1442 *
1443 * @returns VBox status code.
1444 * @param pVM The VM to operate on.
1445 * @param pCpu CPU disassembly state
1446 * @param pInstrGC Guest context pointer to privileged instruction
1447 * @param pCurInstrGC Guest context pointer to the current instruction
1448 * @param pUserData User pointer (callback specific)
1449 *
1450 */
1451static int patmAnalyseFunctionCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, void *pUserData)
1452{
1453 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
1454 bool fIllegalInstr = false;
1455
1456 //Preliminary heuristics:
1457 //- no call instructions
1458 //- ret ends a block
1459
1460 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
1461
1462 // bail out if the patch gets too big
1463 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1464 {
1465 Log(("Code block too big (%x) for function patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1466 fIllegalInstr = true;
1467 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1468 }
1469 else
1470 {
1471        // no unconditional jumps or calls without fixed displacements
1472 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1473 && (pCpu->pCurInstr->opcode == OP_JMP || pCpu->pCurInstr->opcode == OP_CALL)
1474 )
1475 {
1476 Assert(pCpu->param1.size <= 4 || pCpu->param1.size == 6);
1477 if ( pCpu->param1.size == 6 /* far call/jmp */
1478 || (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1479 || (OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1480 )
1481 {
1482 fIllegalInstr = true;
1483 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1484 }
1485 }
1486 else /* no far returns */
1487 if (pCpu->pCurInstr->opcode == OP_RETF)
1488 {
1489 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1490 fIllegalInstr = true;
1491 }
1492 else /* no int xx or into either */
1493 if (pCpu->pCurInstr->opcode == OP_INT3 || pCpu->pCurInstr->opcode == OP_INT || pCpu->pCurInstr->opcode == OP_INTO)
1494 {
1495 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1496 fIllegalInstr = true;
1497 }
1498
1499 #if 0
1500 ///@todo we can handle certain in/out and privileged instructions in the guest context
1501 if (pCpu->pCurInstr->optype & OPTYPE_PRIVILEGED && pCpu->pCurInstr->opcode != OP_STI)
1502 {
1503 Log(("Illegal instructions for function patch!!\n"));
1504 return VERR_PATCHING_REFUSED;
1505 }
1506 #endif
1507 }
1508
1509 pPatch->cbPatchBlockSize += pCpu->opsize;
1510
1511 /* Illegal instruction -> end of analysis phase for this code block */
1512 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1513 {
1514 return VINF_SUCCESS;
1515 }
1516
1517 // Check for exit points
1518 switch (pCpu->pCurInstr->opcode)
1519 {
1520 case OP_ILLUD2:
1521        //This appears to be some kind of kernel panic in Linux 2.4; no point in analysing it any further
1522 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1523 return VINF_SUCCESS;
1524
1525 case OP_IRET:
1526    case OP_SYSEXIT: /* will fault or be emulated in GC */
1527 case OP_RETN:
1528 return VINF_SUCCESS;
1529
1530 case OP_POPF:
1531 case OP_STI:
1532 return VWRN_CONTINUE_ANALYSIS;
1533 default:
1534 if (pCpu->pCurInstr->optype & (OPTYPE_PRIVILEGED_NOTRAP))
1535 {
1536 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1537 return VINF_SUCCESS; /* exit point */
1538 }
1539 return VWRN_CONTINUE_ANALYSIS;
1540 }
1541
1542 return VWRN_CONTINUE_ANALYSIS;
1543}
1544
1545/**
1546 * Recompiles the instructions in a code block
1547 *
1548 * @returns VBox status code.
1549 * @param pVM The VM to operate on.
1550 * @param pCpu CPU disassembly state
1551 * @param pInstrGC Guest context pointer to privileged instruction
1552 * @param pCurInstrGC Guest context pointer to the current instruction
1553 * @param pUserData User pointer (callback specific)
1554 *
1555 */
1556static int patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, void *pUserData)
1557{
1558 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
1559 int rc = VINF_SUCCESS;
1560 bool fInhibitIRQInstr = false; /* did the instruction cause PATMFL_INHIBITIRQS to be set? */
1561
1562 LogFlow(("patmRecompileCallback %RRv %RRv\n", pInstrGC, pCurInstrGC));
1563
1564 if ( patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pCurInstrGC) != 0
1565 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT)) /* do not do this when the next instruction *must* be executed! */
1566 {
1567 /*
1568 * Been there, done that; so insert a jump (we don't want to duplicate code)
1569 * no need to record this instruction as it's glue code that never crashes (it had better not!)
1570 */
1571 Log(("patmRecompileCallback: jump to code we've recompiled before %RRv!\n", pCurInstrGC));
1572 return patmPatchGenRelJump(pVM, pPatch, pCurInstrGC, OP_JMP, !!(pCpu->prefix & PREFIX_OPSIZE));
1573 }
1574
1575 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1576 {
1577 rc = patmAnalyseFunctionCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pUserData);
1578 }
1579 else
1580 rc = patmAnalyseBlockCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pUserData);
1581
1582 if (RT_FAILURE(rc))
1583 return rc;
1584
1585 /** @note Never do a direct return unless a failure is encountered! */
1586
1587 /* Clear recompilation of next instruction flag; we are doing that right here. */
1588 if (pPatch->flags & PATMFL_RECOMPILE_NEXT)
1589 pPatch->flags &= ~PATMFL_RECOMPILE_NEXT;
1590
1591 /* Add lookup record for patch to guest address translation */
1592 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
1593
1594 /* Update lowest and highest instruction address for this patch */
1595 if (pCurInstrGC < pPatch->pInstrGCLowest)
1596 pPatch->pInstrGCLowest = pCurInstrGC;
1597 else
1598 if (pCurInstrGC > pPatch->pInstrGCHighest)
1599 pPatch->pInstrGCHighest = pCurInstrGC + pCpu->opsize;
1600
1601 /* Illegal instruction -> end of recompile phase for this code block. */
1602 if (patmIsIllegalInstr(pPatch, pCurInstrGC))
1603 {
1604 Log(("Illegal instruction at %RRv -> mark with int 3\n", pCurInstrGC));
1605 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1606 goto end;
1607 }
1608
1609 /* For our first attempt, we'll handle only simple relative jumps (immediate offset coded in instruction).
1610 * Indirect calls are handled below.
1611 */
1612 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1613 && (pCpu->pCurInstr->opcode != OP_CALL || (pPatch->flags & PATMFL_SUPPORT_CALLS))
1614 && (OP_PARM_VTYPE(pCpu->pCurInstr->param1) == OP_PARM_J))
1615 {
1616 RCPTRTYPE(uint8_t *) pTargetGC = PATMResolveBranch(pCpu, pCurInstrGC);
1617 if (pTargetGC == 0)
1618 {
1619 Log(("We don't support far jumps here!! (%08X)\n", pCpu->param1.flags));
1620 return VERR_PATCHING_REFUSED;
1621 }
1622
1623 if (pCpu->pCurInstr->opcode == OP_CALL)
1624 {
1625 Assert(!PATMIsPatchGCAddr(pVM, pTargetGC));
1626 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, pTargetGC, false);
1627 if (RT_FAILURE(rc))
1628 goto end;
1629 }
1630 else
1631 rc = patmPatchGenRelJump(pVM, pPatch, pTargetGC, pCpu->pCurInstr->opcode, !!(pCpu->prefix & PREFIX_OPSIZE));
1632
1633 if (RT_SUCCESS(rc))
1634 rc = VWRN_CONTINUE_RECOMPILE;
1635
1636 goto end;
1637 }
1638
1639 switch (pCpu->pCurInstr->opcode)
1640 {
1641 case OP_CLI:
1642 {
1643 /* If a cli is found while duplicating instructions for another patch, then it's of vital importance to continue
1644 * until we've found the proper exit point(s).
1645 */
1646 if ( pCurInstrGC != pInstrGC
1647 && !(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1648 )
1649 {
1650 Log(("cli instruction found in other instruction patch block; force it to continue & find an exit point\n"));
1651 pPatch->flags &= ~(PATMFL_CHECK_SIZE | PATMFL_SINGLE_INSTRUCTION);
1652 }
1653 /* Set by irq inhibition; no longer valid now. */
1654 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1655
1656 rc = patmPatchGenCli(pVM, pPatch);
1657 if (RT_SUCCESS(rc))
1658 rc = VWRN_CONTINUE_RECOMPILE;
1659 break;
1660 }
1661
1662 case OP_MOV:
1663 if (pCpu->pCurInstr->optype & OPTYPE_POTENTIALLY_DANGEROUS)
1664 {
1665 /* mov ss, src? */
1666 if ( (pCpu->param1.flags & USE_REG_SEG)
1667 && (pCpu->param1.base.reg_seg == DIS_SELREG_SS))
1668 {
1669 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1670 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1671 /** @todo this could cause a fault (ring 0 selector being loaded in ring 1) */
1672 }
1673#if 0 /* necessary for Haiku */
1674 else
1675 if ( (pCpu->param2.flags & USE_REG_SEG)
1676 && (pCpu->param2.base.reg_seg == USE_REG_SS)
1677 && (pCpu->param1.flags & (USE_REG_GEN32|USE_REG_GEN16))) /** @todo memory operand must in theory be handled too */
1678 {
1679 /* mov GPR, ss */
1680 rc = patmPatchGenMovFromSS(pVM, pPatch, pCpu, pCurInstrGC);
1681 if (RT_SUCCESS(rc))
1682 rc = VWRN_CONTINUE_RECOMPILE;
1683 break;
1684 }
1685#endif
1686 }
1687 goto duplicate_instr;
1688
1689 case OP_POP:
1690 if (pCpu->pCurInstr->param1 == OP_PARM_REG_SS)
1691 {
1692 Assert(pCpu->pCurInstr->optype & OPTYPE_INHIBIT_IRQS);
1693
1694            Log(("Force recompilation of next instruction for OP_POP at %RRv\n", pCurInstrGC));
1695 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1696 }
1697 goto duplicate_instr;
1698
1699 case OP_STI:
1700 {
1701 RTRCPTR pNextInstrGC = 0; /* by default no inhibit irq */
1702
1703 /* In a sequence of instructions that inhibit irqs, only the first one actually inhibits irqs. */
1704 if (!(pPatch->flags & PATMFL_INHIBIT_IRQS))
1705 {
1706 pPatch->flags |= PATMFL_INHIBIT_IRQS | PATMFL_GENERATE_JUMPTOGUEST;
1707 fInhibitIRQInstr = true;
1708 pNextInstrGC = pCurInstrGC + pCpu->opsize;
1709 Log(("Inhibit irqs for instruction OP_STI at %RRv\n", pCurInstrGC));
1710 }
1711 rc = patmPatchGenSti(pVM, pPatch, pCurInstrGC, pNextInstrGC);
1712
1713 if (RT_SUCCESS(rc))
1714 {
1715 DISCPUSTATE cpu = *pCpu;
1716 unsigned opsize;
1717 int disret;
1718 RCPTRTYPE(uint8_t *) pReturnInstrGC;
1719 R3PTRTYPE(uint8_t *) pNextInstrHC;
1720
1721 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1722
1723 pNextInstrGC = pCurInstrGC + pCpu->opsize;
1724 pNextInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pNextInstrGC);
1725 if (pNextInstrHC == NULL)
1726 {
1727 AssertFailed();
1728 return VERR_PATCHING_REFUSED;
1729 }
1730
1731 // Disassemble the next instruction
1732 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pNextInstrGC, pNextInstrHC, &opsize, NULL);
1733 if (disret == false)
1734 {
1735 AssertMsgFailed(("STI: Disassembly failed (probably page not present) -> return to caller\n"));
1736 return VERR_PATCHING_REFUSED;
1737 }
1738 pReturnInstrGC = pNextInstrGC + opsize;
1739
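                /* An sti is only acceptable as an exit point if execution resumes beyond the 5 byte
                 * region that gets overwritten by the patch jump; function duplication patches have
                 * no patch jump and simply continue recompiling below. */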
1740 if ( (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1741 || pReturnInstrGC <= pInstrGC
1742 || pReturnInstrGC - pInstrGC >= SIZEOF_NEARJUMP32
1743 )
1744 {
1745 /* Not an exit point for function duplication patches */
1746 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
1747 && RT_SUCCESS(rc))
1748 {
1749 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST; /* Don't generate a jump back */
1750 rc = VWRN_CONTINUE_RECOMPILE;
1751 }
1752 else
1753 rc = VINF_SUCCESS; //exit point
1754 }
1755 else {
1756 Log(("PATM: sti occurred too soon; refusing patch!\n"));
1757 rc = VERR_PATCHING_REFUSED; //not allowed!!
1758 }
1759 }
1760 break;
1761 }
1762
1763 case OP_POPF:
1764 {
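            /* Only generate a jump back to guest code if the instruction following the popf already
             * lies outside the region overwritten by the 5 byte patch jump. */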
1765 bool fGenerateJmpBack = (pCurInstrGC + pCpu->opsize - pInstrGC >= SIZEOF_NEARJUMP32);
1766
1767 /* Not an exit point for IDT handler or function replacement patches */
1768 /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
1769 if (pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_DUPLICATE_FUNCTION))
1770 fGenerateJmpBack = false;
1771
1772 rc = patmPatchGenPopf(pVM, pPatch, pCurInstrGC + pCpu->opsize, !!(pCpu->prefix & PREFIX_OPSIZE), fGenerateJmpBack);
1773 if (RT_SUCCESS(rc))
1774 {
1775 if (fGenerateJmpBack == false)
1776 {
1777 /* Not an exit point for IDT handler or function replacement patches */
1778 rc = VWRN_CONTINUE_RECOMPILE;
1779 }
1780 else
1781 {
1782 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1783 rc = VINF_SUCCESS; /* exit point! */
1784 }
1785 }
1786 break;
1787 }
1788
1789 case OP_PUSHF:
1790 rc = patmPatchGenPushf(pVM, pPatch, !!(pCpu->prefix & PREFIX_OPSIZE));
1791 if (RT_SUCCESS(rc))
1792 rc = VWRN_CONTINUE_RECOMPILE;
1793 break;
1794
1795 case OP_PUSH:
1796 if (pCpu->pCurInstr->param1 == OP_PARM_REG_CS)
1797 {
1798 rc = patmPatchGenPushCS(pVM, pPatch);
1799 if (RT_SUCCESS(rc))
1800 rc = VWRN_CONTINUE_RECOMPILE;
1801 break;
1802 }
1803 goto duplicate_instr;
1804
1805 case OP_IRET:
1806 Log(("IRET at %RRv\n", pCurInstrGC));
1807 rc = patmPatchGenIret(pVM, pPatch, pCurInstrGC, !!(pCpu->prefix & PREFIX_OPSIZE));
1808 if (RT_SUCCESS(rc))
1809 {
1810 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1811 rc = VINF_SUCCESS; /* exit point by definition */
1812 }
1813 break;
1814
1815 case OP_ILLUD2:
1816        /* This appears to be some kind of kernel panic in Linux 2.4; no point in continuing */
1817 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1818 if (RT_SUCCESS(rc))
1819 rc = VINF_SUCCESS; /* exit point by definition */
1820 Log(("Illegal opcode (0xf 0xb)\n"));
1821 break;
1822
1823 case OP_CPUID:
1824 rc = patmPatchGenCpuid(pVM, pPatch, pCurInstrGC);
1825 if (RT_SUCCESS(rc))
1826 rc = VWRN_CONTINUE_RECOMPILE;
1827 break;
1828
1829 case OP_STR:
1830 case OP_SLDT:
1831 rc = patmPatchGenSldtStr(pVM, pPatch, pCpu, pCurInstrGC);
1832 if (RT_SUCCESS(rc))
1833 rc = VWRN_CONTINUE_RECOMPILE;
1834 break;
1835
1836 case OP_SGDT:
1837 case OP_SIDT:
1838 rc = patmPatchGenSxDT(pVM, pPatch, pCpu, pCurInstrGC);
1839 if (RT_SUCCESS(rc))
1840 rc = VWRN_CONTINUE_RECOMPILE;
1841 break;
1842
1843 case OP_RETN:
1844 /* retn is an exit point for function patches */
1845 rc = patmPatchGenRet(pVM, pPatch, pCpu, pCurInstrGC);
1846 if (RT_SUCCESS(rc))
1847 rc = VINF_SUCCESS; /* exit point by definition */
1848 break;
1849
1850 case OP_SYSEXIT:
1851 /* Duplicate it, so it can be emulated in GC (or fault). */
1852 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
1853 if (RT_SUCCESS(rc))
1854 rc = VINF_SUCCESS; /* exit point by definition */
1855 break;
1856
1857 case OP_CALL:
1858 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
1859 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
1860 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
1861 */
1862 Assert(pCpu->param1.size == 4 || pCpu->param1.size == 6);
1863 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->param1.size == 4 /* no far calls! */)
1864 {
1865 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, (RTRCPTR)0xDEADBEEF, true);
1866 if (RT_SUCCESS(rc))
1867 {
1868 rc = VWRN_CONTINUE_RECOMPILE;
1869 }
1870 break;
1871 }
1872 goto gen_illegal_instr;
1873
1874 case OP_JMP:
1875 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
1876 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
1877 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
1878 */
1879 Assert(pCpu->param1.size == 4 || pCpu->param1.size == 6);
1880 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->param1.size == 4 /* no far jumps! */)
1881 {
1882 rc = patmPatchGenJump(pVM, pPatch, pCpu, pCurInstrGC);
1883 if (RT_SUCCESS(rc))
1884 rc = VINF_SUCCESS; /* end of branch */
1885 break;
1886 }
1887 goto gen_illegal_instr;
1888
1889 case OP_INT3:
1890 case OP_INT:
1891 case OP_INTO:
1892 goto gen_illegal_instr;
1893
1894 case OP_MOV_DR:
1895 /** @note: currently we let DRx writes cause a trap d; our trap handler will decide to interpret it or not. */
1896 if (pCpu->pCurInstr->param2 == OP_PARM_Dd)
1897 {
1898 rc = patmPatchGenMovDebug(pVM, pPatch, pCpu);
1899 if (RT_SUCCESS(rc))
1900 rc = VWRN_CONTINUE_RECOMPILE;
1901 break;
1902 }
1903 goto duplicate_instr;
1904
1905 case OP_MOV_CR:
1906 /** @note: currently we let CRx writes cause a trap d; our trap handler will decide to interpret it or not. */
1907 if (pCpu->pCurInstr->param2 == OP_PARM_Cd)
1908 {
1909 rc = patmPatchGenMovControl(pVM, pPatch, pCpu);
1910 if (RT_SUCCESS(rc))
1911 rc = VWRN_CONTINUE_RECOMPILE;
1912 break;
1913 }
1914 goto duplicate_instr;
1915
1916 default:
1917 if (pCpu->pCurInstr->optype & (OPTYPE_CONTROLFLOW | OPTYPE_PRIVILEGED_NOTRAP))
1918 {
1919gen_illegal_instr:
1920 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1921 if (RT_SUCCESS(rc))
1922 rc = VINF_SUCCESS; /* exit point by definition */
1923 }
1924 else
1925 {
1926duplicate_instr:
1927 Log(("patmPatchGenDuplicate\n"));
1928 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
1929 if (RT_SUCCESS(rc))
1930 rc = VWRN_CONTINUE_RECOMPILE;
1931 }
1932 break;
1933 }
1934
1935end:
1936
1937 if ( !fInhibitIRQInstr
1938 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
1939 {
1940 int rc2;
1941 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->opsize;
1942
1943 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
1944 Log(("Clear inhibit IRQ flag at %RRv\n", pCurInstrGC));
1945 if (pPatch->flags & PATMFL_GENERATE_JUMPTOGUEST)
1946 {
1947 Log(("patmRecompileCallback: generate jump back to guest (%RRv) after fused instruction\n", pNextInstrGC));
1948
1949 rc2 = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
1950 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1951 rc = VINF_SUCCESS; /* end of the line */
1952 }
1953 else
1954 {
1955 rc2 = patmPatchGenClearInhibitIRQ(pVM, pPatch, pNextInstrGC);
1956 }
1957 if (RT_FAILURE(rc2))
1958 rc = rc2;
1959 }
1960
1961 if (RT_SUCCESS(rc))
1962 {
1963 // If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
1964 if ( (pPatch->flags & PATMFL_CHECK_SIZE)
1965 && pCurInstrGC + pCpu->opsize - pInstrGC >= SIZEOF_NEARJUMP32
1966 && !(pCpu->pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW)
1967 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT) /* do not do this when the next instruction *must* be executed! */
1968 )
1969 {
1970 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->opsize;
1971
1972 // The end marker for this kind of patch is any instruction at a location outside our patch jump
1973 Log(("patmRecompileCallback: end found for single instruction patch at %RRv opsize %d\n", pNextInstrGC, pCpu->opsize));
1974
1975 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC);
1976 AssertRC(rc);
1977 }
1978 }
1979 return rc;
1980}
1981
1982
1983#ifdef LOG_ENABLED
1984
1985/** Adds a disasm jump record (temporary; used to prevent duplicate analysis of the same jump target)
1986 *
1987 * @param pVM The VM to operate on.
1988 * @param pPatch Patch structure ptr
1989 * @param pInstrGC Guest context pointer to privileged instruction
1990 *
1991 */
1992static void patmPatchAddDisasmJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1993{
1994 PAVLPVNODECORE pRec;
1995
1996 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1997 Assert(pRec);
1998 pRec->Key = (AVLPVKEY)pInstrGC;
1999
2000 int ret = RTAvlPVInsert(&pPatch->pTempInfo->DisasmJumpTree, pRec);
2001 Assert(ret);
2002}
2003
2004/**
2005 * Checks if jump target has been analysed before.
2006 *
2007 * @returns true if the jump target has been analysed before, false otherwise.
2008 * @param pPatch Patch struct
2009 * @param pInstrGC Jump target
2010 *
2011 */
2012static bool patmIsKnownDisasmJump(PPATCHINFO pPatch, RTRCPTR pInstrGC)
2013{
2014 PAVLPVNODECORE pRec;
2015
2016 pRec = RTAvlPVGet(&pPatch->pTempInfo->DisasmJumpTree, (AVLPVKEY)pInstrGC);
2017 if (pRec)
2018 return true;
2019 return false;
2020}
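/* Note: patmr3DisasmCode (below) pairs these two helpers: it consults patmIsKnownDisasmJump()
 * before following a branch target and records new targets with patmPatchAddDisasmJump(), so each
 * target is disassembled at most once per patch. */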
2021
2022/**
2023 * For proper disassembly of the final patch block
2024 *
2025 * @returns VBox status code.
2026 * @param pVM The VM to operate on.
2027 * @param pCpu CPU disassembly state
2028 * @param pInstrGC Guest context pointer to privileged instruction
2029 * @param pCurInstrGC Guest context pointer to the current instruction
2030 * @param pUserData User pointer (callback specific)
2031 *
2032 */
2033int patmr3DisasmCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, void *pUserData)
2034{
2035 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
2036
2037 if (pCpu->pCurInstr->opcode == OP_INT3)
2038 {
2039 /* Could be an int3 inserted in a call patch. Check to be sure */
2040 DISCPUSTATE cpu;
2041 uint8_t *pOrgJumpHC;
2042 RTRCPTR pOrgJumpGC;
2043 uint32_t dummy;
2044
2045 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2046 pOrgJumpGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2047 pOrgJumpHC = PATMGCVirtToHCVirt(pVM, pPatch, pOrgJumpGC);
2048
2049 bool disret = PATMR3DISInstr(pVM, pPatch, &cpu, pOrgJumpGC, pOrgJumpHC, &dummy, NULL);
2050 if (!disret || cpu.pCurInstr->opcode != OP_CALL || cpu.param1.size != 4 /* only near calls */)
2051 return VINF_SUCCESS;
2052
2053 return VWRN_CONTINUE_ANALYSIS;
2054 }
2055
2056 if ( pCpu->pCurInstr->opcode == OP_ILLUD2
2057 && PATMIsPatchGCAddr(pVM, pCurInstrGC))
2058 {
2059 /* the indirect call patch contains an 0xF/0xB illegal instr to call for assistance; check for this and continue */
2060 return VWRN_CONTINUE_ANALYSIS;
2061 }
2062
2063 if ( (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
2064 || pCpu->pCurInstr->opcode == OP_INT
2065 || pCpu->pCurInstr->opcode == OP_IRET
2066 || pCpu->pCurInstr->opcode == OP_RETN
2067 || pCpu->pCurInstr->opcode == OP_RETF
2068 )
2069 {
2070 return VINF_SUCCESS;
2071 }
2072
2073 if (pCpu->pCurInstr->opcode == OP_ILLUD2)
2074 return VINF_SUCCESS;
2075
2076 return VWRN_CONTINUE_ANALYSIS;
2077}
2078
2079
2080/**
2081 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2082 *
2083 * @returns VBox status code.
2084 * @param pVM The VM to operate on.
2085 * @param pInstrGC Guest context pointer to the initial privileged instruction
2086 * @param pCurInstrGC Guest context pointer to the current instruction
2087 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2088 * @param pUserData User pointer (callback specific)
2089 *
2090 */
2091int patmr3DisasmCode(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, void *pUserData)
2092{
2093 DISCPUSTATE cpu;
2094 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
2095 int rc = VWRN_CONTINUE_ANALYSIS;
2096 uint32_t opsize, delta;
2097 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2098 bool disret;
2099 char szOutput[256];
2100
2101 Assert(pCurInstrHC != PATCHCODE_PTR_HC(pPatch) || pPatch->pTempInfo->DisasmJumpTree == 0);
2102
2103 /* We need this to determine branch targets (and for disassembling). */
2104 delta = pVM->patm.s.pPatchMemGC - (uintptr_t)pVM->patm.s.pPatchMemHC;
2105
2106 while(rc == VWRN_CONTINUE_ANALYSIS)
2107 {
2108 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2109
2110 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pCurInstrGC);
2111 if (pCurInstrHC == NULL)
2112 {
2113 rc = VERR_PATCHING_REFUSED;
2114 goto end;
2115 }
2116
2117 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, szOutput, PATMREAD_RAWCODE);
2118 if (PATMIsPatchGCAddr(pVM, pCurInstrGC))
2119 {
2120 RTRCPTR pOrgInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2121
2122 if (pOrgInstrGC != pPatch->pTempInfo->pLastDisasmInstrGC)
2123 Log(("DIS %RRv<-%s", pOrgInstrGC, szOutput));
2124 else
2125 Log(("DIS %s", szOutput));
2126
2127 pPatch->pTempInfo->pLastDisasmInstrGC = pOrgInstrGC;
2128 if (patmIsIllegalInstr(pPatch, pOrgInstrGC))
2129 {
2130 rc = VINF_SUCCESS;
2131 goto end;
2132 }
2133 }
2134 else
2135 Log(("DIS: %s", szOutput));
2136
2137 if (disret == false)
2138 {
2139 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2140 rc = VINF_SUCCESS;
2141 goto end;
2142 }
2143
2144 rc = pfnPATMR3Disasm(pVM, &cpu, pInstrGC, pCurInstrGC, pUserData);
2145 if (rc != VWRN_CONTINUE_ANALYSIS) {
2146 break; //done!
2147 }
2148
2149 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction) */
2150 if ( (cpu.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2151 && (OP_PARM_VTYPE(cpu.pCurInstr->param1) == OP_PARM_J)
2152 && cpu.pCurInstr->opcode != OP_CALL /* complete functions are replaced; don't bother here. */
2153 )
2154 {
2155 RTRCPTR pTargetGC = PATMResolveBranch(&cpu, pCurInstrGC);
2156 RTRCPTR pOrgTargetGC;
2157
2158 if (pTargetGC == 0)
2159 {
2160 Log(("We don't support far jumps here!! (%08X)\n", cpu.param1.flags));
2161 rc = VERR_PATCHING_REFUSED;
2162 break;
2163 }
2164
2165 if (!PATMIsPatchGCAddr(pVM, pTargetGC))
2166 {
2167 //jump back to guest code
2168 rc = VINF_SUCCESS;
2169 goto end;
2170 }
2171 pOrgTargetGC = PATMR3PatchToGCPtr(pVM, pTargetGC, 0);
2172
2173 if (patmIsCommonIDTHandlerPatch(pVM, pOrgTargetGC))
2174 {
2175 rc = VINF_SUCCESS;
2176 goto end;
2177 }
2178
2179 if (patmIsKnownDisasmJump(pPatch, pTargetGC) == false)
2180 {
2181 /* New jump, let's check it. */
2182 patmPatchAddDisasmJump(pVM, pPatch, pTargetGC);
2183
2184 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2185 rc = patmr3DisasmCode(pVM, pInstrGC, pTargetGC, pfnPATMR3Disasm, pUserData);
2186 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2187
2188 if (rc != VINF_SUCCESS) {
2189 break; //done!
2190 }
2191 }
2192 if (cpu.pCurInstr->opcode == OP_JMP)
2193 {
2194 /* Unconditional jump; return to caller. */
2195 rc = VINF_SUCCESS;
2196 goto end;
2197 }
2198
2199 rc = VWRN_CONTINUE_ANALYSIS;
2200 }
2201 pCurInstrGC += opsize;
2202 }
2203end:
2204 return rc;
2205}
2206
2207/**
2208 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2209 *
2210 * @returns VBox status code.
2211 * @param pVM The VM to operate on.
2212 * @param pInstrGC Guest context pointer to the initial privileged instruction
2213 * @param pCurInstrGC Guest context pointer to the current instruction
2214 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2215 * @param pUserData User pointer (callback specific)
2216 *
2217 */
2218int patmr3DisasmCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, void *pUserData)
2219{
2220 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
2221
2222 int rc = patmr3DisasmCode(pVM, pInstrGC, pCurInstrGC, pfnPATMR3Disasm, pUserData);
2223 /* Free all disasm jump records. */
2224 patmEmptyTree(pVM, &pPatch->pTempInfo->DisasmJumpTree);
2225 return rc;
2226}
2227
2228#endif /* LOG_ENABLED */
2229
2230/**
2231 * Detects whether the specified address falls within a 5 byte jump generated for an active patch.
2232 * If so, this patch is permanently disabled.
2233 *
2234 * @param pVM The VM to operate on.
2235 * @param pInstrGC Guest context pointer to instruction
2236 * @param pConflictGC Guest context pointer to check
2237 *
2238 * @note also checks for patch hints to make sure they can never be enabled if a conflict is present.
2239 *
2240 */
2241VMMR3DECL(int) PATMR3DetectConflict(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictGC)
2242{
2243 PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, pConflictGC, true /* include patch hints */);
2244 if (pTargetPatch)
2245 {
2246 return patmDisableUnusablePatch(pVM, pInstrGC, pConflictGC, pTargetPatch);
2247 }
2248 return VERR_PATCH_NO_CONFLICT;
2249}
2250
2251/**
2252 * Recompiles the code stream until the callback function detects a failure or decides everything is acceptable
2253 *
2254 * @returns VBox status code.
2255 * @param pVM The VM to operate on.
2256 * @param pInstrGC Guest context pointer to privileged instruction
2257 * @param pCurInstrGC Guest context pointer to the current instruction
2258 * @param pfnPATMR3Recompile Callback for testing the disassembled instruction
2259 * @param pUserData User pointer (callback specific)
2260 *
2261 */
2262static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Recompile, void *pUserData)
2263{
2264 DISCPUSTATE cpu;
2265 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
2266 int rc = VWRN_CONTINUE_ANALYSIS;
2267 uint32_t opsize;
2268 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2269 bool disret;
2270#ifdef LOG_ENABLED
2271 char szOutput[256];
2272#endif
2273
2274 while (rc == VWRN_CONTINUE_RECOMPILE)
2275 {
2276 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2277
2278 ////Log(("patmRecompileCodeStream %RRv %RRv\n", pInstrGC, pCurInstrGC));
2279
2280 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pCurInstrGC);
2281 if (pCurInstrHC == NULL)
2282 {
2283 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2284 goto end;
2285 }
2286#ifdef LOG_ENABLED
2287 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, szOutput);
2288 Log(("Recompile: %s", szOutput));
2289#else
2290 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2291#endif
2292 if (disret == false)
2293 {
2294 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2295
2296 /* Add lookup record for patch to guest address translation */
2297 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
2298 patmPatchGenIllegalInstr(pVM, pPatch);
2299 rc = VINF_SUCCESS; /* Note: don't fail here; we might refuse an important patch!! */
2300 goto end;
2301 }
2302
2303 rc = pfnPATMR3Recompile(pVM, &cpu, pInstrGC, pCurInstrGC, pUserData);
2304 if (rc != VWRN_CONTINUE_RECOMPILE)
2305 {
2306 /* If irqs are inhibited because of the current instruction, then we must make sure the next one is executed! */
2307 if ( rc == VINF_SUCCESS
2308 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2309 {
2310 DISCPUSTATE cpunext;
2311 uint32_t opsizenext;
2312 uint8_t *pNextInstrHC;
2313 RTRCPTR pNextInstrGC = pCurInstrGC + opsize;
2314
2315 Log(("patmRecompileCodeStream: irqs inhibited by instruction %RRv\n", pNextInstrGC));
2316
2317 /* Certain instructions (e.g. sti) force the next instruction to be executed before any interrupts can occur.
2318 * Recompile the next instruction as well
2319 */
2320 pNextInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pNextInstrGC);
2321 if (pNextInstrHC == NULL)
2322 {
2323 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2324 goto end;
2325 }
2326 cpunext.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2327 disret = PATMR3DISInstr(pVM, pPatch, &cpunext, pNextInstrGC, pNextInstrHC, &opsizenext, NULL);
2328 if (disret == false)
2329 {
2330 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2331 goto end;
2332 }
2333 switch(cpunext.pCurInstr->opcode)
2334 {
2335 case OP_IRET: /* inhibit cleared in generated code */
2336 case OP_SYSEXIT: /* faults; inhibit should be cleared in HC handling */
2337 case OP_HLT:
2338 break; /* recompile these */
2339
2340 default:
2341 if (cpunext.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2342 {
2343 Log(("Unexpected control flow instruction after inhibit irq instruction\n"));
2344
2345 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2346 AssertRC(rc);
2347 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2348 goto end; /** @todo should be ok to ignore instruction fusing in this case */
2349 }
2350 break;
2351 }
2352
2353 /** @note after a cli we must continue to a proper exit point */
2354 if (cpunext.pCurInstr->opcode != OP_CLI)
2355 {
2356 rc = pfnPATMR3Recompile(pVM, &cpunext, pInstrGC, pNextInstrGC, pUserData);
2357 if (RT_SUCCESS(rc))
2358 {
2359 rc = VINF_SUCCESS;
2360 goto end;
2361 }
2362 break;
2363 }
2364 else
2365 rc = VWRN_CONTINUE_RECOMPILE;
2366 }
2367 else
2368 break; /* done! */
2369 }
2370
2371 /** @todo continue with the instructions following the jump and then recompile the jump target code */
2372
2373
2374 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction). */
2375 if ( (cpu.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2376 && (OP_PARM_VTYPE(cpu.pCurInstr->param1) == OP_PARM_J)
2377 && cpu.pCurInstr->opcode != OP_CALL /* complete functions are replaced; don't bother here. */
2378 )
2379 {
2380 RCPTRTYPE(uint8_t *) addr = PATMResolveBranch(&cpu, pCurInstrGC);
2381 if (addr == 0)
2382 {
2383 Log(("We don't support far jumps here!! (%08X)\n", cpu.param1.flags));
2384 rc = VERR_PATCHING_REFUSED;
2385 break;
2386 }
2387
2388 Log(("Jump encountered target %RRv\n", addr));
2389
2390 /* We don't check if the branch target lies in a valid page as we've already done that in the analysis phase. */
2391 if (!(cpu.pCurInstr->optype & OPTYPE_UNCOND_CONTROLFLOW))
2392 {
2393 Log(("patmRecompileCodeStream continue passed conditional jump\n"));
2394 /* First we need to finish this linear code stream until the next exit point. */
2395 rc = patmRecompileCodeStream(pVM, pInstrGC, pCurInstrGC+opsize, pfnPATMR3Recompile, pUserData);
2396 if (RT_FAILURE(rc))
2397 {
2398 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2399 break; //fatal error
2400 }
2401 }
2402
2403 if (patmGuestGCPtrToPatchGCPtr(pVM, pPatch, addr) == 0)
2404 {
2405 /* New code; let's recompile it. */
2406 Log(("patmRecompileCodeStream continue with jump\n"));
2407
2408 /*
2409 * If we are jumping to an existing patch (or within 5 bytes of the entrypoint), then we must temporarily disable
2410 * this patch so we can continue our analysis
2411 *
2412 * We rely on CSAM to detect and resolve conflicts
2413 */
2414 PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, addr);
2415 if(pTargetPatch)
2416 {
2417 Log(("Found active patch at target %RRv (%RRv) -> temporarily disabling it!!\n", addr, pTargetPatch->pPrivInstrGC));
2418 PATMR3DisablePatch(pVM, pTargetPatch->pPrivInstrGC);
2419 }
2420
2421 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2422 rc = patmRecompileCodeStream(pVM, pInstrGC, addr, pfnPATMR3Recompile, pUserData);
2423 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2424
2425 if(pTargetPatch)
2426 {
2427 PATMR3EnablePatch(pVM, pTargetPatch->pPrivInstrGC);
2428 }
2429
2430 if (RT_FAILURE(rc))
2431 {
2432 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2433 break; //done!
2434 }
2435 }
2436 /* Always return to caller here; we're done! */
2437 rc = VINF_SUCCESS;
2438 goto end;
2439 }
2440 else
2441 if (cpu.pCurInstr->optype & OPTYPE_UNCOND_CONTROLFLOW)
2442 {
2443 rc = VINF_SUCCESS;
2444 goto end;
2445 }
2446 pCurInstrGC += opsize;
2447 }
2448end:
2449 Assert(!(pPatch->flags & PATMFL_RECOMPILE_NEXT));
2450 return rc;
2451}
2452
2453
2454/**
2455 * Generate the jump from guest to patch code
2456 *
2457 * @returns VBox status code.
2458 * @param pVM The VM to operate on.
2459 * @param pPatch Patch record
2460 */
2461static int patmGenJumpToPatch(PVM pVM, PPATCHINFO pPatch, bool fAddFixup = true)
2462{
2463 uint8_t temp[8];
2464 uint8_t *pPB;
2465 int rc;
2466
2467 Assert(pPatch->cbPatchJump <= sizeof(temp));
2468 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
2469
2470 pPB = pPatch->pPrivInstrHC;
2471
2472#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
2473 if (pPatch->flags & PATMFL_JUMP_CONFLICT)
2474 {
2475 Assert(pPatch->pPatchJumpDestGC);
2476
2477 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
2478 {
2479 // jmp [PatchCode]
2480 if (fAddFixup)
2481 {
2482 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2483 {
2484 Log(("Relocation failed for the jump in the guest code!!\n"));
2485 return VERR_PATCHING_REFUSED;
2486 }
2487 }
2488
2489 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2490 *(uint32_t *)&temp[1] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
2491 }
2492 else
2493 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
2494 {
2495 // jmp [PatchCode]
2496 if (fAddFixup)
2497 {
2498 if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2499 {
2500 Log(("Relocation failed for the jump in the guest code!!\n"));
2501 return VERR_PATCHING_REFUSED;
2502 }
2503 }
2504
2505 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2506 temp[1] = pPatch->aPrivInstr[1]; //jump opcode copied from original instruction
2507 *(uint32_t *)&temp[2] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
2508 }
2509 else
2510 {
2511 Assert(0);
2512 return VERR_PATCHING_REFUSED;
2513 }
2514 }
2515 else
2516#endif
2517 {
2518 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2519
2520 // jmp [PatchCode]
2521 if (fAddFixup)
2522 {
2523 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
2524 {
2525 Log(("Relocation failed for the jump in the guest code!!\n"));
2526 return VERR_PATCHING_REFUSED;
2527 }
2528 }
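        /* Encode a 32-bit near jump: E9 <rel32>, with rel32 relative to the end of the 5 byte
         * instruction. Illustrative example with hypothetical addresses: private instruction at
         * 0x80101000 and patch entry at 0xa0002000 -> rel32 = 0xa0002000 - (0x80101000 + 5)
         * = 0x1ff00ffb, i.e. the byte sequence E9 FB 0F F0 1F. */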
2529 temp[0] = 0xE9; //jmp
2530 *(uint32_t *)&temp[1] = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
2531 }
2532 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2533 AssertRC(rc);
2534
2535 if (rc == VINF_SUCCESS)
2536 pPatch->flags |= PATMFL_PATCHED_GUEST_CODE;
2537
2538 return rc;
2539}
2540
2541/**
2542 * Remove the jump from guest to patch code
2543 *
2544 * @returns VBox status code.
2545 * @param pVM The VM to operate on.
2546 * @param pPatch Patch record
2547 */
2548static int patmRemoveJumpToPatch(PVM pVM, PPATCHINFO pPatch)
2549{
2550#ifdef DEBUG
2551 DISCPUSTATE cpu;
2552 char szOutput[256];
2553 uint32_t opsize, i = 0;
2554 bool disret;
2555
2556 while(i < pPatch->cbPrivInstr)
2557 {
2558 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2559 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, &pPatch->pPrivInstrHC[i], &opsize, szOutput);
2560 if (disret == false)
2561 break;
2562
2563 Log(("Org patch jump: %s", szOutput));
2564 Assert(opsize);
2565 i += opsize;
2566 }
2567#endif
2568
2569 /* Restore original code (privileged instruction + following instructions that were overwritten because of the 5/6 byte jmp). */
2570 int rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, pPatch->cbPatchJump);
2571#ifdef DEBUG
2572 if (rc == VINF_SUCCESS)
2573 {
2574 i = 0;
2575 while(i < pPatch->cbPrivInstr)
2576 {
2577 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2578 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, &pPatch->pPrivInstrHC[i], &opsize, szOutput);
2579 if (disret == false)
2580 break;
2581
2582 Log(("Org instr: %s", szOutput));
2583 Assert(opsize);
2584 i += opsize;
2585 }
2586 }
2587#endif
2588 pPatch->flags &= ~PATMFL_PATCHED_GUEST_CODE;
2589 return rc;
2590}
2591
2592/**
2593 * Generate the call from guest to patch code
2594 *
2595 * @returns VBox status code.
2596 * @param pVM The VM to operate on.
2597 * @param pPatch Patch record
2598 */
2599static int patmGenCallToPatch(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTargetGC, bool fAddFixup = true)
2600{
2601 uint8_t temp[8];
2602 uint8_t *pPB;
2603 int rc;
2604
2605 Assert(pPatch->cbPatchJump <= sizeof(temp));
2606
2607 pPB = pPatch->pPrivInstrHC;
2608
2609 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2610
2611 // jmp [PatchCode]
2612 if (fAddFixup)
2613 {
2614 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
2615 {
2616 Log(("Relocation failed for the jump in the guest code!!\n"));
2617 return VERR_PATCHING_REFUSED;
2618 }
2619 }
2620
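    /* Keep the original near call/jmp opcode (E8/E9) and rewrite only its 32-bit displacement so it
     * now targets the patch code; as above, the displacement is relative to the next instruction. */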
2621 Assert(pPatch->aPrivInstr[0] == 0xE8 || pPatch->aPrivInstr[0] == 0xE9); /* call or jmp */
2622 temp[0] = pPatch->aPrivInstr[0];
2623 *(uint32_t *)&temp[1] = (uint32_t)pTargetGC - ((uint32_t)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
2624
2625 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2626 AssertRC(rc);
2627
2628 return rc;
2629}
2630
2631
2632/**
2633 * Patch cli/sti pushf/popf instruction block at specified location
2634 *
2635 * @returns VBox status code.
2636 * @param pVM The VM to operate on.
2637 * @param pInstrGC Guest context pointer to privileged instruction
2638 * @param pInstrHC Host context pointer to privileged instruction
2639 * @param uOpcode Instruction opcode
2640 * @param uOpSize Size of starting instruction
2641 * @param pPatchRec Patch record
2642 *
2643 * @note returns failure if patching is not allowed or possible
2644 *
2645 */
2646VMMR3DECL(int) PATMR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2647 uint32_t uOpcode, uint32_t uOpSize, PPATMPATCHREC pPatchRec)
2648{
2649 PPATCHINFO pPatch = &pPatchRec->patch;
2650 int rc = VERR_PATCHING_REFUSED;
2651 DISCPUSTATE cpu;
2652 uint32_t orgOffsetPatchMem = ~0;
2653 RTRCPTR pInstrStart;
2654#ifdef LOG_ENABLED
2655 uint32_t opsize;
2656 char szOutput[256];
2657 bool disret;
2658#endif
2659
2660 /* Save original offset (in case of failures later on) */
2661 /** @todo use the hypervisor heap (that has quite a few consequences for save/restore though) */
2662 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2663
2664 Assert(!(pPatch->flags & (PATMFL_GUEST_SPECIFIC|PATMFL_USER_MODE|PATMFL_TRAPHANDLER)));
2665 switch (uOpcode)
2666 {
2667 case OP_MOV:
2668 break;
2669
2670 case OP_CLI:
2671 case OP_PUSHF:
2672 /* We can 'call' a cli or pushf patch. It will either return to the original guest code when IF is set again, or fault. */
2673 /** @note special precautions are taken when disabling and enabling such patches. */
2674 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
2675 break;
2676
2677 default:
2678 if (!(pPatch->flags & PATMFL_IDTHANDLER))
2679 {
2680 AssertMsg(0, ("PATMR3PatchBlock: Invalid opcode %x\n", uOpcode));
2681 return VERR_INVALID_PARAMETER;
2682 }
2683 }
2684
2685 if (!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)))
2686 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
2687
2688 /* If we're going to insert a patch jump, then the jump itself is not allowed to cross a page boundary. */
2689 if ( (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2690 && PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + SIZEOF_NEARJUMP32)
2691 )
2692 {
2693 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
2694#ifdef DEBUG_sandervl
2695//// AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
2696#endif
2697 rc = VERR_PATCHING_REFUSED;
2698 goto failure;
2699 }
2700
2701 pPatch->nrPatch2GuestRecs = 0;
2702 pInstrStart = pInstrGC;
2703
2704#ifdef PATM_ENABLE_CALL
2705 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
2706#endif
2707
2708 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2709 pPatch->uCurPatchOffset = 0;
2710
2711 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2712
2713 if ((pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER)) == PATMFL_IDTHANDLER)
2714 {
2715 Assert(pPatch->flags & PATMFL_INTHANDLER);
2716
2717 /* Install fake cli patch (to clear the virtual IF and check int xx parameters) */
2718 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2719 if (RT_FAILURE(rc))
2720 goto failure;
2721 }
2722
2723 /***************************************************************************************************************************/
2724 /** @note We can't insert *any* code before a sysenter handler; some linux guests have an invalid stack at this point!!!!! */
2725 /***************************************************************************************************************************/
2726#ifdef VBOX_WITH_STATISTICS
2727 if (!(pPatch->flags & PATMFL_SYSENTER))
2728 {
2729 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2730 if (RT_FAILURE(rc))
2731 goto failure;
2732 }
2733#endif
2734
2735 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pPatch);
2736 if (rc != VINF_SUCCESS)
2737 {
2738        Log(("PATMR3PatchBlock: patmRecompileCodeStream failed with %d\n", rc));
2739 goto failure;
2740 }
2741
2742 /* Calculated during analysis. */
2743 if (pPatch->cbPatchBlockSize < SIZEOF_NEARJUMP32)
2744 {
2745 /* Most likely cause: we encountered an illegal instruction very early on. */
2746 /** @todo could turn it into an int3 callable patch. */
2747 Log(("PATMR3PatchBlock: patch block too small -> refuse\n"));
2748 rc = VERR_PATCHING_REFUSED;
2749 goto failure;
2750 }
2751
2752 /* size of patch block */
2753 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2754
2755
2756 /* Update free pointer in patch memory. */
2757 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2758 /* Round to next 8 byte boundary. */
2759 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2760
2761 /*
2762 * Insert into patch to guest lookup tree
2763 */
2764 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2765 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2766 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2767    AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
2768 if (!rc)
2769 {
2770 rc = VERR_PATCHING_REFUSED;
2771 goto failure;
2772 }
2773
2774 /* Note that patmr3SetBranchTargets can install additional patches!! */
2775 rc = patmr3SetBranchTargets(pVM, pPatch);
2776 if (rc != VINF_SUCCESS)
2777 {
2778        Log(("PATMR3PatchBlock: patmr3SetBranchTargets failed with %d\n", rc));
2779 goto failure;
2780 }
2781
2782#ifdef LOG_ENABLED
2783 Log(("Patch code ----------------------------------------------------------\n"));
2784 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pPatch);
2785 Log(("Patch code ends -----------------------------------------------------\n"));
2786#endif
2787
2788 /* make a copy of the guest code bytes that will be overwritten */
2789 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
2790
2791 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
2792 AssertRC(rc);
2793
2794 if (pPatch->flags & PATMFL_INT3_REPLACEMENT_BLOCK)
2795 {
2796 /*uint8_t ASMInt3 = 0xCC; - unused */
2797
2798 Log(("PATMR3PatchBlock %RRv -> int 3 callable patch.\n", pPatch->pPrivInstrGC));
2799 /* Replace first opcode byte with 'int 3'. */
2800 rc = patmActivateInt3Patch(pVM, pPatch);
2801 if (RT_FAILURE(rc))
2802 goto failure;
2803
2804 /* normal patch can be turned into an int3 patch -> clear patch jump installation flag. */
2805 pPatch->flags &= ~PATMFL_MUST_INSTALL_PATCHJMP;
2806
2807 pPatch->flags &= ~PATMFL_INSTR_HINT;
2808 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
2809 }
2810 else
2811 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2812 {
2813 Assert(!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)));
2814 /* now insert a jump in the guest code */
2815 rc = patmGenJumpToPatch(pVM, pPatch, true);
2816 AssertRC(rc);
2817 if (RT_FAILURE(rc))
2818 goto failure;
2819
2820 }
2821
2822#ifdef LOG_ENABLED
2823 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2824 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput, PATMREAD_RAWCODE);
2825 Log(("%s patch: %s", patmGetInstructionString(pPatch->opcode, pPatch->flags), szOutput));
2826#endif
2827
2828 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2829 pPatch->pTempInfo->nrIllegalInstr = 0;
2830
2831 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
2832
2833 pPatch->uState = PATCH_ENABLED;
2834 return VINF_SUCCESS;
2835
2836failure:
2837 if (pPatchRec->CoreOffset.Key)
2838 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
2839
2840 patmEmptyTree(pVM, &pPatch->FixupTree);
2841 pPatch->nrFixups = 0;
2842
2843 patmEmptyTree(pVM, &pPatch->JumpTree);
2844 pPatch->nrJumpRecs = 0;
2845
2846 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2847 pPatch->pTempInfo->nrIllegalInstr = 0;
2848
2849    /* Turn this patch into a dummy. */
2850 pPatch->uState = PATCH_REFUSED;
2851 pPatch->pPatchBlockOffset = 0;
2852
2853 // Give back the patch memory we no longer need
2854 Assert(orgOffsetPatchMem != (uint32_t)~0);
2855 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
2856
2857 return rc;
2858}
2859
2860/**
2861 * Patch IDT handler
2862 *
2863 * @returns VBox status code.
2864 * @param pVM The VM to operate on.
2865 * @param pInstrGC Guest context pointer to privileged instruction
2866 * @param pInstrHC Host context pointer to privileged instruction
2867 * @param uOpSize Size of starting instruction
2868 * @param pPatchRec Patch record
2869 *
2870 * @note returns failure if patching is not allowed or possible
2871 *
2872 */
2873static int patmIdtHandler(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2874 uint32_t uOpSize, PPATMPATCHREC pPatchRec)
2875{
2876 PPATCHINFO pPatch = &pPatchRec->patch;
2877 bool disret;
2878 DISCPUSTATE cpuPush, cpuJmp;
2879 uint32_t opsize;
2880 RTRCPTR pCurInstrGC = pInstrGC;
2881 uint8_t *pCurInstrHC = pInstrHC;
2882 uint32_t orgOffsetPatchMem = ~0;
2883
2884 /*
2885 * In Linux it's often the case that many interrupt handlers push a predefined value onto the stack
2886 * and then jump to a common entrypoint. In order not to waste a lot of memory, we will check for this
2887 * condition here and only patch the common entrypoint once.
2888 */
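    /* The guest pattern being matched typically looks like this (illustrative only):
     *      push    <vector specific value>
     *      jmp     common_interrupt_entrypoint
     * Only the push is duplicated into the new patch; a jump to the (single) patch of the common
     * entrypoint is generated after it. */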
2889 cpuPush.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2890 disret = PATMR3DISInstr(pVM, pPatch, &cpuPush, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2891 Assert(disret);
2892 if (disret && cpuPush.pCurInstr->opcode == OP_PUSH)
2893 {
2894 RTRCPTR pJmpInstrGC;
2895 int rc;
2896
2897 pCurInstrGC += opsize;
2898 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pCurInstrGC);
2899
2900 cpuJmp.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2901 disret = PATMR3DISInstr(pVM, pPatch, &cpuJmp, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2902 if ( disret
2903 && cpuJmp.pCurInstr->opcode == OP_JMP
2904 && (pJmpInstrGC = PATMResolveBranch(&cpuJmp, pCurInstrGC))
2905 )
2906 {
2907 PPATMPATCHREC pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
2908 if (pJmpPatch == 0)
2909 {
2910 /* Patch it first! */
2911 rc = PATMR3InstallPatch(pVM, pJmpInstrGC, pPatch->flags | PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT);
2912 if (rc != VINF_SUCCESS)
2913 goto failure;
2914 pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
2915 Assert(pJmpPatch);
2916 }
2917 if (pJmpPatch->patch.uState != PATCH_ENABLED)
2918 goto failure;
2919
2920 /* save original offset (in case of failures later on) */
2921 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2922
2923 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2924 pPatch->uCurPatchOffset = 0;
2925 pPatch->nrPatch2GuestRecs = 0;
2926
2927#ifdef VBOX_WITH_STATISTICS
2928 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2929 if (RT_FAILURE(rc))
2930 goto failure;
2931#endif
2932
2933 /* Install fake cli patch (to clear the virtual IF) */
2934 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2935 if (RT_FAILURE(rc))
2936 goto failure;
2937
2938 /* Add lookup record for patch to guest address translation (for the push) */
2939 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pInstrGC, PATM_LOOKUP_BOTHDIR);
2940
2941 /* Duplicate push. */
2942 rc = patmPatchGenDuplicate(pVM, pPatch, &cpuPush, pInstrGC);
2943 if (RT_FAILURE(rc))
2944 goto failure;
2945
2946 /* Generate jump to common entrypoint. */
2947 rc = patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, PATCHCODE_PTR_GC(&pJmpPatch->patch));
2948 if (RT_FAILURE(rc))
2949 goto failure;
2950
2951 /* size of patch block */
2952 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2953
2954 /* Update free pointer in patch memory. */
2955 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2956 /* Round to next 8 byte boundary */
2957 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2958
2959 /* There's no jump from guest to patch code. */
2960 pPatch->cbPatchJump = 0;
2961
2962
2963#ifdef LOG_ENABLED
2964 Log(("Patch code ----------------------------------------------------------\n"));
2965 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pPatch);
2966 Log(("Patch code ends -----------------------------------------------------\n"));
2967#endif
2968 Log(("Successfully installed IDT handler patch at %RRv\n", pInstrGC));
2969
2970 /*
2971 * Insert into patch to guest lookup tree
2972 */
2973 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2974 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2975 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2976            AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
2977
2978 pPatch->uState = PATCH_ENABLED;
2979
2980 return VINF_SUCCESS;
2981 }
2982 }
2983failure:
2984 /* Give back the patch memory we no longer need */
2985 if (orgOffsetPatchMem != (uint32_t)~0)
2986 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
2987
2988 return PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, OP_CLI, uOpSize, pPatchRec);
2989}
2990
2991/**
2992 * Install a trampoline to call a guest trap handler directly
2993 *
2994 * @returns VBox status code.
2995 * @param pVM The VM to operate on.
2996 * @param pInstrGC Guest context pointer to privileged instruction
2997 * @param pPatchRec Patch record
2998 *
2999 */
3000static int patmInstallTrapTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3001{
3002 PPATCHINFO pPatch = &pPatchRec->patch;
3003 int rc = VERR_PATCHING_REFUSED;
3004 uint32_t orgOffsetPatchMem = ~0;
3005#ifdef LOG_ENABLED
3006 bool disret;
3007 DISCPUSTATE cpu;
3008 uint32_t opsize;
3009 char szOutput[256];
3010#endif
3011
3012 // save original offset (in case of failures later on)
3013 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3014
3015 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3016 pPatch->uCurPatchOffset = 0;
3017 pPatch->nrPatch2GuestRecs = 0;
3018
3019#ifdef VBOX_WITH_STATISTICS
3020 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3021 if (RT_FAILURE(rc))
3022 goto failure;
3023#endif
3024
3025 rc = patmPatchGenTrapEntry(pVM, pPatch, pInstrGC);
3026 if (RT_FAILURE(rc))
3027 goto failure;
3028
3029 /* size of patch block */
3030 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3031
3032 /* Update free pointer in patch memory. */
3033 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3034 /* Round to next 8 byte boundary */
3035 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3036
3037 /* There's no jump from guest to patch code. */
3038 pPatch->cbPatchJump = 0;
3039
3040#ifdef LOG_ENABLED
3041 Log(("Patch code ----------------------------------------------------------\n"));
3042 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pPatch);
3043 Log(("Patch code ends -----------------------------------------------------\n"));
3044#endif
3045
3046#ifdef LOG_ENABLED
3047 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3048 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput);
3049 Log(("TRAP handler patch: %s", szOutput));
3050#endif
3051 Log(("Successfully installed Trap Trampoline patch at %RRv\n", pInstrGC));
3052
3053 /*
3054 * Insert into patch to guest lookup tree
3055 */
3056 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3057 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3058 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3059    AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3060
3061 pPatch->uState = PATCH_ENABLED;
3062 return VINF_SUCCESS;
3063
3064failure:
3065 AssertMsgFailed(("Failed to install trap handler trampoline!!\n"));
3066
3067    /* Turn this patch into a dummy. */
3068 pPatch->uState = PATCH_REFUSED;
3069 pPatch->pPatchBlockOffset = 0;
3070
3071 /* Give back the patch memory we no longer need */
3072 Assert(orgOffsetPatchMem != (uint32_t)~0);
3073 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3074
3075 return rc;
3076}
3077
3078
3079#ifdef LOG_ENABLED
3080/**
3081 * Check if the instruction is patched as a common idt handler
3082 *
3083 * @returns true or false
3084 * @param pVM The VM to operate on.
3085 * @param pInstrGC Guest context pointer to the instruction
3086 *
3087 */
3088static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC)
3089{
3090 PPATMPATCHREC pRec;
3091
3092 pRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
3093 if (pRec && pRec->patch.flags & PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT)
3094 return true;
3095 return false;
3096}
3097#endif //LOG_ENABLED
3098
3099
3100/**
3101 * Duplicates a complete function
3102 *
3103 * @returns VBox status code.
3104 * @param pVM The VM to operate on.
3105 * @param pInstrGC Guest context pointer to privileged instruction
3106 * @param pPatchRec Patch record
3107 *
3108 */
3109static int patmDuplicateFunction(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3110{
3111 PPATCHINFO pPatch = &pPatchRec->patch;
3112 int rc = VERR_PATCHING_REFUSED;
3113 DISCPUSTATE cpu;
3114 uint32_t orgOffsetPatchMem = ~0;
3115
3116 Log(("patmDuplicateFunction %RRv\n", pInstrGC));
3117 /* Save original offset (in case of failures later on). */
3118 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3119
3120 /* We will not go on indefinitely with call instruction handling. */
3121 if (pVM->patm.s.ulCallDepth > PATM_MAX_CALL_DEPTH)
3122 {
3123 Log(("patmDuplicateFunction: maximum call depth reached!!\n"));
3124 return VERR_PATCHING_REFUSED;
3125 }
3126
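    /* The call depth counter limits recursion: duplicating a function may trigger installation of further patches (see patmr3SetBranchTargets below). */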
3127 pVM->patm.s.ulCallDepth++;
3128
3129#ifdef PATM_ENABLE_CALL
3130 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
3131#endif
3132
3133 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
3134
3135 pPatch->nrPatch2GuestRecs = 0;
3136 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3137 pPatch->uCurPatchOffset = 0;
3138
3139 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3140
3141 /** @note Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3142 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3143 if (RT_FAILURE(rc))
3144 goto failure;
3145
3146#ifdef VBOX_WITH_STATISTICS
3147 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3148 if (RT_FAILURE(rc))
3149 goto failure;
3150#endif
3151 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pPatch);
3152 if (rc != VINF_SUCCESS)
3153 {
3154 Log(("patmDuplicateFunction: patmRecompileCodeStream failed with %d\n", rc));
3155 goto failure;
3156 }
3157
3158 //size of patch block
3159 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3160
3161 //update free pointer in patch memory
3162 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3163 /* Round to next 8 byte boundary. */
3164 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3165
3166 pPatch->uState = PATCH_ENABLED;
3167
3168 /*
3169 * Insert into patch to guest lookup tree
3170 */
3171 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3172 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3173 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3174 AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3175 if (!rc)
3176 {
3177 rc = VERR_PATCHING_REFUSED;
3178 goto failure;
3179 }
3180
3181 /* Note that patmr3SetBranchTargets can install additional patches!! */
3182 rc = patmr3SetBranchTargets(pVM, pPatch);
3183 if (rc != VINF_SUCCESS)
3184 {
3185 Log(("patmDuplicateFunction: patmr3SetBranchTargets failed with %d\n", rc));
3186 goto failure;
3187 }
3188
3189#ifdef LOG_ENABLED
3190 Log(("Patch code ----------------------------------------------------------\n"));
3191 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pPatch);
3192 Log(("Patch code ends -----------------------------------------------------\n"));
3193#endif
3194
3195 Log(("Successfully installed function duplication patch at %RRv\n", pInstrGC));
3196
3197 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3198 pPatch->pTempInfo->nrIllegalInstr = 0;
3199
3200 pVM->patm.s.ulCallDepth--;
3201 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledFunctionPatches);
3202 return VINF_SUCCESS;
3203
3204failure:
3205 if (pPatchRec->CoreOffset.Key)
3206 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3207
3208 patmEmptyTree(pVM, &pPatch->FixupTree);
3209 pPatch->nrFixups = 0;
3210
3211 patmEmptyTree(pVM, &pPatch->JumpTree);
3212 pPatch->nrJumpRecs = 0;
3213
3214 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3215 pPatch->pTempInfo->nrIllegalInstr = 0;
3216
3217 /* Turn this function patch into a dummy. */
3218 pPatch->uState = PATCH_REFUSED;
3219 pPatch->pPatchBlockOffset = 0;
3220
3221 // Give back the patch memory we no longer need
3222 Assert(orgOffsetPatchMem != (uint32_t)~0);
3223 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3224
3225 pVM->patm.s.ulCallDepth--;
3226 Log(("patmDuplicateFunction %RRv failed!!\n", pInstrGC));
3227 return rc;
3228}
3229
3230/**
3231 * Creates trampoline code to jump inside an existing patch
3232 *
3233 * @returns VBox status code.
3234 * @param pVM The VM to operate on.
3235 * @param pInstrGC Guest context pointer to privileged instruction
3236 * @param pPatchRec Patch record
3237 *
3238 */
3239static int patmCreateTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3240{
3241 PPATCHINFO pPatch = &pPatchRec->patch;
3242 RTRCPTR pPage, pPatchTargetGC = 0;
3243 uint32_t orgOffsetPatchMem = ~0;
3244 int rc = VERR_PATCHING_REFUSED;
3245
3246 Log(("patmCreateTrampoline %RRv\n", pInstrGC));
3247 /* Save original offset (in case of failures later on). */
3248 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3249
3250 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3251 /** @todo we already checked this before */
3252 pPage = pInstrGC & PAGE_BASE_GC_MASK;
3253
3254 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3255 if (pPatchPage)
3256 {
3257 uint32_t i;
3258
3259 for (i=0;i<pPatchPage->cCount;i++)
3260 {
3261 if (pPatchPage->aPatch[i])
3262 {
3263 PPATCHINFO pPatch2 = pPatchPage->aPatch[i];
3264
3265 if ( (pPatch2->flags & PATMFL_DUPLICATE_FUNCTION)
3266 && pPatch2->uState == PATCH_ENABLED)
3267 {
3268 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch2, pInstrGC);
3269 if (pPatchTargetGC)
3270 {
3271 uint32_t offsetPatch = pPatchTargetGC - pVM->patm.s.pPatchMemGC;
3272 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch2->Patch2GuestAddrTree, offsetPatch, false);
3273 Assert(pPatchToGuestRec);
3274
3275 pPatchToGuestRec->fJumpTarget = true;
3276 Assert(pPatchTargetGC != pPatch2->pPrivInstrGC);
3277 Log(("patmCreateTrampoline: generating jump to code inside patch at %RRv\n", pPatch2->pPrivInstrGC));
3278 pPatch2->flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
3279 break;
3280 }
3281 }
3282 }
3283 }
3284 }
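    /* A trampoline can only jump into patch code that already exists; refuse if the target was not found in an enabled function duplicate. */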
3285 AssertReturn(pPatchPage && pPatchTargetGC, VERR_PATCHING_REFUSED);
3286
3287 pPatch->nrPatch2GuestRecs = 0;
3288 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3289 pPatch->uCurPatchOffset = 0;
3290
3291 /** @note Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3292 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3293 if (RT_FAILURE(rc))
3294 goto failure;
3295
3296#ifdef VBOX_WITH_STATISTICS
3297 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3298 if (RT_FAILURE(rc))
3299 goto failure;
3300#endif
3301
3302 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC);
3303 if (RT_FAILURE(rc))
3304 goto failure;
3305
3306 /*
3307 * Insert into patch to guest lookup tree
3308 */
3309 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3310 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3311 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3312 AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3313 if (!rc)
3314 {
3315 rc = VERR_PATCHING_REFUSED;
3316 goto failure;
3317 }
3318
3319 /* size of patch block */
3320 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3321
3322 /* Update free pointer in patch memory. */
3323 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3324 /* Round to next 8 byte boundary */
3325 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3326
3327 /* There's no jump from guest to patch code. */
3328 pPatch->cbPatchJump = 0;
3329
3330 /* Enable the patch. */
3331 pPatch->uState = PATCH_ENABLED;
3332 /* We allow this patch to be called as a function. */
3333 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
3334 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledTrampoline);
3335 return VINF_SUCCESS;
3336
3337failure:
3338 if (pPatchRec->CoreOffset.Key)
3339 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3340
3341 patmEmptyTree(pVM, &pPatch->FixupTree);
3342 pPatch->nrFixups = 0;
3343
3344 patmEmptyTree(pVM, &pPatch->JumpTree);
3345 pPatch->nrJumpRecs = 0;
3346
3347 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3348 pPatch->pTempInfo->nrIllegalInstr = 0;
3349
3350 /* Turn this trampoline patch into a dummy. */
3351 pPatch->uState = PATCH_REFUSED;
3352 pPatch->pPatchBlockOffset = 0;
3353
3354 // Give back the patch memory we no longer need
3355 Assert(orgOffsetPatchMem != (uint32_t)~0);
3356 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3357
3358 return rc;
3359}
3360
3361
3362/**
3363 * Patch branch target function for call/jump at specified location.
3364 * (in response to a VINF_PATM_DUPLICATE_FUNCTION GC exit reason)
3365 *
3366 * @returns VBox status code.
3367 * @param pVM The VM to operate on.
3368 * @param pCtx Guest context
3369 *
3370 */
3371VMMR3DECL(int) PATMR3DuplicateFunctionRequest(PVM pVM, PCPUMCTX pCtx)
3372{
3373 RTRCPTR pBranchTarget, pPage;
3374 int rc;
3375 RTRCPTR pPatchTargetGC = 0;
3376
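    /* The patch code passes the branch target in EDX and the lookup cache slot address in EDI; the patch address relative to patch memory (or 0) is returned in EAX. */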
3377 pBranchTarget = pCtx->edx;
3378 pBranchTarget = SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pBranchTarget);
3379
3380 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3381 pPage = pBranchTarget & PAGE_BASE_GC_MASK;
3382
3383 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3384 if (pPatchPage)
3385 {
3386 uint32_t i;
3387
3388 for (i=0;i<pPatchPage->cCount;i++)
3389 {
3390 if (pPatchPage->aPatch[i])
3391 {
3392 PPATCHINFO pPatch = pPatchPage->aPatch[i];
3393
3394 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
3395 && pPatch->uState == PATCH_ENABLED)
3396 {
3397 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pBranchTarget);
3398 if (pPatchTargetGC)
3399 {
3400 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateUseExisting);
3401 break;
3402 }
3403 }
3404 }
3405 }
3406 }
3407
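    /* Reuse an existing function duplicate through a trampoline when possible; otherwise duplicate the entire function. */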
3408 if (pPatchTargetGC)
3409 {
3410 /* Create a trampoline that also sets PATM_INTERRUPTFLAG. */
3411 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_TRAMPOLINE);
3412 }
3413 else
3414 {
3415 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
3416 }
3417
3418 if (rc == VINF_SUCCESS)
3419 {
3420 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pBranchTarget);
3421 Assert(pPatchTargetGC);
3422 }
3423
3424 if (pPatchTargetGC)
3425 {
3426 pCtx->eax = pPatchTargetGC;
3427 pCtx->eax = pCtx->eax - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC; /* make it relative */
3428 }
3429 else
3430 {
3431 /* We add a dummy entry into the lookup cache so we won't get bombarded with the same requests over and over again. */
3432 pCtx->eax = 0;
3433 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQFailed);
3434 }
3435 Assert(PATMIsPatchGCAddr(pVM, pCtx->edi));
3436 rc = PATMAddBranchToLookupCache(pVM, pCtx->edi, pBranchTarget, pCtx->eax);
3437 AssertRC(rc);
3438
3439 pCtx->eip += PATM_ILLEGAL_INSTR_SIZE;
3440 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQSuccess);
3441 return VINF_SUCCESS;
3442}
3443
3444/**
3445 * Replaces a function call by a call to an existing function duplicate (or jmp -> jmp)
3446 *
3447 * @returns VBox status code.
3448 * @param pVM The VM to operate on.
3449 * @param pCpu Disassembly CPU structure ptr
3450 * @param pInstrGC Guest context pointer to privileged instruction
3451 * @param pPatch Patch record
3452 *
3453 */
3454static int patmReplaceFunctionCall(PVM pVM, DISCPUSTATE *pCpu, RTRCPTR pInstrGC, PPATCHINFO pPatch)
3455{
3456 int rc = VERR_PATCHING_REFUSED;
3457 DISCPUSTATE cpu;
3458 RTRCPTR pTargetGC;
3459 PPATMPATCHREC pPatchFunction;
3460 uint32_t opsize;
3461 bool disret;
3462#ifdef LOG_ENABLED
3463 char szOutput[256];
3464#endif
3465
3466 Assert(pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL);
3467 Assert((pCpu->pCurInstr->opcode == OP_CALL || pCpu->pCurInstr->opcode == OP_JMP) && pCpu->opsize == SIZEOF_NEARJUMP32);
3468
3469 if ((pCpu->pCurInstr->opcode != OP_CALL && pCpu->pCurInstr->opcode != OP_JMP) || pCpu->opsize != SIZEOF_NEARJUMP32)
3470 {
3471 rc = VERR_PATCHING_REFUSED;
3472 goto failure;
3473 }
3474
3475 pTargetGC = PATMResolveBranch(pCpu, pInstrGC);
3476 if (pTargetGC == 0)
3477 {
3478 Log(("We don't support far jumps here!! (%08X)\n", pCpu->param1.flags));
3479 rc = VERR_PATCHING_REFUSED;
3480 goto failure;
3481 }
3482
3483 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3484 if (pPatchFunction == NULL)
3485 {
3486 for(;;)
3487 {
3488 /* It could be an indirect call (call -> jmp dest).
3489 * Note that it's dangerous to assume the jump will never change...
3490 */
3491 uint8_t *pTmpInstrHC;
3492
3493 pTmpInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pTargetGC);
3494 Assert(pTmpInstrHC);
3495 if (pTmpInstrHC == 0)
3496 break;
3497
3498 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3499 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pTargetGC, pTmpInstrHC, &opsize, NULL);
3500 if (disret == false || cpu.pCurInstr->opcode != OP_JMP)
3501 break;
3502
3503 pTargetGC = PATMResolveBranch(&cpu, pTargetGC);
3504 if (pTargetGC == 0)
3505 {
3506 break;
3507 }
3508
3509 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3510 break;
3511 }
3512 if (pPatchFunction == 0)
3513 {
3514 AssertMsgFailed(("Unable to find duplicate function %RRv\n", pTargetGC));
3515 rc = VERR_PATCHING_REFUSED;
3516 goto failure;
3517 }
3518 }
3519
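    /* pPatchFunction now refers to an existing function duplicate; rewrite the guest call/jmp to branch to it directly. */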
3520 // make a copy of the guest code bytes that will be overwritten
3521 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
3522
3523 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3524 AssertRC(rc);
3525
3526 /* Now replace the original call in the guest code */
3527 rc = patmGenCallToPatch(pVM, pPatch, PATCHCODE_PTR_GC(&pPatchFunction->patch), true);
3528 AssertRC(rc);
3529 if (RT_FAILURE(rc))
3530 goto failure;
3531
3532 /* Lowest and highest address for write monitoring. */
3533 pPatch->pInstrGCLowest = pInstrGC;
3534 pPatch->pInstrGCHighest = pInstrGC + pCpu->opsize;
3535
3536#ifdef LOG_ENABLED
3537 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3538 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput);
3539 Log(("Call patch: %s", szOutput));
3540#endif
3541
3542 Log(("Successfully installed function replacement patch at %RRv\n", pInstrGC));
3543
3544 pPatch->uState = PATCH_ENABLED;
3545 return VINF_SUCCESS;
3546
3547failure:
3548 /* Turn this patch into a dummy. */
3549 pPatch->uState = PATCH_REFUSED;
3550
3551 return rc;
3552}
3553
3554/**
3555 * Replace the address in an MMIO instruction with the cached version.
3556 *
3557 * @returns VBox status code.
3558 * @param pVM The VM to operate on.
3559 * @param pInstrGC Guest context pointer to privileged instruction
3560 * @param pCpu Disassembly CPU structure ptr
3561 * @param pPatch Patch record
3562 *
3563 * @note returns failure if patching is not allowed or possible
3564 *
3565 */
3566static int patmPatchMMIOInstr(PVM pVM, RTRCPTR pInstrGC, DISCPUSTATE *pCpu, PPATCHINFO pPatch)
3567{
3568 uint8_t *pPB;
3569 int rc = VERR_PATCHING_REFUSED;
3570#ifdef LOG_ENABLED
3571 DISCPUSTATE cpu;
3572 uint32_t opsize;
3573 bool disret;
3574 char szOutput[256];
3575#endif
3576
3577 Assert(pVM->patm.s.mmio.pCachedData);
3578 if (!pVM->patm.s.mmio.pCachedData)
3579 goto failure;
3580
3581 if (pCpu->param2.flags != USE_DISPLACEMENT32)
3582 goto failure;
3583
3584 pPB = pPatch->pPrivInstrHC;
3585
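    /* The 32-bit displacement at the end of the instruction is redirected to the cached MMIO data; the fixup record keeps it valid across relocation. */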
3586 /* Add relocation record for cached data access. */
3587 if (patmPatchAddReloc32(pVM, pPatch, &pPB[pCpu->opsize - sizeof(RTRCPTR)], FIXUP_ABSOLUTE, pPatch->pPrivInstrGC, pVM->patm.s.mmio.pCachedData) != VINF_SUCCESS)
3588 {
3589 Log(("Relocation failed for cached mmio address!!\n"));
3590 return VERR_PATCHING_REFUSED;
3591 }
3592#ifdef LOG_ENABLED
3593 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3594 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput);
3595 Log(("MMIO patch old instruction: %s", szOutput));
3596#endif
3597
3598 /* Save original instruction. */
3599 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3600 AssertRC(rc);
3601
3602 pPatch->cbPatchJump = pPatch->cbPrivInstr; /* bit of a misnomer in this case; size of replacement instruction. */
3603
3604 /* Replace address with that of the cached item. */
3605 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pInstrGC + pCpu->opsize - sizeof(RTRCPTR), &pVM->patm.s.mmio.pCachedData, sizeof(RTRCPTR));
3606 AssertRC(rc);
3607 if (RT_FAILURE(rc))
3608 {
3609 goto failure;
3610 }
3611
3612#ifdef LOG_ENABLED
3613 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3614 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput);
3615 Log(("MMIO patch: %s", szOutput));
3616#endif
3617 pVM->patm.s.mmio.pCachedData = 0;
3618 pVM->patm.s.mmio.GCPhys = 0;
3619 pPatch->uState = PATCH_ENABLED;
3620 return VINF_SUCCESS;
3621
3622failure:
3623 /* Turn this patch into a dummy. */
3624 pPatch->uState = PATCH_REFUSED;
3625
3626 return rc;
3627}
3628
3629
3630/**
3631 * Replace the address in an MMIO instruction with the cached version. (instruction is part of an existing patch)
3632 *
3633 * @returns VBox status code.
3634 * @param pVM The VM to operate on.
3635 * @param pInstrGC Guest context pointer to privileged instruction
3636 * @param pPatch Patch record
3637 *
3638 * @note returns failure if patching is not allowed or possible
3639 *
3640 */
3641static int patmPatchPATMMMIOInstr(PVM pVM, RTRCPTR pInstrGC, PPATCHINFO pPatch)
3642{
3643 DISCPUSTATE cpu;
3644 uint32_t opsize;
3645 bool disret;
3646 uint8_t *pInstrHC;
3647#ifdef LOG_ENABLED
3648 char szOutput[256];
3649#endif
3650
3651 AssertReturn(pVM->patm.s.mmio.pCachedData, VERR_INVALID_PARAMETER);
3652
3653 /* Convert GC to HC address. */
3654 pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pInstrGC);
3655 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
3656
3657 /* Disassemble mmio instruction. */
3658 cpu.mode = pPatch->uOpMode;
3659 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, NULL);
3660 if (disret == false)
3661 {
3662 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
3663 return VERR_PATCHING_REFUSED;
3664 }
3665
3666 AssertMsg(opsize <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", opsize));
3667 if (opsize > MAX_INSTR_SIZE)
3668 return VERR_PATCHING_REFUSED;
3669 if (cpu.param2.flags != USE_DISPLACEMENT32)
3670 return VERR_PATCHING_REFUSED;
3671
3672 /* Add relocation record for cached data access. */
3673 if (patmPatchAddReloc32(pVM, pPatch, &pInstrHC[cpu.opsize - sizeof(RTRCPTR)], FIXUP_ABSOLUTE) != VINF_SUCCESS)
3674 {
3675 Log(("Relocation failed for cached mmio address!!\n"));
3676 return VERR_PATCHING_REFUSED;
3677 }
3678 /* Replace address with that of the cached item. */
3679 *(RTRCPTR *)&pInstrHC[cpu.opsize - sizeof(RTRCPTR)] = pVM->patm.s.mmio.pCachedData;
3680
3681 /* Lowest and highest address for write monitoring. */
3682 pPatch->pInstrGCLowest = pInstrGC;
3683 pPatch->pInstrGCHighest = pInstrGC + cpu.opsize;
3684
3685#ifdef LOG_ENABLED
3686 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3687 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, szOutput);
3688 Log(("MMIO patch: %s", szOutput));
3689#endif
3690
3691 pVM->patm.s.mmio.pCachedData = 0;
3692 pVM->patm.s.mmio.GCPhys = 0;
3693 return VINF_SUCCESS;
3694}
3695
3696/**
3697 * Activates an int3 patch
3698 *
3699 * @returns VBox status code.
3700 * @param pVM The VM to operate on.
3701 * @param pPatch Patch record
3702 */
3703static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3704{
3705 uint8_t ASMInt3 = 0xCC;
3706 int rc;
3707
3708 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3709 Assert(pPatch->uState != PATCH_ENABLED);
3710
3711 /* Replace first opcode byte with 'int 3'. */
3712 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, &ASMInt3, sizeof(ASMInt3));
3713 AssertRC(rc);
3714
3715 pPatch->cbPatchJump = sizeof(ASMInt3);
3716
3717 return rc;
3718}
3719
3720/**
3721 * Deactivates an int3 patch
3722 *
3723 * @returns VBox status code.
3724 * @param pVM The VM to operate on.
3725 * @param pPatch Patch record
3726 */
3727static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3728{
3729 uint8_t ASMInt3 = 0xCC;
3730 int rc;
3731
3732 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3733 Assert(pPatch->uState == PATCH_ENABLED || pPatch->uState == PATCH_DIRTY);
3734
3735 /* Restore first opcode byte. */
3736 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, sizeof(ASMInt3));
3737 AssertRC(rc);
3738 return rc;
3739}
3740
3741/**
3742 * Replace an instruction with a breakpoint (0xCC) that is handled dynamically in the guest context.
3743 *
3744 * @returns VBox status code.
3745 * @param pVM The VM to operate on.
3746 * @param pInstrGC Guest context pointer to privileged instruction
3747 * @param pInstrHC Host context pointer to privileged instruction
3748 * @param pCpu Disassembly CPU structure ptr
3749 * @param pPatch Patch record
3750 *
3751 * @note returns failure if patching is not allowed or possible
3752 *
3753 */
3754VMMR3DECL(int) PATMR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATCHINFO pPatch)
3755{
3756 uint8_t ASMInt3 = 0xCC;
3757 int rc;
3758
3759 /** @note Do not use patch memory here! It might be called during patch installation too. */
3760
3761#ifdef LOG_ENABLED
3762 DISCPUSTATE cpu;
3763 char szOutput[256];
3764 uint32_t opsize;
3765
3766 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3767 PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, szOutput);
3768 Log(("PATMR3PatchInstrInt3: %s", szOutput));
3769#endif
3770
3771 /* Save the original instruction. */
3772 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3773 AssertRC(rc);
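    /* The bytes saved above are what patmDeactivateInt3Patch writes back when the int3 patch is turned off. */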
3774 pPatch->cbPatchJump = sizeof(ASMInt3); /* bit of a misnomer in this case; size of replacement instruction. */
3775
3776 pPatch->flags |= PATMFL_INT3_REPLACEMENT;
3777
3778 /* Replace first opcode byte with 'int 3'. */
3779 rc = patmActivateInt3Patch(pVM, pPatch);
3780 if (RT_FAILURE(rc))
3781 goto failure;
3782
3783 /* Lowest and highest address for write monitoring. */
3784 pPatch->pInstrGCLowest = pInstrGC;
3785 pPatch->pInstrGCHighest = pInstrGC + pCpu->opsize;
3786
3787 pPatch->uState = PATCH_ENABLED;
3788 return VINF_SUCCESS;
3789
3790failure:
3791 /* Turn this patch into a dummy. */
3792 return VERR_PATCHING_REFUSED;
3793}
3794
3795#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
3796/**
3797 * Patch a jump instruction at specified location
3798 *
3799 * @returns VBox status code.
3800 * @param pVM The VM to operate on.
3801 * @param pInstrGC Guest context pointer to privileged instruction
3802 * @param pInstrHC Host context pointer to privileged instruction
3803 * @param pCpu Disassembly CPU structure ptr
3804 * @param pPatchRec Patch record
3805 *
3806 * @note returns failure if patching is not allowed or possible
3807 *
3808 */
3809int patmPatchJump(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATMPATCHREC pPatchRec)
3810{
3811 PPATCHINFO pPatch = &pPatchRec->patch;
3812 int rc = VERR_PATCHING_REFUSED;
3813#ifdef LOG_ENABLED
3814 bool disret;
3815 DISCPUSTATE cpu;
3816 uint32_t opsize;
3817 char szOutput[256];
3818#endif
3819
3820 pPatch->pPatchBlockOffset = 0; /* doesn't use patch memory */
3821 pPatch->uCurPatchOffset = 0;
3822 pPatch->cbPatchBlockSize = 0;
3823 pPatch->flags |= PATMFL_SINGLE_INSTRUCTION;
3824
3825 /*
3826 * Instruction replacements such as these should never be interrupted. I've added code to EM.cpp to
3827 * make sure this never happens. (unless a trap is triggered (intentionally or not))
3828 */
3829 switch (pCpu->pCurInstr->opcode)
3830 {
3831 case OP_JO:
3832 case OP_JNO:
3833 case OP_JC:
3834 case OP_JNC:
3835 case OP_JE:
3836 case OP_JNE:
3837 case OP_JBE:
3838 case OP_JNBE:
3839 case OP_JS:
3840 case OP_JNS:
3841 case OP_JP:
3842 case OP_JNP:
3843 case OP_JL:
3844 case OP_JNL:
3845 case OP_JLE:
3846 case OP_JNLE:
3847 case OP_JMP:
3848 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
3849 Assert(pCpu->param1.flags & USE_IMMEDIATE32_REL);
3850 if (!(pCpu->param1.flags & USE_IMMEDIATE32_REL))
3851 goto failure;
3852
3853 Assert(pCpu->opsize == SIZEOF_NEARJUMP32 || pCpu->opsize == SIZEOF_NEAR_COND_JUMP32);
3854 if (pCpu->opsize != SIZEOF_NEARJUMP32 && pCpu->opsize != SIZEOF_NEAR_COND_JUMP32)
3855 goto failure;
3856
3857 if (PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + pCpu->opsize))
3858 {
3859 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
3860 AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
3861 rc = VERR_PATCHING_REFUSED;
3862 goto failure;
3863 }
3864
3865 break;
3866
3867 default:
3868 goto failure;
3869 }
3870
3871 // make a copy of the guest code bytes that will be overwritten
3872 Assert(pCpu->opsize <= sizeof(pPatch->aPrivInstr));
3873 Assert(pCpu->opsize >= SIZEOF_NEARJUMP32);
3874 pPatch->cbPatchJump = pCpu->opsize;
3875
3876 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3877 AssertRC(rc);
3878
3879 /* Now insert a jump in the guest code. */
3880 /*
3881 * A conflict jump patch needs to be treated differently; we'll just replace the relative jump address with one that
3882 * references the target instruction in the conflict patch.
3883 */
3884 RTRCPTR pJmpDest = PATMR3GuestGCPtrToPatchGCPtr(pVM, pInstrGC + pCpu->opsize + (int32_t)pCpu->param1.parval);
3885
3886 AssertMsg(pJmpDest, ("PATMR3GuestGCPtrToPatchGCPtr failed for %RRv\n", pInstrGC + pCpu->opsize + (int32_t)pCpu->param1.parval));
3887 pPatch->pPatchJumpDestGC = pJmpDest;
3888
3889 rc = patmGenJumpToPatch(pVM, pPatch, true);
3890 AssertRC(rc);
3891 if (RT_FAILURE(rc))
3892 goto failure;
3893
3894 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
3895
3896#ifdef LOG_ENABLED
3897 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3898 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput);
3899 Log(("%s patch: %s", patmGetInstructionString(pPatch->opcode, pPatch->flags), szOutput));
3900#endif
3901
3902 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
3903
3904 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledJump);
3905
3906 /* Lowest and highest address for write monitoring. */
3907 pPatch->pInstrGCLowest = pInstrGC;
3908 pPatch->pInstrGCHighest = pInstrGC + pPatch->cbPatchJump;
3909
3910 pPatch->uState = PATCH_ENABLED;
3911 return VINF_SUCCESS;
3912
3913failure:
3914 /* Turn this jump patch into a dummy. */
3915 pPatch->uState = PATCH_REFUSED;
3916
3917 return rc;
3918}
3919#endif /* PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES */
3920
3921
3922/**
3923 * Gives hint to PATM about supervisor guest instructions
3924 *
3925 * @returns VBox status code.
3926 * @param pVM The VM to operate on.
3927 * @param pInstrGC Guest context pointer to privileged instruction
3928 * @param flags Patch flags
3929 */
3930VMMR3DECL(int) PATMR3AddHint(PVM pVM, RTRCPTR pInstrGC, uint32_t flags)
3931{
3932 Assert(pInstrGC);
3933 Assert(flags == PATMFL_CODE32);
3934
3935 Log(("PATMR3AddHint %RRv\n", pInstrGC));
3936 return PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_INSTR_HINT);
3937}
3938
3939/**
3940 * Patch privileged instruction at specified location
3941 *
3942 * @returns VBox status code.
3943 * @param pVM The VM to operate on.
3944 * @param pInstrGC Guest context pointer to privileged instruction (0:32 flat address)
3945 * @param flags Patch flags
3946 *
3947 * @note returns failure if patching is not allowed or possible
3948 */
3949VMMR3DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
3950{
3951 DISCPUSTATE cpu;
3952 R3PTRTYPE(uint8_t *) pInstrHC;
3953 uint32_t opsize;
3954 PPATMPATCHREC pPatchRec;
3955 PCPUMCTX pCtx = 0;
3956 bool disret;
3957 int rc;
3958 PVMCPU pVCpu = VMMGetCpu0(pVM);
3959
3960 if (!pVM || pInstrGC == 0 || (flags & ~(PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_SYSENTER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_REPLACE_FUNCTION_CALL|PATMFL_GUEST_SPECIFIC|PATMFL_INT3_REPLACEMENT|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_MMIO_ACCESS|PATMFL_TRAMPOLINE|PATMFL_INSTR_HINT|PATMFL_JUMP_CONFLICT)))
3961 {
3962 AssertFailed();
3963 return VERR_INVALID_PARAMETER;
3964 }
3965
3966 if (PATMIsEnabled(pVM) == false)
3967 return VERR_PATCHING_REFUSED;
3968
3969 /* Test for patch conflict only with patches that actually change guest code. */
3970 if (!(flags & (PATMFL_GUEST_SPECIFIC|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAMPOLINE)))
3971 {
3972 PPATCHINFO pConflictPatch = PATMFindActivePatchByEntrypoint(pVM, pInstrGC);
3973 AssertReleaseMsg(pConflictPatch == 0, ("Unable to patch overwritten instruction at %RRv (%RRv)\n", pInstrGC, pConflictPatch->pPrivInstrGC));
3974 if (pConflictPatch != 0)
3975 return VERR_PATCHING_REFUSED;
3976 }
3977
3978 if (!(flags & PATMFL_CODE32))
3979 {
3980 /** @todo Only 32 bits code right now */
3981 AssertMsgFailed(("PATMR3InstallPatch: We don't support 16 bits code at this moment!!\n"));
3982 return VERR_NOT_IMPLEMENTED;
3983 }
3984
3985 /* We ran out of patch memory; don't bother anymore. */
3986 if (pVM->patm.s.fOutOfMemory == true)
3987 return VERR_PATCHING_REFUSED;
3988
3989 /* Make sure the code selector is wide open; otherwise refuse. */
3990 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
3991 if (CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)) == 0)
3992 {
3993 RTRCPTR pInstrGCFlat = SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pInstrGC);
3994 if (pInstrGCFlat != pInstrGC)
3995 {
3996 Log(("PATMR3InstallPatch: code selector not wide open: %04x:%RRv != %RRv eflags=%08x\n", pCtx->cs, pInstrGCFlat, pInstrGC, pCtx->eflags.u32));
3997 return VERR_PATCHING_REFUSED;
3998 }
3999 }
4000
4001 /** @note the OpenBSD specific check will break if we allow additional patches to be installed (int 3). */
4002 if (!(flags & PATMFL_GUEST_SPECIFIC))
4003 {
4004 /* New code. Make sure CSAM has a go at it first. */
4005 CSAMR3CheckCode(pVM, pInstrGC);
4006 }
4007
4008 /** @note obsolete */
4009 if ( PATMIsPatchGCAddr(pVM, pInstrGC)
4010 && (flags & PATMFL_MMIO_ACCESS))
4011 {
4012 RTRCUINTPTR offset;
4013 void *pvPatchCoreOffset;
4014
4015 /* Find the patch record. */
4016 offset = pInstrGC - pVM->patm.s.pPatchMemGC;
4017 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
4018 if (pvPatchCoreOffset == NULL)
4019 {
4020 AssertMsgFailed(("PATMR3InstallPatch: patch not found at address %RRv!!\n", pInstrGC));
4021 return VERR_PATCH_NOT_FOUND; //fatal error
4022 }
4023 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
4024
4025 return patmPatchPATMMMIOInstr(pVM, pInstrGC, &pPatchRec->patch);
4026 }
4027
4028 AssertReturn(!PATMIsPatchGCAddr(pVM, pInstrGC), VERR_PATCHING_REFUSED);
4029
4030 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4031 if (pPatchRec)
4032 {
4033 Assert(!(flags & PATMFL_TRAMPOLINE));
4034
4035 /* Hints about existing patches are ignored. */
4036 if (flags & PATMFL_INSTR_HINT)
4037 return VERR_PATCHING_REFUSED;
4038
4039 if (pPatchRec->patch.uState == PATCH_DISABLE_PENDING)
4040 {
4041 Log(("PATMR3InstallPatch: disable operation is pending for patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
4042 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
4043 Assert(pPatchRec->patch.uState == PATCH_DISABLED);
4044 }
4045
4046 if (pPatchRec->patch.uState == PATCH_DISABLED)
4047 {
4048 /* A patch, for which we previously received a hint, will be enabled and turned into a normal patch. */
4049 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4050 {
4051 Log(("Enabling HINTED patch %RRv\n", pInstrGC));
4052 pPatchRec->patch.flags &= ~PATMFL_INSTR_HINT;
4053 }
4054 else
4055 Log(("Enabling patch %RRv again\n", pInstrGC));
4056
4057 /** @todo we shouldn't disable and enable patches too often (it's relatively cheap, but pointless if it always happens) */
4058 rc = PATMR3EnablePatch(pVM, pInstrGC);
4059 if (RT_SUCCESS(rc))
4060 return VWRN_PATCH_ENABLED;
4061
4062 return rc;
4063 }
4064 if ( pPatchRec->patch.uState == PATCH_ENABLED
4065 || pPatchRec->patch.uState == PATCH_DIRTY)
4066 {
4067 /*
4068 * The patch might have been overwritten.
4069 */
4070 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
4071 if (pPatchRec->patch.uState != PATCH_REFUSED && pPatchRec->patch.uState != PATCH_UNUSABLE)
4072 {
4073 /* Patch must have been overwritten; remove it and pretend nothing happened. */
4074 Log(("Patch an existing patched instruction?!? (%RRv)\n", pInstrGC));
4075 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_MMIO_ACCESS|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
4076 {
4077 if (flags & PATMFL_IDTHANDLER)
4078 pPatchRec->patch.flags |= (flags & (PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER|PATMFL_INTHANDLER)); /* update the type */
4079
4080 return VERR_PATM_ALREADY_PATCHED; /* already done once */
4081 }
4082 }
4083 rc = PATMR3RemovePatch(pVM, pInstrGC);
4084 if (RT_FAILURE(rc))
4085 return VERR_PATCHING_REFUSED;
4086 }
4087 else
4088 {
4089 AssertMsg(pPatchRec->patch.uState == PATCH_REFUSED || pPatchRec->patch.uState == PATCH_UNUSABLE, ("Patch an existing patched instruction?!? (%RRv, state=%d)\n", pInstrGC, pPatchRec->patch.uState));
4090 /* already tried it once! */
4091 return VERR_PATCHING_REFUSED;
4092 }
4093 }
4094
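    /* No usable patch record exists for this instruction; allocate and initialize a new one. */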
4095 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
4096 if (RT_FAILURE(rc))
4097 {
4098 Log(("Out of memory!!!!\n"));
4099 return VERR_NO_MEMORY;
4100 }
4101 pPatchRec->Core.Key = pInstrGC;
4102 pPatchRec->patch.uState = PATCH_REFUSED; //default
4103 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
4104 Assert(rc);
4105
4106 RTGCPHYS GCPhys;
4107 rc = PGMGstGetPage(pVCpu, pInstrGC, NULL, &GCPhys);
4108 if (rc != VINF_SUCCESS)
4109 {
4110 Log(("PGMGstGetPage failed with %Rrc\n", rc));
4111 return rc;
4112 }
4113 /* Disallow patching instructions inside ROM code; complete function duplication is allowed though. */
4114 if ( !(flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAMPOLINE))
4115 && !PGMPhysIsGCPhysNormal(pVM, GCPhys))
4116 {
4117 Log(("Code at %RGv (phys %RGp) is in a ROM, MMIO or invalid page - refused\n", pInstrGC, GCPhys));
4118 return VERR_PATCHING_REFUSED;
4119 }
4120 GCPhys = GCPhys + (pInstrGC & PAGE_OFFSET_MASK);
4121 rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys, MAX_INSTR_SIZE, (void **)&pInstrHC);
4122 AssertRCReturn(rc, rc);
4123
4124 pPatchRec->patch.pPrivInstrHC = pInstrHC;
4125 pPatchRec->patch.pPrivInstrGC = pInstrGC;
4126 pPatchRec->patch.flags = flags;
4127 pPatchRec->patch.uOpMode = (flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
4128
4129 pPatchRec->patch.pInstrGCLowest = pInstrGC;
4130 pPatchRec->patch.pInstrGCHighest = pInstrGC;
4131
4132 if (!(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION | PATMFL_IDTHANDLER | PATMFL_SYSENTER | PATMFL_TRAMPOLINE)))
4133 {
4134 /*
4135 * Close proximity to an unusable patch is a possible hint that this patch would turn out to be dangerous too!
4136 */
4137 PPATMPATCHREC pPatchNear = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, (pInstrGC + SIZEOF_NEARJUMP32 - 1), false);
4138 if (pPatchNear)
4139 {
4140 if (pPatchNear->patch.uState == PATCH_UNUSABLE && pInstrGC < pPatchNear->patch.pPrivInstrGC && pInstrGC + SIZEOF_NEARJUMP32 > pPatchNear->patch.pPrivInstrGC)
4141 {
4142 Log(("Dangerous patch; would overwrite the unusable patch at %RRv\n", pPatchNear->patch.pPrivInstrGC));
4143
4144 pPatchRec->patch.uState = PATCH_UNUSABLE;
4145 /*
4146 * Leave the new patch record in place, marked unusable, to prevent us from checking it over and over again
4147 */
4148 return VERR_PATCHING_REFUSED;
4149 }
4150 }
4151 }
4152
4153 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
4154 if (pPatchRec->patch.pTempInfo == 0)
4155 {
4156 Log(("Out of memory!!!!\n"));
4157 return VERR_NO_MEMORY;
4158 }
4159
4160 cpu.mode = pPatchRec->patch.uOpMode;
4161 disret = PATMR3DISInstr(pVM, &pPatchRec->patch, &cpu, pInstrGC, pInstrHC, &opsize, NULL);
4162 if (disret == false)
4163 {
4164 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
4165 return VERR_PATCHING_REFUSED;
4166 }
4167
4168 AssertMsg(opsize <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", opsize));
4169 if (opsize > MAX_INSTR_SIZE)
4170 {
4171 return VERR_PATCHING_REFUSED;
4172 }
4173
4174 pPatchRec->patch.cbPrivInstr = opsize;
4175 pPatchRec->patch.opcode = cpu.pCurInstr->opcode;
4176
4177 /* Restricted hinting for now. */
4178 Assert(!(flags & PATMFL_INSTR_HINT) || cpu.pCurInstr->opcode == OP_CLI);
4179
4180 /* Allocate statistics slot */
4181 if (pVM->patm.s.uCurrentPatchIdx < PATM_STAT_MAX_COUNTERS)
4182 {
4183 pPatchRec->patch.uPatchIdx = pVM->patm.s.uCurrentPatchIdx++;
4184 }
4185 else
4186 {
4187 Log(("WARNING: Patch index wrap around!!\n"));
4188 pPatchRec->patch.uPatchIdx = PATM_STAT_INDEX_DUMMY;
4189 }
4190
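    /* Dispatch to the type-specific patch installer based on the patch flags. */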
4191 if (pPatchRec->patch.flags & PATMFL_TRAPHANDLER)
4192 {
4193 rc = patmInstallTrapTrampoline(pVM, pInstrGC, pPatchRec);
4194 }
4195 else
4196 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION ))
4197 {
4198 rc = patmDuplicateFunction(pVM, pInstrGC, pPatchRec);
4199 }
4200 else
4201 if (pPatchRec->patch.flags & PATMFL_TRAMPOLINE)
4202 {
4203 rc = patmCreateTrampoline(pVM, pInstrGC, pPatchRec);
4204 }
4205 else
4206 if (pPatchRec->patch.flags & PATMFL_REPLACE_FUNCTION_CALL)
4207 {
4208 rc = patmReplaceFunctionCall(pVM, &cpu, pInstrGC, &pPatchRec->patch);
4209 }
4210 else
4211 if (pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT)
4212 {
4213 rc = PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4214 }
4215 else
4216 if (pPatchRec->patch.flags & PATMFL_MMIO_ACCESS)
4217 {
4218 rc = patmPatchMMIOInstr(pVM, pInstrGC, &cpu, &pPatchRec->patch);
4219 }
4220 else
4221 if (pPatchRec->patch.flags & (PATMFL_IDTHANDLER|PATMFL_SYSENTER))
4222 {
4223 if (pPatchRec->patch.flags & PATMFL_SYSENTER)
4224 pPatchRec->patch.flags |= PATMFL_IDTHANDLER; /* we treat a sysenter handler as an IDT handler */
4225
4226 rc = patmIdtHandler(pVM, pInstrGC, pInstrHC, opsize, pPatchRec);
4227#ifdef VBOX_WITH_STATISTICS
4228 if ( rc == VINF_SUCCESS
4229 && (pPatchRec->patch.flags & PATMFL_SYSENTER))
4230 {
4231 pVM->patm.s.uSysEnterPatchIdx = pPatchRec->patch.uPatchIdx;
4232 }
4233#endif
4234 }
4235 else
4236 if (pPatchRec->patch.flags & PATMFL_GUEST_SPECIFIC)
4237 {
4238 switch (cpu.pCurInstr->opcode)
4239 {
4240 case OP_SYSENTER:
4241 case OP_PUSH:
4242 rc = PATMInstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4243 if (rc == VINF_SUCCESS)
4244 {
4245 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4247 return rc;
4248 }
4249 break;
4250
4251 default:
4252 rc = VERR_NOT_IMPLEMENTED;
4253 break;
4254 }
4255 }
4256 else
4257 {
4258 switch (cpu.pCurInstr->opcode)
4259 {
4260 case OP_SYSENTER:
4261 rc = PATMInstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4262 if (rc == VINF_SUCCESS)
4263 {
4264 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4265 return VINF_SUCCESS;
4266 }
4267 break;
4268
4269#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
4270 case OP_JO:
4271 case OP_JNO:
4272 case OP_JC:
4273 case OP_JNC:
4274 case OP_JE:
4275 case OP_JNE:
4276 case OP_JBE:
4277 case OP_JNBE:
4278 case OP_JS:
4279 case OP_JNS:
4280 case OP_JP:
4281 case OP_JNP:
4282 case OP_JL:
4283 case OP_JNL:
4284 case OP_JLE:
4285 case OP_JNLE:
4286 case OP_JECXZ:
4287 case OP_LOOP:
4288 case OP_LOOPNE:
4289 case OP_LOOPE:
4290 case OP_JMP:
4291 if (pPatchRec->patch.flags & PATMFL_JUMP_CONFLICT)
4292 {
4293 rc = patmPatchJump(pVM, pInstrGC, pInstrHC, &cpu, pPatchRec);
4294 break;
4295 }
4296 return VERR_NOT_IMPLEMENTED;
4297#endif
4298
4299 case OP_PUSHF:
4300 case OP_CLI:
4301 Log(("PATMR3InstallPatch %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4302 rc = PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, cpu.pCurInstr->opcode, opsize, pPatchRec);
4303 break;
4304
4305 case OP_STR:
4306 case OP_SGDT:
4307 case OP_SLDT:
4308 case OP_SIDT:
4309 case OP_CPUID:
4310 case OP_LSL:
4311 case OP_LAR:
4312 case OP_SMSW:
4313 case OP_VERW:
4314 case OP_VERR:
4315 case OP_IRET:
4316 rc = PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4317 break;
4318
4319 default:
4320 return VERR_NOT_IMPLEMENTED;
4321 }
4322 }
4323
4324 if (rc != VINF_SUCCESS)
4325 {
4326 if (pPatchRec && pPatchRec->patch.nrPatch2GuestRecs)
4327 {
4328 patmEmptyTreeU32(pVM, &pPatchRec->patch.Patch2GuestAddrTree);
4329 pPatchRec->patch.nrPatch2GuestRecs = 0;
4330 }
4331 pVM->patm.s.uCurrentPatchIdx--;
4332 }
4333 else
4334 {
4335 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
4336 AssertRCReturn(rc, rc);
4337
4338 /* Keep track of the upper and lower boundaries of patched instructions */
4339 if (pPatchRec->patch.pInstrGCLowest < pVM->patm.s.pPatchedInstrGCLowest)
4340 pVM->patm.s.pPatchedInstrGCLowest = pPatchRec->patch.pInstrGCLowest;
4341 if (pPatchRec->patch.pInstrGCHighest > pVM->patm.s.pPatchedInstrGCHighest)
4342 pVM->patm.s.pPatchedInstrGCHighest = pPatchRec->patch.pInstrGCHighest;
4343
4344 Log(("Patch lowest %RRv highest %RRv\n", pPatchRec->patch.pInstrGCLowest, pPatchRec->patch.pInstrGCHighest));
4345 Log(("Global lowest %RRv highest %RRv\n", pVM->patm.s.pPatchedInstrGCLowest, pVM->patm.s.pPatchedInstrGCHighest));
4346
4347 STAM_COUNTER_ADD(&pVM->patm.s.StatInstalled, 1);
4348 STAM_COUNTER_ADD(&pVM->patm.s.StatPATMMemoryUsed, pPatchRec->patch.cbPatchBlockSize);
4349
4350 rc = VINF_SUCCESS;
4351
4352 /* Patch hints are not enabled by default; only when they are actually encountered. */
4353 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4354 {
4355 rc = PATMR3DisablePatch(pVM, pInstrGC);
4356 AssertRCReturn(rc, rc);
4357 }
4358
4359#ifdef VBOX_WITH_STATISTICS
4360 /* Register statistics counter */
4361 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
4362 {
4363 STAMR3RegisterCallback(pVM, &pPatchRec->patch, STAMVISIBILITY_NOT_GUI, STAMUNIT_GOOD_BAD, patmResetStat, patmPrintStat, "Patch statistics",
4364 "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4365#ifndef DEBUG_sandervl
4366 /* Full breakdown for the GUI. */
4367 STAMR3RegisterF(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx], STAMTYPE_RATIO_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_GOOD_BAD, PATMPatchType(pVM, &pPatchRec->patch),
4368 "/PATM/Stats/PatchBD/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4369 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchBlockSize,STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchBlockSize", pPatchRec->patch.pPrivInstrGC);
4370 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchJump, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchJump", pPatchRec->patch.pPrivInstrGC);
4371 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPrivInstr, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPrivInstr", pPatchRec->patch.pPrivInstrGC);
4372 STAMR3RegisterF(pVM, &pPatchRec->patch.cCodeWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cCodeWrites", pPatchRec->patch.pPrivInstrGC);
4373 STAMR3RegisterF(pVM, &pPatchRec->patch.cInvalidWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cInvalidWrites", pPatchRec->patch.pPrivInstrGC);
4374 STAMR3RegisterF(pVM, &pPatchRec->patch.cTraps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cTraps", pPatchRec->patch.pPrivInstrGC);
4375 STAMR3RegisterF(pVM, &pPatchRec->patch.flags, STAMTYPE_X32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/flags", pPatchRec->patch.pPrivInstrGC);
4376 STAMR3RegisterF(pVM, &pPatchRec->patch.nrJumpRecs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrJumpRecs", pPatchRec->patch.pPrivInstrGC);
4377 STAMR3RegisterF(pVM, &pPatchRec->patch.nrFixups, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrFixups", pPatchRec->patch.pPrivInstrGC);
4378 STAMR3RegisterF(pVM, &pPatchRec->patch.opcode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/opcode", pPatchRec->patch.pPrivInstrGC);
4379 STAMR3RegisterF(pVM, &pPatchRec->patch.uOldState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOldState", pPatchRec->patch.pPrivInstrGC);
4380 STAMR3RegisterF(pVM, &pPatchRec->patch.uOpMode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOpMode", pPatchRec->patch.pPrivInstrGC);
4381 /// @todo change the state to be a callback so we can get a state mnemonic instead.
4382 STAMR3RegisterF(pVM, &pPatchRec->patch.uState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uState", pPatchRec->patch.pPrivInstrGC);
4383#endif
4384 }
4385#endif
4386 }
4387 return rc;
4388}
4389
4390/**
4391 * Query instruction size
4392 *
4393 * @returns Instruction size in bytes, or 0 if it cannot be determined.
4394 * @param pVM The VM to operate on.
4395 * @param pPatch Patch record
4396 * @param pInstrGC Instruction address
4397 */
4398static uint32_t patmGetInstrSize(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
4399{
4400 uint8_t *pInstrHC;
4401
4402 int rc = PGMPhysGCPtr2R3Ptr(VMMGetCpu0(pVM), pInstrGC, (PRTR3PTR)&pInstrHC);
4403 if (rc == VINF_SUCCESS)
4404 {
4405 DISCPUSTATE cpu;
4406 bool disret;
4407 uint32_t opsize;
4408
4409 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
4410 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, NULL, PATMREAD_ORGCODE | PATMREAD_NOCHECK);
4411 if (disret)
4412 return opsize;
4413 }
4414 return 0;
4415}
4416
4417/**
4418 * Add patch to page record
4419 *
4420 * @returns VBox status code.
4421 * @param pVM The VM to operate on.
4422 * @param pPage Page address
4423 * @param pPatch Patch record
4424 */
4425int patmAddPatchToPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4426{
4427 PPATMPATCHPAGE pPatchPage;
4428 int rc;
4429
4430 Log(("patmAddPatchToPage: insert patch %RHv to page %RRv\n", pPatch, pPage));
4431
4432 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4433 if (pPatchPage)
4434 {
4435 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4436 if (pPatchPage->cCount == pPatchPage->cMaxPatches)
4437 {
4438 uint32_t cMaxPatchesOld = pPatchPage->cMaxPatches;
4439 PPATCHINFO *paPatchOld = pPatchPage->aPatch;
4440
4441 pPatchPage->cMaxPatches += PATMPATCHPAGE_PREALLOC_INCREMENT;
4442 rc = MMHyperAlloc(pVM, sizeof(PPATCHINFO)*pPatchPage->cMaxPatches, 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage->aPatch);
4443 if (RT_FAILURE(rc))
4444 {
4445 Log(("Out of memory!!!!\n"));
4446 return VERR_NO_MEMORY;
4447 }
4448 memcpy(pPatchPage->aPatch, paPatchOld, cMaxPatchesOld*sizeof(PPATCHINFO));
4449 MMHyperFree(pVM, paPatchOld);
4450 }
4451 pPatchPage->aPatch[pPatchPage->cCount] = pPatch;
4452 pPatchPage->cCount++;
4453 }
4454 else
4455 {
4456 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHPAGE), 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage);
4457 if (RT_FAILURE(rc))
4458 {
4459 Log(("Out of memory!!!!\n"));
4460 return VERR_NO_MEMORY;
4461 }
4462 pPatchPage->Core.Key = pPage;
4463 pPatchPage->cCount = 1;
4464 pPatchPage->cMaxPatches = PATMPATCHPAGE_PREALLOC_INCREMENT;
4465
4466 rc = MMHyperAlloc(pVM, sizeof(PPATCHINFO)*PATMPATCHPAGE_PREALLOC_INCREMENT, 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage->aPatch);
4467 if (RT_FAILURE(rc))
4468 {
4469 Log(("Out of memory!!!!\n"));
4470 MMHyperFree(pVM, pPatchPage);
4471 return VERR_NO_MEMORY;
4472 }
4473 pPatchPage->aPatch[0] = pPatch;
4474
4475 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, &pPatchPage->Core);
4476 Assert(rc);
4477 pVM->patm.s.cPageRecords++;
4478
4479 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageInserted);
4480 }
4481 CSAMR3MonitorPage(pVM, pPage, CSAM_TAG_PATM);
4482
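    /* Track the lowest and highest patched guest addresses within this page so PATMR3PatchWrite can quickly reject writes that don't touch patched code. */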
4483 /* Get the closest guest instruction (from below) */
4484 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4485 Assert(pGuestToPatchRec);
4486 if (pGuestToPatchRec)
4487 {
4488 LogFlow(("patmAddPatchToPage: lowest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pLowestAddrGC));
4489 if ( pPatchPage->pLowestAddrGC == 0
4490 || pPatchPage->pLowestAddrGC > (RTRCPTR)pGuestToPatchRec->Core.Key)
4491 {
4492 RTRCUINTPTR offset;
4493
4494 pPatchPage->pLowestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4495
4496 offset = pPatchPage->pLowestAddrGC & PAGE_OFFSET_MASK;
4497 /* If we're too close to the page boundary, then make sure an instruction from the previous page doesn't cross the boundary itself. */
4498 if (offset && offset < MAX_INSTR_SIZE)
4499 {
4500 /* Get the closest guest instruction (from above) */
4501 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage-1, false);
4502
4503 if (pGuestToPatchRec)
4504 {
4505 uint32_t size = patmGetInstrSize(pVM, pPatch, (RTRCPTR)pGuestToPatchRec->Core.Key);
4506 if ((RTRCUINTPTR)pGuestToPatchRec->Core.Key + size > pPage)
4507 {
4508 pPatchPage->pLowestAddrGC = pPage;
4509 LogFlow(("patmAddPatchToPage: new lowest %RRv\n", pPatchPage->pLowestAddrGC));
4510 }
4511 }
4512 }
4513 }
4514 }
4515
4516 /* Get the closest guest instruction (from above) */
4517 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage+PAGE_SIZE-1, false);
4518 Assert(pGuestToPatchRec);
4519 if (pGuestToPatchRec)
4520 {
4521 LogFlow(("patmAddPatchToPage: highest patch page address %RRv current highest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pHighestAddrGC));
4522 if ( pPatchPage->pHighestAddrGC == 0
4523 || pPatchPage->pHighestAddrGC <= (RTRCPTR)pGuestToPatchRec->Core.Key)
4524 {
4525 pPatchPage->pHighestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4526 /* Increase by instruction size. */
4527 uint32_t size = patmGetInstrSize(pVM, pPatch, pPatchPage->pHighestAddrGC);
4528//// Assert(size);
4529 pPatchPage->pHighestAddrGC += size;
4530 LogFlow(("patmAddPatchToPage: new highest %RRv\n", pPatchPage->pHighestAddrGC));
4531 }
4532 }
4533
4534 return VINF_SUCCESS;
4535}
4536
4537/**
4538 * Remove patch from page record
4539 *
4540 * @returns VBox status code.
4541 * @param pVM The VM to operate on.
4542 * @param pPage Page address
4543 * @param pPatch Patch record
4544 */
4545int patmRemovePatchFromPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4546{
4547 PPATMPATCHPAGE pPatchPage;
4548 int rc;
4549
4550 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4551 Assert(pPatchPage);
4552
4553 if (!pPatchPage)
4554 return VERR_INVALID_PARAMETER;
4555
4556 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4557
4558 Log(("patmRemovePatchFromPage: remove patch %RHv from page %RRv\n", pPatch, pPage));
4559 if (pPatchPage->cCount > 1)
4560 {
4561 uint32_t i;
4562
4563 /* Used by multiple patches */
4564 for (i=0;i<pPatchPage->cCount;i++)
4565 {
4566 if (pPatchPage->aPatch[i] == pPatch)
4567 {
4568 pPatchPage->aPatch[i] = 0;
4569 break;
4570 }
4571 }
4572 /* close the gap between the remaining pointers. */
4573 if (i < pPatchPage->cCount - 1)
4574 {
4575 memcpy(&pPatchPage->aPatch[i], &pPatchPage->aPatch[i+1], sizeof(PPATCHINFO)*(pPatchPage->cCount - (i+1)));
4576 }
4577 AssertMsg(i < pPatchPage->cCount, ("Unable to find patch %RHv in page %RRv\n", pPatch, pPage));
4578
4579 pPatchPage->cCount--;
4580 }
4581 else
4582 {
4583 PPATMPATCHPAGE pPatchNode;
4584
4585 Log(("patmRemovePatchFromPage %RRv\n", pPage));
4586
4587 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageRemoved);
4588 pPatchNode = (PPATMPATCHPAGE)RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4589 Assert(pPatchNode && pPatchNode == pPatchPage);
4590
4591 Assert(pPatchPage->aPatch);
4592 rc = MMHyperFree(pVM, pPatchPage->aPatch);
4593 AssertRC(rc);
4594 rc = MMHyperFree(pVM, pPatchPage);
4595 AssertRC(rc);
4596 pVM->patm.s.cPageRecords--;
4597 }
4598 return VINF_SUCCESS;
4599}
4600
4601/**
4602 * Insert page records for all guest pages that contain instructions that were recompiled for this patch
4603 *
4604 * @returns VBox status code.
4605 * @param pVM The VM to operate on.
4606 * @param pPatch Patch record
4607 */
4608int patmInsertPatchPages(PVM pVM, PPATCHINFO pPatch)
4609{
4610 int rc;
4611 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4612
4613 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4614 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4615 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4616
4617 /** @todo optimize better (large gaps between current and next used page) */
4618 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4619 {
4620 /* Get the closest guest instruction (from above) */
4621 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4622 if ( pGuestToPatchRec
4623 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage)
4624 )
4625 {
4626 /* Code in page really patched -> add record */
4627 rc = patmAddPatchToPage(pVM, pPage, pPatch);
4628 AssertRC(rc);
4629 }
4630 }
4631 pPatch->flags |= PATMFL_CODE_MONITORED;
4632 return VINF_SUCCESS;
4633}
4634
4635/**
4636 * Remove page records for all guest pages that contain instructions that were recompiled for this patch
4637 *
4638 * @returns VBox status code.
4639 * @param pVM The VM to operate on.
4640 * @param pPatch Patch record
4641 */
4642int patmRemovePatchPages(PVM pVM, PPATCHINFO pPatch)
4643{
4644 int rc;
4645 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4646
4647 /* Remove the pages that contain patched instructions from the lookup tree used for detecting self-modifying code. */
4648 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4649 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4650
4651 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4652 {
4653 /* Get the closest guest instruction (from above) */
4654 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4655 if ( pGuestToPatchRec
4656 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage) /** @todo bird: PAGE_ADDRESS is for the current context really. check out these. */
4657 )
4658 {
4659 /* Code in page really patched -> remove record */
4660 rc = patmRemovePatchFromPage(pVM, pPage, pPatch);
4661 AssertRC(rc);
4662 }
4663 }
4664 pPatch->flags &= ~PATMFL_CODE_MONITORED;
4665 return VINF_SUCCESS;
4666}
4667
4668/**
4669 * Notifies PATM about a (potential) write to code that has been patched.
4670 *
4671 * @returns VBox status code.
4672 * @param pVM The VM to operate on.
4673 * @param GCPtr GC pointer to write address
4674 * @param cbWrite Nr of bytes to write
4675 *
4676 */
4677VMMR3DECL(int) PATMR3PatchWrite(PVM pVM, RTRCPTR GCPtr, uint32_t cbWrite)
4678{
4679 RTRCUINTPTR pWritePageStart, pWritePageEnd, pPage;
4680
4681 Log(("PATMR3PatchWrite %RRv %x\n", GCPtr, cbWrite));
4682
4683 Assert(VM_IS_EMT(pVM));
4684
4685 /* Quick boundary check */
4686 if ( GCPtr < pVM->patm.s.pPatchedInstrGCLowest
4687 || GCPtr > pVM->patm.s.pPatchedInstrGCHighest
4688 )
4689 return VINF_SUCCESS;
4690
4691 STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWrite, a);
4692
4693 pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
4694 pWritePageEnd = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;
4695
4696 for (pPage = pWritePageStart; pPage <= pWritePageEnd; pPage += PAGE_SIZE)
4697 {
4698loop_start:
4699 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4700 if (pPatchPage)
4701 {
4702 uint32_t i;
4703 bool fValidPatchWrite = false;
4704
4705 /* Quick check to see if the write is in the patched part of the page */
4706 if ( pPatchPage->pLowestAddrGC > (RTRCPTR)((RTRCUINTPTR)GCPtr + cbWrite - 1)
4707 || pPatchPage->pHighestAddrGC < GCPtr)
4708 {
4709 break;
4710 }
4711
4712 for (i=0;i<pPatchPage->cCount;i++)
4713 {
4714 if (pPatchPage->aPatch[i])
4715 {
4716 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4717 RTRCPTR pPatchInstrGC;
4718 //unused: bool fForceBreak = false;
4719
4720 Assert(pPatchPage->aPatch[i]->flags & PATMFL_CODE_MONITORED);
4721 /** @todo inefficient and includes redundant checks for multiple pages. */
4722 for (uint32_t j=0; j<cbWrite; j++)
4723 {
4724 RTRCPTR pGuestPtrGC = (RTRCPTR)((RTRCUINTPTR)GCPtr + j);
4725
4726 if ( pPatch->cbPatchJump
4727 && pGuestPtrGC >= pPatch->pPrivInstrGC
4728 && pGuestPtrGC < pPatch->pPrivInstrGC + pPatch->cbPatchJump)
4729 {
4730 /* The guest is about to overwrite the 5 byte jump to patch code. Remove the patch. */
4731 Log(("PATMR3PatchWrite: overwriting jump to patch code -> remove patch.\n"));
4732 int rc = PATMR3RemovePatch(pVM, pPatch->pPrivInstrGC);
4733 if (rc == VINF_SUCCESS)
4734 /** @note jump back to the start as the pPatchPage has been deleted or changed */
4735 goto loop_start;
4736
4737 continue;
4738 }
4739
4740 /* Find the closest instruction from below; the above quick check ensured that we are indeed in patched code */
4741 pPatchInstrGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4742 if (!pPatchInstrGC)
4743 {
4744 RTRCPTR pClosestInstrGC;
4745 uint32_t size;
4746
4747 pPatchInstrGC = patmGuestGCPtrToClosestPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4748 if (pPatchInstrGC)
4749 {
4750 pClosestInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchInstrGC);
4751 Assert(pClosestInstrGC <= pGuestPtrGC);
4752 size = patmGetInstrSize(pVM, pPatch, pClosestInstrGC);
4753 /* Check whether the write lands in a gap between two patched instructions. */
4754 if (pClosestInstrGC + size - 1 < pGuestPtrGC)
4755 pPatchInstrGC = 0;
4756 }
4757 }
4758 if (pPatchInstrGC)
4759 {
4760 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
4761
4762 fValidPatchWrite = true;
4763
4764 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
4765 Assert(pPatchToGuestRec);
4766 if (pPatchToGuestRec && !pPatchToGuestRec->fDirty)
4767 {
4768 Log(("PATMR3PatchWrite: Found patched instruction %RRv -> %RRv\n", pGuestPtrGC, pPatchInstrGC));
4769
4770 if (++pPatch->cCodeWrites > PATM_MAX_CODE_WRITES)
4771 {
4772 LogRel(("PATM: Disable block at %RRv - write %RRv-%RRv\n", pPatch->pPrivInstrGC, pGuestPtrGC, pGuestPtrGC+cbWrite));
4773
4774 PATMR3MarkDirtyPatch(pVM, pPatch);
4775
4776 /** @note jump back to the start as the pPatchPage has been deleted or changed */
4777 goto loop_start;
4778 }
4779 else
4780 {
4781 /* Replace the patch instruction with a breakpoint; when it's hit, we'll attempt to recompile the instruction again. */
4782 uint8_t *pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pPatchInstrGC);
4783
4784 pPatchToGuestRec->u8DirtyOpcode = *pInstrHC;
4785 pPatchToGuestRec->fDirty = true;
4786
4787 *pInstrHC = 0xCC;
4788
4789 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirty);
4790 }
4791 }
4792 /* else already marked dirty */
4793 }
4794 }
4795 }
4796 } /* for each patch */
4797
4798 if (fValidPatchWrite == false)
4799 {
4800 /* Write to a part of the page that either:
4801 * - doesn't contain any code (shared code/data), which is rather unlikely, or
4802 * - belongs to an old code page that's no longer in active use.
4803 */
4804invalid_write_loop_start:
4805 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4806
4807 if (pPatchPage)
4808 {
4809 for (i=0;i<pPatchPage->cCount;i++)
4810 {
4811 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4812
4813 if (pPatch->cInvalidWrites > PATM_MAX_INVALID_WRITES)
4814 {
4815 /** @note possibly dangerous assumption that all future writes will be harmless. */
4816 if (pPatch->flags & PATMFL_IDTHANDLER)
4817 {
4818 LogRel(("PATM: Stop monitoring IDT handler pages at %RRv - invalid write %RRv-%RRv (this is not a fatal error)\n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
4819
4820 Assert(pPatch->flags & PATMFL_CODE_MONITORED);
4821 int rc = patmRemovePatchPages(pVM, pPatch);
4822 AssertRC(rc);
4823 }
4824 else
4825 {
4826 LogRel(("PATM: Disable block at %RRv - invalid write %RRv-%RRv \n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
4827 PATMR3MarkDirtyPatch(pVM, pPatch);
4828 }
4829 /** @note jump back to the start as the pPatchPage has been deleted or changed */
4830 goto invalid_write_loop_start;
4831 }
4832 } /* for */
4833 }
4834 }
4835 }
4836 }
4837 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWrite, a);
4838 return VINF_SUCCESS;
4839
4840}
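
/*
 * Rough sketch of how the write notification is meant to be used by code that
 * has just seen or emulated a guest write; the real call sites live outside
 * this file. patmExampleNotifyWrite, the address and the size are made up, and
 * the call must come from the EMT as the function asserts above.
 */
#if 0 /* illustrative sketch, not built */
static void patmExampleNotifyWrite(PVM pVM)
{
    RTRCPTR  GCPtrWrite = 0x80481000;                   /* hypothetical guest address being written */
    uint32_t cbWrite    = 4;                            /* hypothetical write size */
    int rc = PATMR3PatchWrite(pVM, GCPtrWrite, cbWrite);
    AssertRC(rc); /* currently always VINF_SUCCESS; overwritten/dirty patches are dealt with internally */
}
#endif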
4841
4842/**
4843 * Disable all patches in a flushed page
4844 *
4845 * @returns VBox status code
4846 * @param pVM The VM to operate on.
4847 * @param addr GC address of the page to flush
4848 */
4849/** @note Currently only called by CSAMR3FlushPage; optimization to avoid having to double-check whether the physical address has changed
4850 */
4851VMMR3DECL(int) PATMR3FlushPage(PVM pVM, RTRCPTR addr)
4852{
4853 addr &= PAGE_BASE_GC_MASK;
4854
4855 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, addr);
4856 if (pPatchPage)
4857 {
4858 int i;
4859
4860 /* From top to bottom as the array is modified by PATMR3MarkDirtyPatch. */
4861 for (i=(int)pPatchPage->cCount-1;i>=0;i--)
4862 {
4863 if (pPatchPage->aPatch[i])
4864 {
4865 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4866
4867 Log(("PATMR3FlushPage %RRv remove patch at %RRv\n", addr, pPatch->pPrivInstrGC));
4868 PATMR3MarkDirtyPatch(pVM, pPatch);
4869 }
4870 }
4871 STAM_COUNTER_INC(&pVM->patm.s.StatFlushed);
4872 }
4873 return VINF_SUCCESS;
4874}
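
/*
 * Minimal usage sketch, assuming the caller (CSAM, per the note above) has
 * decided a guest page is being flushed. patmExampleFlush and the address are
 * made up; any address inside the page will do as the low bits are masked off.
 */
#if 0 /* illustrative sketch, not built */
static void patmExampleFlush(PVM pVM)
{
    RTRCPTR GCPtrPage = 0x80480123;                     /* hypothetical address inside the flushed page */
    int rc = PATMR3FlushPage(pVM, GCPtrPage);           /* marks every patch in that page dirty */
    AssertRC(rc);
}
#endif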
4875
4876/**
4877 * Checks if the instruction at the specified address has already been patched.
4878 *
4879 * @returns boolean, patched or not
4880 * @param pVM The VM to operate on.
4881 * @param pInstrGC Guest context pointer to instruction
4882 */
4883VMMR3DECL(bool) PATMR3HasBeenPatched(PVM pVM, RTRCPTR pInstrGC)
4884{
4885 PPATMPATCHREC pPatchRec;
4886 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4887 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
4888 return true;
4889 return false;
4890}
4891
4892/**
4893 * Query the opcode of the original code that was overwritten by the 5 byte patch jump
4894 *
4895 * @returns VBox status code.
4896 * @param pVM The VM to operate on.
4897 * @param pInstrGC GC address of instr
4898 * @param pByte opcode byte pointer (OUT)
4899 *
4900 */
4901VMMR3DECL(int) PATMR3QueryOpcode(PVM pVM, RTRCPTR pInstrGC, uint8_t *pByte)
4902{
4903 PPATMPATCHREC pPatchRec;
4904
4905 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
4906
4907 /* Shortcut. */
4908 if ( !PATMIsEnabled(pVM)
4909 || pInstrGC < pVM->patm.s.pPatchedInstrGCLowest
4910 || pInstrGC > pVM->patm.s.pPatchedInstrGCHighest)
4911 {
4912 return VERR_PATCH_NOT_FOUND;
4913 }
4914
4915 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
4916 // if the patch is enabled and the pointer lies within 5 bytes of this priv instr ptr, then we've got a hit!
4917 if ( pPatchRec
4918 && pPatchRec->patch.uState == PATCH_ENABLED
4919 && pInstrGC >= pPatchRec->patch.pPrivInstrGC
4920 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
4921 {
4922 RTRCPTR offset = pInstrGC - pPatchRec->patch.pPrivInstrGC;
4923 *pByte = pPatchRec->patch.aPrivInstr[offset];
4924
4925 if (pPatchRec->patch.cbPatchJump == 1)
4926 {
4927 Log(("PATMR3QueryOpcode: returning opcode %2X for instruction at %RRv\n", *pByte, pInstrGC));
4928 }
4929 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
4930 return VINF_SUCCESS;
4931 }
4932 return VERR_PATCH_NOT_FOUND;
4933}
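
/*
 * Sketch of how a disassembler or instruction emulator might recover the
 * original guest byte hidden underneath the 5 byte patch jump.
 * patmExampleReadOrgByte is a hypothetical helper; pInstrGC is assumed to be
 * the guest address being read.
 */
#if 0 /* illustrative sketch, not built */
static uint8_t patmExampleReadOrgByte(PVM pVM, RTRCPTR pInstrGC, uint8_t bDefault)
{
    uint8_t bOpcode;
    if (    PATMR3HasBeenPatched(pVM, pInstrGC)
        &&  RT_SUCCESS(PATMR3QueryOpcode(pVM, pInstrGC, &bOpcode)))
        return bOpcode;     /* original byte from aPrivInstr, not the 0xE9/0xCC now in guest memory */
    return bDefault;        /* VERR_PATCH_NOT_FOUND: no enabled patch jump covers this address */
}
#endif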
4934
4935/**
4936 * Disable patch for privileged instruction at specified location
4937 *
4938 * @returns VBox status code.
4939 * @param pVM The VM to operate on.
4940 * @param pInstrGC Guest context pointer to the privileged instruction
4941 *
4942 * @note returns failure if patching is not allowed or possible
4943 *
4944 */
4945VMMR3DECL(int) PATMR3DisablePatch(PVM pVM, RTRCPTR pInstrGC)
4946{
4947 PPATMPATCHREC pPatchRec;
4948 PPATCHINFO pPatch;
4949
4950 Log(("PATMR3DisablePatch: %RRv\n", pInstrGC));
4951 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4952 if (pPatchRec)
4953 {
4954 int rc = VINF_SUCCESS;
4955
4956 pPatch = &pPatchRec->patch;
4957
4958 /* Already disabled? */
4959 if (pPatch->uState == PATCH_DISABLED)
4960 return VINF_SUCCESS;
4961
4962 /* Clear the IDT entries for the patch we're disabling. */
4963 /** @note very important as we clear IF in the patch itself */
4964 /** @todo this needs to be changed */
4965 if (pPatch->flags & PATMFL_IDTHANDLER)
4966 {
4967 uint32_t iGate;
4968
4969 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
4970 if (iGate != (uint32_t)~0)
4971 {
4972 TRPMR3SetGuestTrapHandler(pVM, iGate, TRPM_INVALID_HANDLER);
4973 if (++cIDTHandlersDisabled < 256)
4974 LogRel(("PATM: Disabling IDT %x patch handler %RRv\n", iGate, pInstrGC));
4975 }
4976 }
4977
4978 /* Mark the entry with a breakpoint in case somebody else calls it later on (cli patch used as a function, duplicated function, trampoline or IDT patches) */
4979 if ( pPatch->pPatchBlockOffset
4980 && pPatch->uState == PATCH_ENABLED)
4981 {
4982 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
4983 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
4984 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
4985 }
4986
4987 /* IDT or function patches haven't changed any guest code. */
4988 if (pPatch->flags & PATMFL_PATCHED_GUEST_CODE)
4989 {
4990 Assert(pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP);
4991 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK)));
4992
4993 if (pPatch->uState != PATCH_REFUSED)
4994 {
4995 AssertMsg(pPatch->pPrivInstrHC, ("Invalid HC pointer?!? (%RRv)\n", pInstrGC));
4996 Assert(pPatch->cbPatchJump);
4997
4998 /** pPrivInstrHC is probably not valid anymore */
4999 rc = PGMPhysGCPtr2R3Ptr(VMMGetCpu0(pVM), pPatchRec->patch.pPrivInstrGC, (PRTR3PTR)&pPatchRec->patch.pPrivInstrHC);
5000 if (rc == VINF_SUCCESS)
5001 {
5002 uint8_t temp[16];
5003
5004 Assert(pPatch->cbPatchJump < sizeof(temp));
5005
5006 /* Let's first check if the guest code is still the same. */
5007 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5008 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5009 if (rc == VINF_SUCCESS)
5010 {
5011 RTRCINTPTR displ = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);
5012
5013 if ( temp[0] != 0xE9 /* jmp opcode */
5014 || *(RTRCINTPTR *)(&temp[1]) != displ
5015 )
5016 {
5017 Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5018 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5019 /* Remove it completely */
5020 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5021 rc = PATMR3RemovePatch(pVM, pInstrGC);
5022 AssertRC(rc);
5023 return VWRN_PATCH_REMOVED;
5024 }
5025 }
5026 patmRemoveJumpToPatch(pVM, pPatch);
5027
5028 }
5029 else
5030 {
5031 Log(("PATMR3DisablePatch: unable to disable patch -> mark PATCH_DISABLE_PENDING\n"));
5032 pPatch->uState = PATCH_DISABLE_PENDING;
5033 }
5034 }
5035 else
5036 {
5037 AssertMsgFailed(("Patch was refused!\n"));
5038 return VERR_PATCH_ALREADY_DISABLED;
5039 }
5040 }
5041 else
5042 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5043 {
5044 uint8_t temp[16];
5045
5046 Assert(pPatch->cbPatchJump < sizeof(temp));
5047
5048 /* Let's first check if the guest code is still the same. */
5049 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5050 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5051 if (rc == VINF_SUCCESS)
5052 {
5053 if (temp[0] != 0xCC)
5054 {
5055 Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5056 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5057 /* Remove it completely */
5058 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5059 rc = PATMR3RemovePatch(pVM, pInstrGC);
5060 AssertRC(rc);
5061 return VWRN_PATCH_REMOVED;
5062 }
5063 patmDeactivateInt3Patch(pVM, pPatch);
5064 }
5065 }
5066
5067 if (rc == VINF_SUCCESS)
5068 {
5069 /* Save old state and mark this one as disabled (so it can be enabled later on). */
5070 if (pPatch->uState == PATCH_DISABLE_PENDING)
5071 {
5072 /* Just to be safe, let's make sure this one can never be reused; the patch might be marked dirty already (int3 at start) */
5073 pPatch->uState = PATCH_UNUSABLE;
5074 }
5075 else
5076 if (pPatch->uState != PATCH_DIRTY)
5077 {
5078 pPatch->uOldState = pPatch->uState;
5079 pPatch->uState = PATCH_DISABLED;
5080 }
5081 STAM_COUNTER_ADD(&pVM->patm.s.StatDisabled, 1);
5082 }
5083
5084 Log(("PATMR3DisablePatch: disabled patch at %RRv\n", pInstrGC));
5085 return VINF_SUCCESS;
5086 }
5087 Log(("Patch not found!\n"));
5088 return VERR_PATCH_NOT_FOUND;
5089}
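
/*
 * The 0xE9 check above verifies that the guest bytes still form the near jump
 * PATM installed: one opcode byte followed by a 32 bit displacement relative
 * to the first byte after the jump. Worked example with made-up addresses;
 * patmExampleJumpDispl is not part of PATM.
 */
#if 0 /* illustrative sketch, not built */
static void patmExampleJumpDispl(void)
{
    RTRCUINTPTR uPrivInstrGC = 0x80011000;      /* patched guest instruction (made up) */
    RTRCUINTPTR uPatchCodeGC = 0xa0400020;      /* start of its patch block (made up) */
    RTRCINTPTR  displ        = uPatchCodeGC - (uPrivInstrGC + SIZEOF_NEARJUMP32);   /* 0x203ef01b */
    /* Expected guest bytes: E9 1B F0 3E 20 (0xE9 + little-endian displacement).
       If either differs, the guest overwrote the jump and the patch is removed
       rather than merely disabled. */
    NOREF(displ);
}
#endif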
5090
5091/**
5092 * Permanently disable patch for privileged instruction at specified location
5093 *
5094 * @returns VBox status code.
5095 * @param pVM The VM to operate on.
5096 * @param pInstrGC Guest context instruction pointer
5097 * @param pConflictAddr Guest context pointer which conflicts with specified patch
5098 * @param pConflictPatch Conflicting patch
5099 *
5100 */
5101static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pConflictPatch)
5102{
5103#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
5104 PATCHINFO patch;
5105 DISCPUSTATE cpu;
5106 R3PTRTYPE(uint8_t *) pInstrHC;
5107 uint32_t opsize;
5108 bool disret;
5109 int rc;
5110
5111 RT_ZERO(patch);
5112 pInstrHC = PATMGCVirtToHCVirt(pVM, &patch, pInstrGC);
5113 cpu.mode = (pConflictPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
5114 disret = PATMR3DISInstr(pVM, &patch, &cpu, pInstrGC, pInstrHC, &opsize, NULL);
5115 /*
5116 * If it's a 5 byte relative jump, then we can work around the problem by replacing the 32 bits relative offset
5117 * with one that jumps right into the conflict patch.
5118 * Otherwise we must disable the conflicting patch to avoid serious problems.
5119 */
5120 if ( disret == true
5121 && (pConflictPatch->flags & PATMFL_CODE32)
5122 && (cpu.pCurInstr->opcode == OP_JMP || (cpu.pCurInstr->optype & OPTYPE_COND_CONTROLFLOW))
5123 && (cpu.param1.flags & USE_IMMEDIATE32_REL))
5124 {
5125 /* Hint patches must be enabled first. */
5126 if (pConflictPatch->flags & PATMFL_INSTR_HINT)
5127 {
5128 Log(("Enabling HINTED patch %RRv\n", pConflictPatch->pPrivInstrGC));
5129 pConflictPatch->flags &= ~PATMFL_INSTR_HINT;
5130 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5131 Assert(rc == VINF_SUCCESS || rc == VERR_PATCH_NOT_FOUND);
5132 /* Enabling might fail if the patched code has changed in the meantime. */
5133 if (rc != VINF_SUCCESS)
5134 return rc;
5135 }
5136
5137 rc = PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_JUMP_CONFLICT);
5138 if (RT_SUCCESS(rc))
5139 {
5140 Log(("PATM -> CONFLICT: Installed JMP patch for patch conflict at %RRv\n", pInstrGC));
5141 STAM_COUNTER_INC(&pVM->patm.s.StatFixedConflicts);
5142 return VINF_SUCCESS;
5143 }
5144 }
5145#endif
5146
5147 if (pConflictPatch->opcode == OP_CLI)
5148 {
5149 /* Turn it into an int3 patch; our GC trap handler will call the generated code manually. */
5150 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> turn into int 3 patch!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5151 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5152 if (rc == VWRN_PATCH_REMOVED)
5153 return VINF_SUCCESS;
5154 if (RT_SUCCESS(rc))
5155 {
5156 pConflictPatch->flags &= ~(PATMFL_MUST_INSTALL_PATCHJMP|PATMFL_INSTR_HINT);
5157 pConflictPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
5158 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5159 if (rc == VERR_PATCH_NOT_FOUND)
5160 return VINF_SUCCESS; /* removed already */
5161
5162 AssertRC(rc);
5163 if (RT_SUCCESS(rc))
5164 {
5165 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
5166 return VINF_SUCCESS;
5167 }
5168 }
5169 /* else turned into unusable patch (see below) */
5170 }
5171 else
5172 {
5173 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> DISABLING it!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5174 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5175 if (rc == VWRN_PATCH_REMOVED)
5176 return VINF_SUCCESS;
5177 }
5178
5179 /* No need to monitor the code anymore. */
5180 if (pConflictPatch->flags & PATMFL_CODE_MONITORED)
5181 {
5182 int rc = patmRemovePatchPages(pVM, pConflictPatch);
5183 AssertRC(rc);
5184 }
5185 pConflictPatch->uState = PATCH_UNUSABLE;
5186 STAM_COUNTER_INC(&pVM->patm.s.StatUnusable);
5187 return VERR_PATCH_DISABLED;
5188}
5189
5190/**
5191 * Enable patch for privileged instruction at specified location
5192 *
5193 * @returns VBox status code.
5194 * @param pVM The VM to operate on.
5195 * @param pInstrGC Guest context pointer to the privileged instruction
5196 *
5197 * @note returns failure if patching is not allowed or possible
5198 *
5199 */
5200VMMR3DECL(int) PATMR3EnablePatch(PVM pVM, RTRCPTR pInstrGC)
5201{
5202 PPATMPATCHREC pPatchRec;
5203 PPATCHINFO pPatch;
5204
5205 Log(("PATMR3EnablePatch %RRv\n", pInstrGC));
5206 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5207 if (pPatchRec)
5208 {
5209 int rc = VINF_SUCCESS;
5210
5211 pPatch = &pPatchRec->patch;
5212
5213 if (pPatch->uState == PATCH_DISABLED)
5214 {
5215 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
5216 {
5217 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
5218 /** @todo -> pPrivInstrHC is probably not valid anymore */
5219 rc = PGMPhysGCPtr2R3Ptr(VMMGetCpu0(pVM), pPatchRec->patch.pPrivInstrGC, (PRTR3PTR)&pPatchRec->patch.pPrivInstrHC);
5220 if (rc == VINF_SUCCESS)
5221 {
5222#ifdef DEBUG
5223 DISCPUSTATE cpu;
5224 char szOutput[256];
5225 uint32_t opsize, i = 0;
5226#endif
5227 uint8_t temp[16];
5228
5229 Assert(pPatch->cbPatchJump < sizeof(temp));
5230
5231 // let's first check if the guest code is still the same
5232 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5233 AssertRC(rc2);
5234
5235 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5236 {
5237 Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5238 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5239 /* Remove it completely */
5240 rc = PATMR3RemovePatch(pVM, pInstrGC);
5241 AssertRC(rc);
5242 return VERR_PATCH_NOT_FOUND;
5243 }
5244
5245 rc2 = patmGenJumpToPatch(pVM, pPatch, false);
5246 AssertRC(rc2);
5247 if (RT_FAILURE(rc2))
5248 return rc2;
5249
5250#ifdef DEBUG
5251 bool disret;
5252 i = 0;
5253 while(i < pPatch->cbPatchJump)
5254 {
5255 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
5256 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, &pPatch->pPrivInstrHC[i], &opsize, szOutput);
5257 Log(("Renewed patch instr: %s", szOutput));
5258 i += opsize;
5259 }
5260#endif
5261 }
5262 }
5263 else
5264 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5265 {
5266 uint8_t temp[16];
5267
5268 Assert(pPatch->cbPatchJump < sizeof(temp));
5269
5270 /* Let's first check if the guest code is still the same. */
5271 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5272 AssertRC(rc2);
5273
5274 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5275 {
5276 Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5277 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5278 rc = PATMR3RemovePatch(pVM, pInstrGC);
5279 AssertRC(rc);
5280 return VERR_PATCH_NOT_FOUND;
5281 }
5282
5283 rc2 = patmActivateInt3Patch(pVM, pPatch);
5284 if (RT_FAILURE(rc2))
5285 return rc2;
5286 }
5287
5288 pPatch->uState = pPatch->uOldState; //restore state
5289
5290 /* Restore the entry breakpoint with the original opcode (see PATMR3DisablePatch). */
5291 if (pPatch->pPatchBlockOffset)
5292 {
5293 *PATCHCODE_PTR_HC(pPatch) = pPatch->bDirtyOpcode;
5294 }
5295
5296 STAM_COUNTER_ADD(&pVM->patm.s.StatEnabled, 1);
5297 }
5298 else
5299 Log(("PATMR3EnablePatch: Unable to enable patch %RRv with state %d\n", pInstrGC, pPatch->uState));
5300
5301 return rc;
5302 }
5303 return VERR_PATCH_NOT_FOUND;
5304}
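
/*
 * Disable and enable are designed as a round trip: PATMR3DisablePatch() saves
 * the previous state in uOldState and restores the original guest bytes, while
 * PATMR3EnablePatch() re-verifies those bytes and reinstalls the jump (or int3).
 * Hedged sketch of a caller that temporarily lifts a patch;
 * patmExampleTogglePatch is a hypothetical name and pInstrGC is assumed to be
 * the patched privileged instruction.
 */
#if 0 /* illustrative sketch, not built */
static int patmExampleTogglePatch(PVM pVM, RTRCPTR pInstrGC)
{
    int rc = PATMR3DisablePatch(pVM, pInstrGC);
    if (rc == VWRN_PATCH_REMOVED)
        return VINF_SUCCESS;            /* guest code had changed; the patch is gone for good */
    AssertRCReturn(rc, rc);

    /* ... the caller would touch the guest code region here ... */

    rc = PATMR3EnablePatch(pVM, pInstrGC);
    /* VERR_PATCH_NOT_FOUND here means the guest bytes no longer matched and
       the patch was removed while re-enabling. */
    return rc;
}
#endif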
5305
5306/**
5307 * Remove patch for privileged instruction at specified location
5308 *
5309 * @returns VBox status code.
5310 * @param pVM The VM to operate on.
5311 * @param pPatchRec Patch record
5312 * @param fForceRemove Force removal even of patches that other code still references
5313 */
5314int PATMRemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove)
5315{
5316 PPATCHINFO pPatch;
5317
5318 pPatch = &pPatchRec->patch;
5319
5320 /* Strictly forbidden to remove such patches. There can be dependencies!! */
5321 if (!fForceRemove && (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED)))
5322 {
5323 Log(("PATMRemovePatch %RRv REFUSED!\n", pPatch->pPrivInstrGC));
5324 return VERR_ACCESS_DENIED;
5325 }
5326 Log(("PATMRemovePatch %RRv\n", pPatch->pPrivInstrGC));
5327
5328 /** @note NEVER EVER REUSE PATCH MEMORY */
5329 /** @note PATMR3DisablePatch put a breakpoint (0xCC) at the entry of this patch */
5330
5331 if (pPatchRec->patch.pPatchBlockOffset)
5332 {
5333 PAVLOU32NODECORE pNode;
5334
5335 pNode = RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->patch.pPatchBlockOffset);
5336 Assert(pNode);
5337 }
5338
5339 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
5340 {
5341 int rc = patmRemovePatchPages(pVM, &pPatchRec->patch);
5342 AssertRC(rc);
5343 }
5344
5345#ifdef VBOX_WITH_STATISTICS
5346 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5347 {
5348 STAMR3Deregister(pVM, &pPatchRec->patch);
5349#ifndef DEBUG_sandervl
5350 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5351 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5352 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5353 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5354 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5355 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5356 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5357 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5358 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5359 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5360 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5361 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5362 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5363 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5364#endif
5365 }
5366#endif
5367
5368 /** @note no need to free Guest2PatchAddrTree as those records share memory with Patch2GuestAddrTree records. */
5369 patmEmptyTreeU32(pVM, &pPatch->Patch2GuestAddrTree);
5370 pPatch->nrPatch2GuestRecs = 0;
5371 Assert(pPatch->Patch2GuestAddrTree == 0);
5372
5373 patmEmptyTree(pVM, &pPatch->FixupTree);
5374 pPatch->nrFixups = 0;
5375 Assert(pPatch->FixupTree == 0);
5376
5377 if (pPatchRec->patch.pTempInfo)
5378 MMR3HeapFree(pPatchRec->patch.pTempInfo);
5379
5380 /** @note might fail, because it has already been removed (e.g. during reset). */
5381 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5382
5383 /* Free the patch record */
5384 MMHyperFree(pVM, pPatchRec);
5385 return VINF_SUCCESS;
5386}
5387
5388/**
5389 * Attempt to refresh the patch by recompiling its entire code block
5390 *
5391 * @returns VBox status code.
5392 * @param pVM The VM to operate on.
5393 * @param pPatchRec Patch record
5394 */
5395int patmR3RefreshPatch(PVM pVM, PPATMPATCHREC pPatchRec)
5396{
5397 PPATCHINFO pPatch;
5398 int rc;
5399 RTRCPTR pInstrGC = pPatchRec->patch.pPrivInstrGC;
5400
5401 Log(("patmR3RefreshPatch: attempt to refresh patch at %RRv\n", pInstrGC));
5402
5403 pPatch = &pPatchRec->patch;
5404 AssertReturn(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER), VERR_PATCHING_REFUSED);
5405 if (pPatch->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
5406 {
5407 Log(("patmR3RefreshPatch: refused because external jumps to this patch exist\n"));
5408 return VERR_PATCHING_REFUSED;
5409 }
5410
5411 /** Note: quite ugly to enable/disable/remove/insert old and new patches, but there's no easy way around it. */
5412
5413 rc = PATMR3DisablePatch(pVM, pInstrGC);
5414 AssertRC(rc);
5415
5416 /** Kick it out of the lookup tree to make sure PATMR3InstallPatch doesn't fail (hack alert) */
5417 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5418#ifdef VBOX_WITH_STATISTICS
5419 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5420 {
5421 STAMR3Deregister(pVM, &pPatchRec->patch);
5422#ifndef DEBUG_sandervl
5423 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5424 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5425 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5426 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5427 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5428 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5429 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5430 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5431 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5432 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5433 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5434 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5435 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5436 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5437#endif
5438 }
5439#endif
5440
5441 /** Note: We don't attempt to reuse patch memory here as it's quite common that the new code block requires more memory. */
5442
5443 /* Attempt to install a new patch. */
5444 rc = PATMR3InstallPatch(pVM, pInstrGC, pPatch->flags & (PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT));
5445 if (RT_SUCCESS(rc))
5446 {
5447 RTRCPTR pPatchTargetGC;
5448 PPATMPATCHREC pNewPatchRec;
5449
5450 /* Determine target address in new patch */
5451 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pInstrGC);
5452 Assert(pPatchTargetGC);
5453 if (!pPatchTargetGC)
5454 {
5455 rc = VERR_PATCHING_REFUSED;
5456 goto failure;
5457 }
5458
5459 /* Reset offset into patch memory to put the next code blocks right at the beginning. */
5460 pPatch->uCurPatchOffset = 0;
5461
5462 /* insert jump to new patch in old patch block */
5463 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC, false /* no lookup record */);
5464 if (RT_FAILURE(rc))
5465 goto failure;
5466
5467 pNewPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5468 Assert(pNewPatchRec); /* can't fail */
5469
5470 /* Remove old patch (only do that when everything is finished) */
5471 int rc2 = PATMRemovePatch(pVM, pPatchRec, true /* force removal */);
5472 AssertRC(rc2);
5473
5474 /* Put the new patch back into the tree, because removing the old one kicked this one out. (hack alert) */
5475 RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pNewPatchRec->Core);
5476
5477 LogRel(("PATM: patmR3RefreshPatch: successfully refreshed patch at %RRv\n", pInstrGC));
5478 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshSuccess);
5479
5480 /* Used by another patch, so don't remove it! */
5481 pNewPatchRec->patch.flags |= PATMFL_CODE_REFERENCED;
5482 }
5483
5484failure:
5485 if (RT_FAILURE(rc))
5486 {
5487 LogRel(("PATM: patmR3RefreshPatch: failed to refresh patch at %RRv. Reactivating the old one.\n", pInstrGC));
5488
5489 /* Remove the new inactive patch */
5490 rc = PATMR3RemovePatch(pVM, pInstrGC);
5491 AssertRC(rc);
5492
5493 /* Put the old patch back into the tree (or else it won't be saved) (hack alert) */
5494 RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
5495
5496 /* Enable again in case the dirty instruction is near the end and there are safe code paths. */
5497 int rc2 = PATMR3EnablePatch(pVM, pInstrGC);
5498 AssertRC(rc2);
5499
5500 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshFailed);
5501 }
5502 return rc;
5503}
5504
5505/**
5506 * Find patch for privileged instruction at specified location
5507 *
5508 * @returns Patch structure pointer if found; else NULL
5509 * @param pVM The VM to operate on.
5510 * @param pInstrGC Guest context pointer to an instruction that might lie within 5 bytes of an existing patch jump
5511 * @param fIncludeHints Include hinted patches or not
5512 *
5513 */
5514PPATCHINFO PATMFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncludeHints)
5515{
5516 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5517 /* if the patch is enabled, the pointer is not identical to the privileged patch ptr and it lies within 5 bytes of this priv instr ptr, then we've got a hit! */
5518 if (pPatchRec)
5519 {
5520 if ( pPatchRec->patch.uState == PATCH_ENABLED
5521 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE)
5522 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5523 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5524 {
5525 Log(("Found active patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5526 return &pPatchRec->patch;
5527 }
5528 else
5529 if ( fIncludeHints
5530 && pPatchRec->patch.uState == PATCH_DISABLED
5531 && (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
5532 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5533 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5534 {
5535 Log(("Found HINT patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5536 return &pPatchRec->patch;
5537 }
5538 }
5539 return NULL;
5540}
5541
5542/**
5543 * Checks whether the GC address is inside a generated patch jump
5544 *
5545 * @returns true -> yes, false -> no
5546 * @param pVM The VM to operate on.
5547 * @param pAddr Guest context address
5548 * @param pPatchAddr Where to store the guest context address of the patched instruction (only valid when true is returned)
5549 */
5550VMMR3DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatchAddr)
5551{
5552 RTRCPTR addr;
5553 PPATCHINFO pPatch;
5554
5555 if (PATMIsEnabled(pVM) == false)
5556 return false;
5557
5558 if (pPatchAddr == NULL)
5559 pPatchAddr = &addr;
5560
5561 *pPatchAddr = 0;
5562
5563 pPatch = PATMFindActivePatchByEntrypoint(pVM, pAddr);
5564 if (pPatch)
5565 {
5566 *pPatchAddr = pPatch->pPrivInstrGC;
5567 }
5568 return *pPatchAddr == 0 ? false : true;
5569}
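
/*
 * Typical use: when the guest faults at an address that might lie in the
 * middle of one of the 5 byte patch jumps, the fault handler can map it back
 * to the start of the patched instruction. Illustrative only;
 * patmExampleResolveFault and pFaultAddrGC are assumptions.
 */
#if 0 /* illustrative sketch, not built */
static RTRCPTR patmExampleResolveFault(PVM pVM, RTRCPTR pFaultAddrGC)
{
    RTGCPTR32 PatchStartGC;
    if (PATMR3IsInsidePatchJump(pVM, pFaultAddrGC, &PatchStartGC))
        return (RTRCPTR)PatchStartGC;   /* start of the overwritten privileged instruction */
    return pFaultAddrGC;                /* not inside a patch jump; use the address as-is */
}
#endif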
5570
5571/**
5572 * Remove patch for privileged instruction at specified location
5573 *
5574 * @returns VBox status code.
5575 * @param pVM The VM to operate on.
5576 * @param pInstrGC Guest context pointer to the privileged instruction
5577 *
5578 * @note returns failure if patching is not allowed or possible
5579 *
5580 */
5581VMMR3DECL(int) PATMR3RemovePatch(PVM pVM, RTRCPTR pInstrGC)
5582{
5583 PPATMPATCHREC pPatchRec;
5584
5585 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5586 if (pPatchRec)
5587 {
5588 int rc = PATMR3DisablePatch(pVM, pInstrGC);
5589 if (rc == VWRN_PATCH_REMOVED)
5590 return VINF_SUCCESS;
5591 return PATMRemovePatch(pVM, pPatchRec, false);
5592 }
5593 AssertFailed();
5594 return VERR_PATCH_NOT_FOUND;
5595}
5596
5597/**
5598 * Mark patch as dirty
5599 *
5600 * @returns VBox status code.
5601 * @param pVM The VM to operate on.
5602 * @param pPatch Patch record
5603 *
5604 * @note returns failure if patching is not allowed or possible
5605 *
5606 */
5607VMMR3DECL(int) PATMR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch)
5608{
5609 if (pPatch->pPatchBlockOffset)
5610 {
5611 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5612 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5613 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5614 }
5615
5616 STAM_COUNTER_INC(&pVM->patm.s.StatDirty);
5617 /* Put back the replaced instruction. */
5618 int rc = PATMR3DisablePatch(pVM, pPatch->pPrivInstrGC);
5619 if (rc == VWRN_PATCH_REMOVED)
5620 return VINF_SUCCESS;
5621
5622 /** @note we don't restore patch pages for patches that are not enabled! */
5623 /** @note be careful when changing this behaviour!! */
5624
5625 /* The patch pages are no longer marked for self-modifying code detection */
5626 if (pPatch->flags & PATMFL_CODE_MONITORED)
5627 {
5628 rc = patmRemovePatchPages(pVM, pPatch);
5629 AssertRCReturn(rc, rc);
5630 }
5631 pPatch->uState = PATCH_DIRTY;
5632
5633 /* Paranoia; make sure this patch is not somewhere in the callchain, so prevent ret instructions from succeeding. */
5634 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
5635
5636 return VINF_SUCCESS;
5637}
5638
5639/**
5640 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5641 *
5642 * @returns Guest context pointer to the corresponding original guest instruction, or 0 if not found.
5643 * @param pVM The VM to operate on.
5644 * @param pPatch Patch block structure pointer
5645 * @param pPatchGC GC address in patch block
5646 */
5647RTRCPTR patmPatchGCPtr2GuestGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pPatchGC)
5648{
5649 Assert(pPatch->Patch2GuestAddrTree);
5650 /* Get the closest record from below. */
5651 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->Patch2GuestAddrTree, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5652 if (pPatchToGuestRec)
5653 return pPatchToGuestRec->pOrgInstrGC;
5654
5655 return 0;
5656}
5657
5658/** Converts Guest code GC ptr to Patch code GC ptr (if found)
5659 *
5660 * @returns corresponding GC pointer in patch block
5661 * @param pVM The VM to operate on.
5662 * @param pPatch Current patch block pointer
5663 * @param pInstrGC Guest context pointer to privileged instruction
5664 *
5665 */
5666RTRCPTR patmGuestGCPtrToPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5667{
5668 if (pPatch->Guest2PatchAddrTree)
5669 {
5670 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
5671 if (pGuestToPatchRec)
5672 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
5673 }
5674
5675 return 0;
5676}
5677
5678/** Converts Guest code GC ptr to Patch code GC ptr (or nearest from below if no identical match)
5679 *
5680 * @returns corresponding GC pointer in patch block
5681 * @param pVM The VM to operate on.
5682 * @param pPatch Current patch block pointer
5683 * @param pInstrGC Guest context pointer to privileged instruction
5684 *
5685 */
5686RTRCPTR patmGuestGCPtrToClosestPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5687{
5688 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pInstrGC, false);
5689 if (pGuestToPatchRec)
5690 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
5691
5692 return 0;
5693}
5694
5695/** Converts Guest code GC ptr to Patch code GC ptr (if found)
5696 *
5697 * @returns corresponding GC pointer in patch block
5698 * @param pVM The VM to operate on.
5699 * @param pInstrGC Guest context pointer to privileged instruction
5700 *
5701 */
5702VMMR3DECL(RTRCPTR) PATMR3GuestGCPtrToPatchGCPtr(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC)
5703{
5704 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5705 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && pInstrGC >= pPatchRec->patch.pPrivInstrGC)
5706 {
5707 return patmGuestGCPtrToPatchGCPtr(pVM, &pPatchRec->patch, pInstrGC);
5708 }
5709 return 0;
5710}
5711
5712/**
5713 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5714 *
5715 * @returns original GC instruction pointer or 0 if not found
5716 * @param pVM The VM to operate on.
5717 * @param pPatchGC GC address in patch block
5718 * @param pEnmState State of the translated address (out)
5719 *
5720 */
5721VMMR3DECL(RTRCPTR) PATMR3PatchToGCPtr(PVM pVM, RTRCPTR pPatchGC, PATMTRANSSTATE *pEnmState)
5722{
5723 PPATMPATCHREC pPatchRec;
5724 void *pvPatchCoreOffset;
5725 RTRCPTR pPrivInstrGC;
5726
5727 Assert(PATMIsPatchGCAddr(pVM, pPatchGC));
5728 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5729 if (pvPatchCoreOffset == 0)
5730 {
5731 Log(("PATMR3PatchToGCPtr failed for %RRv offset %x\n", pPatchGC, pPatchGC - pVM->patm.s.pPatchMemGC));
5732 return 0;
5733 }
5734 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
5735 pPrivInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, &pPatchRec->patch, pPatchGC);
5736 if (pEnmState)
5737 {
5738 AssertMsg(pPrivInstrGC && ( pPatchRec->patch.uState == PATCH_ENABLED
5739 || pPatchRec->patch.uState == PATCH_DIRTY
5740 || pPatchRec->patch.uState == PATCH_DISABLE_PENDING
5741 || pPatchRec->patch.uState == PATCH_UNUSABLE),
5742 ("pPrivInstrGC=%RRv uState=%d\n", pPrivInstrGC, pPatchRec->patch.uState));
5743
5744 if ( !pPrivInstrGC
5745 || pPatchRec->patch.uState == PATCH_UNUSABLE
5746 || pPatchRec->patch.uState == PATCH_REFUSED)
5747 {
5748 pPrivInstrGC = 0;
5749 *pEnmState = PATMTRANS_FAILED;
5750 }
5751 else
5752 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pPrivInstrGC)
5753 {
5754 *pEnmState = PATMTRANS_INHIBITIRQ;
5755 }
5756 else
5757 if ( pPatchRec->patch.uState == PATCH_ENABLED
5758 && !(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE))
5759 && pPrivInstrGC > pPatchRec->patch.pPrivInstrGC
5760 && pPrivInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5761 {
5762 *pEnmState = PATMTRANS_OVERWRITTEN;
5763 }
5764 else
5765 if (PATMFindActivePatchByEntrypoint(pVM, pPrivInstrGC))
5766 {
5767 *pEnmState = PATMTRANS_OVERWRITTEN;
5768 }
5769 else
5770 if (pPrivInstrGC == pPatchRec->patch.pPrivInstrGC)
5771 {
5772 *pEnmState = PATMTRANS_PATCHSTART;
5773 }
5774 else
5775 *pEnmState = PATMTRANS_SAFE;
5776 }
5777 return pPrivInstrGC;
5778}
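
/*
 * The two public translation directions are meant to be used together: a
 * guest address maps into the patch block with PATMR3GuestGCPtrToPatchGCPtr()
 * and a patch block address maps back with PATMR3PatchToGCPtr(), which also
 * reports how trustworthy the translation is. Hedged sketch;
 * patmExampleRoundTrip is hypothetical and pInstrGC is assumed to be a
 * recompiled guest instruction.
 */
#if 0 /* illustrative sketch, not built */
static void patmExampleRoundTrip(PVM pVM, RTRCPTR pInstrGC)
{
    RTRCPTR pPatchGC = PATMR3GuestGCPtrToPatchGCPtr(pVM, pInstrGC);
    if (pPatchGC)
    {
        PATMTRANSSTATE enmState;
        RTRCPTR pOrgGC = PATMR3PatchToGCPtr(pVM, pPatchGC, &enmState);
        /* pOrgGC should be pInstrGC again; enmState says whether resuming
           there is safe (PATMTRANS_SAFE/PATMTRANS_PATCHSTART) or not. */
        NOREF(pOrgGC);
    }
}
#endif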
5779
5780/**
5781 * Returns the GC pointer of the patch for the specified GC address
5782 *
5783 * @returns Guest context pointer to the patch code, or 0 if no suitable patch was found.
5784 * @param pVM The VM to operate on.
5785 * @param pAddrGC Guest context address
5786 */
5787VMMR3DECL(RTRCPTR) PATMR3QueryPatchGCPtr(PVM pVM, RTRCPTR pAddrGC)
5788{
5789 PPATMPATCHREC pPatchRec;
5790
5791 // Find the patch record
5792 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pAddrGC);
5793 /** @todo we should only use patches that are enabled! always did this, but it's incorrect! */
5794 if (pPatchRec && (pPatchRec->patch.uState == PATCH_ENABLED || pPatchRec->patch.uState == PATCH_DIRTY))
5795 return PATCHCODE_PTR_GC(&pPatchRec->patch);
5796
5797 return 0;
5798}
5799
5800/**
5801 * Attempt to recover dirty instructions
5802 *
5803 * @returns VBox status code.
5804 * @param pVM The VM to operate on.
5805 * @param pCtx CPU context
5806 * @param pPatch Patch record
5807 * @param pPatchToGuestRec Patch to guest address record
5808 * @param pEip GC pointer of trapping instruction
5809 */
5810static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch, PRECPATCHTOGUEST pPatchToGuestRec, RTRCPTR pEip)
5811{
5812 DISCPUSTATE CpuOld, CpuNew;
5813 uint8_t *pPatchInstrHC, *pCurPatchInstrHC;
5814 int rc;
5815 RTRCPTR pCurInstrGC, pCurPatchInstrGC;
5816 uint32_t cbDirty;
5817 PRECPATCHTOGUEST pRec;
5818 PVMCPU pVCpu = VMMGetCpu0(pVM);
5819
5820 Log(("patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv)\n", pEip, pPatchToGuestRec->pOrgInstrGC));
5821
5822 pRec = pPatchToGuestRec;
5823 pCurInstrGC = pPatchToGuestRec->pOrgInstrGC;
5824 pCurPatchInstrGC = pEip;
5825 cbDirty = 0;
5826 pPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
5827
5828 /* Find all adjacent dirty instructions */
5829 while (true)
5830 {
5831 if (pRec->fJumpTarget)
5832 {
5833 LogRel(("PATM: patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv) ignored, because instruction in function was reused as target of jump\n", pEip, pPatchToGuestRec->pOrgInstrGC));
5834 pRec->fDirty = false;
5835 return VERR_PATCHING_REFUSED;
5836 }
5837
5838 /* Restore original instruction opcode byte so we can check if the write was indeed safe. */
5839 pCurPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
5840 *pCurPatchInstrHC = pRec->u8DirtyOpcode;
5841
5842 /* Only harmless instructions are acceptable. */
5843 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurPatchInstrGC, &CpuOld, 0);
5844 if ( RT_FAILURE(rc)
5845 || !(CpuOld.pCurInstr->optype & OPTYPE_HARMLESS))
5846 {
5847 if (RT_SUCCESS(rc))
5848 cbDirty += CpuOld.opsize;
5849 else
5850 if (!cbDirty)
5851 cbDirty = 1;
5852 break;
5853 }
5854
5855#ifdef DEBUG
5856 char szBuf[256];
5857 szBuf[0] = '\0';
5858 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC, 0, szBuf, sizeof(szBuf), NULL);
5859 Log(("DIRTY: %s\n", szBuf));
5860#endif
5861 /* Mark as clean; if we fail we'll let it always fault. */
5862 pRec->fDirty = false;
5863
5864 /** Remove old lookup record. */
5865 patmr3RemoveP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrGC);
5866
5867 pCurPatchInstrGC += CpuOld.opsize;
5868 cbDirty += CpuOld.opsize;
5869
5870 /* Let's see if there's another dirty instruction right after. */
5871 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
5872 if (!pRec || !pRec->fDirty)
5873 break; /* no more dirty instructions */
5874
5875 /* In case of complex instructions the next guest instruction could be quite far off. */
5876 pCurPatchInstrGC = pRec->Core.Key + pVM->patm.s.pPatchMemGC;
5877 }
5878
5879 if ( RT_SUCCESS(rc)
5880 && (CpuOld.pCurInstr->optype & OPTYPE_HARMLESS)
5881 )
5882 {
5883 uint32_t cbLeft;
5884
5885 pCurPatchInstrHC = pPatchInstrHC;
5886 pCurPatchInstrGC = pEip;
5887 cbLeft = cbDirty;
5888
5889 while (cbLeft && RT_SUCCESS(rc))
5890 {
5891 bool fValidInstr;
5892
5893 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurInstrGC, &CpuNew, 0);
5894
5895 fValidInstr = !!(CpuNew.pCurInstr->optype & OPTYPE_HARMLESS);
5896 if ( !fValidInstr
5897 && (CpuNew.pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW)
5898 )
5899 {
5900 RTRCPTR pTargetGC = PATMResolveBranch(&CpuNew, pCurInstrGC);
5901
5902 if ( pTargetGC >= pPatchToGuestRec->pOrgInstrGC
5903 && pTargetGC <= pPatchToGuestRec->pOrgInstrGC + cbDirty
5904 )
5905 {
5906 /* A relative jump to an instruction inside or to the end of the dirty block is acceptable. */
5907 fValidInstr = true;
5908 }
5909 }
5910
5911 /* If the instruction is completely harmless (which implies a 1:1 patch copy). */
5912 if ( rc == VINF_SUCCESS
5913 && CpuNew.opsize <= cbLeft /* must still fit */
5914 && fValidInstr
5915 )
5916 {
5917#ifdef DEBUG
5918 char szBuf[256];
5919 szBuf[0] = '\0';
5920 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurInstrGC, 0, szBuf, sizeof(szBuf), NULL);
5921 Log(("NEW: %s\n", szBuf));
5922#endif
5923
5924 /* Copy the new instruction. */
5925 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pCurPatchInstrHC, pCurInstrGC, CpuNew.opsize);
5926 AssertRC(rc);
5927
5928 /* Add a new lookup record for the duplicated instruction. */
5929 patmr3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
5930 }
5931 else
5932 {
5933#ifdef DEBUG
5934 char szBuf[256];
5935 szBuf[0] = '\0';
5936 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurInstrGC, 0, szBuf, sizeof(szBuf), NULL);
5937 Log(("NEW: %s (FAILED)\n", szBuf));
5938#endif
5939 /* Restore the old lookup record for the duplicated instruction. */
5940 patmr3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
5941
5942 /** @todo in theory we need to restore the lookup records for the remaining dirty instructions too! */
5943 rc = VERR_PATCHING_REFUSED;
5944 break;
5945 }
5946 pCurInstrGC += CpuNew.opsize;
5947 pCurPatchInstrHC += CpuNew.opsize;
5948 pCurPatchInstrGC += CpuNew.opsize;
5949 cbLeft -= CpuNew.opsize;
5950
5951 /* Check if we expanded a complex guest instruction into a patch stream (e.g. call) */
5952 if (!cbLeft)
5953 {
5954 /* If the next patch instruction doesn't correspond to the next guest instruction, then we have some extra room to fill. */
5955 if (RTAvlU32Get(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC) == NULL)
5956 {
5957 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
5958 if (pRec)
5959 {
5960 unsigned cbFiller = pRec->Core.Key + pVM->patm.s.pPatchMemGC - pCurPatchInstrGC;
5961 uint8_t *pPatchFillHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
5962
5963 Assert(!pRec->fDirty);
5964
5965 Log(("Room left in patched instruction stream (%d bytes)\n", cbFiller));
5966 if (cbFiller >= SIZEOF_NEARJUMP32)
5967 {
5968 pPatchFillHC[0] = 0xE9;
5969 *(uint32_t *)&pPatchFillHC[1] = cbFiller - SIZEOF_NEARJUMP32;
5970#ifdef DEBUG
5971 char szBuf[256];
5972 szBuf[0] = '\0';
5973 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC, 0, szBuf, sizeof(szBuf), NULL);
5974 Log(("FILL: %s\n", szBuf));
5975#endif
5976 }
5977 else
5978 {
5979 for (unsigned i = 0; i < cbFiller; i++)
5980 {
5981 pPatchFillHC[i] = 0x90; /* NOP */
5982#ifdef DEBUG
5983 char szBuf[256];
5984 szBuf[0] = '\0';
5985 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC + i, 0, szBuf, sizeof(szBuf), NULL);
5986 Log(("FILL: %s\n", szBuf));
5987#endif
5988 }
5989 }
5990 }
5991 }
5992 }
5993 }
5994 }
5995 else
5996 rc = VERR_PATCHING_REFUSED;
5997
5998 if (RT_SUCCESS(rc))
5999 {
6000 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyGood);
6001 }
6002 else
6003 {
6004 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyBad);
6005 Assert(cbDirty);
6006
6007 /* Mark the whole instruction stream with breakpoints. */
6008 if (cbDirty)
6009 memset(pPatchInstrHC, 0xCC, cbDirty);
6010
6011 if ( pVM->patm.s.fOutOfMemory == false
6012 && (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER)))
6013 {
6014 rc = patmR3RefreshPatch(pVM, pPatch);
6015 if (RT_FAILURE(rc))
6016 {
6017 LogRel(("PATM: Failed to refresh dirty patch at %RRv. Disabling it.\n", pPatch->patch.pPrivInstrGC));
6018 }
6019 /* Even if we succeed, we must go back to the original instruction as the patched one could be invalid. */
6020 rc = VERR_PATCHING_REFUSED;
6021 }
6022 }
6023 return rc;
6024}
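
/*
 * The filler logic above works because a 32 bit near jump is relative to the
 * first byte after the jump itself. Worked example for a hypothetical 9 byte
 * gap between the freshly copied instructions and the next valid patch
 * instruction; patmExampleFiller is not part of PATM.
 */
#if 0 /* illustrative sketch, not built */
static void patmExampleFiller(uint8_t *pPatchFillHC)
{
    unsigned cbFiller = 9;                                          /* hypothetical gap size */
    pPatchFillHC[0] = 0xE9;                                         /* jmp rel32 */
    *(uint32_t *)&pPatchFillHC[1] = cbFiller - SIZEOF_NEARJUMP32;   /* rel32 = 4: skips the 4 leftover bytes */
    /* Gaps smaller than SIZEOF_NEARJUMP32 (5 bytes) are padded with 0x90 (NOP) instead. */
}
#endif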
6025
6026/**
6027 * Handle trap inside patch code
6028 *
6029 * @returns VBox status code.
6030 * @param pVM The VM to operate on.
6031 * @param pCtx CPU context
6032 * @param pEip GC pointer of trapping instruction
6033 * @param ppNewEip GC pointer to new instruction
6034 */
6035VMMR3DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *ppNewEip)
6036{
6037 PPATMPATCHREC pPatch = 0;
6038 void *pvPatchCoreOffset;
6039 RTRCUINTPTR offset;
6040 RTRCPTR pNewEip;
6041 int rc;
6042 PRECPATCHTOGUEST pPatchToGuestRec = 0;
6043 PVMCPU pVCpu = VMMGetCpu0(pVM);
6044
6045 Assert(pVM->cCpus == 1);
6046
6047 pNewEip = 0;
6048 *ppNewEip = 0;
6049
6050 STAM_PROFILE_ADV_START(&pVM->patm.s.StatHandleTrap, a);
6051
6052 /* Find the patch record. */
6053 /** @note there might not be a patch to guest translation record (global function) */
6054 offset = pEip - pVM->patm.s.pPatchMemGC;
6055 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
6056 if (pvPatchCoreOffset)
6057 {
6058 pPatch = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6059
6060 Assert(offset >= pPatch->patch.pPatchBlockOffset && offset < pPatch->patch.pPatchBlockOffset + pPatch->patch.cbPatchBlockSize);
6061
6062 if (pPatch->patch.uState == PATCH_DIRTY)
6063 {
6064 Log(("PATMR3HandleTrap: trap in dirty patch at %RRv\n", pEip));
6065 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6066 {
6067 /* Function duplication patches set fPIF to 1 on entry */
6068 pVM->patm.s.pGCStateHC->fPIF = 1;
6069 }
6070 }
6071 else
6072 if (pPatch->patch.uState == PATCH_DISABLED)
6073 {
6074 Log(("PATMR3HandleTrap: trap in disabled patch at %RRv\n", pEip));
6075 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6076 {
6077 /* Function duplication patches set fPIF to 1 on entry */
6078 pVM->patm.s.pGCStateHC->fPIF = 1;
6079 }
6080 }
6081 else
6082 if (pPatch->patch.uState == PATCH_DISABLE_PENDING)
6083 {
6084 RTRCPTR pPrivInstrGC = pPatch->patch.pPrivInstrGC;
6085
6086 Log(("PATMR3HandleTrap: disable operation is pending for patch at %RRv\n", pPatch->patch.pPrivInstrGC));
6087 rc = PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6088 AssertReleaseMsg(rc != VWRN_PATCH_REMOVED, ("PATMR3DisablePatch removed patch at %RRv\n", pPrivInstrGC));
6089 AssertMsg(pPatch->patch.uState == PATCH_DISABLED || pPatch->patch.uState == PATCH_UNUSABLE, ("Unexpected failure to disable patch state=%d rc=%Rrc\n", pPatch->patch.uState, rc));
6090 }
6091
6092 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, offset, false);
6093 AssertReleaseMsg(pPatchToGuestRec, ("PATMR3HandleTrap: Unable to find corresponding guest address for %RRv (offset %x)\n", pEip, offset));
6094
6095 pNewEip = pPatchToGuestRec->pOrgInstrGC;
6096 pPatch->patch.cTraps++;
6097 PATM_STAT_FAULT_INC(&pPatch->patch);
6098 }
6099 else
6100 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 0, ("PATMR3HandleTrap: Unable to find translation record for %RRv (PIF=0)\n", pEip));
6101
6102 /* Check if we were interrupted in PATM generated instruction code. */
6103 if (pVM->patm.s.pGCStateHC->fPIF == 0)
6104 {
6105 DISCPUSTATE Cpu;
6106 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pEip, &Cpu, "PIF Trap: ");
6107 AssertRC(rc);
6108
6109 if ( rc == VINF_SUCCESS
6110 && ( Cpu.pCurInstr->opcode == OP_PUSHF
6111 || Cpu.pCurInstr->opcode == OP_PUSH
6112 || Cpu.pCurInstr->opcode == OP_CALL)
6113 )
6114 {
6115 uint64_t fFlags;
6116
6117 STAM_COUNTER_INC(&pVM->patm.s.StatPushTrap);
6118
6119 if (Cpu.pCurInstr->opcode == OP_PUSH)
6120 {
6121 rc = PGMShwGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6122 if ( rc == VINF_SUCCESS
6123 && ((fFlags & (X86_PTE_P|X86_PTE_RW)) == (X86_PTE_P|X86_PTE_RW)) )
6124 {
6125 /* The stack address is fine, so the push argument is a pointer -> emulate this instruction */
6126
6127 /* Reset the PATM stack. */
6128 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6129
6130 pVM->patm.s.pGCStateHC->fPIF = 1;
6131
6132 Log(("Faulting push -> go back to the original instruction\n"));
6133
6134 /* continue at the original instruction */
6135 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6136 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6137 return VINF_SUCCESS;
6138 }
6139 }
6140
6141 /* Typical pushf (most patches)/push (call patch) trap because of a monitored page. */
6142 rc = PGMShwModifyPage(pVCpu, pCtx->esp, 1, X86_PTE_RW, ~(uint64_t)X86_PTE_RW);
6143 AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
6144 if (rc == VINF_SUCCESS)
6145 {
6146
6147 /* The guest page *must* be present. */
6148 rc = PGMGstGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6149 if (rc == VINF_SUCCESS && (fFlags & X86_PTE_P))
6150 {
6151 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6152 return VINF_PATCH_CONTINUE;
6153 }
6154 }
6155 }
6156 else
6157 if (pPatch->patch.pPrivInstrGC == pNewEip)
6158 {
6159 /* Invalidated patch or first instruction overwritten.
6160 * We can ignore the fPIF state in this case.
6161 */
6162 /* Reset the PATM stack. */
6163 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6164
6165 Log(("Call to invalidated patch -> go back to the original instruction\n"));
6166
6167 pVM->patm.s.pGCStateHC->fPIF = 1;
6168
6169 /* continue at the original instruction */
6170 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6171 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6172 return VINF_SUCCESS;
6173 }
6174
6175 char szBuf[256];
6176 szBuf[0] = '\0';
6177 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pEip, 0, szBuf, sizeof(szBuf), NULL);
6178
6179 /* Very bad. We crashed in emitted code. Probably stack? */
6180 if (pPatch)
6181 {
6182 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6183 ("Crash in patch code %RRv (%RRv) esp=%RX32\nPatch state=%x flags=%RX64 fDirty=%d\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), pPatch->patch.uState, pPatch->patch.flags, pPatchToGuestRec->fDirty, szBuf));
6184 }
6185 else
6186 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6187 ("Crash in patch code %RRv (%RRv) esp=%RX32\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), szBuf));
6188 EMR3FatalError(pVCpu, VERR_INTERNAL_ERROR);
6189 }
6190
6191 /* From here on, we must have a valid patch to guest translation. */
6192 if (pvPatchCoreOffset == 0)
6193 {
6194 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6195 AssertMsgFailed(("PATMR3HandleTrap: patch not found at address %RRv!!\n", pEip));
6196 return VERR_PATCH_NOT_FOUND; //fatal error
6197 }
6198
6199 /* Take care of dirty/changed instructions. */
6200 if (pPatchToGuestRec->fDirty)
6201 {
6202 Assert(pPatchToGuestRec->Core.Key == offset);
6203 Assert(pVM->patm.s.pGCStateHC->fPIF == 1);
6204
6205 rc = patmR3HandleDirtyInstr(pVM, pCtx, pPatch, pPatchToGuestRec, pEip);
6206 if (RT_SUCCESS(rc))
6207 {
6208 /* Retry the current instruction. */
6209 pNewEip = pEip;
6210 rc = VINF_PATCH_CONTINUE; /* Continue at current patch instruction. */
6211 }
6212 else
6213 {
6214 /* Reset the PATM stack. */
6215 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6216
6217 rc = VINF_SUCCESS; /* Continue at original instruction. */
6218 }
6219
6220 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6221 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6222 return rc;
6223 }
6224
6225#ifdef VBOX_STRICT
6226 if (pPatch->patch.flags & PATMFL_DUPLICATE_FUNCTION)
6227 {
6228 DISCPUSTATE cpu;
6229 bool disret;
6230 uint32_t opsize;
6231
6232 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6233 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &pPatch->patch, pNewEip), &opsize, NULL, PATMREAD_RAWCODE);
6234 if (disret && cpu.pCurInstr->opcode == OP_RETN)
6235 {
6236 RTRCPTR retaddr;
6237 PCPUMCTX pCtx2;
6238
6239 pCtx2 = CPUMQueryGuestCtxPtr(pVCpu);
6240
6241 rc = PGMPhysSimpleReadGCPtr(pVCpu, &retaddr, pCtx2->esp, sizeof(retaddr));
6242 AssertRC(rc);
6243
6244 Log(("Return failed at %RRv (%RRv)\n", pEip, pNewEip));
6245 Log(("Expected return address %RRv found address %RRv Psp=%x\n", pVM->patm.s.pGCStackHC[(pVM->patm.s.pGCStateHC->Psp+PATM_STACK_SIZE)/sizeof(RTRCPTR)], retaddr, pVM->patm.s.pGCStateHC->Psp));
6246 }
6247 }
6248#endif
6249
6250 /* Return the original address, corrected by subtracting the CS base address. */
6251 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6252
6253 /* Reset the PATM stack. */
6254 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6255
6256 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pNewEip)
6257 {
6258 /* Must be a faulting instruction after sti; currently only sysexit, hlt or iret */
6259 Log(("PATMR3HandleTrap %RRv -> inhibit irqs set!\n", pEip));
6260#ifdef VBOX_STRICT
6261 DISCPUSTATE cpu;
6262 bool disret;
6263 uint32_t opsize;
6264
6265 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6266 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &pPatch->patch, pNewEip), &opsize, NULL, PATMREAD_ORGCODE);
6267
6268 if (disret && (cpu.pCurInstr->opcode == OP_SYSEXIT || cpu.pCurInstr->opcode == OP_HLT || cpu.pCurInstr->opcode == OP_INT3))
6269 {
6270 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6271 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &pPatch->patch, pNewEip), &opsize, NULL, PATMREAD_RAWCODE);
6272
6273 Assert(cpu.pCurInstr->opcode == OP_SYSEXIT || cpu.pCurInstr->opcode == OP_HLT || cpu.pCurInstr->opcode == OP_IRET);
6274 }
6275#endif
6276 EMSetInhibitInterruptsPC(pVCpu, pNewEip);
6277 pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts = 0;
6278 }
6279
6280 Log2(("pPatchBlockGC %RRv - pEip %RRv corresponding GC address %RRv\n", PATCHCODE_PTR_GC(&pPatch->patch), pEip, pNewEip));
6281#ifdef LOG_ENABLED
6282 CPUMR3DisasmInstr(pVM, pVCpu, pCtx, pNewEip, "PATCHRET: ");
6283#endif
6284 if (pNewEip >= pPatch->patch.pPrivInstrGC && pNewEip < pPatch->patch.pPrivInstrGC + pPatch->patch.cbPatchJump)
6285 {
6286 /* We can't jump back to code that we've overwritten with a 5-byte jump! */
6287 Log(("Disabling patch at location %RRv due to a trap too close to the privileged instruction\n", pPatch->patch.pPrivInstrGC));
6288 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6289 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6290 return VERR_PATCH_DISABLED;
6291 }
6292
6293#ifdef PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
6294 /** @todo Compare to the number of successful runs; add some aging algorithm and determine the best time to disable the patch. */
6295 if (pPatch->patch.cTraps > MAX_PATCH_TRAPS)
6296 {
6297 Log(("Disabling patch at location %RRv due to too many traps inside patch code\n", pPatch->patch.pPrivInstrGC));
6298 // We are only wasting time; back out the patch.
6299 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6300 pTrapRec->pNextPatchInstr = 0;
6301 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6302 return VERR_PATCH_DISABLED;
6303 }
6304#endif
6305
6306 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6307 return VINF_SUCCESS;
6308}
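/*
 * For illustration: every exit path above converts the flat guest address in
 * pNewEip back into a CS-relative EIP by subtracting the flat address of CS:0,
 * i.e. SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0). A minimal sketch,
 * assuming a hypothetical flat CS base of 0x10000:
 *
 *     RTGCPTR GCPtrCSBase = SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0); // 0x10000 (assumed)
 *     *ppNewEip = pNewEip - GCPtrCSBase;   // flat 0x10123 becomes EIP 0x00000123
 */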
6309
6310
6311/**
6312 * Handle a page fault in a monitored page.
6313 *
6314 * @returns VBox status code.
6315 * @param pVM The VM to operate on.
6316 */
6317VMMR3DECL(int) PATMR3HandleMonitoredPage(PVM pVM)
6318{
6319 RTRCPTR addr = pVM->patm.s.pvFaultMonitor;
6320
6321 addr &= PAGE_BASE_GC_MASK;
6322
6323 int rc = PGMHandlerVirtualDeregister(pVM, addr);
6324 AssertRC(rc); NOREF(rc);
6325
6326 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, false);
6327 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) == PAGE_ADDRESS(addr))
6328 {
6329 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6330 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6331 rc = PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6332 if (rc == VWRN_PATCH_REMOVED)
6333 return VINF_SUCCESS;
6334
6335 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6336
6337 if (addr == pPatchRec->patch.pPrivInstrGC)
6338 addr++;
6339 }
6340
6341 for (;;)
6342 {
6343 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, true);
6344
6345 if (!pPatchRec || PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) != PAGE_ADDRESS(addr))
6346 break;
6347
6348 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
6349 {
6350 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6351 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6352 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6353 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6354 }
6355 addr = pPatchRec->patch.pPrivInstrGC + 1;
6356 }
6357
6358 pVM->patm.s.pvFaultMonitor = 0;
6359 return VINF_SUCCESS;
6360}
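/*
 * For illustration, the enumeration idiom used above: RTAvloU32GetBestFit with
 * fAbove = true returns the patch record with the lowest key at or above the
 * given address, so starting at the page base and advancing just past each hit
 * visits every patch whose privileged instruction lies in the faulting page.
 * A minimal sketch, assuming the same PatchTree layout:
 *
 *     RTRCPTR addrCur = addr;   // page base of the monitored page
 *     for (;;)
 *     {
 *         PPATMPATCHREC pRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree,
 *                                                                 addrCur, true);
 *         if (!pRec || PAGE_ADDRESS(pRec->patch.pPrivInstrGC) != PAGE_ADDRESS(addr))
 *             break;            // no more patches in this page
 *         // ...refresh the patch (disable + re-enable) here...
 *         addrCur = pRec->patch.pPrivInstrGC + 1;
 *     }
 */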
6361
6362
6363#ifdef VBOX_WITH_STATISTICS
6364
6365static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch)
6366{
6367 if (pPatch->flags & PATMFL_SYSENTER)
6368 {
6369 return "SYSENT";
6370 }
6371 else
6372 if (pPatch->flags & (PATMFL_TRAPHANDLER|PATMFL_INTHANDLER))
6373 {
6374 static char szTrap[16];
6375 uint32_t iGate;
6376
6377 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
6378 if (iGate < 256)
6379 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-%2X" : "TRAP-%2X", iGate);
6380 else
6381 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-??" : "TRAP-??");
6382 return szTrap;
6383 }
6384 else
6385 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
6386 return "DUPFUNC";
6387 else
6388 if (pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL)
6389 return "FUNCCALL";
6390 else
6391 if (pPatch->flags & PATMFL_TRAMPOLINE)
6392 return "TRAMP";
6393 else
6394 return patmGetInstructionString(pPatch->opcode, pPatch->flags);
6395}
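/*
 * For illustration: a trap/interrupt handler patch is labelled by the IDT gate
 * its handler is registered on. Assuming a hypothetical interrupt handler
 * patch (PATMFL_INTHANDLER) installed on gate 0x80:
 *
 *     iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch)); // 0x80 (assumed)
 *     RTStrPrintf(szTrap, sizeof(szTrap), "INT-%2X", iGate);           // yields "INT-80"
 */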
6396
6397static const char *PATMPatchState(PVM pVM, PPATCHINFO pPatch)
6398{
6399 switch(pPatch->uState)
6400 {
6401 case PATCH_ENABLED:
6402 return "ENA";
6403 case PATCH_DISABLED:
6404 return "DIS";
6405 case PATCH_DIRTY:
6406 return "DIR";
6407 case PATCH_UNUSABLE:
6408 return "UNU";
6409 case PATCH_REFUSED:
6410 return "REF";
6411 case PATCH_DISABLE_PENDING:
6412 return "DIP";
6413 default:
6414 AssertFailed();
6415 return " ";
6416 }
6417}
6418
6419/**
6420 * Resets the sample.
6421 * @param pVM The VM handle.
6422 * @param pvSample The sample registered using STAMR3RegisterCallback.
6423 */
6424static void patmResetStat(PVM pVM, void *pvSample)
6425{
6426 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6427 Assert(pPatch);
6428
6429 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A = 0;
6430 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B = 0;
6431}
6432
6433/**
6434 * Prints the sample into the buffer.
6435 *
6436 * @param pVM The VM handle.
6437 * @param pvSample The sample registered using STAMR3RegisterCallback.
6438 * @param pszBuf The buffer to print into.
6439 * @param cchBuf The size of the buffer.
6440 */
6441static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf)
6442{
6443 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6444 Assert(pPatch);
6445
6446 Assert(pPatch->uState != PATCH_REFUSED);
6447 Assert(!(pPatch->flags & (PATMFL_REPLACE_FUNCTION_CALL|PATMFL_MMIO_ACCESS)));
6448
6449 RTStrPrintf(pszBuf, cchBuf, "size %04x ->%3s %8s - %08d - %08d",
6450 pPatch->cbPatchBlockSize, PATMPatchState(pVM, pPatch), PATMPatchType(pVM, pPatch),
6451 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A, pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B);
6452}
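/*
 * For illustration, the format string above produces one fixed-width line per
 * patch. Assuming a hypothetical enabled duplicated-function patch of 0x123
 * bytes whose two counters read 42 and 7, the sample would print roughly as:
 *
 *     size 0123 ->ENA  DUPFUNC - 00000042 - 00000007
 */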
6453
6454/**
6455 * Returns the GC address of the corresponding patch statistics counter.
6456 *
6457 * @returns GC address of the statistics counter.
6458 * @param pVM The VM to operate on.
6459 * @param pPatch Patch structure
6460 */
6461RTRCPTR patmPatchQueryStatAddress(PVM pVM, PPATCHINFO pPatch)
6462{
6463 Assert(pPatch->uPatchIdx != PATM_STAT_INDEX_NONE);
6464 return pVM->patm.s.pStatsGC + sizeof(STAMRATIOU32) * pPatch->uPatchIdx + RT_OFFSETOF(STAMRATIOU32, u32A);
6465}
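/*
 * For illustration: the per-patch counters live in a guest-context array of
 * STAMRATIOU32 entries starting at pVM->patm.s.pStatsGC, so the computation
 * above is a plain indexed access. Assuming a hypothetical uPatchIdx of 3 and
 * an 8-byte STAMRATIOU32 whose first member is u32A (both assumptions):
 *
 *     pStatsGC + 3 * sizeof(STAMRATIOU32) + RT_OFFSETOF(STAMRATIOU32, u32A)
 *         = pStatsGC + 0x18
 */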
6466
6467#endif /* VBOX_WITH_STATISTICS */
6468
6469#ifdef VBOX_WITH_DEBUGGER
6470/**
6471 * The '.patmoff' command.
6472 *
6473 * @returns VBox status code.
6474 * @param pCmd Pointer to the command descriptor (as registered).
6475 * @param pCmdHlp Pointer to command helper functions.
6476 * @param pVM Pointer to the current VM (if any).
6477 * @param paArgs Pointer to (readonly) array of arguments.
6478 * @param cArgs Number of arguments in the array.
6479 */
6480static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
6481{
6482 /*
6483 * Validate input.
6484 */
6485 if (!pVM)
6486 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
6487
6488 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, DisableAllPatches, pVM);
6489 PATMR3AllowPatching(pVM, false);
6490 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching disabled\n");
6491}
6492
6493/**
6494 * The '.patmon' command.
6495 *
6496 * @returns VBox status code.
6497 * @param pCmd Pointer to the command descriptor (as registered).
6498 * @param pCmdHlp Pointer to command helper functions.
6499 * @param pVM Pointer to the current VM (if any).
6500 * @param paArgs Pointer to (readonly) array of arguments.
6501 * @param cArgs Number of arguments in the array.
6502 */
6503static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
6504{
6505 /*
6506 * Validate input.
6507 */
6508 if (!pVM)
6509 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
6510
6511 PATMR3AllowPatching(pVM, true);
6512 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, EnableAllPatches, pVM);
6513 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching enabled\n");
6514}
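/*
 * For illustration: these two callbacks back the '.patmoff' and '.patmon'
 * debugger console commands. '.patmoff' walks the patch tree with
 * DisableAllPatches and then blocks new patching via
 * PATMR3AllowPatching(pVM, false); '.patmon' re-allows patching and walks the
 * tree with EnableAllPatches. A typical (hypothetical) session:
 *
 *     .patmoff   -> "Patching disabled"
 *     .patmon    -> "Patching enabled"
 */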
6515#endif