VirtualBox

source: vbox/trunk/src/VBox/VMM/PATM/PATM.cpp@ 13993

Last change on this file since 13993 was 13960, checked in by vboxsync, 16 years ago

Moved guest and host CPU contexts into per-VCPU array.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 242.2 KB
1/* $Id: PATM.cpp 13960 2008-11-07 13:04:45Z vboxsync $ */
2/** @file
3 * PATM - Dynamic Guest OS Patching Manager
4 *
5 * NOTE: Never ever reuse patch memory!!
6 */
7
8/*
9 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
20 * Clara, CA 95054 USA or visit http://www.sun.com if you need
21 * additional information or have any questions.
22 */
23
24/*******************************************************************************
25* Header Files *
26*******************************************************************************/
27#define LOG_GROUP LOG_GROUP_PATM
28#include <VBox/patm.h>
29#include <VBox/stam.h>
30#include <VBox/pgm.h>
31#include <VBox/cpum.h>
32#include <VBox/cpumdis.h>
33#include <VBox/iom.h>
34#include <VBox/sup.h>
35#include <VBox/mm.h>
36#include <VBox/ssm.h>
37#include <VBox/pdm.h>
38#include <VBox/trpm.h>
39#include <VBox/cfgm.h>
40#include <VBox/param.h>
41#include <VBox/selm.h>
42#include <iprt/avl.h>
43#include "PATMInternal.h"
44#include "PATMPatch.h"
45#include <VBox/vm.h>
46#include <VBox/csam.h>
47
48#include <VBox/dbg.h>
49#include <VBox/err.h>
50#include <VBox/log.h>
51#include <iprt/assert.h>
52#include <iprt/asm.h>
53#include <VBox/dis.h>
54#include <VBox/disopcode.h>
55
56#include <iprt/string.h>
57#include "PATMA.h"
58
59//#define PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
60//#define PATM_DISABLE_ALL
61
62/*******************************************************************************
63* Internal Functions *
64*******************************************************************************/
65
66static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pPatch);
67static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
68static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
69
70#ifdef LOG_ENABLED // keep gcc quiet
71static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC);
72#endif
73#ifdef VBOX_WITH_STATISTICS
74static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch);
75static void patmResetStat(PVM pVM, void *pvSample);
76static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf);
77#endif
78
79#define patmPatchHCPtr2PatchGCPtr(pVM, pHC) (pVM->patm.s.pPatchMemGC + (pHC - pVM->patm.s.pPatchMemHC))
80#define patmPatchGCPtr2PatchHCPtr(pVM, pGC) (pVM->patm.s.pPatchMemHC + (pGC - pVM->patm.s.pPatchMemGC))
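/* The patch memory block is mapped both into ring-3 (pPatchMemHC) and into the raw-mode guest
 * context (pPatchMemGC); the two macros above convert a pointer from one view to the other by
 * keeping its offset into the block and swapping the base address. */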
81
82static int patmReinit(PVM pVM);
83static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam);
84
85#ifdef VBOX_WITH_DEBUGGER
86static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM);
87static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
88static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
89
90/** Command descriptors. */
91static const DBGCCMD g_aCmds[] =
92{
93 /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, pResultDesc, fFlags, pfnHandler, pszSyntax, ..., pszDescription */
94 { "patmon", 0, 0, NULL, 0, NULL, 0, patmr3CmdOn, "", "Enable patching." },
95 { "patmoff", 0, 0, NULL, 0, NULL, 0, patmr3CmdOff, "", "Disable patching." },
96};
97#endif
98
99/* Don't want to break saved states, so put it here as a global variable. */
100static unsigned int cIDTHandlersDisabled = 0;
101
102/**
103 * Initializes the PATM.
104 *
105 * @returns VBox status code.
106 * @param pVM The VM to operate on.
107 */
108VMMR3DECL(int) PATMR3Init(PVM pVM)
109{
110 int rc;
111
112 Log(("PATMR3Init: Patch record size %d\n", sizeof(PATCHINFO)));
113
114 AssertReleaseMsg(PATMInterruptFlag == (VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC | VM_FF_TIMER | VM_FF_REQUEST),
115 ("Interrupt flags out of sync!! PATMInterruptFlag=%#x expected %#x. broken assembler?\n", PATMInterruptFlag, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC | VM_FF_TIMER | VM_FF_REQUEST));
116
117 /* Allocate patch memory and GC patch state memory. */
118 pVM->patm.s.cbPatchMem = PATCH_MEMORY_SIZE;
119 /* Add another page in case the generated code is much larger than expected. */
120 /** @todo bad safety precaution */
121 rc = MMR3HyperAllocOnceNoRel(pVM, PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE + PATM_STAT_MEMSIZE, PAGE_SIZE, MM_TAG_PATM, (void **)&pVM->patm.s.pPatchMemHC);
122 if (RT_FAILURE(rc))
123 {
124 Log(("MMR3HyperAlloc failed with %Rrc\n", rc));
125 return rc;
126 }
127 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
128
129 /* PATM stack page for call instruction execution. (2 parts: one for our private stack and one to store the original return address) */
130 pVM->patm.s.pGCStackHC = (RTRCPTR *)(pVM->patm.s.pPatchMemHC + PATCH_MEMORY_SIZE + PAGE_SIZE);
131 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
132
133 /*
134 * Hypervisor memory for GC status data (read/write)
135 *
136 * Note1: This is non-critical data; if trashed by the guest, then it will only cause problems for itself
137 * Note2: This doesn't really belong here, but we need access to it for relocation purposes
138 *
139 */
140 Assert(sizeof(PATMGCSTATE) < PAGE_SIZE); /** @note hardcoded dependencies on this exist. */
141 pVM->patm.s.pGCStateHC = (PPATMGCSTATE)((uint8_t *)pVM->patm.s.pGCStackHC + PATM_STACK_TOTAL_SIZE);
142 pVM->patm.s.pGCStateGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
143
144 /* Hypervisor memory for patch statistics */
145 pVM->patm.s.pStatsHC = (PSTAMRATIOU32)((uint8_t *)pVM->patm.s.pGCStateHC + PAGE_SIZE);
146 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
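/* Resulting layout of the single hyper allocation made above (the GC addresses are the
 * MMHyperR3ToRC() mappings of the same offsets):
 *   +0                                   patch code        (PATCH_MEMORY_SIZE)
 *   +PATCH_MEMORY_SIZE                   slack page        (PAGE_SIZE, see @todo above)
 *   +PATCH_MEMORY_SIZE+PAGE_SIZE         PATM stack        (PATM_STACK_TOTAL_SIZE)
 *   + ... +PATM_STACK_TOTAL_SIZE         GC state          (PATMGCSTATE, one page)
 *   + ... +PAGE_SIZE                     patch statistics  (PATM_STAT_MEMSIZE) */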
147
148 /* Memory for patch lookup trees. */
149 rc = MMHyperAlloc(pVM, sizeof(*pVM->patm.s.PatchLookupTreeHC), 0, MM_TAG_PATM, (void **)&pVM->patm.s.PatchLookupTreeHC);
150 AssertRCReturn(rc, rc);
151 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
152
153#ifdef RT_ARCH_AMD64 /* see patmReinit(). */
154 /* Check CFGM option. */
155 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "PATMEnabled", &pVM->fPATMEnabled);
156 if (RT_FAILURE(rc))
157# ifdef PATM_DISABLE_ALL
158 pVM->fPATMEnabled = false;
159# else
160 pVM->fPATMEnabled = true;
161# endif
162#endif
163
164 rc = patmReinit(pVM);
165 AssertRC(rc);
166 if (RT_FAILURE(rc))
167 return rc;
168
169 /*
170 * Register save and load state notificators.
171 */
172 rc = SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SSM_VERSION, sizeof(pVM->patm.s) + PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE,
173 NULL, patmr3Save, NULL,
174 NULL, patmr3Load, NULL);
175 if (RT_FAILURE(rc))
176 {
177 AssertRC(rc);
178 return rc;
179 }
180
181#ifdef VBOX_WITH_DEBUGGER
182 /*
183 * Debugger commands.
184 */
185 static bool fRegisteredCmds = false;
186 if (!fRegisteredCmds)
187 {
188 int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
189 if (RT_SUCCESS(rc))
190 fRegisteredCmds = true;
191 }
192#endif
193
194#ifdef VBOX_WITH_STATISTICS
195 STAM_REG(pVM, &pVM->patm.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/PATM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
196 STAM_REG(pVM, &pVM->patm.s.StatPATMMemoryUsed,STAMTYPE_COUNTER, "/PATM/MemoryUsed", STAMUNIT_OCCURENCES, "The amount of hypervisor heap used for patches.");
197 STAM_REG(pVM, &pVM->patm.s.StatDisabled, STAMTYPE_COUNTER, "/PATM/Patch/Disabled", STAMUNIT_OCCURENCES, "Number of times patches were disabled.");
198 STAM_REG(pVM, &pVM->patm.s.StatEnabled, STAMTYPE_COUNTER, "/PATM/Patch/Enabled", STAMUNIT_OCCURENCES, "Number of times patches were enabled.");
199 STAM_REG(pVM, &pVM->patm.s.StatDirty, STAMTYPE_COUNTER, "/PATM/Patch/Dirty", STAMUNIT_OCCURENCES, "Number of times patches were marked dirty.");
200 STAM_REG(pVM, &pVM->patm.s.StatUnusable, STAMTYPE_COUNTER, "/PATM/Patch/Unusable", STAMUNIT_OCCURENCES, "Number of unusable patches (conflicts).");
201 STAM_REG(pVM, &pVM->patm.s.StatInstalled, STAMTYPE_COUNTER, "/PATM/Patch/Installed", STAMUNIT_OCCURENCES, "Number of installed patches.");
202 STAM_REG(pVM, &pVM->patm.s.StatInt3Callable, STAMTYPE_COUNTER, "/PATM/Patch/Int3Callable", STAMUNIT_OCCURENCES, "Number of cli patches turned into int3 patches.");
203
204 STAM_REG(pVM, &pVM->patm.s.StatInt3BlockRun, STAMTYPE_COUNTER, "/PATM/Patch/Run/Int3", STAMUNIT_OCCURENCES, "Number of times an int3 block patch was executed.");
205 STAMR3RegisterF(pVM, &pVM->patm.s.pGCStateHC->uPatchCalls, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Patch/Run/Normal");
206
207 STAM_REG(pVM, &pVM->patm.s.StatInstalledFunctionPatches, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Function", STAMUNIT_OCCURENCES, "Number of installed function duplication patches.");
208 STAM_REG(pVM, &pVM->patm.s.StatInstalledTrampoline, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Trampoline", STAMUNIT_OCCURENCES, "Number of installed trampoline patches.");
209 STAM_REG(pVM, &pVM->patm.s.StatInstalledJump, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Jump", STAMUNIT_OCCURENCES, "Number of installed jump patches.");
210
211 STAM_REG(pVM, &pVM->patm.s.StatOverwritten, STAMTYPE_COUNTER, "/PATM/Patch/Overwritten", STAMUNIT_OCCURENCES, "Number of overwritten patches.");
212 STAM_REG(pVM, &pVM->patm.s.StatFixedConflicts,STAMTYPE_COUNTER, "/PATM/Patch/ConflictFixed", STAMUNIT_OCCURENCES, "Number of fixed conflicts.");
213 STAM_REG(pVM, &pVM->patm.s.StatFlushed, STAMTYPE_COUNTER, "/PATM/Patch/Flushed", STAMUNIT_OCCURENCES, "Number of flushes of pages with patch jumps.");
214 STAM_REG(pVM, &pVM->patm.s.StatMonitored, STAMTYPE_COUNTER, "/PATM/Patch/Monitored", STAMUNIT_OCCURENCES, "Number of patches in monitored patch pages.");
215 STAM_REG(pVM, &pVM->patm.s.StatPageBoundaryCrossed, STAMTYPE_COUNTER, "/PATM/Patch/BoundaryCross", STAMUNIT_OCCURENCES, "Number of refused patches due to patch jump crossing page boundary.");
216
217 STAM_REG(pVM, &pVM->patm.s.StatHandleTrap, STAMTYPE_PROFILE, "/PATM/HandleTrap", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3HandleTrap");
218 STAM_REG(pVM, &pVM->patm.s.StatPushTrap, STAMTYPE_COUNTER, "/PATM/HandleTrap/PushWP", STAMUNIT_OCCURENCES, "Number of traps due to monitored stack pages.");
219
220 STAM_REG(pVM, &pVM->patm.s.StatSwitchBack, STAMTYPE_COUNTER, "/PATM/SwitchBack", STAMUNIT_OCCURENCES, "Switch back to original guest code when IF=1 & executing PATM instructions");
221 STAM_REG(pVM, &pVM->patm.s.StatSwitchBackFail,STAMTYPE_COUNTER, "/PATM/SwitchBackFail", STAMUNIT_OCCURENCES, "Failed switch back to original guest code when IF=1 & executing PATM instructions");
222
223 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQFailed, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Failed", STAMUNIT_OCCURENCES, "Nr of failed PATMR3DuplicateFunctionRequest calls");
224 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQSuccess, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Success", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls");
225 STAM_REG(pVM, &pVM->patm.s.StatDuplicateUseExisting,STAMTYPE_COUNTER, "/PATM/Function/DupREQ/UseExist", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls when using an existing patch");
226
227 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupInsert, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Insert", STAMUNIT_OCCURENCES, "Nr of successful function address insertions");
228 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupReplace, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Replace", STAMUNIT_OCCURENCES, "Nr of successful function address replacements");
229 STAM_REG(pVM, &pVM->patm.s.StatU32FunctionMaxSlotsUsed, STAMTYPE_U32_RESET,"/PATM/Function/Lookup/MaxSlots", STAMUNIT_OCCURENCES, "Maximum nr of lookup slots used in all call patches");
230
231 STAM_REG(pVM, &pVM->patm.s.StatFunctionFound, STAMTYPE_COUNTER, "/PATM/Function/Found", STAMUNIT_OCCURENCES, "Nr of successful function patch lookups in GC");
232 STAM_REG(pVM, &pVM->patm.s.StatFunctionNotFound, STAMTYPE_COUNTER, "/PATM/Function/NotFound", STAMUNIT_OCCURENCES, "Nr of failed function patch lookups in GC");
233
234 STAM_REG(pVM, &pVM->patm.s.StatPatchWrite, STAMTYPE_PROFILE, "/PATM/Write/Handle", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3PatchWrite");
235 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteDetect, STAMTYPE_PROFILE, "/PATM/Write/Detect", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMIsWriteToPatchPage");
236 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpreted, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Success", STAMUNIT_OCCURENCES, "Nr of interpreted patch writes.");
237 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpretedFailed, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Failed", STAMUNIT_OCCURENCES, "Nr of failed interpreted patch writes.");
238
239 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshSuccess, STAMTYPE_COUNTER, "/PATM/Refresh/Success", STAMUNIT_OCCURENCES, "Successful patch refreshes");
240 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshFailed, STAMTYPE_COUNTER, "/PATM/Refresh/Failure", STAMUNIT_OCCURENCES, "Failed patch refreshes");
241
242 STAM_REG(pVM, &pVM->patm.s.StatPatchPageInserted, STAMTYPE_COUNTER, "/PATM/Page/Inserted", STAMUNIT_OCCURENCES, "Nr of inserted guest pages that were patched");
243 STAM_REG(pVM, &pVM->patm.s.StatPatchPageRemoved, STAMTYPE_COUNTER, "/PATM/Page/Removed", STAMUNIT_OCCURENCES, "Nr of removed guest pages that were patched");
244
245 STAM_REG(pVM, &pVM->patm.s.StatInstrDirty, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Detected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty.");
246 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyGood, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Corrected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and corrected later on.");
247 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyBad, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Failed", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and we were not able to correct them.");
248
249 STAM_REG(pVM, &pVM->patm.s.StatSysEnter, STAMTYPE_COUNTER, "/PATM/Emul/SysEnter", STAMUNIT_OCCURENCES, "Number of times sysenter was emulated.");
250 STAM_REG(pVM, &pVM->patm.s.StatSysExit, STAMTYPE_COUNTER, "/PATM/Emul/SysExit" , STAMUNIT_OCCURENCES, "Number of times sysexit was emulated.");
251 STAM_REG(pVM, &pVM->patm.s.StatEmulIret, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Success", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
252 STAM_REG(pVM, &pVM->patm.s.StatEmulIretFailed, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Failed", STAMUNIT_OCCURENCES, "Number of times iret emulation failed.");
253
254 STAM_REG(pVM, &pVM->patm.s.StatGenRet, STAMTYPE_COUNTER, "/PATM/Gen/Ret" , STAMUNIT_OCCURENCES, "Number of generated ret instructions.");
255 STAM_REG(pVM, &pVM->patm.s.StatGenRetReused, STAMTYPE_COUNTER, "/PATM/Gen/RetReused" , STAMUNIT_OCCURENCES, "Number of reused ret instructions.");
256 STAM_REG(pVM, &pVM->patm.s.StatGenCall, STAMTYPE_COUNTER, "/PATM/Gen/Call", STAMUNIT_OCCURENCES, "Number of generated call instructions.");
257 STAM_REG(pVM, &pVM->patm.s.StatGenJump, STAMTYPE_COUNTER, "/PATM/Gen/Jmp" , STAMUNIT_OCCURENCES, "Number of generated indirect jump instructions.");
258 STAM_REG(pVM, &pVM->patm.s.StatGenPopf, STAMTYPE_COUNTER, "/PATM/Gen/Popf" , STAMUNIT_OCCURENCES, "Number of generated popf instructions.");
259
260 STAM_REG(pVM, &pVM->patm.s.StatCheckPendingIRQ, STAMTYPE_COUNTER, "/PATM/GC/CheckIRQ" , STAMUNIT_OCCURENCES, "Number of traps that ask to check for pending irqs.");
261#endif /* VBOX_WITH_STATISTICS */
262
263 Log(("PATMCallRecord.size %d\n", PATMCallRecord.size));
264 Log(("PATMCallIndirectRecord.size %d\n", PATMCallIndirectRecord.size));
265 Log(("PATMRetRecord.size %d\n", PATMRetRecord.size));
266 Log(("PATMJumpIndirectRecord.size %d\n", PATMJumpIndirectRecord.size));
267 Log(("PATMPopf32Record.size %d\n", PATMPopf32Record.size));
268 Log(("PATMIretRecord.size %d\n", PATMIretRecord.size));
269 Log(("PATMStiRecord.size %d\n", PATMStiRecord.size));
270 Log(("PATMCheckIFRecord.size %d\n", PATMCheckIFRecord.size));
271
272 return rc;
273}
274
275/**
276 * Finalizes HMA page attributes.
277 *
278 * @returns VBox status code.
279 * @param pVM The VM handle.
280 */
281VMMR3DECL(int) PATMR3InitFinalize(PVM pVM)
282{
283 /* The GC state, stack and statistics must be read/write for the guest (supervisor only of course). */
284 int rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStateGC, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
285 if (RT_FAILURE(rc))
286 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
287
288 rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStackGC, PATM_STACK_TOTAL_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
289 if (RT_FAILURE(rc))
290 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
291
292 rc = PGMMapSetPage(pVM, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
293 if (RT_FAILURE(rc))
294 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
295
296 return rc;
297}
298
299/**
300 * (Re)initializes PATM
301 *
302 * @param pVM The VM.
303 */
304static int patmReinit(PVM pVM)
305{
306 int rc;
307
308 /*
309 * Assert alignment and sizes.
310 */
311 AssertRelease(!(RT_OFFSETOF(VM, patm.s) & 31));
312 AssertRelease(sizeof(pVM->patm.s) <= sizeof(pVM->patm.padding));
313
314 /*
315 * Setup any fixed pointers and offsets.
316 */
317 pVM->patm.s.offVM = RT_OFFSETOF(VM, patm);
318
319#ifndef RT_ARCH_AMD64 /* would be nice if this was changed everywhere. was driving me crazy on AMD64. */
320#ifndef PATM_DISABLE_ALL
321 pVM->fPATMEnabled = true;
322#endif
323#endif
324
325 Assert(pVM->patm.s.pGCStateHC);
326 memset(pVM->patm.s.pGCStateHC, 0, PAGE_SIZE);
327 AssertReleaseMsg(pVM->patm.s.pGCStateGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStateGC));
328
329 Log(("Patch memory allocated at %p - %RRv\n", pVM->patm.s.pPatchMemHC, pVM->patm.s.pPatchMemGC));
330 pVM->patm.s.pGCStateHC->uVMFlags = X86_EFL_IF;
331
332 Assert(pVM->patm.s.pGCStackHC);
333 memset(pVM->patm.s.pGCStackHC, 0, PAGE_SIZE);
334 AssertReleaseMsg(pVM->patm.s.pGCStackGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStackGC));
335 pVM->patm.s.pGCStateHC->Psp = PATM_STACK_SIZE;
336 pVM->patm.s.pGCStateHC->fPIF = 1; /* PATM Interrupt Flag */
337
338 Assert(pVM->patm.s.pStatsHC);
339 memset(pVM->patm.s.pStatsHC, 0, PATM_STAT_MEMSIZE);
340 AssertReleaseMsg(pVM->patm.s.pStatsGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pStatsGC));
341
342 Assert(pVM->patm.s.pPatchMemHC);
343 Assert(pVM->patm.s.pPatchMemGC == MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC));
344 memset(pVM->patm.s.pPatchMemHC, 0, PATCH_MEMORY_SIZE);
345 AssertReleaseMsg(pVM->patm.s.pPatchMemGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pPatchMemHC));
346
347 /* Needed for future patching of sldt/sgdt/sidt/str etc. */
348 pVM->patm.s.pCPUMCtxGC = VM_RC_ADDR(pVM, CPUMQueryGuestCtxPtr(pVM));
349
350 Assert(pVM->patm.s.PatchLookupTreeHC);
351 Assert(pVM->patm.s.PatchLookupTreeGC == MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC));
352
353 /*
354 * (Re)Initialize PATM structure
355 */
356 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
357 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr);
358 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
359 pVM->patm.s.offPatchMem = 16; /* don't start with zero here */
360 pVM->patm.s.uCurrentPatchIdx = 1; /* Index zero is a dummy */
361 pVM->patm.s.pvFaultMonitor = 0;
362 pVM->patm.s.deltaReloc = 0;
363
364 /* Lowest and highest patched instruction */
365 pVM->patm.s.pPatchedInstrGCLowest = ~0;
366 pVM->patm.s.pPatchedInstrGCHighest = 0;
367
368 pVM->patm.s.PatchLookupTreeHC->PatchTree = 0;
369 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
370 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
371
372 pVM->patm.s.pfnSysEnterPatchGC = 0;
373 pVM->patm.s.pfnSysEnterGC = 0;
374
375 pVM->patm.s.fOutOfMemory = false;
376
377 pVM->patm.s.pfnHelperCallGC = 0;
378
379 /* Generate all global functions to be used by future patches. */
380 /* We generate a fake patch in order to use the existing code for relocation. */
381 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pVM->patm.s.pGlobalPatchRec);
382 if (RT_FAILURE(rc))
383 {
384 Log(("Out of memory!!!!\n"));
385 return VERR_NO_MEMORY;
386 }
387 pVM->patm.s.pGlobalPatchRec->patch.flags = PATMFL_GLOBAL_FUNCTIONS;
388 pVM->patm.s.pGlobalPatchRec->patch.uState = PATCH_ENABLED;
389 pVM->patm.s.pGlobalPatchRec->patch.pPatchBlockOffset = pVM->patm.s.offPatchMem;
390
391 rc = patmPatchGenGlobalFunctions(pVM, &pVM->patm.s.pGlobalPatchRec->patch);
392 AssertRC(rc);
393
394 /* Update free pointer in patch memory. */
395 pVM->patm.s.offPatchMem += pVM->patm.s.pGlobalPatchRec->patch.uCurPatchOffset;
396 /* Round to next 8 byte boundary. */
397 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
398 return rc;
399}
400
401
402/**
403 * Applies relocations to data and code managed by this
404 * component. This function will be called at init and
405 * whenever the VMM needs to relocate itself inside the GC.
406 *
407 * The PATM will update the addresses used by the switcher.
408 *
409 * @param pVM The VM.
410 */
411VMMR3DECL(void) PATMR3Relocate(PVM pVM)
412{
413 RTRCPTR GCPtrNew = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
414 RTRCINTPTR delta = GCPtrNew - pVM->patm.s.pGCStateGC;
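 /* Everything PATM hands out in GC lives in the hypervisor area and moves by the same amount,
  * so a single delta (derived here from the GC state pointer) describes the relocation; the
  * code below either adds it to cached GC addresses or recomputes them with MMHyperR3ToRC(). */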
415
416 Log(("PATMR3Relocate from %RRv to %RRv - delta %08X\n", pVM->patm.s.pGCStateGC, GCPtrNew, delta));
417 if (delta)
418 {
419 PCPUMCTX pCtx;
420
421 /* Update CPUMCTX guest context pointer. */
422 pVM->patm.s.pCPUMCtxGC += delta;
423
424 pVM->patm.s.deltaReloc = delta;
425
426 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, RelocatePatches, (void *)pVM);
427
428 pCtx = CPUMQueryGuestCtxPtr(pVM);
429
430 /* If we are running patch code right now, then also adjust EIP. */
431 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
432 pCtx->eip += delta;
433
434 pVM->patm.s.pGCStateGC = GCPtrNew;
435 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
436
437 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
438
439 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
440
441 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
442
443 if (pVM->patm.s.pfnSysEnterPatchGC)
444 pVM->patm.s.pfnSysEnterPatchGC += delta;
445
446 /* Deal with the global patch functions. */
447 pVM->patm.s.pfnHelperCallGC += delta;
448 pVM->patm.s.pfnHelperRetGC += delta;
449 pVM->patm.s.pfnHelperIretGC += delta;
450 pVM->patm.s.pfnHelperJumpGC += delta;
451
452 RelocatePatches(&pVM->patm.s.pGlobalPatchRec->Core, (void *)pVM);
453 }
454}
455
456
457/**
458 * Terminates the PATM.
459 *
460 * Termination means cleaning up and freeing all resources,
461 * the VM itself is at this point powered off or suspended.
462 *
463 * @returns VBox status code.
464 * @param pVM The VM to operate on.
465 */
466VMMR3DECL(int) PATMR3Term(PVM pVM)
467{
468 /* Memory was all allocated from the two MM heaps and requires no freeing. */
469 return VINF_SUCCESS;
470}
471
472
473/**
474 * PATM reset callback.
475 *
476 * @returns VBox status code.
477 * @param pVM The VM which is reset.
478 */
479VMMR3DECL(int) PATMR3Reset(PVM pVM)
480{
481 Log(("PATMR3Reset\n"));
482
483 /* Free all patches. */
484 while (true)
485 {
486 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32RemoveBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, 0, true);
487 if (pPatchRec)
488 {
489 PATMRemovePatch(pVM, pPatchRec, true);
490 }
491 else
492 break;
493 }
494 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
495 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
496 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
497 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
498
499 int rc = patmReinit(pVM);
500 if (RT_SUCCESS(rc))
501 rc = PATMR3InitFinalize(pVM); /* paranoia */
502
503 return rc;
504}
505
506/**
507 * Read callback for disassembly function; supports reading bytes that cross a page boundary
508 *
509 * @returns VBox status code.
510 * @param pSrc GC source pointer
511 * @param pDest HC destination pointer
512 * @param size Number of bytes to read
513 * @param pvUserdata Callback specific user data (pCpu)
514 *
515 */
516int patmReadBytes(RTUINTPTR pSrc, uint8_t *pDest, unsigned size, void *pvUserdata)
517{
518 DISCPUSTATE *pCpu = (DISCPUSTATE *)pvUserdata;
519 PATMDISASM *pDisInfo = (PATMDISASM *)pCpu->apvUserData[0];
520 int orgsize = size;
521
522 Assert(size);
523 if (size == 0)
524 return VERR_INVALID_PARAMETER;
525
526 /*
527 * Trap/interrupt handlers typically call common code on entry, which might already have patches inserted.
528 * As we currently don't support calling patch code from patch code, we let it read the original opcode bytes instead.
529 */
530 /** @todo could change in the future! */
531 if (pDisInfo->fReadFlags & PATMREAD_ORGCODE)
532 {
533 for (int i=0;i<orgsize;i++)
534 {
535 int rc = PATMR3QueryOpcode(pDisInfo->pVM, (RTRCPTR)pSrc, pDest);
536 if (RT_SUCCESS(rc))
537 {
538 pSrc++;
539 pDest++;
540 size--;
541 }
542 else break;
543 }
544 if (size == 0)
545 return VINF_SUCCESS;
546#ifdef VBOX_STRICT
547 if ( !(pDisInfo->pPatchInfo->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER))
548 && !(pDisInfo->fReadFlags & PATMREAD_NOCHECK))
549 {
550 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pSrc, NULL) == false);
551 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pSrc+size-1, NULL) == false);
552 }
553#endif
554 }
555
556
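 /* If the requested range crosses a page boundary in guest code (and isn't patch memory),
  * go through PGM to read it safely; otherwise copy straight from the caller-supplied HC
  * mapping of the instruction page. */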
557 if (PAGE_ADDRESS(pDisInfo->pInstrGC) != PAGE_ADDRESS(pSrc + size - 1) && !PATMIsPatchGCAddr(pDisInfo->pVM, pSrc))
558 {
559 return PGMPhysSimpleReadGCPtr(pDisInfo->pVM, pDest, pSrc, size);
560 }
561 else
562 {
563 uint8_t *pInstrHC = pDisInfo->pInstrHC;
564
565 Assert(pInstrHC);
566
567 /* pInstrHC is the base address; adjust according to the GC pointer. */
568 pInstrHC = pInstrHC + (pSrc - pDisInfo->pInstrGC);
569
570 memcpy(pDest, (void *)pInstrHC, size);
571 }
572
573 return VINF_SUCCESS;
574}
575
576/**
577 * Callback function for RTAvloU32DoWithAll
578 *
579 * Updates all fixups in the patches
580 *
581 * @returns VBox status code.
582 * @param pNode Current node
583 * @param pParam The VM to operate on.
584 */
585static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam)
586{
587 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
588 PVM pVM = (PVM)pParam;
589 RTRCINTPTR delta;
590#ifdef LOG_ENABLED
591 DISCPUSTATE cpu;
592 char szOutput[256];
593 uint32_t opsize;
594 bool disret;
595#endif
596 int rc;
597
598 /* Nothing to do if the patch is not active. */
599 if (pPatch->patch.uState == PATCH_REFUSED)
600 return 0;
601
602#ifdef LOG_ENABLED
603 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
604 {
605 /** @note pPrivInstrHC is probably not valid anymore */
606 rc = PGMPhysGCPtr2HCPtr(pVM, pPatch->patch.pPrivInstrGC, (PRTHCPTR)&pPatch->patch.pPrivInstrHC);
607 if (rc == VINF_SUCCESS)
608 {
609 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
610 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pPatch->patch.pPrivInstrGC, pPatch->patch.pPrivInstrHC, &opsize, szOutput, PATMREAD_RAWCODE);
611 Log(("Org patch jump: %s", szOutput));
612 }
613 }
614#endif
615
616 Log(("Nr of fixups %d\n", pPatch->patch.nrFixups));
617 delta = (RTRCINTPTR)pVM->patm.s.deltaReloc;
618
619 /*
620 * Apply fixups
621 */
622 PRELOCREC pRec = 0;
623 AVLPVKEY key = 0;
624
625 while (true)
626 {
627 /* Get the record that's closest from above */
628 pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true);
629 if (pRec == 0)
630 break;
631
632 key = (AVLPVKEY)(pRec->pRelocPos + 1); /* search for the next record during the next round. */
633
634 switch (pRec->uType)
635 {
636 case FIXUP_ABSOLUTE:
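 /* An absolute fixup either has no guest source recorded or its source lies in patch memory,
  * in which case the stored address is simply shifted by the delta; or the address was written
  * into original guest code, in which case we re-read that code, verify the value we wrote
  * earlier is still there, and only then store the relocated destination. */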
637 Log(("Absolute fixup at %RRv %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
638 if (!pRec->pSource || PATMIsPatchGCAddr(pVM, pRec->pSource))
639 {
640 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
641 }
642 else
643 {
644 uint8_t curInstr[15];
645 uint8_t oldInstr[15];
646 Assert(pRec->pSource && pPatch->patch.cbPrivInstr <= 15);
647
648 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
649
650 memcpy(oldInstr, pPatch->patch.aPrivInstr, pPatch->patch.cbPrivInstr);
651 *(RTRCPTR *)&oldInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
652
653 rc = PGMPhysSimpleReadGCPtr(pVM, curInstr, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPrivInstr);
654 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
655
656 pRec->pDest = (RTRCPTR)((RTRCUINTPTR)pRec->pDest + delta);
657
658 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
659 {
660 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
661
662 Log(("PATM: Patch page not present -> check later!\n"));
663 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
664 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
665 }
666 else
667 if (memcmp(curInstr, oldInstr, pPatch->patch.cbPrivInstr))
668 {
669 Log(("PATM: Patch was overwritten -> disabling patch!!\n"));
670 /*
671 * Disable patch; this is not a good solution
672 */
673 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
674 pPatch->patch.uState = PATCH_DISABLED;
675 }
676 else
677 if (RT_SUCCESS(rc))
678 {
679 *(RTRCPTR *)&curInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
680 rc = PGMPhysSimpleDirtyWriteGCPtr(pVM, pRec->pSource, curInstr, pPatch->patch.cbPrivInstr);
681 AssertRC(rc);
682 }
683 }
684 break;
685
686 case FIXUP_REL_JMPTOPATCH:
687 {
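 /* Fixup for the relative jump we patched into guest code to enter this patch block.
  * The block has moved, so the displacement is recomputed; before writing it back we re-read
  * the guest bytes and verify the jump we installed is still intact (if it was overwritten the
  * patch is disabled; if the page isn't present a virtual handler is registered to check later). */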
688 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
689
690 if ( pPatch->patch.uState == PATCH_ENABLED
691 && (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE))
692 {
693 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
694 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
695 RTRCPTR pJumpOffGC;
696 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
697 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
698
699 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
700
701 Assert(pRec->pSource - pPatch->patch.cbPatchJump == pPatch->patch.pPrivInstrGC);
702#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
703 if (pPatch->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
704 {
705 Assert(pPatch->patch.flags & PATMFL_JUMP_CONFLICT);
706
707 pJumpOffGC = pPatch->patch.pPrivInstrGC + 2; //two byte opcode
708 oldJump[0] = pPatch->patch.aPrivInstr[0];
709 oldJump[1] = pPatch->patch.aPrivInstr[1];
710 *(RTRCUINTPTR *)&oldJump[2] = displOld;
711 }
712 else
713#endif
714 if (pPatch->patch.cbPatchJump == SIZEOF_NEARJUMP32)
715 {
716 pJumpOffGC = pPatch->patch.pPrivInstrGC + 1; //one byte opcode
717 oldJump[0] = 0xE9;
718 *(RTRCUINTPTR *)&oldJump[1] = displOld;
719 }
720 else
721 {
722 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->patch.cbPatchJump));
723 continue; //this should never happen!!
724 }
725 Assert(pPatch->patch.cbPatchJump <= sizeof(temp));
726
727 /*
728 * Read old patch jump and compare it to the one we previously installed
729 */
730 rc = PGMPhysSimpleReadGCPtr(pVM, temp, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPatchJump);
731 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
732
733 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
734 {
735 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
736
737 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
738 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
739 }
740 else
741 if (memcmp(temp, oldJump, pPatch->patch.cbPatchJump))
742 {
743 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
744 /*
745 * Disable patch; this is not a good solution
746 */
747 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
748 pPatch->patch.uState = PATCH_DISABLED;
749 }
750 else
751 if (RT_SUCCESS(rc))
752 {
753 rc = PGMPhysSimpleDirtyWriteGCPtr(pVM, pJumpOffGC, &displ, sizeof(displ));
754 AssertRC(rc);
755 }
756 else
757 {
758 AssertMsgFailed(("Unexpected error %d from PGMPhysSimpleReadGCPtr\n", rc));
759 }
760 }
761 else
762 {
763 Log(("Skip the guest jump to patch code for this disabled patch %08X - %08X\n", pPatch->patch.pPrivInstrHC, pRec->pRelocPos));
764 }
765
766 pRec->pDest = pTarget;
767 break;
768 }
769
770 case FIXUP_REL_JMPTOGUEST:
771 {
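 /* The opposite direction: a relative jump inside patch memory that returns to (unmoved)
  * guest code. Only the source end moved with the patch block, so the new displacement is
  * simply the target minus the relocated source address. */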
772 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
773 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
774
775 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
776 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
777 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
778 pRec->pSource = pSource;
779 break;
780 }
781
782 default:
783 AssertMsg(0, ("Invalid fixup type!!\n"));
784 return VERR_INVALID_PARAMETER;
785 }
786 }
787
788#ifdef LOG_ENABLED
789 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
790 {
791 /** @note pPrivInstrHC is probably not valid anymore */
792 rc = PGMPhysGCPtr2HCPtr(pVM, pPatch->patch.pPrivInstrGC, (PRTHCPTR)&pPatch->patch.pPrivInstrHC);
793 if (rc == VINF_SUCCESS)
794 {
795 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
796 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pPatch->patch.pPrivInstrGC, pPatch->patch.pPrivInstrHC, &opsize, szOutput, PATMREAD_RAWCODE);
797 Log(("Rel patch jump: %s", szOutput));
798 }
799 }
800#endif
801 return 0;
802}
803
804/**
805 * #PF Handler callback for virtual access handler ranges.
806 *
807 * Important to realize that a physical page in a range can have aliases, and
808 * for ALL and WRITE handlers these will also trigger.
809 *
810 * @returns VINF_SUCCESS if the handler has carried out the operation.
811 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
812 * @param pVM VM Handle.
813 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
814 * @param pvPtr The HC mapping of that address.
815 * @param pvBuf What the guest is reading/writing.
816 * @param cbBuf How much it's reading/writing.
817 * @param enmAccessType The access type.
818 * @param pvUser User argument.
819 */
820DECLCALLBACK(int) patmVirtPageHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
821{
822 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
823 /** @todo could be the wrong virtual address (alias) */
824 pVM->patm.s.pvFaultMonitor = GCPtr;
825 PATMR3HandleMonitoredPage(pVM);
826 return VINF_PGM_HANDLER_DO_DEFAULT;
827}
828
829
830#ifdef VBOX_WITH_DEBUGGER
831/**
832 * Callback function for RTAvloU32DoWithAll
833 *
834 * Enables the patch that's being enumerated
835 *
836 * @returns 0 (continue enumeration).
837 * @param pNode Current node
838 * @param pVM The VM to operate on.
839 */
840static DECLCALLBACK(int) EnableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
841{
842 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
843
844 PATMR3EnablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
845 return 0;
846}
847#endif /* VBOX_WITH_DEBUGGER */
848
849
850#ifdef VBOX_WITH_DEBUGGER
851/**
852 * Callback function for RTAvloU32DoWithAll
853 *
854 * Disables the patch that's being enumerated
855 *
856 * @returns 0 (continue enumeration).
857 * @param pNode Current node
858 * @param pVM The VM to operate on.
859 */
860static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
861{
862 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
863
864 PATMR3DisablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
865 return 0;
866}
867#endif
868
869/**
870 * Returns the host context pointer and size of the patch memory block
871 *
872 * @returns Host context pointer to the patch memory block.
873 * @param pVM The VM to operate on.
874 * @param pcb Where to store the size of the patch memory block (optional)
875 */
876VMMR3DECL(void *) PATMR3QueryPatchMemHC(PVM pVM, uint32_t *pcb)
877{
878 if (pcb)
879 {
880 *pcb = pVM->patm.s.cbPatchMem;
881 }
882 return pVM->patm.s.pPatchMemHC;
883}
884
885
886/**
887 * Returns the guest context pointer and size of the patch memory block
888 *
889 * @returns Guest context pointer to the patch memory block.
890 * @param pVM The VM to operate on.
891 * @param pcb Where to store the size of the patch memory block (optional)
892 */
893VMMR3DECL(RTRCPTR) PATMR3QueryPatchMemGC(PVM pVM, uint32_t *pcb)
894{
895 if (pcb)
896 {
897 *pcb = pVM->patm.s.cbPatchMem;
898 }
899 return pVM->patm.s.pPatchMemGC;
900}
901
902
903/**
904 * Returns the host context pointer of the GC context structure
905 *
906 * @returns Host context pointer to the PATM GC state structure.
907 * @param pVM The VM to operate on.
908 */
909VMMR3DECL(PPATMGCSTATE) PATMR3QueryGCStateHC(PVM pVM)
910{
911 return pVM->patm.s.pGCStateHC;
912}
913
914
915/**
916 * Checks whether the HC address is part of our patch region
917 *
918 * @returns true if the address lies inside the patch memory region, false otherwise.
919 * @param pVM The VM to operate on.
920 * @param pAddrHC Host context address to check
921 */
922VMMR3DECL(bool) PATMR3IsPatchHCAddr(PVM pVM, R3PTRTYPE(uint8_t *) pAddrHC)
923{
924 return (pAddrHC >= pVM->patm.s.pPatchMemHC && pAddrHC < pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem) ? true : false;
925}
926
927
928/**
929 * Allows or disallows patching of privileged instructions executed by the guest OS
930 *
931 * @returns VBox status code.
932 * @param pVM The VM to operate on.
933 * @param fAllowPatching Allow/disallow patching
934 */
935VMMR3DECL(int) PATMR3AllowPatching(PVM pVM, uint32_t fAllowPatching)
936{
937 pVM->fPATMEnabled = (fAllowPatching) ? true : false;
938 return VINF_SUCCESS;
939}
940
941/**
942 * Convert a GC patch block pointer to a HC patch pointer
943 *
944 * @returns HC pointer or NULL if it's not a GC patch pointer
945 * @param pVM The VM to operate on.
946 * @param pAddrGC GC pointer
947 */
948VMMR3DECL(R3PTRTYPE(void *)) PATMR3GCPtrToHCPtr(PVM pVM, RTRCPTR pAddrGC)
949{
950 if (pVM->patm.s.pPatchMemGC <= pAddrGC && pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem > pAddrGC)
951 {
952 return pVM->patm.s.pPatchMemHC + (pAddrGC - pVM->patm.s.pPatchMemGC);
953 }
954 return NULL;
955}
956
957/**
958 * Query PATM state (enabled/disabled)
959 *
960 * @returns 0 - disabled, 1 - enabled
961 * @param pVM The VM to operate on.
962 */
963VMMR3DECL(int) PATMR3IsEnabled(PVM pVM)
964{
965 return pVM->fPATMEnabled;
966}
967
968
969/**
970 * Convert guest context address to host context pointer
971 *
972 * @returns Host context pointer or NULL in case of an error.
973 *
974 * @param pVM The VM to operate on.
975 * @param pPatch Patch block structure pointer
976 * @param pGCPtr Guest context pointer
977 *
978 *
979 */
980R3PTRTYPE(uint8_t *) PATMGCVirtToHCVirt(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pGCPtr)
981{
982 int rc;
983 R3PTRTYPE(uint8_t *) pHCPtr;
984 uint32_t offset;
985
986 if (PATMIsPatchGCAddr(pVM, pGCPtr))
987 {
988 return PATCHCODE_PTR_HC(pPatch) + (pGCPtr - PATCHCODE_PTR_GC(pPatch));
989 }
990
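 /* One-entry cache: if the last lookup hit the same guest page, reuse its cached HC mapping
  * and skip the PGM translation below. */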
991 offset = pGCPtr & PAGE_OFFSET_MASK;
992 if (pPatch->cacheRec.pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
993 {
994 return pPatch->cacheRec.pPatchLocStartHC + offset;
995 }
996
997 rc = PGMPhysGCPtr2HCPtr(pVM, pGCPtr, (void **)&pHCPtr);
998 if (rc != VINF_SUCCESS)
999 {
1000 AssertMsg(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("PGMPhysGCPtr2HCPtr failed for %08X\n", pGCPtr));
1001 return NULL;
1002 }
1003////invalid? Assert(sizeof(R3PTRTYPE(uint8_t*)) == sizeof(uint32_t));
1004
1005 pPatch->cacheRec.pPatchLocStartHC = (R3PTRTYPE(uint8_t*))((RTHCUINTPTR)pHCPtr & PAGE_BASE_HC_MASK);
1006 pPatch->cacheRec.pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
1007 return pHCPtr;
1008}
1009
1010
1011/** Calculates and fills in all branch targets
1012 *
1013 * @returns VBox status code.
1014 * @param pVM The VM to operate on.
1015 * @param pPatch Current patch block pointer
1016 *
1017 */
1018static int patmr3SetBranchTargets(PVM pVM, PPATCHINFO pPatch)
1019{
1020 int32_t displ;
1021
1022 PJUMPREC pRec = 0;
1023 int nrJumpRecs = 0;
1024
1025 /*
1026 * Set all branch targets inside the patch block.
1027 * We remove all jump records as they are no longer needed afterwards.
1028 */
1029 while (true)
1030 {
1031 RCPTRTYPE(uint8_t *) pInstrGC;
1032 RCPTRTYPE(uint8_t *) pBranchTargetGC = 0;
1033
1034 pRec = (PJUMPREC)RTAvlPVRemoveBestFit(&pPatch->JumpTree, 0, true);
1035 if (pRec == 0)
1036 break;
1037
1038 nrJumpRecs++;
1039
1040 /* HC in patch block to GC in patch block. */
1041 pInstrGC = patmPatchHCPtr2PatchGCPtr(pVM, pRec->pJumpHC);
1042
1043 if (pRec->opcode == OP_CALL)
1044 {
1045 /* Special case: call function replacement patch from this patch block.
1046 */
1047 PPATMPATCHREC pFunctionRec = PATMQueryFunctionPatch(pVM, pRec->pTargetGC);
1048 if (!pFunctionRec)
1049 {
1050 int rc;
1051
1052 if (PATMR3HasBeenPatched(pVM, pRec->pTargetGC) == false)
1053 rc = PATMR3InstallPatch(pVM, pRec->pTargetGC, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
1054 else
1055 rc = VERR_PATCHING_REFUSED; /* exists as a normal patch; can't use it */
1056
1057 if (RT_FAILURE(rc))
1058 {
1059 uint8_t *pPatchHC;
1060 RTRCPTR pPatchGC;
1061 RTRCPTR pOrgInstrGC;
1062
1063 pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pInstrGC, 0);
1064 Assert(pOrgInstrGC);
1065
1066 /* Failure for some reason -> mark exit point with int 3. */
1067 Log(("Failed to install function replacement patch (at %x) for reason %Rrc\n", pOrgInstrGC, rc));
1068
1069 pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pOrgInstrGC);
1070 Assert(pPatchGC);
1071
1072 pPatchHC = pVM->patm.s.pPatchMemHC + (pPatchGC - pVM->patm.s.pPatchMemGC);
1073
1074 /* Set a breakpoint at the very beginning of the recompiled instruction */
1075 *pPatchHC = 0xCC;
1076
1077 continue;
1078 }
1079 }
1080 else
1081 {
1082 Log(("Patch block %RRv called as function\n", pFunctionRec->patch.pPrivInstrGC));
1083 pFunctionRec->patch.flags |= PATMFL_CODE_REFERENCED;
1084 }
1085
1086 pBranchTargetGC = PATMR3QueryPatchGCPtr(pVM, pRec->pTargetGC);
1087 }
1088 else
1089 {
1090 pBranchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pRec->pTargetGC);
1091 }
1092
1093 if (pBranchTargetGC == 0)
1094 {
1095 AssertMsgFailed(("patmr3SetBranchTargets: patmGuestGCPtrToPatchGCPtr failed for %08X\n", pRec->pTargetGC));
1096 return VERR_PATCHING_REFUSED;
1097 }
1098 /* Our jumps *always* have a dword displacement (to make things easier). */
1099 Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
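 /* The displacement is relative to the byte following the 32-bit displacement field:
  * target - (address of the jump instruction + offset of the displacement within it + 4). */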
1100 displ = pBranchTargetGC - (pInstrGC + pRec->offDispl + sizeof(RTRCPTR));
1101 *(RTRCPTR *)(pRec->pJumpHC + pRec->offDispl) = displ;
1102 Log(("Set branch target %d to %08X : %08x - (%08x + %d + %d)\n", nrJumpRecs, displ, pBranchTargetGC, pInstrGC, pRec->offDispl, sizeof(RTRCPTR)));
1103 }
1104 Assert(nrJumpRecs == pPatch->nrJumpRecs);
1105 Assert(pPatch->JumpTree == 0);
1106 return VINF_SUCCESS;
1107}
1108
1109/** Add an illegal instruction record
1110 *
1111 * @param pVM The VM to operate on.
1112 * @param pPatch Patch structure ptr
1113 * @param pInstrGC Guest context pointer to privileged instruction
1114 *
1115 */
1116static void patmAddIllegalInstrRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1117{
1118 PAVLPVNODECORE pRec;
1119
1120 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1121 Assert(pRec);
1122 pRec->Key = (AVLPVKEY)pInstrGC;
1123
1124 bool ret = RTAvlPVInsert(&pPatch->pTempInfo->IllegalInstrTree, pRec);
1125 Assert(ret); NOREF(ret);
1126 pPatch->pTempInfo->nrIllegalInstr++;
1127}
1128
1129static bool patmIsIllegalInstr(PPATCHINFO pPatch, RTRCPTR pInstrGC)
1130{
1131 PAVLPVNODECORE pRec;
1132
1133 pRec = RTAvlPVGet(&pPatch->pTempInfo->IllegalInstrTree, (AVLPVKEY)pInstrGC);
1134 if (pRec)
1135 return true;
1136 return false;
1137}
1138
1139/**
1140 * Add a patch to guest lookup record
1141 *
1142 * @param pVM The VM to operate on.
1143 * @param pPatch Patch structure ptr
1144 * @param pPatchInstrHC Guest context pointer to patch block
1145 * @param pInstrGC Guest context pointer to privileged instruction
1146 * @param enmType Lookup type
1147 * @param fDirty Dirty flag
1148 *
1149 */
1150 /** @note Be extremely careful with this function. Make absolutely sure the guest address is correct! (to avoid executing instructions twice!) */
1151void patmr3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC, PATM_LOOKUP_TYPE enmType, bool fDirty)
1152{
1153 bool ret;
1154 PRECPATCHTOGUEST pPatchToGuestRec;
1155 PRECGUESTTOPATCH pGuestToPatchRec;
1156 uint32_t PatchOffset = pPatchInstrHC - pVM->patm.s.pPatchMemHC; /* Offset in memory reserved for PATM. */
1157
1158 if (enmType == PATM_LOOKUP_PATCH2GUEST)
1159 {
1160 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1161 if (pPatchToGuestRec && pPatchToGuestRec->Core.Key == PatchOffset)
1162 return; /* already there */
1163
1164 Assert(!pPatchToGuestRec);
1165 }
1166#ifdef VBOX_STRICT
1167 else
1168 {
1169 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1170 Assert(!pPatchToGuestRec);
1171 }
1172#endif
1173
1174 pPatchToGuestRec = (PRECPATCHTOGUEST)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(RECPATCHTOGUEST) + sizeof(RECGUESTTOPATCH));
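 /* Note: one allocation holds both lookup records; the guest-to-patch record (only used for
  * PATM_LOOKUP_BOTHDIR) is placed directly behind the patch-to-guest record and freed with it. */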
1175 Assert(pPatchToGuestRec);
1176 pPatchToGuestRec->Core.Key = PatchOffset;
1177 pPatchToGuestRec->pOrgInstrGC = pInstrGC;
1178 pPatchToGuestRec->enmType = enmType;
1179 pPatchToGuestRec->fDirty = fDirty;
1180
1181 ret = RTAvlU32Insert(&pPatch->Patch2GuestAddrTree, &pPatchToGuestRec->Core);
1182 Assert(ret);
1183
1184 /* GC to patch address */
1185 if (enmType == PATM_LOOKUP_BOTHDIR)
1186 {
1187 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
1188 if (!pGuestToPatchRec)
1189 {
1190 pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1191 pGuestToPatchRec->Core.Key = pInstrGC;
1192 pGuestToPatchRec->PatchOffset = PatchOffset;
1193
1194 ret = RTAvlU32Insert(&pPatch->Guest2PatchAddrTree, &pGuestToPatchRec->Core);
1195 Assert(ret);
1196 }
1197 }
1198
1199 pPatch->nrPatch2GuestRecs++;
1200}
1201
1202
1203/**
1204 * Removes a patch to guest lookup record
1205 *
1206 * @param pVM The VM to operate on.
1207 * @param pPatch Patch structure ptr
1208 * @param pPatchInstrGC Guest context pointer to patch block
1209 */
1210void patmr3RemoveP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pPatchInstrGC)
1211{
1212 PAVLU32NODECORE pNode;
1213 PAVLU32NODECORE pNode2;
1214 PRECPATCHTOGUEST pPatchToGuestRec;
1215 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
1216
1217 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1218 Assert(pPatchToGuestRec);
1219 if (pPatchToGuestRec)
1220 {
1221 if (pPatchToGuestRec->enmType == PATM_LOOKUP_BOTHDIR)
1222 {
1223 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1224
1225 Assert(pGuestToPatchRec->Core.Key);
1226 pNode2 = RTAvlU32Remove(&pPatch->Guest2PatchAddrTree, pGuestToPatchRec->Core.Key);
1227 Assert(pNode2);
1228 }
1229 pNode = RTAvlU32Remove(&pPatch->Patch2GuestAddrTree, pPatchToGuestRec->Core.Key);
1230 Assert(pNode);
1231
1232 MMR3HeapFree(pPatchToGuestRec);
1233 pPatch->nrPatch2GuestRecs--;
1234 }
1235}
1236
1237
1238/**
1239 * RTAvlPVDestroy callback.
1240 */
1241static DECLCALLBACK(int) patmEmptyTreePVCallback(PAVLPVNODECORE pNode, void *)
1242{
1243 MMR3HeapFree(pNode);
1244 return 0;
1245}
1246
1247/**
1248 * Empty the specified tree (PV tree, MMR3 heap)
1249 *
1250 * @param pVM The VM to operate on.
1251 * @param ppTree Tree to empty
1252 */
1253void patmEmptyTree(PVM pVM, PAVLPVNODECORE *ppTree)
1254{
1255 RTAvlPVDestroy(ppTree, patmEmptyTreePVCallback, NULL);
1256}
1257
1258
1259/**
1260 * RTAvlU32Destroy callback.
1261 */
1262static DECLCALLBACK(int) patmEmptyTreeU32Callback(PAVLU32NODECORE pNode, void *)
1263{
1264 MMR3HeapFree(pNode);
1265 return 0;
1266}
1267
1268/**
1269 * Empty the specified tree (U32 tree, MMR3 heap)
1270 *
1271 * @param pVM The VM to operate on.
1272 * @param ppTree Tree to empty
1273 */
1274void patmEmptyTreeU32(PVM pVM, PPAVLU32NODECORE ppTree)
1275{
1276 RTAvlU32Destroy(ppTree, patmEmptyTreeU32Callback, NULL);
1277}
1278
1279
1280/**
1281 * Analyses the instructions following the cli for compliance with our heuristics for cli & pushf
1282 *
1283 * @returns VBox status code.
1284 * @param pVM The VM to operate on.
1285 * @param pCpu CPU disassembly state
1286 * @param pInstrGC Guest context pointer to privileged instruction
1287 * @param pCurInstrGC Guest context pointer to the current instruction
1288 * @param pUserData User pointer (callback specific)
1289 *
1290 */
1291static int patmAnalyseBlockCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, void *pUserData)
1292{
1293 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
1294 bool fIllegalInstr = false;
1295
1296 //Preliminary heuristics:
1297 //- no call instructions without a fixed displacement between cli and sti/popf
1298 //- no jumps in the instructions following cli (4+ bytes; enough for the replacement jump (5 bytes))
1299 //- no nested pushf/cli
1300 //- sti/popf should be the (eventual) target of all branches
1301 //- no near or far returns; no int xx, no into
1302 //
1303 // Note: Later on we can impose less strict guidelines if the need arises
1304
1305 /* Bail out if the patch gets too big. */
1306 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1307 {
1308 Log(("Code block too big (%x) for patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1309 fIllegalInstr = true;
1310 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1311 }
1312 else
1313 {
1314 /* No unconditional jumps or calls without fixed displacements. */
1315 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1316 && (pCpu->pCurInstr->opcode == OP_JMP || pCpu->pCurInstr->opcode == OP_CALL)
1317 )
1318 {
1319 Assert(pCpu->param1.size <= 4 || pCpu->param1.size == 6);
1320 if ( pCpu->param1.size == 6 /* far call/jmp */
1321 || (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1322 || (OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1323 )
1324 {
1325 fIllegalInstr = true;
1326 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1327 }
1328 }
1329
1330 /* An unconditional (short) jump right after a cli is a potential problem; we will overwrite whichever function comes afterwards */
1331 if (pPatch->opcode == OP_CLI && pCpu->pCurInstr->opcode == OP_JMP)
1332 {
1333 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC + pCpu->opsize < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1334 {
1335 Log(("Dangerous unconditional jump ends in our generated patch jump!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1336 /* We turn this one into an int 3 callable patch. */
1337 pPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
1338 }
1339 }
1340 else
1341 /* no nested pushfs just yet; nested cli is allowed for cli patches though. */
1342 if (pPatch->opcode == OP_PUSHF)
1343 {
1344 if (pCurInstrGC != pInstrGC && pCpu->pCurInstr->opcode == OP_PUSHF)
1345 {
1346 fIllegalInstr = true;
1347 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1348 }
1349 }
1350
1351 // no far returns
1352 if (pCpu->pCurInstr->opcode == OP_RETF)
1353 {
1354 pPatch->pTempInfo->nrRetInstr++;
1355 fIllegalInstr = true;
1356 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1357 }
1358 else
1359 // no int xx or into either
1360 if (pCpu->pCurInstr->opcode == OP_INT3 || pCpu->pCurInstr->opcode == OP_INT || pCpu->pCurInstr->opcode == OP_INTO)
1361 {
1362 fIllegalInstr = true;
1363 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1364 }
1365 }
1366
1367 pPatch->cbPatchBlockSize += pCpu->opsize;
1368
1369 /* Illegal instruction -> end of analysis phase for this code block */
1370 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1371 return VINF_SUCCESS;
1372
1373 /* Check for exit points. */
1374 switch (pCpu->pCurInstr->opcode)
1375 {
1376 case OP_SYSEXIT:
1377 return VINF_SUCCESS; /* duplicate it; will fault or be emulated in GC. */
1378
1379 case OP_SYSENTER:
1380 case OP_ILLUD2:
1381 //This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further
1382 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1383 return VINF_SUCCESS;
1384
1385 case OP_STI:
1386 case OP_POPF:
1387 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION)));
1388 /* If our exit point lies within the generated patch jump, then we have to refuse!! */
1389 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1390 {
1391 Log(("Exit point within patch jump itself!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1392 return VERR_PATCHING_REFUSED;
1393 }
1394 if (pPatch->opcode == OP_PUSHF)
1395 {
1396 if (pCpu->pCurInstr->opcode == OP_POPF)
1397 {
1398 if (pPatch->cbPatchBlockSize >= SIZEOF_NEARJUMP32)
1399 return VINF_SUCCESS;
1400
1401 /* Or else we need to duplicate more instructions, because we can't jump back yet! */
1402 Log(("WARNING: End of block reached, but we need to duplicate some extra instruction to avoid a conflict with the patch jump\n"));
1403 pPatch->flags |= PATMFL_CHECK_SIZE;
1404 }
1405 break; //sti doesn't mark the end of a pushf block; only popf does
1406 }
1407 //else no break
1408 case OP_RETN: /* exit point for function replacement */
1409 return VINF_SUCCESS;
1410
1411 case OP_IRET:
1412 return VINF_SUCCESS; /* exitpoint */
1413
1414 case OP_CPUID:
1415 case OP_CALL:
1416 case OP_JMP:
1417 break;
1418
1419 default:
1420 if (pCpu->pCurInstr->optype & (OPTYPE_PRIVILEGED_NOTRAP))
1421 {
1422 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1423 return VINF_SUCCESS; /* exit point */
1424 }
1425 break;
1426 }
1427
1428 // If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
1429 if ((pPatch->flags & PATMFL_CHECK_SIZE) && pPatch->cbPatchBlockSize > SIZEOF_NEARJUMP32 && !(pCpu->pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW))
1430 {
1431 // The end marker for this kind of patch is any instruction at a location outside our patch jump
1432 Log(("End of block at %RRv size %d\n", pCurInstrGC, pCpu->opsize));
1433 return VINF_SUCCESS;
1434 }
1435
1436 return VWRN_CONTINUE_ANALYSIS;
1437}
1438
1439/**
1440 * Analyses the instructions inside a function for compliance
1441 *
1442 * @returns VBox status code.
1443 * @param pVM The VM to operate on.
1444 * @param pCpu CPU disassembly state
1445 * @param pInstrGC Guest context pointer to privileged instruction
1446 * @param pCurInstrGC Guest context pointer to the current instruction
1447 * @param pUserData User pointer (callback specific)
1448 *
1449 */
1450static int patmAnalyseFunctionCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, void *pUserData)
1451{
1452 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
1453 bool fIllegalInstr = false;
1454
1455 //Preliminary heuristics:
1456 //- no call instructions
1457 //- ret ends a block
1458
1459 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
1460
1461 // bail out if the patch gets too big
1462 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1463 {
1464 Log(("Code block too big (%x) for function patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1465 fIllegalInstr = true;
1466 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1467 }
1468 else
1469 {
1470        // no unconditional jumps or calls without fixed displacements
1471 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1472 && (pCpu->pCurInstr->opcode == OP_JMP || pCpu->pCurInstr->opcode == OP_CALL)
1473 )
1474 {
1475 Assert(pCpu->param1.size <= 4 || pCpu->param1.size == 6);
1476 if ( pCpu->param1.size == 6 /* far call/jmp */
1477 || (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1478 || (OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1479 )
1480 {
1481 fIllegalInstr = true;
1482 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1483 }
1484 }
1485 else /* no far returns */
1486 if (pCpu->pCurInstr->opcode == OP_RETF)
1487 {
1488 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1489 fIllegalInstr = true;
1490 }
1491 else /* no int xx or into either */
1492 if (pCpu->pCurInstr->opcode == OP_INT3 || pCpu->pCurInstr->opcode == OP_INT || pCpu->pCurInstr->opcode == OP_INTO)
1493 {
1494 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1495 fIllegalInstr = true;
1496 }
1497
1498 #if 0
1499 ///@todo we can handle certain in/out and privileged instructions in the guest context
1500 if (pCpu->pCurInstr->optype & OPTYPE_PRIVILEGED && pCpu->pCurInstr->opcode != OP_STI)
1501 {
1502 Log(("Illegal instructions for function patch!!\n"));
1503 return VERR_PATCHING_REFUSED;
1504 }
1505 #endif
1506 }
1507
1508 pPatch->cbPatchBlockSize += pCpu->opsize;
1509
1510 /* Illegal instruction -> end of analysis phase for this code block */
1511 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1512 {
1513 return VINF_SUCCESS;
1514 }
1515
1516 // Check for exit points
1517 switch (pCpu->pCurInstr->opcode)
1518 {
1519 case OP_ILLUD2:
1520        //This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further
1521 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1522 return VINF_SUCCESS;
1523
1524 case OP_IRET:
1525        case OP_SYSEXIT: /* will fault or be emulated in GC */
1526 case OP_RETN:
1527 return VINF_SUCCESS;
1528
1529 case OP_POPF:
1530 case OP_STI:
1531 return VWRN_CONTINUE_ANALYSIS;
1532 default:
1533 if (pCpu->pCurInstr->optype & (OPTYPE_PRIVILEGED_NOTRAP))
1534 {
1535 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1536 return VINF_SUCCESS; /* exit point */
1537 }
1538 return VWRN_CONTINUE_ANALYSIS;
1539 }
1540
1541 return VWRN_CONTINUE_ANALYSIS;
1542}
1543
1544/**
1545 * Recompiles the instructions in a code block
1546 *
1547 * @returns VBox status code.
1548 * @param pVM The VM to operate on.
1549 * @param pCpu CPU disassembly state
1550 * @param pInstrGC Guest context pointer to privileged instruction
1551 * @param pCurInstrGC Guest context pointer to the current instruction
1552 * @param pUserData User pointer (callback specific)
1553 *
1554 */
1555static int patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, void *pUserData)
1556{
1557 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
1558 int rc = VINF_SUCCESS;
1559 bool fInhibitIRQInstr = false; /* did the instruction cause PATMFL_INHIBITIRQS to be set? */
1560
1561 LogFlow(("patmRecompileCallback %RRv %RRv\n", pInstrGC, pCurInstrGC));
1562
1563 if ( patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pCurInstrGC) != 0
1564 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT)) /* do not do this when the next instruction *must* be executed! */
1565 {
1566 /*
1567 * Been there, done that; so insert a jump (we don't want to duplicate code)
1568 * no need to record this instruction as it's glue code that never crashes (it had better not!)
1569 */
1570 Log(("patmRecompileCallback: jump to code we've recompiled before %RRv!\n", pCurInstrGC));
1571 return patmPatchGenRelJump(pVM, pPatch, pCurInstrGC, OP_JMP, !!(pCpu->prefix & PREFIX_OPSIZE));
1572 }
1573
1574 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1575 {
1576 rc = patmAnalyseFunctionCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pUserData);
1577 }
1578 else
1579 rc = patmAnalyseBlockCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pUserData);
1580
1581 if (RT_FAILURE(rc))
1582 return rc;
1583
1584 /** @note Never do a direct return unless a failure is encountered! */
1585
1586 /* Clear recompilation of next instruction flag; we are doing that right here. */
1587 if (pPatch->flags & PATMFL_RECOMPILE_NEXT)
1588 pPatch->flags &= ~PATMFL_RECOMPILE_NEXT;
1589
1590 /* Add lookup record for patch to guest address translation */
1591 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
1592
1593 /* Update lowest and highest instruction address for this patch */
1594 if (pCurInstrGC < pPatch->pInstrGCLowest)
1595 pPatch->pInstrGCLowest = pCurInstrGC;
1596 else
1597 if (pCurInstrGC > pPatch->pInstrGCHighest)
1598 pPatch->pInstrGCHighest = pCurInstrGC + pCpu->opsize;
1599
1600 /* Illegal instruction -> end of recompile phase for this code block. */
1601 if (patmIsIllegalInstr(pPatch, pCurInstrGC))
1602 {
1603 Log(("Illegal instruction at %RRv -> mark with int 3\n", pCurInstrGC));
1604 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1605 goto end;
1606 }
1607
1608 /* For our first attempt, we'll handle only simple relative jumps (immediate offset coded in instruction).
1609 * Indirect calls are handled below.
1610 */
1611 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1612 && (pCpu->pCurInstr->opcode != OP_CALL || (pPatch->flags & PATMFL_SUPPORT_CALLS))
1613 && (OP_PARM_VTYPE(pCpu->pCurInstr->param1) == OP_PARM_J))
1614 {
1615 RCPTRTYPE(uint8_t *) pTargetGC = PATMResolveBranch(pCpu, pCurInstrGC);
1616 if (pTargetGC == 0)
1617 {
1618 Log(("We don't support far jumps here!! (%08X)\n", pCpu->param1.flags));
1619 return VERR_PATCHING_REFUSED;
1620 }
1621
1622 if (pCpu->pCurInstr->opcode == OP_CALL)
1623 {
1624 Assert(!PATMIsPatchGCAddr(pVM, pTargetGC));
1625 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, pTargetGC, false);
1626 if (RT_FAILURE(rc))
1627 goto end;
1628 }
1629 else
1630 rc = patmPatchGenRelJump(pVM, pPatch, pTargetGC, pCpu->pCurInstr->opcode, !!(pCpu->prefix & PREFIX_OPSIZE));
1631
1632 if (RT_SUCCESS(rc))
1633 rc = VWRN_CONTINUE_RECOMPILE;
1634
1635 goto end;
1636 }
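    /* Anything that was not a simple relative jump or call with a fixed displacement ends up
     * in the opcode specific handling below; most instructions are simply duplicated, while
     * privileged and IF-related ones get dedicated patch code generators. */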
1637
1638 switch (pCpu->pCurInstr->opcode)
1639 {
1640 case OP_CLI:
1641 {
1642 /* If a cli is found while duplicating instructions for another patch, then it's of vital importance to continue
1643 * until we've found the proper exit point(s).
1644 */
1645 if ( pCurInstrGC != pInstrGC
1646 && !(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1647 )
1648 {
1649 Log(("cli instruction found in other instruction patch block; force it to continue & find an exit point\n"));
1650 pPatch->flags &= ~(PATMFL_CHECK_SIZE | PATMFL_SINGLE_INSTRUCTION);
1651 }
1652 /* Set by irq inhibition; no longer valid now. */
1653 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1654
1655 rc = patmPatchGenCli(pVM, pPatch);
1656 if (RT_SUCCESS(rc))
1657 rc = VWRN_CONTINUE_RECOMPILE;
1658 break;
1659 }
1660
1661 case OP_MOV:
1662 if (pCpu->pCurInstr->optype & OPTYPE_POTENTIALLY_DANGEROUS)
1663 {
1664 /* mov ss, src? */
1665 if ( (pCpu->param1.flags & USE_REG_SEG)
1666 && (pCpu->param1.base.reg_seg == DIS_SELREG_SS))
1667 {
1668 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1669 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1670 /** @todo this could cause a fault (ring 0 selector being loaded in ring 1) */
1671 }
1672#if 0 /* necessary for Haiku */
1673 else
1674 if ( (pCpu->param2.flags & USE_REG_SEG)
1675 && (pCpu->param2.base.reg_seg == USE_REG_SS)
1676 && (pCpu->param1.flags & (USE_REG_GEN32|USE_REG_GEN16))) /** @todo memory operand must in theory be handled too */
1677 {
1678 /* mov GPR, ss */
1679 rc = patmPatchGenMovFromSS(pVM, pPatch, pCpu, pCurInstrGC);
1680 if (RT_SUCCESS(rc))
1681 rc = VWRN_CONTINUE_RECOMPILE;
1682 break;
1683 }
1684#endif
1685 }
1686 goto duplicate_instr;
1687
1688 case OP_POP:
1689 if (pCpu->pCurInstr->param1 == OP_PARM_REG_SS)
1690 {
1691 Assert(pCpu->pCurInstr->optype & OPTYPE_INHIBIT_IRQS);
1692
1693            Log(("Force recompilation of next instruction for OP_POP SS at %RRv\n", pCurInstrGC));
1694 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1695 }
1696 goto duplicate_instr;
1697
1698 case OP_STI:
1699 {
1700 RTRCPTR pNextInstrGC = 0; /* by default no inhibit irq */
1701
1702 /** In a sequence of instructions that inhibit irqs, only the first one actually inhibits irqs. */
1703 if (!(pPatch->flags & PATMFL_INHIBIT_IRQS))
1704 {
1705 pPatch->flags |= PATMFL_INHIBIT_IRQS | PATMFL_GENERATE_JUMPTOGUEST;
1706 fInhibitIRQInstr = true;
1707 pNextInstrGC = pCurInstrGC + pCpu->opsize;
1708 Log(("Inhibit irqs for instruction OP_STI at %RRv\n", pCurInstrGC));
1709 }
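            /* Note: on x86 an sti enables interrupts only after the following instruction has
             * completed (the "interrupt shadow"); the code below therefore inspects the next
             * instruction as well before deciding whether this is a valid exit point. */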
1710 rc = patmPatchGenSti(pVM, pPatch, pCurInstrGC, pNextInstrGC);
1711
1712 if (RT_SUCCESS(rc))
1713 {
1714 DISCPUSTATE cpu = *pCpu;
1715 unsigned opsize;
1716 int disret;
1717 RCPTRTYPE(uint8_t *) pNextInstrGC, pReturnInstrGC;
1718 R3PTRTYPE(uint8_t *) pNextInstrHC;
1719
1720 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1721
1722 pNextInstrGC = pCurInstrGC + pCpu->opsize;
1723 pNextInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pNextInstrGC);
1724 if (pNextInstrHC == NULL)
1725 {
1726 AssertFailed();
1727 return VERR_PATCHING_REFUSED;
1728 }
1729
1730 // Disassemble the next instruction
1731 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pNextInstrGC, pNextInstrHC, &opsize, NULL);
1732 if (disret == false)
1733 {
1734 AssertMsgFailed(("STI: Disassembly failed (probably page not present) -> return to caller\n"));
1735 return VERR_PATCHING_REFUSED;
1736 }
1737 pReturnInstrGC = pNextInstrGC + opsize;
1738
1739 if ( (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1740 || pReturnInstrGC <= pInstrGC
1741 || pReturnInstrGC - pInstrGC >= SIZEOF_NEARJUMP32
1742 )
1743 {
1744 /* Not an exit point for function duplication patches */
1745 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
1746 && RT_SUCCESS(rc))
1747 {
1748 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST; /* Don't generate a jump back */
1749 rc = VWRN_CONTINUE_RECOMPILE;
1750 }
1751 else
1752 rc = VINF_SUCCESS; //exit point
1753 }
1754 else {
1755 Log(("PATM: sti occurred too soon; refusing patch!\n"));
1756 rc = VERR_PATCHING_REFUSED; //not allowed!!
1757 }
1758 }
1759 break;
1760 }
1761
1762 case OP_POPF:
1763 {
1764 bool fGenerateJmpBack = (pCurInstrGC + pCpu->opsize - pInstrGC >= SIZEOF_NEARJUMP32);
1765
1766 /* Not an exit point for IDT handler or function replacement patches */
1767 /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
1768 if (pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_DUPLICATE_FUNCTION))
1769 fGenerateJmpBack = false;
1770
1771 rc = patmPatchGenPopf(pVM, pPatch, pCurInstrGC + pCpu->opsize, !!(pCpu->prefix & PREFIX_OPSIZE), fGenerateJmpBack);
1772 if (RT_SUCCESS(rc))
1773 {
1774 if (fGenerateJmpBack == false)
1775 {
1776 /* Not an exit point for IDT handler or function replacement patches */
1777 rc = VWRN_CONTINUE_RECOMPILE;
1778 }
1779 else
1780 {
1781 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1782 rc = VINF_SUCCESS; /* exit point! */
1783 }
1784 }
1785 break;
1786 }
1787
1788 case OP_PUSHF:
1789 rc = patmPatchGenPushf(pVM, pPatch, !!(pCpu->prefix & PREFIX_OPSIZE));
1790 if (RT_SUCCESS(rc))
1791 rc = VWRN_CONTINUE_RECOMPILE;
1792 break;
1793
1794 case OP_PUSH:
1795 if (pCpu->pCurInstr->param1 == OP_PARM_REG_CS)
1796 {
1797 rc = patmPatchGenPushCS(pVM, pPatch);
1798 if (RT_SUCCESS(rc))
1799 rc = VWRN_CONTINUE_RECOMPILE;
1800 break;
1801 }
1802 goto duplicate_instr;
1803
1804 case OP_IRET:
1805 Log(("IRET at %RRv\n", pCurInstrGC));
1806 rc = patmPatchGenIret(pVM, pPatch, pCurInstrGC, !!(pCpu->prefix & PREFIX_OPSIZE));
1807 if (RT_SUCCESS(rc))
1808 {
1809 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1810 rc = VINF_SUCCESS; /* exit point by definition */
1811 }
1812 break;
1813
1814 case OP_ILLUD2:
1815        /* This appears to be some kind of kernel panic in Linux 2.4; no point in continuing */
1816 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1817 if (RT_SUCCESS(rc))
1818 rc = VINF_SUCCESS; /* exit point by definition */
1819 Log(("Illegal opcode (0xf 0xb)\n"));
1820 break;
1821
1822 case OP_CPUID:
1823 rc = patmPatchGenCpuid(pVM, pPatch, pCurInstrGC);
1824 if (RT_SUCCESS(rc))
1825 rc = VWRN_CONTINUE_RECOMPILE;
1826 break;
1827
1828 case OP_STR:
1829 case OP_SLDT:
1830 rc = patmPatchGenSldtStr(pVM, pPatch, pCpu, pCurInstrGC);
1831 if (RT_SUCCESS(rc))
1832 rc = VWRN_CONTINUE_RECOMPILE;
1833 break;
1834
1835 case OP_SGDT:
1836 case OP_SIDT:
1837 rc = patmPatchGenSxDT(pVM, pPatch, pCpu, pCurInstrGC);
1838 if (RT_SUCCESS(rc))
1839 rc = VWRN_CONTINUE_RECOMPILE;
1840 break;
1841
1842 case OP_RETN:
1843 /* retn is an exit point for function patches */
1844 rc = patmPatchGenRet(pVM, pPatch, pCpu, pCurInstrGC);
1845 if (RT_SUCCESS(rc))
1846 rc = VINF_SUCCESS; /* exit point by definition */
1847 break;
1848
1849 case OP_SYSEXIT:
1850 /* Duplicate it, so it can be emulated in GC (or fault). */
1851 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
1852 if (RT_SUCCESS(rc))
1853 rc = VINF_SUCCESS; /* exit point by definition */
1854 break;
1855
1856 case OP_CALL:
1857 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
1858 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
1859 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
1860 */
1861 Assert(pCpu->param1.size == 4 || pCpu->param1.size == 6);
1862 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->param1.size == 4 /* no far calls! */)
1863 {
1864 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, (RTRCPTR)0xDEADBEEF, true);
1865 if (RT_SUCCESS(rc))
1866 {
1867 rc = VWRN_CONTINUE_RECOMPILE;
1868 }
1869 break;
1870 }
1871 goto gen_illegal_instr;
1872
1873 case OP_JMP:
1874 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
1875 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
1876 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
1877 */
1878 Assert(pCpu->param1.size == 4 || pCpu->param1.size == 6);
1879 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->param1.size == 4 /* no far jumps! */)
1880 {
1881 rc = patmPatchGenJump(pVM, pPatch, pCpu, pCurInstrGC);
1882 if (RT_SUCCESS(rc))
1883 rc = VINF_SUCCESS; /* end of branch */
1884 break;
1885 }
1886 goto gen_illegal_instr;
1887
1888 case OP_INT3:
1889 case OP_INT:
1890 case OP_INTO:
1891 goto gen_illegal_instr;
1892
1893 case OP_MOV_DR:
1894 /** @note: currently we let DRx writes cause a trap d; our trap handler will decide to interpret it or not. */
1895 if (pCpu->pCurInstr->param2 == OP_PARM_Dd)
1896 {
1897 rc = patmPatchGenMovDebug(pVM, pPatch, pCpu);
1898 if (RT_SUCCESS(rc))
1899 rc = VWRN_CONTINUE_RECOMPILE;
1900 break;
1901 }
1902 goto duplicate_instr;
1903
1904 case OP_MOV_CR:
1905 /** @note: currently we let CRx writes cause a trap d; our trap handler will decide to interpret it or not. */
1906 if (pCpu->pCurInstr->param2 == OP_PARM_Cd)
1907 {
1908 rc = patmPatchGenMovControl(pVM, pPatch, pCpu);
1909 if (RT_SUCCESS(rc))
1910 rc = VWRN_CONTINUE_RECOMPILE;
1911 break;
1912 }
1913 goto duplicate_instr;
1914
1915 default:
1916 if (pCpu->pCurInstr->optype & (OPTYPE_CONTROLFLOW | OPTYPE_PRIVILEGED_NOTRAP))
1917 {
1918gen_illegal_instr:
1919 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1920 if (RT_SUCCESS(rc))
1921 rc = VINF_SUCCESS; /* exit point by definition */
1922 }
1923 else
1924 {
1925duplicate_instr:
1926 Log(("patmPatchGenDuplicate\n"));
1927 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
1928 if (RT_SUCCESS(rc))
1929 rc = VWRN_CONTINUE_RECOMPILE;
1930 }
1931 break;
1932 }
1933
1934end:
1935
1936 if ( !fInhibitIRQInstr
1937 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
1938 {
1939 int rc2;
1940 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->opsize;
1941
1942 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
1943 Log(("Clear inhibit IRQ flag at %RRv\n", pCurInstrGC));
1944 if (pPatch->flags & PATMFL_GENERATE_JUMPTOGUEST)
1945 {
1946 Log(("patmRecompileCallback: generate jump back to guest (%RRv) after fused instruction\n", pNextInstrGC));
1947
1948 rc2 = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
1949 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1950 rc = VINF_SUCCESS; /* end of the line */
1951 }
1952 else
1953 {
1954 rc2 = patmPatchGenClearInhibitIRQ(pVM, pPatch, pNextInstrGC);
1955 }
1956 if (RT_FAILURE(rc2))
1957 rc = rc2;
1958 }
1959
1960 if (RT_SUCCESS(rc))
1961 {
1962 // If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
1963 if ( (pPatch->flags & PATMFL_CHECK_SIZE)
1964 && pCurInstrGC + pCpu->opsize - pInstrGC >= SIZEOF_NEARJUMP32
1965 && !(pCpu->pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW)
1966 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT) /* do not do this when the next instruction *must* be executed! */
1967 )
1968 {
1969 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->opsize;
1970
1971 // The end marker for this kind of patch is any instruction at a location outside our patch jump
1972 Log(("patmRecompileCallback: end found for single instruction patch at %RRv opsize %d\n", pNextInstrGC, pCpu->opsize));
1973
1974 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC);
1975 AssertRC(rc);
1976 }
1977 }
1978 return rc;
1979}
1980
1981
1982#ifdef LOG_ENABLED
1983
1984/** Add a disasm jump record (temporary, to prevent duplicate analysis)
1985 *
1986 * @param pVM The VM to operate on.
1987 * @param pPatch Patch structure ptr
1988 * @param pInstrGC Guest context pointer to privileged instruction
1989 *
1990 */
1991static void patmPatchAddDisasmJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1992{
1993 PAVLPVNODECORE pRec;
1994
1995 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1996 Assert(pRec);
1997 pRec->Key = (AVLPVKEY)pInstrGC;
1998
1999 int ret = RTAvlPVInsert(&pPatch->pTempInfo->DisasmJumpTree, pRec);
2000 Assert(ret);
2001}
2002
2003/**
2004 * Checks if jump target has been analysed before.
2005 *
2006 * @returns true if the jump target has been analysed before, false if not.
2007 * @param pPatch Patch struct
2008 * @param pInstrGC Jump target
2009 *
2010 */
2011static bool patmIsKnownDisasmJump(PPATCHINFO pPatch, RTRCPTR pInstrGC)
2012{
2013 PAVLPVNODECORE pRec;
2014
2015 pRec = RTAvlPVGet(&pPatch->pTempInfo->DisasmJumpTree, (AVLPVKEY)pInstrGC);
2016 if (pRec)
2017 return true;
2018 return false;
2019}
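/* Usage sketch: patmr3DisasmCode() checks patmIsKnownDisasmJump() before following a branch
 * target and records the target with patmPatchAddDisasmJump() when it does follow it, so every
 * target is disassembled at most once; patmr3DisasmCodeStream() empties the tree afterwards. */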
2020
2021/**
2022 * For proper disassembly of the final patch block
2023 *
2024 * @returns VBox status code.
2025 * @param pVM The VM to operate on.
2026 * @param pCpu CPU disassembly state
2027 * @param pInstrGC Guest context pointer to privileged instruction
2028 * @param pCurInstrGC Guest context pointer to the current instruction
2029 * @param pUserData User pointer (callback specific)
2030 *
2031 */
2032int patmr3DisasmCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, void *pUserData)
2033{
2034 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
2035
2036 if (pCpu->pCurInstr->opcode == OP_INT3)
2037 {
2038 /* Could be an int3 inserted in a call patch. Check to be sure */
2039 DISCPUSTATE cpu;
2040 uint8_t *pOrgJumpHC;
2041 RTRCPTR pOrgJumpGC;
2042 uint32_t dummy;
2043
2044 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2045 pOrgJumpGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2046 pOrgJumpHC = PATMGCVirtToHCVirt(pVM, pPatch, pOrgJumpGC);
2047
2048 bool disret = PATMR3DISInstr(pVM, pPatch, &cpu, pOrgJumpGC, pOrgJumpHC, &dummy, NULL);
2049 if (!disret || cpu.pCurInstr->opcode != OP_CALL || cpu.param1.size != 4 /* only near calls */)
2050 return VINF_SUCCESS;
2051
2052 return VWRN_CONTINUE_ANALYSIS;
2053 }
2054
2055 if ( pCpu->pCurInstr->opcode == OP_ILLUD2
2056 && PATMIsPatchGCAddr(pVM, pCurInstrGC))
2057 {
2058 /* the indirect call patch contains an 0xF/0xB illegal instr to call for assistance; check for this and continue */
2059 return VWRN_CONTINUE_ANALYSIS;
2060 }
2061
2062 if ( (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
2063 || pCpu->pCurInstr->opcode == OP_INT
2064 || pCpu->pCurInstr->opcode == OP_IRET
2065 || pCpu->pCurInstr->opcode == OP_RETN
2066 || pCpu->pCurInstr->opcode == OP_RETF
2067 )
2068 {
2069 return VINF_SUCCESS;
2070 }
2071
2072 if (pCpu->pCurInstr->opcode == OP_ILLUD2)
2073 return VINF_SUCCESS;
2074
2075 return VWRN_CONTINUE_ANALYSIS;
2076}
2077
2078
2079/**
2080 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2081 *
2082 * @returns VBox status code.
2083 * @param pVM The VM to operate on.
2084 * @param pInstrGC Guest context pointer to the initial privileged instruction
2085 * @param pCurInstrGC Guest context pointer to the current instruction
2086 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2087 * @param pUserData User pointer (callback specific)
2088 *
2089 */
2090int patmr3DisasmCode(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, void *pUserData)
2091{
2092 DISCPUSTATE cpu;
2093 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
2094 int rc = VWRN_CONTINUE_ANALYSIS;
2095 uint32_t opsize, delta;
2096 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2097 bool disret;
2098 char szOutput[256];
2099
2100 Assert(pCurInstrHC != PATCHCODE_PTR_HC(pPatch) || pPatch->pTempInfo->DisasmJumpTree == 0);
2101
2102 /* We need this to determine branch targets (and for disassembling). */
2103 delta = pVM->patm.s.pPatchMemGC - (uintptr_t)pVM->patm.s.pPatchMemHC;
2104
2105 while(rc == VWRN_CONTINUE_ANALYSIS)
2106 {
2107 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2108
2109 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pCurInstrGC);
2110 if (pCurInstrHC == NULL)
2111 {
2112 rc = VERR_PATCHING_REFUSED;
2113 goto end;
2114 }
2115
2116 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, szOutput, PATMREAD_RAWCODE);
2117 if (PATMIsPatchGCAddr(pVM, pCurInstrGC))
2118 {
2119 RTRCPTR pOrgInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2120
2121 if (pOrgInstrGC != pPatch->pTempInfo->pLastDisasmInstrGC)
2122 Log(("DIS %RRv<-%s", pOrgInstrGC, szOutput));
2123 else
2124 Log(("DIS %s", szOutput));
2125
2126 pPatch->pTempInfo->pLastDisasmInstrGC = pOrgInstrGC;
2127 if (patmIsIllegalInstr(pPatch, pOrgInstrGC))
2128 {
2129 rc = VINF_SUCCESS;
2130 goto end;
2131 }
2132 }
2133 else
2134 Log(("DIS: %s", szOutput));
2135
2136 if (disret == false)
2137 {
2138 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2139 rc = VINF_SUCCESS;
2140 goto end;
2141 }
2142
2143 rc = pfnPATMR3Disasm(pVM, &cpu, pInstrGC, pCurInstrGC, pUserData);
2144 if (rc != VWRN_CONTINUE_ANALYSIS) {
2145 break; //done!
2146 }
2147
2148 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction) */
2149 if ( (cpu.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2150 && (OP_PARM_VTYPE(cpu.pCurInstr->param1) == OP_PARM_J)
2151 && cpu.pCurInstr->opcode != OP_CALL /* complete functions are replaced; don't bother here. */
2152 )
2153 {
2154 RTRCPTR pTargetGC = PATMResolveBranch(&cpu, pCurInstrGC);
2155 RTRCPTR pOrgTargetGC;
2156
2157 if (pTargetGC == 0)
2158 {
2159 Log(("We don't support far jumps here!! (%08X)\n", cpu.param1.flags));
2160 rc = VERR_PATCHING_REFUSED;
2161 break;
2162 }
2163
2164 if (!PATMIsPatchGCAddr(pVM, pTargetGC))
2165 {
2166 //jump back to guest code
2167 rc = VINF_SUCCESS;
2168 goto end;
2169 }
2170 pOrgTargetGC = PATMR3PatchToGCPtr(pVM, pTargetGC, 0);
2171
2172 if (patmIsCommonIDTHandlerPatch(pVM, pOrgTargetGC))
2173 {
2174 rc = VINF_SUCCESS;
2175 goto end;
2176 }
2177
2178 if (patmIsKnownDisasmJump(pPatch, pTargetGC) == false)
2179 {
2180 /* New jump, let's check it. */
2181 patmPatchAddDisasmJump(pVM, pPatch, pTargetGC);
2182
2183 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2184 rc = patmr3DisasmCode(pVM, pInstrGC, pTargetGC, pfnPATMR3Disasm, pUserData);
2185 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2186
2187 if (rc != VINF_SUCCESS) {
2188 break; //done!
2189 }
2190 }
2191 if (cpu.pCurInstr->opcode == OP_JMP)
2192 {
2193 /* Unconditional jump; return to caller. */
2194 rc = VINF_SUCCESS;
2195 goto end;
2196 }
2197
2198 rc = VWRN_CONTINUE_ANALYSIS;
2199 }
2200 pCurInstrGC += opsize;
2201 }
2202end:
2203 return rc;
2204}
2205
2206/**
2207 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable; afterwards all temporary disasm jump records are freed.
2208 *
2209 * @returns VBox status code.
2210 * @param pVM The VM to operate on.
2211 * @param pInstrGC Guest context pointer to the initial privileged instruction
2212 * @param pCurInstrGC Guest context pointer to the current instruction
2213 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2214 * @param pUserData User pointer (callback specific)
2215 *
2216 */
2217int patmr3DisasmCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, void *pUserData)
2218{
2219 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
2220
2221 int rc = patmr3DisasmCode(pVM, pInstrGC, pCurInstrGC, pfnPATMR3Disasm, pUserData);
2222 /* Free all disasm jump records. */
2223 patmEmptyTree(pVM, &pPatch->pTempInfo->DisasmJumpTree);
2224 return rc;
2225}
2226
2227#endif /* LOG_ENABLED */
2228
2229/**
2230 * Detects if the specified address falls within a 5 byte jump generated for an active patch.
2231 * If so, this patch is permanently disabled.
2232 *
2233 * @param pVM The VM to operate on.
2234 * @param pInstrGC Guest context pointer to instruction
2235 * @param pConflictGC Guest context pointer to check
2236 *
2237 * @note also checks for patch hints to make sure they can never be enabled if a conflict is present.
2238 *
2239 */
2240VMMR3DECL(int) PATMR3DetectConflict(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictGC)
2241{
2242 PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, pConflictGC, true /* include patch hints */);
2243 if (pTargetPatch)
2244 {
2245 return patmDisableUnusablePatch(pVM, pInstrGC, pConflictGC, pTargetPatch);
2246 }
2247 return VERR_PATCH_NO_CONFLICT;
2248}
2249
2250/**
2251 * Recompile the code stream until the callback function detects a failure or decides everything is acceptable
2252 *
2253 * @returns VBox status code.
2254 * @param pVM The VM to operate on.
2255 * @param pInstrGC Guest context pointer to privileged instruction
2256 * @param pCurInstrGC Guest context pointer to the current instruction
2257 * @param pfnPATMR3Recompile Callback for testing the disassembled instruction
2258 * @param pUserData User pointer (callback specific)
2259 *
2260 */
2261static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Recompile, void *pUserData)
2262{
2263 DISCPUSTATE cpu;
2264 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
2265 int rc = VWRN_CONTINUE_ANALYSIS;
2266 uint32_t opsize;
2267 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2268 bool disret;
2269#ifdef LOG_ENABLED
2270 char szOutput[256];
2271#endif
2272
2273 while (rc == VWRN_CONTINUE_RECOMPILE)
2274 {
2275 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2276
2277 ////Log(("patmRecompileCodeStream %RRv %RRv\n", pInstrGC, pCurInstrGC));
2278
2279 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pCurInstrGC);
2280 if (pCurInstrHC == NULL)
2281 {
2282 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2283 goto end;
2284 }
2285#ifdef LOG_ENABLED
2286 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, szOutput);
2287 Log(("Recompile: %s", szOutput));
2288#else
2289 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2290#endif
2291 if (disret == false)
2292 {
2293 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2294
2295 /* Add lookup record for patch to guest address translation */
2296 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
2297 patmPatchGenIllegalInstr(pVM, pPatch);
2298 rc = VINF_SUCCESS; /* Note: don't fail here; we might refuse an important patch!! */
2299 goto end;
2300 }
2301
2302 rc = pfnPATMR3Recompile(pVM, &cpu, pInstrGC, pCurInstrGC, pUserData);
2303 if (rc != VWRN_CONTINUE_RECOMPILE)
2304 {
2305 /* If irqs are inhibited because of the current instruction, then we must make sure the next one is executed! */
2306 if ( rc == VINF_SUCCESS
2307 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2308 {
2309 DISCPUSTATE cpunext;
2310 uint32_t opsizenext;
2311 uint8_t *pNextInstrHC;
2312 RTRCPTR pNextInstrGC = pCurInstrGC + opsize;
2313
2314 Log(("patmRecompileCodeStream: irqs inhibited by instruction %RRv\n", pNextInstrGC));
2315
2316 /* Certain instructions (e.g. sti) force the next instruction to be executed before any interrupts can occur.
2317 * Recompile the next instruction as well
2318 */
2319 pNextInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pNextInstrGC);
2320 if (pNextInstrHC == NULL)
2321 {
2322 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2323 goto end;
2324 }
2325 cpunext.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2326 disret = PATMR3DISInstr(pVM, pPatch, &cpunext, pNextInstrGC, pNextInstrHC, &opsizenext, NULL);
2327 if (disret == false)
2328 {
2329 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2330 goto end;
2331 }
2332 switch(cpunext.pCurInstr->opcode)
2333 {
2334 case OP_IRET: /* inhibit cleared in generated code */
2335 case OP_SYSEXIT: /* faults; inhibit should be cleared in HC handling */
2336 case OP_HLT:
2337 break; /* recompile these */
2338
2339 default:
2340 if (cpunext.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2341 {
2342 Log(("Unexpected control flow instruction after inhibit irq instruction\n"));
2343
2344 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2345 AssertRC(rc);
2346 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2347 goto end; /** @todo should be ok to ignore instruction fusing in this case */
2348 }
2349 break;
2350 }
2351
2352 /** @note after a cli we must continue to a proper exit point */
2353 if (cpunext.pCurInstr->opcode != OP_CLI)
2354 {
2355 rc = pfnPATMR3Recompile(pVM, &cpunext, pInstrGC, pNextInstrGC, pUserData);
2356 if (RT_SUCCESS(rc))
2357 {
2358 rc = VINF_SUCCESS;
2359 goto end;
2360 }
2361 break;
2362 }
2363 else
2364 rc = VWRN_CONTINUE_RECOMPILE;
2365 }
2366 else
2367 break; /* done! */
2368 }
2369
2370 /** @todo continue with the instructions following the jump and then recompile the jump target code */
2371
2372
2373 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction). */
2374 if ( (cpu.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2375 && (OP_PARM_VTYPE(cpu.pCurInstr->param1) == OP_PARM_J)
2376 && cpu.pCurInstr->opcode != OP_CALL /* complete functions are replaced; don't bother here. */
2377 )
2378 {
2379 RCPTRTYPE(uint8_t *) addr = PATMResolveBranch(&cpu, pCurInstrGC);
2380 if (addr == 0)
2381 {
2382 Log(("We don't support far jumps here!! (%08X)\n", cpu.param1.flags));
2383 rc = VERR_PATCHING_REFUSED;
2384 break;
2385 }
2386
2387 Log(("Jump encountered target %RRv\n", addr));
2388
2389 /* We don't check if the branch target lies in a valid page as we've already done that in the analysis phase. */
2390 if (!(cpu.pCurInstr->optype & OPTYPE_UNCOND_CONTROLFLOW))
2391 {
2392 Log(("patmRecompileCodeStream continue passed conditional jump\n"));
2393 /* First we need to finish this linear code stream until the next exit point. */
2394 rc = patmRecompileCodeStream(pVM, pInstrGC, pCurInstrGC+opsize, pfnPATMR3Recompile, pUserData);
2395 if (RT_FAILURE(rc))
2396 {
2397 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2398 break; //fatal error
2399 }
2400 }
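            /* At this point the fall-through path of a conditional branch has been recompiled
             * up to its next exit point; the code below recurses into the branch target itself
             * unless that target was already recompiled earlier. */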
2401
2402 if (patmGuestGCPtrToPatchGCPtr(pVM, pPatch, addr) == 0)
2403 {
2404 /* New code; let's recompile it. */
2405 Log(("patmRecompileCodeStream continue with jump\n"));
2406
2407 /*
2408 * If we are jumping to an existing patch (or within 5 bytes of the entrypoint), then we must temporarily disable
2409 * this patch so we can continue our analysis
2410 *
2411 * We rely on CSAM to detect and resolve conflicts
2412 */
2413 PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, addr);
2414 if(pTargetPatch)
2415 {
2416 Log(("Found active patch at target %RRv (%RRv) -> temporarily disabling it!!\n", addr, pTargetPatch->pPrivInstrGC));
2417 PATMR3DisablePatch(pVM, pTargetPatch->pPrivInstrGC);
2418 }
2419
2420 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2421 rc = patmRecompileCodeStream(pVM, pInstrGC, addr, pfnPATMR3Recompile, pUserData);
2422 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2423
2424 if(pTargetPatch)
2425 {
2426 PATMR3EnablePatch(pVM, pTargetPatch->pPrivInstrGC);
2427 }
2428
2429 if (RT_FAILURE(rc))
2430 {
2431 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2432 break; //done!
2433 }
2434 }
2435 /* Always return to caller here; we're done! */
2436 rc = VINF_SUCCESS;
2437 goto end;
2438 }
2439 else
2440 if (cpu.pCurInstr->optype & OPTYPE_UNCOND_CONTROLFLOW)
2441 {
2442 rc = VINF_SUCCESS;
2443 goto end;
2444 }
2445 pCurInstrGC += opsize;
2446 }
2447end:
2448 Assert(!(pPatch->flags & PATMFL_RECOMPILE_NEXT));
2449 return rc;
2450}
2451
2452
2453/**
2454 * Generate the jump from guest to patch code
2455 *
2456 * @returns VBox status code.
2457 * @param pVM The VM to operate on.
2458 * @param pPatch Patch record
2459 */
2460static int patmGenJumpToPatch(PVM pVM, PPATCHINFO pPatch, bool fAddFixup = true)
2461{
2462 uint8_t temp[8];
2463 uint8_t *pPB;
2464 int rc;
2465
2466 Assert(pPatch->cbPatchJump <= sizeof(temp));
2467 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
2468
2469 pPB = pPatch->pPrivInstrHC;
2470
2471#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
2472 if (pPatch->flags & PATMFL_JUMP_CONFLICT)
2473 {
2474 Assert(pPatch->pPatchJumpDestGC);
2475
2476 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
2477 {
2478 // jmp [PatchCode]
2479 if (fAddFixup)
2480 {
2481 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2482 {
2483 Log(("Relocation failed for the jump in the guest code!!\n"));
2484 return VERR_PATCHING_REFUSED;
2485 }
2486 }
2487
2488 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2489            *(uint32_t *)&temp[1] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //relative displacement
2490 }
2491 else
2492 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
2493 {
2494 // jmp [PatchCode]
2495 if (fAddFixup)
2496 {
2497 if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2498 {
2499 Log(("Relocation failed for the jump in the guest code!!\n"));
2500 return VERR_PATCHING_REFUSED;
2501 }
2502 }
2503
2504 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2505 temp[1] = pPatch->aPrivInstr[1]; //jump opcode copied from original instruction
2506            *(uint32_t *)&temp[2] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //relative displacement
2507 }
2508 else
2509 {
2510 Assert(0);
2511 return VERR_PATCHING_REFUSED;
2512 }
2513 }
2514 else
2515#endif
2516 {
2517 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2518
2519 // jmp [PatchCode]
2520 if (fAddFixup)
2521 {
2522 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
2523 {
2524 Log(("Relocation failed for the jump in the guest code!!\n"));
2525 return VERR_PATCHING_REFUSED;
2526 }
2527 }
2528 temp[0] = 0xE9; //jmp
2529        *(uint32_t *)&temp[1] = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //relative displacement
2530 }
2531 rc = PGMPhysSimpleDirtyWriteGCPtr(pVM, pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2532 AssertRC(rc);
2533
2534 if (rc == VINF_SUCCESS)
2535 pPatch->flags |= PATMFL_PATCHED_GUEST_CODE;
2536
2537 return rc;
2538}
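/* Resulting guest bytes for the common case above (little endian rel32):
 *      E9 xx xx xx xx          ; jmp rel32
 * where rel32 = PATCHCODE_PTR_GC(pPatch) - (pPrivInstrGC + SIZEOF_NEARJUMP32), i.e. the
 * displacement is measured from the end of the 5 byte jump instruction. */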
2539
2540/**
2541 * Remove the jump from guest to patch code
2542 *
2543 * @returns VBox status code.
2544 * @param pVM The VM to operate on.
2545 * @param pPatch Patch record
2546 */
2547static int patmRemoveJumpToPatch(PVM pVM, PPATCHINFO pPatch)
2548{
2549#ifdef DEBUG
2550 DISCPUSTATE cpu;
2551 char szOutput[256];
2552 uint32_t opsize, i = 0;
2553 bool disret;
2554
2555 while(i < pPatch->cbPrivInstr)
2556 {
2557 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2558 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, &pPatch->pPrivInstrHC[i], &opsize, szOutput);
2559 if (disret == false)
2560 break;
2561
2562 Log(("Org patch jump: %s", szOutput));
2563 Assert(opsize);
2564 i += opsize;
2565 }
2566#endif
2567
2568 /* Restore original code (privileged instruction + following instructions that were overwritten because of the 5/6 byte jmp). */
2569 int rc = PGMPhysSimpleDirtyWriteGCPtr(pVM, pPatch->pPrivInstrGC, pPatch->aPrivInstr, pPatch->cbPatchJump);
2570#ifdef DEBUG
2571 if (rc == VINF_SUCCESS)
2572 {
2573 DISCPUSTATE cpu;
2574 char szOutput[256];
2575 uint32_t opsize, i = 0;
2576 bool disret;
2577
2578 while(i < pPatch->cbPrivInstr)
2579 {
2580 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2581 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, &pPatch->pPrivInstrHC[i], &opsize, szOutput);
2582 if (disret == false)
2583 break;
2584
2585 Log(("Org instr: %s", szOutput));
2586 Assert(opsize);
2587 i += opsize;
2588 }
2589 }
2590#endif
2591 pPatch->flags &= ~PATMFL_PATCHED_GUEST_CODE;
2592 return rc;
2593}
2594
2595/**
2596 * Generate the call from guest to patch code
2597 *
2598 * @returns VBox status code.
2599 * @param pVM The VM to operate on.
2600 * @param pPatch Patch record
2601 */
2602static int patmGenCallToPatch(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTargetGC, bool fAddFixup = true)
2603{
2604 uint8_t temp[8];
2605 uint8_t *pPB;
2606 int rc;
2607
2608 Assert(pPatch->cbPatchJump <= sizeof(temp));
2609
2610 pPB = pPatch->pPrivInstrHC;
2611
2612 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2613
2614 // jmp [PatchCode]
2615 if (fAddFixup)
2616 {
2617 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
2618 {
2619 Log(("Relocation failed for the jump in the guest code!!\n"));
2620 return VERR_PATCHING_REFUSED;
2621 }
2622 }
2623
2624 Assert(pPatch->aPrivInstr[0] == 0xE8 || pPatch->aPrivInstr[0] == 0xE9); /* call or jmp */
2625 temp[0] = pPatch->aPrivInstr[0];
2626    *(uint32_t *)&temp[1] = (uint32_t)pTargetGC - ((uint32_t)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //relative displacement
2627
2628 rc = PGMPhysSimpleDirtyWriteGCPtr(pVM, pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2629 AssertRC(rc);
2630
2631 return rc;
2632}
2633
2634
2635/**
2636 * Patch cli/sti/pushf/popf instruction block at the specified location
2637 *
2638 * @returns VBox status code.
2639 * @param pVM The VM to operate on.
2640 * @param pInstrGC Guest context pointer to privileged instruction
2641 * @param pInstrHC Host context pointer to privileged instruction
2642 * @param uOpcode Instruction opcode
2643 * @param uOpSize Size of starting instruction
2644 * @param pPatchRec Patch record
2645 *
2646 * @note returns failure if patching is not allowed or not possible
2647 *
2648 */
2649VMMR3DECL(int) PATMR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2650 uint32_t uOpcode, uint32_t uOpSize, PPATMPATCHREC pPatchRec)
2651{
2652 PPATCHINFO pPatch = &pPatchRec->patch;
2653 int rc = VERR_PATCHING_REFUSED;
2654 DISCPUSTATE cpu;
2655 uint32_t orgOffsetPatchMem = ~0;
2656 RTRCPTR pInstrStart;
2657#ifdef LOG_ENABLED
2658 uint32_t opsize;
2659 char szOutput[256];
2660 bool disret;
2661#endif
2662
2663 /* Save original offset (in case of failures later on) */
2664 /** @todo use the hypervisor heap (that has quite a few consequences for save/restore though) */
2665 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2666
2667 Assert(!(pPatch->flags & (PATMFL_GUEST_SPECIFIC|PATMFL_USER_MODE|PATMFL_TRAPHANDLER)));
2668 switch (uOpcode)
2669 {
2670 case OP_MOV:
2671 break;
2672
2673 case OP_CLI:
2674 case OP_PUSHF:
2675 /* We can 'call' a cli or pushf patch. It will either return to the original guest code when IF is set again, or fault. */
2676 /** @note special precautions are taken when disabling and enabling such patches. */
2677 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
2678 break;
2679
2680 default:
2681 if (!(pPatch->flags & PATMFL_IDTHANDLER))
2682 {
2683 AssertMsg(0, ("PATMR3PatchBlock: Invalid opcode %x\n", uOpcode));
2684 return VERR_INVALID_PARAMETER;
2685 }
2686 }
2687
2688 if (!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)))
2689 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
2690
2691 /* If we're going to insert a patch jump, then the jump itself is not allowed to cross a page boundary. */
2692 if ( (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2693 && PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + SIZEOF_NEARJUMP32)
2694 )
2695 {
2696 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
2697#ifdef DEBUG_sandervl
2698//// AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
2699#endif
2700 rc = VERR_PATCHING_REFUSED;
2701 goto failure;
2702 }
2703
2704 pPatch->nrPatch2GuestRecs = 0;
2705 pInstrStart = pInstrGC;
2706
2707#ifdef PATM_ENABLE_CALL
2708 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
2709#endif
2710
2711 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2712 pPatch->uCurPatchOffset = 0;
2713
2714 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2715
2716 if ((pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER)) == PATMFL_IDTHANDLER)
2717 {
2718 Assert(pPatch->flags & PATMFL_INTHANDLER);
2719
2720 /* Install fake cli patch (to clear the virtual IF and check int xx parameters) */
2721 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2722 if (RT_FAILURE(rc))
2723 goto failure;
2724 }
2725
2726 /***************************************************************************************************************************/
2727    /** @note We can't insert *any* code before a sysenter handler; some Linux guests have an invalid stack at this point!!!!! */
2728 /***************************************************************************************************************************/
2729#ifdef VBOX_WITH_STATISTICS
2730 if (!(pPatch->flags & PATMFL_SYSENTER))
2731 {
2732 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2733 if (RT_FAILURE(rc))
2734 goto failure;
2735 }
2736#endif
2737
2738 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pPatch);
2739 if (rc != VINF_SUCCESS)
2740 {
2741        Log(("PATMR3PatchBlock: patmRecompileCodeStream failed with %d\n", rc));
2742 goto failure;
2743 }
2744
2745 /* Calculated during analysis. */
2746 if (pPatch->cbPatchBlockSize < SIZEOF_NEARJUMP32)
2747 {
2748 /* Most likely cause: we encountered an illegal instruction very early on. */
2749 /** @todo could turn it into an int3 callable patch. */
2750 Log(("PATMR3PatchBlock: patch block too small -> refuse\n"));
2751 rc = VERR_PATCHING_REFUSED;
2752 goto failure;
2753 }
2754
2755 /* size of patch block */
2756 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2757
2758
2759 /* Update free pointer in patch memory. */
2760 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2761 /* Round to next 8 byte boundary. */
2762 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
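    /* Patch memory is handed out by this simple linear allocator: the running offset grows by
     * the block size and is rounded up to the next 8 byte boundary (e.g. RT_ALIGN_32(0x123, 8)
     * yields 0x128); the failure path below restores the saved orgOffsetPatchMem instead. */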
2763
2764 /*
2765 * Insert into patch to guest lookup tree
2766 */
2767 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2768 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2769 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2770    AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
2771 if (!rc)
2772 {
2773 rc = VERR_PATCHING_REFUSED;
2774 goto failure;
2775 }
2776
2777 /* Note that patmr3SetBranchTargets can install additional patches!! */
2778 rc = patmr3SetBranchTargets(pVM, pPatch);
2779 if (rc != VINF_SUCCESS)
2780 {
2781        Log(("PATMR3PatchBlock: patmr3SetBranchTargets failed with %d\n", rc));
2782 goto failure;
2783 }
2784
2785#ifdef LOG_ENABLED
2786 Log(("Patch code ----------------------------------------------------------\n"));
2787 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pPatch);
2788 Log(("Patch code ends -----------------------------------------------------\n"));
2789#endif
2790
2791 /* make a copy of the guest code bytes that will be overwritten */
2792 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
2793
2794 rc = PGMPhysSimpleReadGCPtr(pVM, pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
2795 AssertRC(rc);
2796
2797 if (pPatch->flags & PATMFL_INT3_REPLACEMENT_BLOCK)
2798 {
2799 /*uint8_t ASMInt3 = 0xCC; - unused */
2800
2801 Log(("PATMR3PatchBlock %RRv -> int 3 callable patch.\n", pPatch->pPrivInstrGC));
2802 /* Replace first opcode byte with 'int 3'. */
2803 rc = patmActivateInt3Patch(pVM, pPatch);
2804 if (RT_FAILURE(rc))
2805 goto failure;
2806
2807 /* normal patch can be turned into an int3 patch -> clear patch jump installation flag. */
2808 pPatch->flags &= ~PATMFL_MUST_INSTALL_PATCHJMP;
2809
2810 pPatch->flags &= ~PATMFL_INSTR_HINT;
2811 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
2812 }
2813 else
2814 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2815 {
2816 Assert(!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)));
2817 /* now insert a jump in the guest code */
2818 rc = patmGenJumpToPatch(pVM, pPatch, true);
2819 AssertRC(rc);
2820 if (RT_FAILURE(rc))
2821 goto failure;
2822
2823 }
2824
2825#ifdef LOG_ENABLED
2826 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2827 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput, PATMREAD_RAWCODE);
2828 Log(("%s patch: %s", patmGetInstructionString(pPatch->opcode, pPatch->flags), szOutput));
2829#endif
2830
2831 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2832 pPatch->pTempInfo->nrIllegalInstr = 0;
2833
2834 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
2835
2836 pPatch->uState = PATCH_ENABLED;
2837 return VINF_SUCCESS;
2838
2839failure:
2840 if (pPatchRec->CoreOffset.Key)
2841 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
2842
2843 patmEmptyTree(pVM, &pPatch->FixupTree);
2844 pPatch->nrFixups = 0;
2845
2846 patmEmptyTree(pVM, &pPatch->JumpTree);
2847 pPatch->nrJumpRecs = 0;
2848
2849 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2850 pPatch->pTempInfo->nrIllegalInstr = 0;
2851
2852    /* Turn this patch into a dummy. */
2853 pPatch->uState = PATCH_REFUSED;
2854 pPatch->pPatchBlockOffset = 0;
2855
2856 // Give back the patch memory we no longer need
2857 Assert(orgOffsetPatchMem != (uint32_t)~0);
2858 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
2859
2860 return rc;
2861}
2862
2863/**
2864 * Patch IDT handler
2865 *
2866 * @returns VBox status code.
2867 * @param pVM The VM to operate on.
2868 * @param pInstrGC Guest context pointer to privileged instruction
2869 * @param pInstrHC Host context pointer to privileged instruction
2870 * @param uOpSize Size of starting instruction
2871 * @param pPatchRec Patch record
2872 *
2873 * @note returns failure if patching is not allowed or not possible
2874 *
2875 */
2876static int patmIdtHandler(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2877 uint32_t uOpSize, PPATMPATCHREC pPatchRec)
2878{
2879 PPATCHINFO pPatch = &pPatchRec->patch;
2880 bool disret;
2881 DISCPUSTATE cpuPush, cpuJmp;
2882 uint32_t opsize;
2883 RTRCPTR pCurInstrGC = pInstrGC;
2884 uint8_t *pCurInstrHC = pInstrHC;
2885 uint32_t orgOffsetPatchMem = ~0;
2886
2887 /*
2888 * In Linux it's often the case that many interrupt handlers push a predefined value onto the stack
2889 * and then jump to a common entrypoint. In order not to waste a lot of memory, we will check for this
2890 * condition here and only patch the common entypoint once.
2891 * condition here and only patch the common entrypoint once.
2892 cpuPush.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2893 disret = PATMR3DISInstr(pVM, pPatch, &cpuPush, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2894 Assert(disret);
2895 if (disret && cpuPush.pCurInstr->opcode == OP_PUSH)
2896 {
2897 RTRCPTR pJmpInstrGC;
2898 int rc;
2899
2900 pCurInstrGC += opsize;
2901 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pCurInstrGC);
2902
2903 cpuJmp.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2904 disret = PATMR3DISInstr(pVM, pPatch, &cpuJmp, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2905 if ( disret
2906 && cpuJmp.pCurInstr->opcode == OP_JMP
2907 && (pJmpInstrGC = PATMResolveBranch(&cpuJmp, pCurInstrGC))
2908 )
2909 {
2910 PPATMPATCHREC pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
2911 if (pJmpPatch == 0)
2912 {
2913 /* Patch it first! */
2914 rc = PATMR3InstallPatch(pVM, pJmpInstrGC, pPatch->flags | PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT);
2915 if (rc != VINF_SUCCESS)
2916 goto failure;
2917 pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
2918 Assert(pJmpPatch);
2919 }
2920 if (pJmpPatch->patch.uState != PATCH_ENABLED)
2921 goto failure;
2922
2923 /* save original offset (in case of failures later on) */
2924 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2925
2926 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2927 pPatch->uCurPatchOffset = 0;
2928 pPatch->nrPatch2GuestRecs = 0;
2929
2930#ifdef VBOX_WITH_STATISTICS
2931 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2932 if (RT_FAILURE(rc))
2933 goto failure;
2934#endif
2935
2936 /* Install fake cli patch (to clear the virtual IF) */
2937 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2938 if (RT_FAILURE(rc))
2939 goto failure;
2940
2941 /* Add lookup record for patch to guest address translation (for the push) */
2942 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pInstrGC, PATM_LOOKUP_BOTHDIR);
2943
2944 /* Duplicate push. */
2945 rc = patmPatchGenDuplicate(pVM, pPatch, &cpuPush, pInstrGC);
2946 if (RT_FAILURE(rc))
2947 goto failure;
2948
2949 /* Generate jump to common entrypoint. */
2950 rc = patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, PATCHCODE_PTR_GC(&pJmpPatch->patch));
2951 if (RT_FAILURE(rc))
2952 goto failure;
2953
2954 /* size of patch block */
2955 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2956
2957 /* Update free pointer in patch memory. */
2958 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2959 /* Round to next 8 byte boundary */
2960 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2961
2962 /* There's no jump from guest to patch code. */
2963 pPatch->cbPatchJump = 0;
2964
2965
2966#ifdef LOG_ENABLED
2967 Log(("Patch code ----------------------------------------------------------\n"));
2968 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pPatch);
2969 Log(("Patch code ends -----------------------------------------------------\n"));
2970#endif
2971 Log(("Successfully installed IDT handler patch at %RRv\n", pInstrGC));
2972
2973 /*
2974 * Insert into patch to guest lookup tree
2975 */
2976 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2977 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2978 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2979            AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
2980
2981 pPatch->uState = PATCH_ENABLED;
2982
2983 return VINF_SUCCESS;
2984 }
2985 }
2986failure:
2987 /* Give back the patch memory we no longer need */
2988 if (orgOffsetPatchMem != (uint32_t)~0)
2989 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
2990
2991 return PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, OP_CLI, uOpSize, pPatchRec);
2992}
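/* Typical guest pattern handled above (classic Linux interrupt stubs), shown as a sketch:
 *      push  <vector specific value>
 *      jmp   common_interrupt          ; shared entrypoint, patched only once and reused
 * Handlers that do not match this shape fall back to PATMR3PatchBlock above. */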
2993
2994/**
2995 * Install a trampoline to call a guest trap handler directly
2996 *
2997 * @returns VBox status code.
2998 * @param pVM The VM to operate on.
2999 * @param pInstrGC Guest context pointer to privileged instruction
3000 * @param pPatchRec Patch record
3001 *
3002 */
3003static int patmInstallTrapTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3004{
3005 PPATCHINFO pPatch = &pPatchRec->patch;
3006 int rc = VERR_PATCHING_REFUSED;
3007 uint32_t orgOffsetPatchMem = ~0;
3008#ifdef LOG_ENABLED
3009 bool disret;
3010 DISCPUSTATE cpu;
3011 uint32_t opsize;
3012 char szOutput[256];
3013#endif
3014
3015 // save original offset (in case of failures later on)
3016 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3017
3018 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3019 pPatch->uCurPatchOffset = 0;
3020 pPatch->nrPatch2GuestRecs = 0;
3021
3022#ifdef VBOX_WITH_STATISTICS
3023 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3024 if (RT_FAILURE(rc))
3025 goto failure;
3026#endif
3027
3028 rc = patmPatchGenTrapEntry(pVM, pPatch, pInstrGC);
3029 if (RT_FAILURE(rc))
3030 goto failure;
3031
3032 /* size of patch block */
3033 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3034
3035 /* Update free pointer in patch memory. */
3036 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3037 /* Round to next 8 byte boundary */
3038 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3039
3040 /* There's no jump from guest to patch code. */
3041 pPatch->cbPatchJump = 0;
3042
3043#ifdef LOG_ENABLED
3044 Log(("Patch code ----------------------------------------------------------\n"));
3045 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pPatch);
3046 Log(("Patch code ends -----------------------------------------------------\n"));
3047#endif
3048
3049#ifdef LOG_ENABLED
3050 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3051 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput);
3052 Log(("TRAP handler patch: %s", szOutput));
3053#endif
3054 Log(("Successfully installed Trap Trampoline patch at %RRv\n", pInstrGC));
3055
3056 /*
3057 * Insert into patch to guest lookup tree
3058 */
3059 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3060 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3061 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3062    AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3063
3064 pPatch->uState = PATCH_ENABLED;
3065 return VINF_SUCCESS;
3066
3067failure:
3068 AssertMsgFailed(("Failed to install trap handler trampoline!!\n"));
3069
3070    /* Turn this patch into a dummy. */
3071 pPatch->uState = PATCH_REFUSED;
3072 pPatch->pPatchBlockOffset = 0;
3073
3074 /* Give back the patch memory we no longer need */
3075 Assert(orgOffsetPatchMem != (uint32_t)~0);
3076 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3077
3078 return rc;
3079}
3080
3081
3082#ifdef LOG_ENABLED
3083/**
3084 * Check if the instruction is patched as a common idt handler
3085 * Check if the instruction is patched as a common IDT handler
3086 * @returns true or false
3087 * @param pVM The VM to operate on.
3088 * @param pInstrGC Guest context pointer to the instruction
3089 *
3090 */
3091static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC)
3092{
3093 PPATMPATCHREC pRec;
3094
3095 pRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
3096 if (pRec && pRec->patch.flags & PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT)
3097 return true;
3098 return false;
3099}
3100#endif /* LOG_ENABLED */
3101
3102
3103/**
3104 * Duplicates a complete function
3105 *
3106 * @returns VBox status code.
3107 * @param pVM The VM to operate on.
3108 * @param pInstrGC Guest context pointer to the privileged instruction
3109 * @param pPatchRec Patch record
3110 *
3111 */
3112static int patmDuplicateFunction(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3113{
3114 PPATCHINFO pPatch = &pPatchRec->patch;
3115 int rc = VERR_PATCHING_REFUSED;
3116 DISCPUSTATE cpu;
3117 uint32_t orgOffsetPatchMem = ~0;
3118
3119 Log(("patmDuplicateFunction %RRv\n", pInstrGC));
3120 /* Save original offset (in case of failures later on). */
3121 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3122
3123 /* Don't recurse indefinitely while handling call instructions. */
3124 if (pVM->patm.s.ulCallDepth > PATM_MAX_CALL_DEPTH)
3125 {
3126 Log(("patmDuplicateFunction: maximum callback depth reached!!\n"));
3127 return VERR_PATCHING_REFUSED;
3128 }
3129
3130 pVM->patm.s.ulCallDepth++;
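 /* Note: the recompilation below (in particular patmr3SetBranchTargets, see the note further down)
  * may install patches for call targets and re-enter this function; the call depth counter above
  * bounds that recursion. */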
3131
3132#ifdef PATM_ENABLE_CALL
3133 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
3134#endif
3135
3136 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
3137
3138 pPatch->nrPatch2GuestRecs = 0;
3139 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3140 pPatch->uCurPatchOffset = 0;
3141
3142 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3143
3144 /** @note Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3145 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3146 if (RT_FAILURE(rc))
3147 goto failure;
3148
3149#ifdef VBOX_WITH_STATISTICS
3150 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3151 if (RT_FAILURE(rc))
3152 goto failure;
3153#endif
3154 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pPatch);
3155 if (rc != VINF_SUCCESS)
3156 {
3157 Log(("PATMR3PatchCli: patmRecompileCodeStream failed with %d\n", rc));
3158 goto failure;
3159 }
3160
3161 //size of patch block
3162 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3163
3164 //update free pointer in patch memory
3165 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3166 /* Round to next 8 byte boundary. */
3167 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3168
3169 pPatch->uState = PATCH_ENABLED;
3170
3171 /*
3172 * Insert into patch to guest lookup tree
3173 */
3174 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3175 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3176 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3177 AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3178 if (!rc)
3179 {
3180 rc = VERR_PATCHING_REFUSED;
3181 goto failure;
3182 }
3183
3184 /* Note that patmr3SetBranchTargets can install additional patches!! */
3185 rc = patmr3SetBranchTargets(pVM, pPatch);
3186 if (rc != VINF_SUCCESS)
3187 {
3188 Log(("PATMR3PatchCli: patmr3SetBranchTargets failed with %d\n", rc));
3189 goto failure;
3190 }
3191
3192#ifdef LOG_ENABLED
3193 Log(("Patch code ----------------------------------------------------------\n"));
3194 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pPatch);
3195 Log(("Patch code ends -----------------------------------------------------\n"));
3196#endif
3197
3198 Log(("Successfully installed function duplication patch at %RRv\n", pInstrGC));
3199
3200 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3201 pPatch->pTempInfo->nrIllegalInstr = 0;
3202
3203 pVM->patm.s.ulCallDepth--;
3204 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledFunctionPatches);
3205 return VINF_SUCCESS;
3206
3207failure:
3208 if (pPatchRec->CoreOffset.Key)
3209 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3210
3211 patmEmptyTree(pVM, &pPatch->FixupTree);
3212 pPatch->nrFixups = 0;
3213
3214 patmEmptyTree(pVM, &pPatch->JumpTree);
3215 pPatch->nrJumpRecs = 0;
3216
3217 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3218 pPatch->pTempInfo->nrIllegalInstr = 0;
3219
3220 /* Turn this cli patch into a dummy. */
3221 pPatch->uState = PATCH_REFUSED;
3222 pPatch->pPatchBlockOffset = 0;
3223
3224 // Give back the patch memory we no longer need
3225 Assert(orgOffsetPatchMem != (uint32_t)~0);
3226 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3227
3228 pVM->patm.s.ulCallDepth--;
3229 Log(("patmDupicateFunction %RRv failed!!\n", pInstrGC));
3230 return rc;
3231}
3232
3233/**
3234 * Creates trampoline code to jump inside an existing patch
3235 *
3236 * @returns VBox status code.
3237 * @param pVM The VM to operate on.
3238 * @param pInstrGC Guest context pointer to the privileged instruction
3239 * @param pPatchRec Patch record
3240 *
3241 */
3242static int patmCreateTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3243{
3244 PPATCHINFO pPatch = &pPatchRec->patch;
3245 RTRCPTR pPage, pPatchTargetGC = 0;
3246 uint32_t orgOffsetPatchMem = ~0;
3247 int rc = VERR_PATCHING_REFUSED;
3248
3249 Log(("patmCreateTrampoline %RRv\n", pInstrGC));
3250 /* Save original offset (in case of failures later on). */
3251 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3252
3253 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3254 /** @todo we already checked this before */
3255 pPage = pInstrGC & PAGE_BASE_GC_MASK;
3256
3257 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3258 if (pPatchPage)
3259 {
3260 uint32_t i;
3261
3262 for (i=0;i<pPatchPage->cCount;i++)
3263 {
3264 if (pPatchPage->aPatch[i])
3265 {
3266 PPATCHINFO pPatch = pPatchPage->aPatch[i];
3267
3268 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
3269 && pPatch->uState == PATCH_ENABLED)
3270 {
3271 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pInstrGC);
3272 if (pPatchTargetGC)
3273 {
3274 uint32_t offsetPatch = pPatchTargetGC - pVM->patm.s.pPatchMemGC;
3275 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->Patch2GuestAddrTree, offsetPatch, false);
3276 Assert(pPatchToGuestRec);
3277
3278 pPatchToGuestRec->fJumpTarget = true;
3279 Assert(pPatchTargetGC != pPatch->pPrivInstrGC);
3280 Log(("patmCreateTrampoline: generating jump to code inside patch at %RRv\n", pPatch->pPrivInstrGC));
3281 pPatch->flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
3282 break;
3283 }
3284 }
3285 }
3286 }
3287 }
3288 AssertReturn(pPatchPage && pPatchTargetGC, VERR_PATCHING_REFUSED);
3289
3290 pPatch->nrPatch2GuestRecs = 0;
3291 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3292 pPatch->uCurPatchOffset = 0;
3293
3294 /** @note Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3295 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3296 if (RT_FAILURE(rc))
3297 goto failure;
3298
3299#ifdef VBOX_WITH_STATISTICS
3300 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3301 if (RT_FAILURE(rc))
3302 goto failure;
3303#endif
3304
3305 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC);
3306 if (RT_FAILURE(rc))
3307 goto failure;
3308
3309 /*
3310 * Insert into patch to guest lookup tree
3311 */
3312 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3313 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3314 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3315 AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3316 if (!rc)
3317 {
3318 rc = VERR_PATCHING_REFUSED;
3319 goto failure;
3320 }
3321
3322 /* size of patch block */
3323 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3324
3325 /* Update free pointer in patch memory. */
3326 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3327 /* Round to next 8 byte boundary */
3328 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3329
3330 /* There's no jump from guest to patch code. */
3331 pPatch->cbPatchJump = 0;
3332
3333 /* Enable the patch. */
3334 pPatch->uState = PATCH_ENABLED;
3335 /* We allow this patch to be called as a function. */
3336 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
3337 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledTrampoline);
3338 return VINF_SUCCESS;
3339
3340failure:
3341 if (pPatchRec->CoreOffset.Key)
3342 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3343
3344 patmEmptyTree(pVM, &pPatch->FixupTree);
3345 pPatch->nrFixups = 0;
3346
3347 patmEmptyTree(pVM, &pPatch->JumpTree);
3348 pPatch->nrJumpRecs = 0;
3349
3350 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3351 pPatch->pTempInfo->nrIllegalInstr = 0;
3352
3353 /* Turn this cli patch into a dummy. */
3354 pPatch->uState = PATCH_REFUSED;
3355 pPatch->pPatchBlockOffset = 0;
3356
3357 // Give back the patch memory we no longer need
3358 Assert(orgOffsetPatchMem != (uint32_t)~0);
3359 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3360
3361 return rc;
3362}
3363
3364
3365/**
3366 * Patch branch target function for call/jump at specified location.
3367 * (in response to a VINF_PATM_DUPLICATE_FUNCTION GC exit reason)
3368 *
3369 * @returns VBox status code.
3370 * @param pVM The VM to operate on.
3371 * @param pCtx Guest context
3372 *
3373 */
3374VMMR3DECL(int) PATMR3DuplicateFunctionRequest(PVM pVM, PCPUMCTX pCtx)
3375{
3376 RTRCPTR pBranchTarget, pPage;
3377 int rc;
3378 RTRCPTR pPatchTargetGC = 0;
3379
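 /* The patch code passes the branch target in EDX; EDI holds the patch memory address used for the
  * lookup cache update at the end, and EAX receives the patch memory offset of the duplicated
  * target (or 0 if patching failed). */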
3380 pBranchTarget = pCtx->edx;
3381 pBranchTarget = SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pBranchTarget);
3382
3383 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3384 pPage = pBranchTarget & PAGE_BASE_GC_MASK;
3385
3386 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3387 if (pPatchPage)
3388 {
3389 uint32_t i;
3390
3391 for (i=0;i<pPatchPage->cCount;i++)
3392 {
3393 if (pPatchPage->aPatch[i])
3394 {
3395 PPATCHINFO pPatch = pPatchPage->aPatch[i];
3396
3397 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
3398 && pPatch->uState == PATCH_ENABLED)
3399 {
3400 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pBranchTarget);
3401 if (pPatchTargetGC)
3402 {
3403 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateUseExisting);
3404 break;
3405 }
3406 }
3407 }
3408 }
3409 }
3410
3411 if (pPatchTargetGC)
3412 {
3413 /* Create a trampoline that also sets PATM_INTERRUPTFLAG. */
3414 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_TRAMPOLINE);
3415 }
3416 else
3417 {
3418 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
3419 }
3420
3421 if (rc == VINF_SUCCESS)
3422 {
3423 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pBranchTarget);
3424 Assert(pPatchTargetGC);
3425 }
3426
3427 if (pPatchTargetGC)
3428 {
3429 pCtx->eax = pPatchTargetGC;
3430 pCtx->eax = pCtx->eax - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC; /* make it relative */
3431 }
3432 else
3433 {
3434 /* We add a dummy entry into the lookup cache so we won't get bombarded with the same requests over and over again. */
3435 pCtx->eax = 0;
3436 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQFailed);
3437 }
3438 Assert(PATMIsPatchGCAddr(pVM, pCtx->edi));
3439 rc = PATMAddBranchToLookupCache(pVM, pCtx->edi, pBranchTarget, pCtx->eax);
3440 AssertRC(rc);
3441
3442 pCtx->eip += PATM_ILLEGAL_INSTR_SIZE;
3443 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQSuccess);
3444 return VINF_SUCCESS;
3445}
3446
3447/**
3448 * Replaces a function call by a call to an existing function duplicate (or jmp -> jmp)
3449 *
3450 * @returns VBox status code.
3451 * @param pVM The VM to operate on.
3452 * @param pCpu Disassembly CPU structure ptr
3453 * @param pInstrGC Guest context pointer to the privileged instruction
3454 * @param pPatch Patch record
3455 *
3456 */
3457static int patmReplaceFunctionCall(PVM pVM, DISCPUSTATE *pCpu, RTRCPTR pInstrGC, PPATCHINFO pPatch)
3458{
3459 int rc = VERR_PATCHING_REFUSED;
3460 DISCPUSTATE cpu;
3461 RTRCPTR pTargetGC;
3462 PPATMPATCHREC pPatchFunction;
3463 uint32_t opsize;
3464 bool disret;
3465#ifdef LOG_ENABLED
3466 char szOutput[256];
3467#endif
3468
3469 Assert(pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL);
3470 Assert((pCpu->pCurInstr->opcode == OP_CALL || pCpu->pCurInstr->opcode == OP_JMP) && pCpu->opsize == SIZEOF_NEARJUMP32);
3471
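 /* Only a near call or jump with a 32-bit relative displacement (SIZEOF_NEARJUMP32 bytes) can be
  * replaced in place by a call/jump into the duplicated function in patch memory. */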
3472 if ((pCpu->pCurInstr->opcode != OP_CALL && pCpu->pCurInstr->opcode != OP_JMP) || pCpu->opsize != SIZEOF_NEARJUMP32)
3473 {
3474 rc = VERR_PATCHING_REFUSED;
3475 goto failure;
3476 }
3477
3478 pTargetGC = PATMResolveBranch(pCpu, pInstrGC);
3479 if (pTargetGC == 0)
3480 {
3481 Log(("We don't support far jumps here!! (%08X)\n", pCpu->param1.flags));
3482 rc = VERR_PATCHING_REFUSED;
3483 goto failure;
3484 }
3485
3486 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3487 if (pPatchFunction == NULL)
3488 {
3489 for(;;)
3490 {
3491 /* It could be an indirect call (call -> jmp dest).
3492 * Note that it's dangerous to assume the jump will never change...
3493 */
3494 uint8_t *pTmpInstrHC;
3495
3496 pTmpInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pTargetGC);
3497 Assert(pTmpInstrHC);
3498 if (pTmpInstrHC == 0)
3499 break;
3500
3501 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3502 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pTargetGC, pTmpInstrHC, &opsize, NULL);
3503 if (disret == false || cpu.pCurInstr->opcode != OP_JMP)
3504 break;
3505
3506 pTargetGC = PATMResolveBranch(&cpu, pTargetGC);
3507 if (pTargetGC == 0)
3508 {
3509 break;
3510 }
3511
3512 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3513 break;
3514 }
3515 if (pPatchFunction == 0)
3516 {
3517 AssertMsgFailed(("Unable to find duplicate function %RRv\n", pTargetGC));
3518 rc = VERR_PATCHING_REFUSED;
3519 goto failure;
3520 }
3521 }
3522
3523 // make a copy of the guest code bytes that will be overwritten
3524 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
3525
3526 rc = PGMPhysSimpleReadGCPtr(pVM, pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3527 AssertRC(rc);
3528
3529 /* Now replace the original call in the guest code */
3530 rc = patmGenCallToPatch(pVM, pPatch, PATCHCODE_PTR_GC(&pPatchFunction->patch), true);
3531 AssertRC(rc);
3532 if (RT_FAILURE(rc))
3533 goto failure;
3534
3535 /* Lowest and highest address for write monitoring. */
3536 pPatch->pInstrGCLowest = pInstrGC;
3537 pPatch->pInstrGCHighest = pInstrGC + pCpu->opsize;
3538
3539#ifdef LOG_ENABLED
3540 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3541 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput);
3542 Log(("Call patch: %s", szOutput));
3543#endif
3544
3545 Log(("Successfully installed function replacement patch at %RRv\n", pInstrGC));
3546
3547 pPatch->uState = PATCH_ENABLED;
3548 return VINF_SUCCESS;
3549
3550failure:
3551 /* Turn this patch into a dummy. */
3552 pPatch->uState = PATCH_REFUSED;
3553
3554 return rc;
3555}
3556
3557/**
3558 * Replace the address in an MMIO instruction with the cached version.
3559 *
3560 * @returns VBox status code.
3561 * @param pVM The VM to operate on.
3562 * @param pInstrGC Guest context pointer to the privileged instruction
3563 * @param pCpu Disassembly CPU structure ptr
3564 * @param pPatch Patch record
3565 *
3566 * @note returns failure if patching is not allowed or possible
3567 *
3568 */
3569static int patmPatchMMIOInstr(PVM pVM, RTRCPTR pInstrGC, DISCPUSTATE *pCpu, PPATCHINFO pPatch)
3570{
3571 uint8_t *pPB;
3572 int rc = VERR_PATCHING_REFUSED;
3573#ifdef LOG_ENABLED
3574 DISCPUSTATE cpu;
3575 uint32_t opsize;
3576 bool disret;
3577 char szOutput[256];
3578#endif
3579
3580 Assert(pVM->patm.s.mmio.pCachedData);
3581 if (!pVM->patm.s.mmio.pCachedData)
3582 goto failure;
3583
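 /* Only instructions whose second operand is a 32-bit displacement (the MMIO address) are handled;
  * the last sizeof(RTRCPTR) bytes of the instruction are overwritten with the cached data address below. */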
3584 if (pCpu->param2.flags != USE_DISPLACEMENT32)
3585 goto failure;
3586
3587 pPB = pPatch->pPrivInstrHC;
3588
3589 /* Add relocation record for cached data access. */
3590 if (patmPatchAddReloc32(pVM, pPatch, &pPB[pCpu->opsize - sizeof(RTRCPTR)], FIXUP_ABSOLUTE, pPatch->pPrivInstrGC, pVM->patm.s.mmio.pCachedData) != VINF_SUCCESS)
3591 {
3592 Log(("Relocation failed for cached mmio address!!\n"));
3593 return VERR_PATCHING_REFUSED;
3594 }
3595#ifdef LOG_ENABLED
3596 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3597 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput);
3598 Log(("MMIO patch old instruction: %s", szOutput));
3599#endif
3600
3601 /* Save original instruction. */
3602 rc = PGMPhysSimpleReadGCPtr(pVM, pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3603 AssertRC(rc);
3604
3605 pPatch->cbPatchJump = pPatch->cbPrivInstr; /* bit of a misnomer in this case; size of replacement instruction. */
3606
3607 /* Replace address with that of the cached item. */
3608 rc = PGMPhysSimpleDirtyWriteGCPtr(pVM, pInstrGC + pCpu->opsize - sizeof(RTRCPTR), &pVM->patm.s.mmio.pCachedData, sizeof(RTRCPTR));
3609 AssertRC(rc);
3610 if (RT_FAILURE(rc))
3611 {
3612 goto failure;
3613 }
3614
3615#ifdef LOG_ENABLED
3616 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3617 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput);
3618 Log(("MMIO patch: %s", szOutput));
3619#endif
3620 pVM->patm.s.mmio.pCachedData = 0;
3621 pVM->patm.s.mmio.GCPhys = 0;
3622 pPatch->uState = PATCH_ENABLED;
3623 return VINF_SUCCESS;
3624
3625failure:
3626 /* Turn this patch into a dummy. */
3627 pPatch->uState = PATCH_REFUSED;
3628
3629 return rc;
3630}
3631
3632
3633/**
3634 * Replace the address in an MMIO instruction with the cached version. (instruction is part of an existing patch)
3635 *
3636 * @returns VBox status code.
3637 * @param pVM The VM to operate on.
3638 * @param pInstrGC Guest context pointer to the privileged instruction
3639 * @param pPatch Patch record
3640 *
3641 * @note returns failure if patching is not allowed or possible
3642 *
3643 */
3644static int patmPatchPATMMMIOInstr(PVM pVM, RTRCPTR pInstrGC, PPATCHINFO pPatch)
3645{
3646 DISCPUSTATE cpu;
3647 uint32_t opsize;
3648 bool disret;
3649 uint8_t *pInstrHC;
3650#ifdef LOG_ENABLED
3651 char szOutput[256];
3652#endif
3653
3654 AssertReturn(pVM->patm.s.mmio.pCachedData, VERR_INVALID_PARAMETER);
3655
3656 /* Convert GC to HC address. */
3657 pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pInstrGC);
3658 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
3659
3660 /* Disassemble mmio instruction. */
3661 cpu.mode = pPatch->uOpMode;
3662 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, NULL);
3663 if (disret == false)
3664 {
3665 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
3666 return VERR_PATCHING_REFUSED;
3667 }
3668
3669 AssertMsg(opsize <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", opsize));
3670 if (opsize > MAX_INSTR_SIZE)
3671 return VERR_PATCHING_REFUSED;
3672 if (cpu.param2.flags != USE_DISPLACEMENT32)
3673 return VERR_PATCHING_REFUSED;
3674
3675 /* Add relocation record for cached data access. */
3676 if (patmPatchAddReloc32(pVM, pPatch, &pInstrHC[cpu.opsize - sizeof(RTRCPTR)], FIXUP_ABSOLUTE) != VINF_SUCCESS)
3677 {
3678 Log(("Relocation failed for cached mmio address!!\n"));
3679 return VERR_PATCHING_REFUSED;
3680 }
3681 /* Replace address with that of the cached item. */
3682 *(RTRCPTR *)&pInstrHC[cpu.opsize - sizeof(RTRCPTR)] = pVM->patm.s.mmio.pCachedData;
3683
3684 /* Lowest and highest address for write monitoring. */
3685 pPatch->pInstrGCLowest = pInstrGC;
3686 pPatch->pInstrGCHighest = pInstrGC + cpu.opsize;
3687
3688#ifdef LOG_ENABLED
3689 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3690 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, szOutput);
3691 Log(("MMIO patch: %s", szOutput));
3692#endif
3693
3694 pVM->patm.s.mmio.pCachedData = 0;
3695 pVM->patm.s.mmio.GCPhys = 0;
3696 return VINF_SUCCESS;
3697}
3698
3699/**
3700 * Activates an int3 patch
3701 *
3702 * @returns VBox status code.
3703 * @param pVM The VM to operate on.
3704 * @param pPatch Patch record
3705 */
3706static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3707{
3708 uint8_t ASMInt3 = 0xCC;
3709 int rc;
3710
3711 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3712 Assert(pPatch->uState != PATCH_ENABLED);
3713
3714 /* Replace first opcode byte with 'int 3'. */
3715 rc = PGMPhysSimpleDirtyWriteGCPtr(pVM, pPatch->pPrivInstrGC, &ASMInt3, sizeof(ASMInt3));
3716 AssertRC(rc);
3717
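 /* Record how many guest bytes were replaced (a single int3 here); PATMR3PatchWrite uses
  * cbPatchJump to detect guest writes to the replaced byte. */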
3718 pPatch->cbPatchJump = sizeof(ASMInt3);
3719
3720 return rc;
3721}
3722
3723/**
3724 * Deactivates an int3 patch
3725 *
3726 * @returns VBox status code.
3727 * @param pVM The VM to operate on.
3728 * @param pPatch Patch record
3729 */
3730static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3731{
3732 uint8_t ASMInt3 = 0xCC;
3733 int rc;
3734
3735 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3736 Assert(pPatch->uState == PATCH_ENABLED || pPatch->uState == PATCH_DIRTY);
3737
3738 /* Restore first opcode byte. */
3739 rc = PGMPhysSimpleDirtyWriteGCPtr(pVM, pPatch->pPrivInstrGC, pPatch->aPrivInstr, sizeof(ASMInt3));
3740 AssertRC(rc);
3741 return rc;
3742}
3743
3744/**
3745 * Replace an instruction with a breakpoint (0xCC), that is handled dynamically in the guest context.
3746 *
3747 * @returns VBox status code.
3748 * @param pVM The VM to operate on.
3749 * @param pInstrGC Guest context pointer to the privileged instruction
3750 * @param pInstrHC Host context pointer to the privileged instruction
3751 * @param pCpu Disassembly CPU structure ptr
3752 * @param pPatch Patch record
3753 *
3754 * @note returns failure if patching is not allowed or possible
3755 *
3756 */
3757VMMR3DECL(int) PATMR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATCHINFO pPatch)
3758{
3759 uint8_t ASMInt3 = 0xCC;
3760 int rc;
3761
3762 /** @note Do not use patch memory here! It might be called during patch installation too. */
3763
3764#ifdef LOG_ENABLED
3765 DISCPUSTATE cpu;
3766 char szOutput[256];
3767 uint32_t opsize;
3768
3769 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3770 PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, szOutput);
3771 Log(("PATMR3PatchInstrInt3: %s", szOutput));
3772#endif
3773
3774 /* Save the original instruction. */
3775 rc = PGMPhysSimpleReadGCPtr(pVM, pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3776 AssertRC(rc);
3777 pPatch->cbPatchJump = sizeof(ASMInt3); /* bit of a misnomer in this case; size of replacement instruction. */
3778
3779 pPatch->flags |= PATMFL_INT3_REPLACEMENT;
3780
3781 /* Replace first opcode byte with 'int 3'. */
3782 rc = patmActivateInt3Patch(pVM, pPatch);
3783 if (RT_FAILURE(rc))
3784 goto failure;
3785
3786 /* Lowest and highest address for write monitoring. */
3787 pPatch->pInstrGCLowest = pInstrGC;
3788 pPatch->pInstrGCHighest = pInstrGC + pCpu->opsize;
3789
3790 pPatch->uState = PATCH_ENABLED;
3791 return VINF_SUCCESS;
3792
3793failure:
3794 /* Turn this patch into a dummy. */
3795 return VERR_PATCHING_REFUSED;
3796}
3797
3798#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
3799/**
3800 * Patch a jump instruction at specified location
3801 *
3802 * @returns VBox status code.
3803 * @param pVM The VM to operate on.
3804 * @param pInstrGC Guest context pointer to the privileged instruction
3805 * @param pInstrHC Host context pointer to the privileged instruction
3806 * @param pCpu Disassembly CPU structure ptr
3807 * @param pPatchRec Patch record
3808 *
3809 * @note returns failure if patching is not allowed or possible
3810 *
3811 */
3812int patmPatchJump(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATMPATCHREC pPatchRec)
3813{
3814 PPATCHINFO pPatch = &pPatchRec->patch;
3815 int rc = VERR_PATCHING_REFUSED;
3816#ifdef LOG_ENABLED
3817 bool disret;
3818 DISCPUSTATE cpu;
3819 uint32_t opsize;
3820 char szOutput[256];
3821#endif
3822
3823 pPatch->pPatchBlockOffset = 0; /* doesn't use patch memory */
3824 pPatch->uCurPatchOffset = 0;
3825 pPatch->cbPatchBlockSize = 0;
3826 pPatch->flags |= PATMFL_SINGLE_INSTRUCTION;
3827
3828 /*
3829 * Instruction replacements such as these should never be interrupted. I've added code to EM.cpp to
3830 * make sure this never happens (unless a trap is triggered, intentionally or not).
3831 */
3832 switch (pCpu->pCurInstr->opcode)
3833 {
3834 case OP_JO:
3835 case OP_JNO:
3836 case OP_JC:
3837 case OP_JNC:
3838 case OP_JE:
3839 case OP_JNE:
3840 case OP_JBE:
3841 case OP_JNBE:
3842 case OP_JS:
3843 case OP_JNS:
3844 case OP_JP:
3845 case OP_JNP:
3846 case OP_JL:
3847 case OP_JNL:
3848 case OP_JLE:
3849 case OP_JNLE:
3850 case OP_JMP:
3851 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
3852 Assert(pCpu->param1.flags & USE_IMMEDIATE32_REL);
3853 if (!(pCpu->param1.flags & USE_IMMEDIATE32_REL))
3854 goto failure;
3855
3856 Assert(pCpu->opsize == SIZEOF_NEARJUMP32 || pCpu->opsize == SIZEOF_NEAR_COND_JUMP32);
3857 if (pCpu->opsize != SIZEOF_NEARJUMP32 && pCpu->opsize != SIZEOF_NEAR_COND_JUMP32)
3858 goto failure;
3859
3860 if (PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + pCpu->opsize))
3861 {
3862 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
3863 AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
3864 rc = VERR_PATCHING_REFUSED;
3865 goto failure;
3866 }
3867
3868 break;
3869
3870 default:
3871 goto failure;
3872 }
3873
3874 // make a copy of the guest code bytes that will be overwritten
3875 Assert(pCpu->opsize <= sizeof(pPatch->aPrivInstr));
3876 Assert(pCpu->opsize >= SIZEOF_NEARJUMP32);
3877 pPatch->cbPatchJump = pCpu->opsize;
3878
3879 rc = PGMPhysSimpleReadGCPtr(pVM, pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3880 AssertRC(rc);
3881
3882 /* Now insert a jump in the guest code. */
3883 /*
3884 * A conflict jump patch needs to be treated differently; we'll just replace the relative jump address with one that
3885 * references the target instruction in the conflict patch.
3886 */
3887 RTRCPTR pJmpDest = PATMR3GuestGCPtrToPatchGCPtr(pVM, pInstrGC + pCpu->opsize + (int32_t)pCpu->param1.parval);
3888
3889 AssertMsg(pJmpDest, ("PATMR3GuestGCPtrToPatchGCPtr failed for %RRv\n", pInstrGC + pCpu->opsize + (int32_t)pCpu->param1.parval));
3890 pPatch->pPatchJumpDestGC = pJmpDest;
3891
3892 rc = patmGenJumpToPatch(pVM, pPatch, true);
3893 AssertRC(rc);
3894 if (RT_FAILURE(rc))
3895 goto failure;
3896
3897 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
3898
3899#ifdef LOG_ENABLED
3900 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3901 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput);
3902 Log(("%s patch: %s", patmGetInstructionString(pPatch->opcode, pPatch->flags), szOutput));
3903#endif
3904
3905 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
3906
3907 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledJump);
3908
3909 /* Lowest and highest address for write monitoring. */
3910 pPatch->pInstrGCLowest = pInstrGC;
3911 pPatch->pInstrGCHighest = pInstrGC + pPatch->cbPatchJump;
3912
3913 pPatch->uState = PATCH_ENABLED;
3914 return VINF_SUCCESS;
3915
3916failure:
3917 /* Turn this cli patch into a dummy. */
3918 pPatch->uState = PATCH_REFUSED;
3919
3920 return rc;
3921}
3922#endif /* PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES */
3923
3924
3925/**
3926 * Gives hint to PATM about supervisor guest instructions
3927 *
3928 * @returns VBox status code.
3929 * @param pVM The VM to operate on.
3930 * @param pInstrGC Guest context pointer to the privileged instruction
3931 * @param flags Patch flags
3932 */
3933VMMR3DECL(int) PATMR3AddHint(PVM pVM, RTRCPTR pInstrGC, uint32_t flags)
3934{
3935 Assert(pInstrGC);
3936 Assert(flags == PATMFL_CODE32);
3937
3938 Log(("PATMR3AddHint %RRv\n", pInstrGC));
3939 return PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_INSTR_HINT);
3940}
3941
3942/**
3943 * Patch privileged instruction at specified location
3944 *
3945 * @returns VBox status code.
3946 * @param pVM The VM to operate on.
3947 * @param pInstrGC Guest context pointer to the privileged instruction (0:32 flat address)
3948 * @param flags Patch flags
3949 *
3950 * @note returns failure if patching is not allowed or possible
3951 */
3952VMMR3DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
3953{
3954 DISCPUSTATE cpu;
3955 R3PTRTYPE(uint8_t *) pInstrHC;
3956 uint32_t opsize;
3957 PPATMPATCHREC pPatchRec;
3958 PCPUMCTX pCtx = 0;
3959 bool disret;
3960 int rc;
3961
3962 if (!pVM || pInstrGC == 0 || (flags & ~(PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_SYSENTER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_REPLACE_FUNCTION_CALL|PATMFL_GUEST_SPECIFIC|PATMFL_INT3_REPLACEMENT|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_MMIO_ACCESS|PATMFL_TRAMPOLINE|PATMFL_INSTR_HINT|PATMFL_JUMP_CONFLICT)))
3963 {
3964 AssertFailed();
3965 return VERR_INVALID_PARAMETER;
3966 }
3967
3968 if (PATMIsEnabled(pVM) == false)
3969 return VERR_PATCHING_REFUSED;
3970
3971 /* Test for patch conflict only with patches that actually change guest code. */
3972 if (!(flags & (PATMFL_GUEST_SPECIFIC|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAMPOLINE)))
3973 {
3974 PPATCHINFO pConflictPatch = PATMFindActivePatchByEntrypoint(pVM, pInstrGC);
3975 AssertReleaseMsg(pConflictPatch == 0, ("Unable to patch overwritten instruction at %RRv (%RRv)\n", pInstrGC, pConflictPatch->pPrivInstrGC));
3976 if (pConflictPatch != 0)
3977 return VERR_PATCHING_REFUSED;
3978 }
3979
3980 if (!(flags & PATMFL_CODE32))
3981 {
3982 /** @todo Only 32 bits code right now */
3983 AssertMsgFailed(("PATMR3InstallPatch: We don't support 16 bits code at this moment!!\n"));
3984 return VERR_NOT_IMPLEMENTED;
3985 }
3986
3987 /* We ran out of patch memory; don't bother anymore. */
3988 if (pVM->patm.s.fOutOfMemory == true)
3989 return VERR_PATCHING_REFUSED;
3990
3991 /* Make sure the code selector is wide open; otherwise refuse. */
3992 pCtx = CPUMQueryGuestCtxPtr(pVM);
3993 if (CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)) == 0)
3994 {
3995 RTRCPTR pInstrGCFlat = SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pInstrGC);
3996 if (pInstrGCFlat != pInstrGC)
3997 {
3998 Log(("PATMR3InstallPatch: code selector not wide open: %04x:%RRv != %RRv eflags=%08x\n", pCtx->cs, pInstrGCFlat, pInstrGC, pCtx->eflags.u32));
3999 return VERR_PATCHING_REFUSED;
4000 }
4001 }
4002
4003 /** @note the OpenBSD specific check will break if we allow additional patches to be installed (int 3) */
4004 if (!(flags & PATMFL_GUEST_SPECIFIC))
4005 {
4006 /* New code. Make sure CSAM has a go at it first. */
4007 CSAMR3CheckCode(pVM, pInstrGC);
4008 }
4009
4010 /** @note obsolete */
4011 if ( PATMIsPatchGCAddr(pVM, pInstrGC)
4012 && (flags & PATMFL_MMIO_ACCESS))
4013 {
4014 RTRCUINTPTR offset;
4015 void *pvPatchCoreOffset;
4016
4017 /* Find the patch record. */
4018 offset = pInstrGC - pVM->patm.s.pPatchMemGC;
4019 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
4020 if (pvPatchCoreOffset == NULL)
4021 {
4022 AssertMsgFailed(("PATMR3InstallPatch: patch not found at address %RRv!!\n", pInstrGC));
4023 return VERR_PATCH_NOT_FOUND; //fatal error
4024 }
4025 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
4026
4027 return patmPatchPATMMMIOInstr(pVM, pInstrGC, &pPatchRec->patch);
4028 }
4029
4030 AssertReturn(!PATMIsPatchGCAddr(pVM, pInstrGC), VERR_PATCHING_REFUSED);
4031
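 /* If this instruction was patched before, decide based on the old patch state whether to
  * re-enable it, remove it and start over, or refuse. */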
4032 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4033 if (pPatchRec)
4034 {
4035 Assert(!(flags & PATMFL_TRAMPOLINE));
4036
4037 /* Hints about existing patches are ignored. */
4038 if (flags & PATMFL_INSTR_HINT)
4039 return VERR_PATCHING_REFUSED;
4040
4041 if (pPatchRec->patch.uState == PATCH_DISABLE_PENDING)
4042 {
4043 Log(("PATMR3InstallPatch: disable operation is pending for patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
4044 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
4045 Assert(pPatchRec->patch.uState == PATCH_DISABLED);
4046 }
4047
4048 if (pPatchRec->patch.uState == PATCH_DISABLED)
4049 {
4050 /* A patch, for which we previously received a hint, will be enabled and turned into a normal patch. */
4051 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4052 {
4053 Log(("Enabling HINTED patch %RRv\n", pInstrGC));
4054 pPatchRec->patch.flags &= ~PATMFL_INSTR_HINT;
4055 }
4056 else
4057 Log(("Enabling patch %RRv again\n", pInstrGC));
4058
4059 /** @todo we shouldn't disable and enable patches too often (it's relatively cheap, but pointless if it always happens) */
4060 rc = PATMR3EnablePatch(pVM, pInstrGC);
4061 if (RT_SUCCESS(rc))
4062 return VWRN_PATCH_ENABLED;
4063
4064 return rc;
4065 }
4066 if ( pPatchRec->patch.uState == PATCH_ENABLED
4067 || pPatchRec->patch.uState == PATCH_DIRTY)
4068 {
4069 /*
4070 * The patch might have been overwritten.
4071 */
4072 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
4073 if (pPatchRec->patch.uState != PATCH_REFUSED && pPatchRec->patch.uState != PATCH_UNUSABLE)
4074 {
4075 /* Patch must have been overwritten; remove it and pretend nothing happened. */
4076 Log(("Patch an existing patched instruction?!? (%RRv)\n", pInstrGC));
4077 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_MMIO_ACCESS|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
4078 {
4079 if (flags & PATMFL_IDTHANDLER)
4080 pPatchRec->patch.flags |= (flags & (PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER|PATMFL_INTHANDLER)); /* update the type */
4081
4082 return VERR_PATM_ALREADY_PATCHED; /* already done once */
4083 }
4084 }
4085 rc = PATMR3RemovePatch(pVM, pInstrGC);
4086 if (RT_FAILURE(rc))
4087 return VERR_PATCHING_REFUSED;
4088 }
4089 else
4090 {
4091 AssertMsg(pPatchRec->patch.uState == PATCH_REFUSED || pPatchRec->patch.uState == PATCH_UNUSABLE, ("Patch an existing patched instruction?!? (%RRv, state=%d)\n", pInstrGC, pPatchRec->patch.uState));
4092 /* already tried it once! */
4093 return VERR_PATCHING_REFUSED;
4094 }
4095 }
4096
4097 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
4098 if (RT_FAILURE(rc))
4099 {
4100 Log(("Out of memory!!!!\n"));
4101 return VERR_NO_MEMORY;
4102 }
4103 pPatchRec->Core.Key = pInstrGC;
4104 pPatchRec->patch.uState = PATCH_REFUSED; //default
4105 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
4106 Assert(rc);
4107
4108 RTGCPHYS GCPhys;
4109 rc = PGMGstGetPage(pVM, pInstrGC, NULL, &GCPhys);
4110 if (rc != VINF_SUCCESS)
4111 {
4112 Log(("PGMGstGetPage failed with %Rrc\n", rc));
4113 return rc;
4114 }
4115 /* Disallow patching instructions inside ROM code; complete function duplication is allowed though. */
4116 if ( !(flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAMPOLINE))
4117 && !PGMPhysIsGCPhysNormal(pVM, GCPhys))
4118 {
4119 Log(("Code at %RGv (phys %RGp) is in a ROM, MMIO or invalid page - refused\n", pInstrGC, GCPhys));
4120 return VERR_PATCHING_REFUSED;
4121 }
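 /* The physical address returned by PGMGstGetPage is page aligned; add the page offset of
  * pInstrGC to get the physical address of the instruction before mapping it into host memory. */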
4122 GCPhys = GCPhys + (pInstrGC & PAGE_OFFSET_MASK);
4123 rc = PGMPhysGCPhys2HCPtr(pVM, GCPhys, MAX_INSTR_SIZE, (void **)&pInstrHC);
4124 AssertRCReturn(rc, rc);
4125
4126 pPatchRec->patch.pPrivInstrHC = pInstrHC;
4127 pPatchRec->patch.pPrivInstrGC = pInstrGC;
4128 pPatchRec->patch.flags = flags;
4129 pPatchRec->patch.uOpMode = (flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
4130
4131 pPatchRec->patch.pInstrGCLowest = pInstrGC;
4132 pPatchRec->patch.pInstrGCHighest = pInstrGC;
4133
4134 if (!(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION | PATMFL_IDTHANDLER | PATMFL_SYSENTER | PATMFL_TRAMPOLINE)))
4135 {
4136 /*
4137 * Close proximity to an unusable patch is a possible hint that this patch would turn out to be dangerous too!
4138 */
4139 PPATMPATCHREC pPatchNear = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, (pInstrGC + SIZEOF_NEARJUMP32 - 1), false);
4140 if (pPatchNear)
4141 {
4142 if (pPatchNear->patch.uState == PATCH_UNUSABLE && pInstrGC < pPatchNear->patch.pPrivInstrGC && pInstrGC + SIZEOF_NEARJUMP32 > pPatchNear->patch.pPrivInstrGC)
4143 {
4144 Log(("Dangerous patch; would overwrite the ususable patch at %RRv\n", pPatchNear->patch.pPrivInstrGC));
4145
4146 pPatchRec->patch.uState = PATCH_UNUSABLE;
4147 /*
4148 * Leave the new patch record in place, marked unusable, to prevent us from checking it over and over again
4149 */
4150 return VERR_PATCHING_REFUSED;
4151 }
4152 }
4153 }
4154
4155 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
4156 if (pPatchRec->patch.pTempInfo == 0)
4157 {
4158 Log(("Out of memory!!!!\n"));
4159 return VERR_NO_MEMORY;
4160 }
4161
4162 cpu.mode = pPatchRec->patch.uOpMode;
4163 disret = PATMR3DISInstr(pVM, &pPatchRec->patch, &cpu, pInstrGC, pInstrHC, &opsize, NULL);
4164 if (disret == false)
4165 {
4166 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
4167 return VERR_PATCHING_REFUSED;
4168 }
4169
4170 AssertMsg(opsize <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", opsize));
4171 if (opsize > MAX_INSTR_SIZE)
4172 {
4173 return VERR_PATCHING_REFUSED;
4174 }
4175
4176 pPatchRec->patch.cbPrivInstr = opsize;
4177 pPatchRec->patch.opcode = cpu.pCurInstr->opcode;
4178
4179 /* Restricted hinting for now. */
4180 Assert(!(flags & PATMFL_INSTR_HINT) || cpu.pCurInstr->opcode == OP_CLI);
4181
4182 /* Allocate statistics slot */
4183 if (pVM->patm.s.uCurrentPatchIdx < PATM_STAT_MAX_COUNTERS)
4184 {
4185 pPatchRec->patch.uPatchIdx = pVM->patm.s.uCurrentPatchIdx++;
4186 }
4187 else
4188 {
4189 Log(("WARNING: Patch index wrap around!!\n"));
4190 pPatchRec->patch.uPatchIdx = PATM_STAT_INDEX_DUMMY;
4191 }
4192
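 /* Dispatch on the patch type requested by the flags: trap trampoline, function duplication,
  * trampoline, call replacement, int3 replacement, MMIO access, IDT/sysenter handler,
  * guest specific, or a generic instruction patch handled in the final switch. */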
4193 if (pPatchRec->patch.flags & PATMFL_TRAPHANDLER)
4194 {
4195 rc = patmInstallTrapTrampoline(pVM, pInstrGC, pPatchRec);
4196 }
4197 else
4198 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION ))
4199 {
4200 rc = patmDuplicateFunction(pVM, pInstrGC, pPatchRec);
4201 }
4202 else
4203 if (pPatchRec->patch.flags & PATMFL_TRAMPOLINE)
4204 {
4205 rc = patmCreateTrampoline(pVM, pInstrGC, pPatchRec);
4206 }
4207 else
4208 if (pPatchRec->patch.flags & PATMFL_REPLACE_FUNCTION_CALL)
4209 {
4210 rc = patmReplaceFunctionCall(pVM, &cpu, pInstrGC, &pPatchRec->patch);
4211 }
4212 else
4213 if (pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT)
4214 {
4215 rc = PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4216 }
4217 else
4218 if (pPatchRec->patch.flags & PATMFL_MMIO_ACCESS)
4219 {
4220 rc = patmPatchMMIOInstr(pVM, pInstrGC, &cpu, &pPatchRec->patch);
4221 }
4222 else
4223 if (pPatchRec->patch.flags & (PATMFL_IDTHANDLER|PATMFL_SYSENTER))
4224 {
4225 if (pPatchRec->patch.flags & PATMFL_SYSENTER)
4226 pPatchRec->patch.flags |= PATMFL_IDTHANDLER; /* we treat a sysenter handler as an IDT handler */
4227
4228 rc = patmIdtHandler(pVM, pInstrGC, pInstrHC, opsize, pPatchRec);
4229#ifdef VBOX_WITH_STATISTICS
4230 if ( rc == VINF_SUCCESS
4231 && (pPatchRec->patch.flags & PATMFL_SYSENTER))
4232 {
4233 pVM->patm.s.uSysEnterPatchIdx = pPatchRec->patch.uPatchIdx;
4234 }
4235#endif
4236 }
4237 else
4238 if (pPatchRec->patch.flags & PATMFL_GUEST_SPECIFIC)
4239 {
4240 switch (cpu.pCurInstr->opcode)
4241 {
4242 case OP_SYSENTER:
4243 case OP_PUSH:
4244 rc = PATMInstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4245 if (rc == VINF_SUCCESS)
4246 {
4248 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4249 return rc;
4250 }
4251 break;
4252
4253 default:
4254 rc = VERR_NOT_IMPLEMENTED;
4255 break;
4256 }
4257 }
4258 else
4259 {
4260 switch (cpu.pCurInstr->opcode)
4261 {
4262 case OP_SYSENTER:
4263 rc = PATMInstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4264 if (rc == VINF_SUCCESS)
4265 {
4266 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4267 return VINF_SUCCESS;
4268 }
4269 break;
4270
4271#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
4272 case OP_JO:
4273 case OP_JNO:
4274 case OP_JC:
4275 case OP_JNC:
4276 case OP_JE:
4277 case OP_JNE:
4278 case OP_JBE:
4279 case OP_JNBE:
4280 case OP_JS:
4281 case OP_JNS:
4282 case OP_JP:
4283 case OP_JNP:
4284 case OP_JL:
4285 case OP_JNL:
4286 case OP_JLE:
4287 case OP_JNLE:
4288 case OP_JECXZ:
4289 case OP_LOOP:
4290 case OP_LOOPNE:
4291 case OP_LOOPE:
4292 case OP_JMP:
4293 if (pPatchRec->patch.flags & PATMFL_JUMP_CONFLICT)
4294 {
4295 rc = patmPatchJump(pVM, pInstrGC, pInstrHC, &cpu, pPatchRec);
4296 break;
4297 }
4298 return VERR_NOT_IMPLEMENTED;
4299#endif
4300
4301 case OP_PUSHF:
4302 case OP_CLI:
4303 Log(("PATMR3InstallPatch %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4304 rc = PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, cpu.pCurInstr->opcode, opsize, pPatchRec);
4305 break;
4306
4307 case OP_STR:
4308 case OP_SGDT:
4309 case OP_SLDT:
4310 case OP_SIDT:
4311 case OP_CPUID:
4312 case OP_LSL:
4313 case OP_LAR:
4314 case OP_SMSW:
4315 case OP_VERW:
4316 case OP_VERR:
4317 case OP_IRET:
4318 rc = PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4319 break;
4320
4321 default:
4322 return VERR_NOT_IMPLEMENTED;
4323 }
4324 }
4325
4326 if (rc != VINF_SUCCESS)
4327 {
4328 if (pPatchRec && pPatchRec->patch.nrPatch2GuestRecs)
4329 {
4330 patmEmptyTreeU32(pVM, &pPatchRec->patch.Patch2GuestAddrTree);
4331 pPatchRec->patch.nrPatch2GuestRecs = 0;
4332 }
4333 pVM->patm.s.uCurrentPatchIdx--;
4334 }
4335 else
4336 {
4337 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
4338 AssertRCReturn(rc, rc);
4339
4340 /* Keep track of the upper and lower boundaries of patched instructions. */
4341 if (pPatchRec->patch.pInstrGCLowest < pVM->patm.s.pPatchedInstrGCLowest)
4342 pVM->patm.s.pPatchedInstrGCLowest = pPatchRec->patch.pInstrGCLowest;
4343 if (pPatchRec->patch.pInstrGCHighest > pVM->patm.s.pPatchedInstrGCHighest)
4344 pVM->patm.s.pPatchedInstrGCHighest = pPatchRec->patch.pInstrGCHighest;
4345
4346 Log(("Patch lowest %RRv highest %RRv\n", pPatchRec->patch.pInstrGCLowest, pPatchRec->patch.pInstrGCHighest));
4347 Log(("Global lowest %RRv highest %RRv\n", pVM->patm.s.pPatchedInstrGCLowest, pVM->patm.s.pPatchedInstrGCHighest));
4348
4349 STAM_COUNTER_ADD(&pVM->patm.s.StatInstalled, 1);
4350 STAM_COUNTER_ADD(&pVM->patm.s.StatPATMMemoryUsed, pPatchRec->patch.cbPatchBlockSize);
4351
4352 rc = VINF_SUCCESS;
4353
4354 /* Patch hints are not enabled by default; only when they are actually encountered. */
4355 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4356 {
4357 rc = PATMR3DisablePatch(pVM, pInstrGC);
4358 AssertRCReturn(rc, rc);
4359 }
4360
4361#ifdef VBOX_WITH_STATISTICS
4362 /* Register statistics counter */
4363 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
4364 {
4365 STAMR3RegisterCallback(pVM, &pPatchRec->patch, STAMVISIBILITY_NOT_GUI, STAMUNIT_GOOD_BAD, patmResetStat, patmPrintStat, "Patch statistics",
4366 "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4367#ifndef DEBUG_sandervl
4368 /* Full breakdown for the GUI. */
4369 STAMR3RegisterF(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx], STAMTYPE_RATIO_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_GOOD_BAD, PATMPatchType(pVM, &pPatchRec->patch),
4370 "/PATM/Stats/PatchBD/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4371 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchBlockSize,STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchBlockSize", pPatchRec->patch.pPrivInstrGC);
4372 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchJump, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchJump", pPatchRec->patch.pPrivInstrGC);
4373 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPrivInstr, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPrivInstr", pPatchRec->patch.pPrivInstrGC);
4374 STAMR3RegisterF(pVM, &pPatchRec->patch.cCodeWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cCodeWrites", pPatchRec->patch.pPrivInstrGC);
4375 STAMR3RegisterF(pVM, &pPatchRec->patch.cInvalidWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cInvalidWrites", pPatchRec->patch.pPrivInstrGC);
4376 STAMR3RegisterF(pVM, &pPatchRec->patch.cTraps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cTraps", pPatchRec->patch.pPrivInstrGC);
4377 STAMR3RegisterF(pVM, &pPatchRec->patch.flags, STAMTYPE_X32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/flags", pPatchRec->patch.pPrivInstrGC);
4378 STAMR3RegisterF(pVM, &pPatchRec->patch.nrJumpRecs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrJumpRecs", pPatchRec->patch.pPrivInstrGC);
4379 STAMR3RegisterF(pVM, &pPatchRec->patch.nrFixups, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrFixups", pPatchRec->patch.pPrivInstrGC);
4380 STAMR3RegisterF(pVM, &pPatchRec->patch.opcode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/opcode", pPatchRec->patch.pPrivInstrGC);
4381 STAMR3RegisterF(pVM, &pPatchRec->patch.uOldState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOldState", pPatchRec->patch.pPrivInstrGC);
4382 STAMR3RegisterF(pVM, &pPatchRec->patch.uOpMode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOpMode", pPatchRec->patch.pPrivInstrGC);
4383 /// @todo change the state to be a callback so we can get a state mnemonic instead.
4384 STAMR3RegisterF(pVM, &pPatchRec->patch.uState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uState", pPatchRec->patch.pPrivInstrGC);
4385#endif
4386 }
4387#endif
4388 }
4389 return rc;
4390}
4391
4392/**
4393 * Query instruction size
4394 *
4395 * @returns VBox status code.
4396 * @param pVM The VM to operate on.
4397 * @param pPatch Patch record
4398 * @param pInstrGC Instruction address
4399 */
4400static uint32_t patmGetInstrSize(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
4401{
4402 uint8_t *pInstrHC;
4403
4404 int rc = PGMPhysGCPtr2HCPtr(pVM, pInstrGC, (RTHCPTR *)&pInstrHC);
4405 if (rc == VINF_SUCCESS)
4406 {
4407 DISCPUSTATE cpu;
4408 bool disret;
4409 uint32_t opsize;
4410
4411 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
4412 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, NULL, PATMREAD_ORGCODE | PATMREAD_NOCHECK);
4413 if (disret)
4414 return opsize;
4415 }
4416 return 0;
4417}
4418
4419/**
4420 * Add patch to page record
4421 *
4422 * @returns VBox status code.
4423 * @param pVM The VM to operate on.
4424 * @param pPage Page address
4425 * @param pPatch Patch record
4426 */
4427int patmAddPatchToPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4428{
4429 PPATMPATCHPAGE pPatchPage;
4430 int rc;
4431
4432 Log(("patmAddPatchToPage: insert patch %RHv to page %RRv\n", pPatch, pPage));
4433
4434 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4435 if (pPatchPage)
4436 {
4437 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4438 if (pPatchPage->cCount == pPatchPage->cMaxPatches)
4439 {
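 /* The per-page patch array is full; grow it by PATMPATCHPAGE_PREALLOC_INCREMENT entries
  * and copy the existing pointers over. */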
4440 uint32_t cMaxPatchesOld = pPatchPage->cMaxPatches;
4441 PPATCHINFO *paPatchOld = pPatchPage->aPatch;
4442
4443 pPatchPage->cMaxPatches += PATMPATCHPAGE_PREALLOC_INCREMENT;
4444 rc = MMHyperAlloc(pVM, sizeof(PPATCHINFO)*pPatchPage->cMaxPatches, 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage->aPatch);
4445 if (RT_FAILURE(rc))
4446 {
4447 Log(("Out of memory!!!!\n"));
4448 return VERR_NO_MEMORY;
4449 }
4450 memcpy(pPatchPage->aPatch, paPatchOld, cMaxPatchesOld*sizeof(PPATCHINFO));
4451 MMHyperFree(pVM, paPatchOld);
4452 }
4453 pPatchPage->aPatch[pPatchPage->cCount] = pPatch;
4454 pPatchPage->cCount++;
4455 }
4456 else
4457 {
4458 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHPAGE), 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage);
4459 if (RT_FAILURE(rc))
4460 {
4461 Log(("Out of memory!!!!\n"));
4462 return VERR_NO_MEMORY;
4463 }
4464 pPatchPage->Core.Key = pPage;
4465 pPatchPage->cCount = 1;
4466 pPatchPage->cMaxPatches = PATMPATCHPAGE_PREALLOC_INCREMENT;
4467
4468 rc = MMHyperAlloc(pVM, sizeof(PPATCHINFO)*PATMPATCHPAGE_PREALLOC_INCREMENT, 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage->aPatch);
4469 if (RT_FAILURE(rc))
4470 {
4471 Log(("Out of memory!!!!\n"));
4472 MMHyperFree(pVM, pPatchPage);
4473 return VERR_NO_MEMORY;
4474 }
4475 pPatchPage->aPatch[0] = pPatch;
4476
4477 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, &pPatchPage->Core);
4478 Assert(rc);
4479 pVM->patm.s.cPageRecords++;
4480
4481 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageInserted);
4482 }
4483 CSAMR3MonitorPage(pVM, pPage, CSAM_TAG_PATM);
4484
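 /* Track the lowest and highest patched guest addresses on this page; PATMR3PatchWrite uses
  * them for a quick range check before scanning individual patches. */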
4485 /* Get the closest guest instruction (from below) */
4486 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4487 Assert(pGuestToPatchRec);
4488 if (pGuestToPatchRec)
4489 {
4490 LogFlow(("patmAddPatchToPage: lowest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pLowestAddrGC));
4491 if ( pPatchPage->pLowestAddrGC == 0
4492 || pPatchPage->pLowestAddrGC > (RTRCPTR)pGuestToPatchRec->Core.Key)
4493 {
4494 RTRCUINTPTR offset;
4495
4496 pPatchPage->pLowestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4497
4498 offset = pPatchPage->pLowestAddrGC & PAGE_OFFSET_MASK;
4499 /* If we're too close to the page boundary, then make sure an instruction from the previous page doesn't cross the boundary itself. */
4500 if (offset && offset < MAX_INSTR_SIZE)
4501 {
4502 /* Get the closest guest instruction (from above) */
4503 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage-1, false);
4504
4505 if (pGuestToPatchRec)
4506 {
4507 uint32_t size = patmGetInstrSize(pVM, pPatch, (RTRCPTR)pGuestToPatchRec->Core.Key);
4508 if ((RTRCUINTPTR)pGuestToPatchRec->Core.Key + size > pPage)
4509 {
4510 pPatchPage->pLowestAddrGC = pPage;
4511 LogFlow(("patmAddPatchToPage: new lowest %RRv\n", pPatchPage->pLowestAddrGC));
4512 }
4513 }
4514 }
4515 }
4516 }
4517
4518 /* Get the closest guest instruction (from above) */
4519 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage+PAGE_SIZE-1, false);
4520 Assert(pGuestToPatchRec);
4521 if (pGuestToPatchRec)
4522 {
4523 LogFlow(("patmAddPatchToPage: highest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pHighestAddrGC));
4524 if ( pPatchPage->pHighestAddrGC == 0
4525 || pPatchPage->pHighestAddrGC <= (RTRCPTR)pGuestToPatchRec->Core.Key)
4526 {
4527 pPatchPage->pHighestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4528 /* Increase by instruction size. */
4529 uint32_t size = patmGetInstrSize(pVM, pPatch, pPatchPage->pHighestAddrGC);
4530//// Assert(size);
4531 pPatchPage->pHighestAddrGC += size;
4532 LogFlow(("patmAddPatchToPage: new highest %RRv\n", pPatchPage->pHighestAddrGC));
4533 }
4534 }
4535
4536 return VINF_SUCCESS;
4537}
4538
4539/**
4540 * Remove patch from page record
4541 *
4542 * @returns VBox status code.
4543 * @param pVM The VM to operate on.
4544 * @param pPage Page address
4545 * @param pPatch Patch record
4546 */
4547int patmRemovePatchFromPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4548{
4549 PPATMPATCHPAGE pPatchPage;
4550 int rc;
4551
4552 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4553 Assert(pPatchPage);
4554
4555 if (!pPatchPage)
4556 return VERR_INVALID_PARAMETER;
4557
4558 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4559
4560 Log(("patmRemovePatchPage: remove patch %RHv from page %RRv\n", pPatch, pPage));
4561 if (pPatchPage->cCount > 1)
4562 {
4563 uint32_t i;
4564
4565 /* Used by multiple patches */
4566 for (i=0;i<pPatchPage->cCount;i++)
4567 {
4568 if (pPatchPage->aPatch[i] == pPatch)
4569 {
4570 pPatchPage->aPatch[i] = 0;
4571 break;
4572 }
4573 }
4574 /* close the gap between the remaining pointers. */
4575 if (i < pPatchPage->cCount - 1)
4576 {
4577 memcpy(&pPatchPage->aPatch[i], &pPatchPage->aPatch[i+1], sizeof(PPATCHINFO)*(pPatchPage->cCount - (i+1)));
4578 }
4579 AssertMsg(i < pPatchPage->cCount, ("Unable to find patch %RHv in page %RRv\n", pPatch, pPage));
4580
4581 pPatchPage->cCount--;
4582 }
4583 else
4584 {
4585 PPATMPATCHPAGE pPatchNode;
4586
4587 Log(("patmRemovePatchFromPage %RRv\n", pPage));
4588
4589 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageRemoved);
4590 pPatchNode = (PPATMPATCHPAGE)RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4591 Assert(pPatchNode && pPatchNode == pPatchPage);
4592
4593 Assert(pPatchPage->aPatch);
4594 rc = MMHyperFree(pVM, pPatchPage->aPatch);
4595 AssertRC(rc);
4596 rc = MMHyperFree(pVM, pPatchPage);
4597 AssertRC(rc);
4598 pVM->patm.s.cPageRecords--;
4599 }
4600 return VINF_SUCCESS;
4601}
4602
4603/**
4604 * Insert page records for all guest pages that contain instructions that were recompiled for this patch
4605 *
4606 * @returns VBox status code.
4607 * @param pVM The VM to operate on.
4608 * @param pPatch Patch record
4609 */
4610int patmInsertPatchPages(PVM pVM, PPATCHINFO pPatch)
4611{
4612 int rc;
4613 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4614
4615 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4616 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4617 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4618
4619 /** @todo optimize better (large gaps between current and next used page) */
4620 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4621 {
4622 /* Get the closest guest instruction (from above) */
4623 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4624 if ( pGuestToPatchRec
4625 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage)
4626 )
4627 {
4628 /* Code in page really patched -> add record */
4629 rc = patmAddPatchToPage(pVM, pPage, pPatch);
4630 AssertRC(rc);
4631 }
4632 }
4633 pPatch->flags |= PATMFL_CODE_MONITORED;
4634 return VINF_SUCCESS;
4635}
4636
4637/**
4638 * Remove page records for all guest pages that contain instructions that were recompiled for this patch
4639 *
4640 * @returns VBox status code.
4641 * @param pVM The VM to operate on.
4642 * @param pPatch Patch record
4643 */
4644int patmRemovePatchPages(PVM pVM, PPATCHINFO pPatch)
4645{
4646 int rc;
4647 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4648
4649 /* Remove the pages that contain patched instructions from the lookup tree used for detecting self-modifying code. */
4650 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4651 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4652
4653 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4654 {
4655 /* Get the closest guest instruction (from above) */
4656 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4657 if ( pGuestToPatchRec
4658 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage) /** @todo bird: PAGE_ADDRESS is for the current context really. check out these. */
4659 )
4660 {
4661 /* Code in page really patched -> remove record */
4662 rc = patmRemovePatchFromPage(pVM, pPage, pPatch);
4663 AssertRC(rc);
4664 }
4665 }
4666 pPatch->flags &= ~PATMFL_CODE_MONITORED;
4667 return VINF_SUCCESS;
4668}
4669
4670/**
4671 * Notifies PATM about a (potential) write to code that has been patched.
4672 *
4673 * @returns VBox status code.
4674 * @param pVM The VM to operate on.
4675 * @param GCPtr GC pointer to write address
4676 * @param cbWrite Nr of bytes to write
4677 *
4678 */
4679VMMR3DECL(int) PATMR3PatchWrite(PVM pVM, RTRCPTR GCPtr, uint32_t cbWrite)
4680{
4681 RTRCUINTPTR pWritePageStart, pWritePageEnd, pPage;
4682
4683 Log(("PATMR3PatchWrite %RRv %x\n", GCPtr, cbWrite));
4684
4685 Assert(VM_IS_EMT(pVM));
4686
4687 /* Quick boundary check */
4688 if ( GCPtr < pVM->patm.s.pPatchedInstrGCLowest
4689 || GCPtr > pVM->patm.s.pPatchedInstrGCHighest
4690 )
4691 return VINF_SUCCESS;
4692
4693 STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWrite, a);
4694
4695 pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
4696 pWritePageEnd = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;
4697
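    /* The write may cross a page boundary, so walk every page it touches. */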
4698 for (pPage = pWritePageStart; pPage <= pWritePageEnd; pPage += PAGE_SIZE)
4699 {
4700loop_start:
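        /* Note: we jump back here (goto loop_start) whenever a patch is removed or marked dirty below, as that can delete or change pPatchPage. */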
4701 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4702 if (pPatchPage)
4703 {
4704 uint32_t i;
4705 bool fValidPatchWrite = false;
4706
4707 /* Quick check to see if the write is in the patched part of the page */
4708 if ( pPatchPage->pLowestAddrGC > (RTRCPTR)((RTRCUINTPTR)GCPtr + cbWrite - 1)
4709 || pPatchPage->pHighestAddrGC < GCPtr)
4710 {
4711 break;
4712 }
4713
4714 for (i=0;i<pPatchPage->cCount;i++)
4715 {
4716 if (pPatchPage->aPatch[i])
4717 {
4718 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4719 RTRCPTR pPatchInstrGC;
4720 //unused: bool fForceBreak = false;
4721
4722 Assert(pPatchPage->aPatch[i]->flags & PATMFL_CODE_MONITORED);
4723 /** @todo inefficient and includes redundant checks for multiple pages. */
4724 for (uint32_t j=0; j<cbWrite; j++)
4725 {
4726 RTRCPTR pGuestPtrGC = (RTRCPTR)((RTRCUINTPTR)GCPtr + j);
4727
4728 if ( pPatch->cbPatchJump
4729 && pGuestPtrGC >= pPatch->pPrivInstrGC
4730 && pGuestPtrGC < pPatch->pPrivInstrGC + pPatch->cbPatchJump)
4731 {
4732 /* The guest is about to overwrite the 5 byte jump to patch code. Remove the patch. */
4733 Log(("PATMR3PatchWrite: overwriting jump to patch code -> remove patch.\n"));
4734 int rc = PATMR3RemovePatch(pVM, pPatch->pPrivInstrGC);
4735 if (rc == VINF_SUCCESS)
4736 /** @note jump back to the start as the pPatchPage has been deleted or changed */
4737 goto loop_start;
4738
4739 continue;
4740 }
4741
4742 /* Find the closest instruction from below; the above quick check ensured that we are indeed in patched code */
4743 pPatchInstrGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4744 if (!pPatchInstrGC)
4745 {
4746 RTRCPTR pClosestInstrGC;
4747 uint32_t size;
4748
4749 pPatchInstrGC = patmGuestGCPtrToClosestPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4750 if (pPatchInstrGC)
4751 {
4752 pClosestInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchInstrGC);
4753 Assert(pClosestInstrGC <= pGuestPtrGC);
4754 size = patmGetInstrSize(pVM, pPatch, pClosestInstrGC);
4755 /* Check if this is not a write into a gap between two patches */
4756 if (pClosestInstrGC + size - 1 < pGuestPtrGC)
4757 pPatchInstrGC = 0;
4758 }
4759 }
4760 if (pPatchInstrGC)
4761 {
4762 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
4763
4764 fValidPatchWrite = true;
4765
4766 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
4767 Assert(pPatchToGuestRec);
4768 if (pPatchToGuestRec && !pPatchToGuestRec->fDirty)
4769 {
4770 Log(("PATMR3PatchWrite: Found patched instruction %RRv -> %RRv\n", pGuestPtrGC, pPatchInstrGC));
4771
4772 if (++pPatch->cCodeWrites > PATM_MAX_CODE_WRITES)
4773 {
4774 LogRel(("PATM: Disable block at %RRv - write %RRv-%RRv\n", pPatch->pPrivInstrGC, pGuestPtrGC, pGuestPtrGC+cbWrite));
4775
4776 PATMR3MarkDirtyPatch(pVM, pPatch);
4777
4778 /** @note jump back to the start as the pPatchPage has been deleted or changed */
4779 goto loop_start;
4780 }
4781 else
4782 {
4783 /* Replace the patch instruction with a breakpoint; when it's hit, then we'll attempt to recompile the instruction again. */
4784 uint8_t *pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pPatchInstrGC);
4785
4786 pPatchToGuestRec->u8DirtyOpcode = *pInstrHC;
4787 pPatchToGuestRec->fDirty = true;
4788
4789 *pInstrHC = 0xCC;
4790
4791 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirty);
4792 }
4793 }
4794 /* else already marked dirty */
4795 }
4796 }
4797 }
4798 } /* for each patch */
4799
4800 if (fValidPatchWrite == false)
4801 {
4802                /* Write to a part of the page that either:
4803                 * - doesn't contain any code (shared code/data); rather unlikely, or
4804                 * - belongs to an old code page that's no longer in active use.
4805                 */
4806invalid_write_loop_start:
4807 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4808
4809 if (pPatchPage)
4810 {
4811 for (i=0;i<pPatchPage->cCount;i++)
4812 {
4813 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4814
4815 if (pPatch->cInvalidWrites > PATM_MAX_INVALID_WRITES)
4816 {
4817 /** @note possibly dangerous assumption that all future writes will be harmless. */
4818 if (pPatch->flags & PATMFL_IDTHANDLER)
4819 {
4820 LogRel(("PATM: Stop monitoring IDT handler pages at %RRv - invalid write %RRv-%RRv (this is not a fatal error)\n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
4821
4822 Assert(pPatch->flags & PATMFL_CODE_MONITORED);
4823 int rc = patmRemovePatchPages(pVM, pPatch);
4824 AssertRC(rc);
4825 }
4826 else
4827 {
4828 LogRel(("PATM: Disable block at %RRv - invalid write %RRv-%RRv \n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
4829 PATMR3MarkDirtyPatch(pVM, pPatch);
4830 }
4831 /** @note jump back to the start as the pPatchPage has been deleted or changed */
4832 goto invalid_write_loop_start;
4833 }
4834 } /* for */
4835 }
4836 }
4837 }
4838 }
4839 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWrite, a);
4840 return VINF_SUCCESS;
4841
4842}
4843
4844/**
4845 * Disable all patches in a flushed page
4846 *
4847 * @returns VBox status code
4848 * @param pVM The VM to operate on.
4849 * @param addr GC address of the page to flush
4850 */
4851/** @note Currently only called by CSAMR3FlushPage; optimization to avoid having to double check if the physical address has changed
4852 */
4853VMMR3DECL(int) PATMR3FlushPage(PVM pVM, RTRCPTR addr)
4854{
4855 addr &= PAGE_BASE_GC_MASK;
4856
4857 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, addr);
4858 if (pPatchPage)
4859 {
4860 int i;
4861
4862 /* From top to bottom as the array is modified by PATMR3MarkDirtyPatch. */
4863 for (i=(int)pPatchPage->cCount-1;i>=0;i--)
4864 {
4865 if (pPatchPage->aPatch[i])
4866 {
4867 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4868
4869 Log(("PATMR3FlushPage %RRv remove patch at %RRv\n", addr, pPatch->pPrivInstrGC));
4870 PATMR3MarkDirtyPatch(pVM, pPatch);
4871 }
4872 }
4873 STAM_COUNTER_INC(&pVM->patm.s.StatFlushed);
4874 }
4875 return VINF_SUCCESS;
4876}
4877
4878/**
4879 * Checks if the instruction at the specified address has already been patched.
4880 *
4881 * @returns boolean, patched or not
4882 * @param pVM The VM to operate on.
4883 * @param pInstrGC Guest context pointer to instruction
4884 */
4885VMMR3DECL(bool) PATMR3HasBeenPatched(PVM pVM, RTRCPTR pInstrGC)
4886{
4887 PPATMPATCHREC pPatchRec;
4888 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4889 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
4890 return true;
4891 return false;
4892}
4893
4894/**
4895 * Query the opcode of the original code that was overwritten by the 5 byte patch jump
4896 *
4897 * @returns VBox status code.
4898 * @param pVM The VM to operate on.
4899 * @param pInstrGC GC address of instr
4900 * @param pByte opcode byte pointer (OUT)
4901 *
4902 */
4903VMMR3DECL(int) PATMR3QueryOpcode(PVM pVM, RTRCPTR pInstrGC, uint8_t *pByte)
4904{
4905 PPATMPATCHREC pPatchRec;
4906
4907 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
4908
4909 /* Shortcut. */
4910 if ( !PATMIsEnabled(pVM)
4911 || pInstrGC < pVM->patm.s.pPatchedInstrGCLowest
4912 || pInstrGC > pVM->patm.s.pPatchedInstrGCHighest)
4913 {
4914 return VERR_PATCH_NOT_FOUND;
4915 }
4916
4917 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
4918 // if the patch is enabled and the pointer lies within 5 bytes of this priv instr ptr, then we've got a hit!
4919 if ( pPatchRec
4920 && pPatchRec->patch.uState == PATCH_ENABLED
4921 && pInstrGC >= pPatchRec->patch.pPrivInstrGC
4922 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
4923 {
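        /* aPrivInstr holds the original guest bytes saved before the patch jump was written, so we can return the pre-patch opcode byte. */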
4924 RTRCPTR offset = pInstrGC - pPatchRec->patch.pPrivInstrGC;
4925 *pByte = pPatchRec->patch.aPrivInstr[offset];
4926
4927 if (pPatchRec->patch.cbPatchJump == 1)
4928 {
4929 Log(("PATMR3QueryOpcode: returning opcode %2X for instruction at %RRv\n", *pByte, pInstrGC));
4930 }
4931 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
4932 return VINF_SUCCESS;
4933 }
4934 return VERR_PATCH_NOT_FOUND;
4935}
4936
4937/**
4938 * Disable patch for privileged instruction at specified location
4939 *
4940 * @returns VBox status code.
4941 * @param pVM The VM to operate on.
4942 * @param pInstrGC Guest context pointer to the privileged instruction
4943 *
4944 * @note returns failure if patching is not allowed or possible
4945 *
4946 */
4947VMMR3DECL(int) PATMR3DisablePatch(PVM pVM, RTRCPTR pInstrGC)
4948{
4949 PPATMPATCHREC pPatchRec;
4950 PPATCHINFO pPatch;
4951
4952 Log(("PATMR3DisablePatch: %RRv\n", pInstrGC));
4953 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4954 if (pPatchRec)
4955 {
4956 int rc = VINF_SUCCESS;
4957
4958 pPatch = &pPatchRec->patch;
4959
4960 /* Already disabled? */
4961 if (pPatch->uState == PATCH_DISABLED)
4962 return VINF_SUCCESS;
4963
4964 /* Clear the IDT entries for the patch we're disabling. */
4965 /** @note very important as we clear IF in the patch itself */
4966 /** @todo this needs to be changed */
4967 if (pPatch->flags & PATMFL_IDTHANDLER)
4968 {
4969 uint32_t iGate;
4970
4971 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
4972 if (iGate != (uint32_t)~0)
4973 {
4974 TRPMR3SetGuestTrapHandler(pVM, iGate, TRPM_INVALID_HANDLER);
4975 if (++cIDTHandlersDisabled < 256)
4976 LogRel(("PATM: Disabling IDT %x patch handler %RRv\n", iGate, pInstrGC));
4977 }
4978 }
4979
4980    /* Mark the entry with a breakpoint in case somebody else calls it later on (cli patch used as a function, duplicated function, trampoline or IDT patches) */
4981 if ( pPatch->pPatchBlockOffset
4982 && pPatch->uState == PATCH_ENABLED)
4983 {
4984 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
4985 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
4986 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
4987 }
4988
4989 /* IDT or function patches haven't changed any guest code. */
4990 if (pPatch->flags & PATMFL_PATCHED_GUEST_CODE)
4991 {
4992 Assert(pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP);
4993 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK)));
4994
4995 if (pPatch->uState != PATCH_REFUSED)
4996 {
4997 AssertMsg(pPatch->pPrivInstrHC, ("Invalid HC pointer?!? (%RRv)\n", pInstrGC));
4998 Assert(pPatch->cbPatchJump);
4999
5000 /** pPrivInstrHC is probably not valid anymore */
5001 rc = PGMPhysGCPtr2HCPtr(pVM, pPatchRec->patch.pPrivInstrGC, (PRTHCPTR)&pPatchRec->patch.pPrivInstrHC);
5002 if (rc == VINF_SUCCESS)
5003 {
5004 uint8_t temp[16];
5005
5006 Assert(pPatch->cbPatchJump < sizeof(temp));
5007
5008 /* Let's first check if the guest code is still the same. */
5009 rc = PGMPhysSimpleReadGCPtr(pVM, temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5010 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5011 if (rc == VINF_SUCCESS)
5012 {
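                        /* Recompute the rel32 displacement of the jmp we installed: target is the patch code, origin is the byte following the 5 byte jump. */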
5013 RTRCINTPTR displ = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);
5014
5015 if ( temp[0] != 0xE9 /* jmp opcode */
5016 || *(RTRCINTPTR *)(&temp[1]) != displ
5017 )
5018 {
5019                    Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5020 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5021 /* Remove it completely */
5022 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5023 rc = PATMR3RemovePatch(pVM, pInstrGC);
5024 AssertRC(rc);
5025 return VWRN_PATCH_REMOVED;
5026 }
5027 }
5028 patmRemoveJumpToPatch(pVM, pPatch);
5029
5030 }
5031 else
5032 {
5033 Log(("PATMR3DisablePatch: unable to disable patch -> mark PATCH_DISABLE_PENDING\n"));
5034 pPatch->uState = PATCH_DISABLE_PENDING;
5035 }
5036 }
5037 else
5038 {
5039 AssertMsgFailed(("Patch was refused!\n"));
5040 return VERR_PATCH_ALREADY_DISABLED;
5041 }
5042 }
5043 else
5044 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5045 {
5046 uint8_t temp[16];
5047
5048 Assert(pPatch->cbPatchJump < sizeof(temp));
5049
5050 /* Let's first check if the guest code is still the same. */
5051 rc = PGMPhysSimpleReadGCPtr(pVM, temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5052 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5053 if (rc == VINF_SUCCESS)
5054 {
5055 if (temp[0] != 0xCC)
5056 {
5057                    Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5058 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5059 /* Remove it completely */
5060 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5061 rc = PATMR3RemovePatch(pVM, pInstrGC);
5062 AssertRC(rc);
5063 return VWRN_PATCH_REMOVED;
5064 }
5065 patmDeactivateInt3Patch(pVM, pPatch);
5066 }
5067 }
5068
5069 if (rc == VINF_SUCCESS)
5070 {
5071 /* Save old state and mark this one as disabled (so it can be enabled later on). */
5072 if (pPatch->uState == PATCH_DISABLE_PENDING)
5073 {
5074 /* Just to be safe, let's make sure this one can never be reused; the patch might be marked dirty already (int3 at start) */
5075 pPatch->uState = PATCH_UNUSABLE;
5076 }
5077 else
5078 if (pPatch->uState != PATCH_DIRTY)
5079 {
5080 pPatch->uOldState = pPatch->uState;
5081 pPatch->uState = PATCH_DISABLED;
5082 }
5083 STAM_COUNTER_ADD(&pVM->patm.s.StatDisabled, 1);
5084 }
5085
5086 Log(("PATMR3DisablePatch: disabled patch at %RRv\n", pInstrGC));
5087 return VINF_SUCCESS;
5088 }
5089 Log(("Patch not found!\n"));
5090 return VERR_PATCH_NOT_FOUND;
5091}
5092
5093/**
5094 * Permanently disable patch for privileged instruction at specified location
5095 *
5096 * @returns VBox status code.
5097 * @param pVM The VM to operate on.
5098 * @param pInstrGC Guest context pointer to the privileged instruction
5099 * @param pConflictAddr Guest context pointer which conflicts with specified patch
5100 * @param pConflictPatch Conflicting patch
5101 *
5102 */
5103static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pConflictPatch)
5104{
5105#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
5106 PATCHINFO patch = {0};
5107 DISCPUSTATE cpu;
5108 R3PTRTYPE(uint8_t *) pInstrHC;
5109 uint32_t opsize;
5110 bool disret;
5111 int rc;
5112
5113 pInstrHC = PATMGCVirtToHCVirt(pVM, &patch, pInstrGC);
5114 cpu.mode = (pConflictPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
5115 disret = PATMR3DISInstr(pVM, &patch, &cpu, pInstrGC, pInstrHC, &opsize, NULL);
5116 /*
5117 * If it's a 5 byte relative jump, then we can work around the problem by replacing the 32-bit relative offset
5118 * with one that jumps right into the conflict patch.
5119 * Otherwise we must disable the conflicting patch to avoid serious problems.
5120 */
5121 if ( disret == true
5122 && (pConflictPatch->flags & PATMFL_CODE32)
5123 && (cpu.pCurInstr->opcode == OP_JMP || (cpu.pCurInstr->optype & OPTYPE_COND_CONTROLFLOW))
5124 && (cpu.param1.flags & USE_IMMEDIATE32_REL))
5125 {
5126 /* Hint patches must be enabled first. */
5127 if (pConflictPatch->flags & PATMFL_INSTR_HINT)
5128 {
5129 Log(("Enabling HINTED patch %RRv\n", pConflictPatch->pPrivInstrGC));
5130 pConflictPatch->flags &= ~PATMFL_INSTR_HINT;
5131 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5132 Assert(rc == VINF_SUCCESS || rc == VERR_PATCH_NOT_FOUND);
5133 /* Enabling might fail if the patched code has changed in the meantime. */
5134 if (rc != VINF_SUCCESS)
5135 return rc;
5136 }
5137
5138 rc = PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_JUMP_CONFLICT);
5139 if (RT_SUCCESS(rc))
5140 {
5141 Log(("PATM -> CONFLICT: Installed JMP patch for patch conflict at %RRv\n", pInstrGC));
5142 STAM_COUNTER_INC(&pVM->patm.s.StatFixedConflicts);
5143 return VINF_SUCCESS;
5144 }
5145 }
5146#endif
5147
5148 if (pConflictPatch->opcode == OP_CLI)
5149 {
5150 /* Turn it into an int3 patch; our GC trap handler will call the generated code manually. */
5151 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> turn into int 3 patch!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5152 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5153 if (rc == VWRN_PATCH_REMOVED)
5154 return VINF_SUCCESS;
5155 if (RT_SUCCESS(rc))
5156 {
5157 pConflictPatch->flags &= ~(PATMFL_MUST_INSTALL_PATCHJMP|PATMFL_INSTR_HINT);
5158 pConflictPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
5159 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5160 if (rc == VERR_PATCH_NOT_FOUND)
5161 return VINF_SUCCESS; /* removed already */
5162
5163 AssertRC(rc);
5164 if (RT_SUCCESS(rc))
5165 {
5166 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
5167 return VINF_SUCCESS;
5168 }
5169 }
5170 /* else turned into unusable patch (see below) */
5171 }
5172 else
5173 {
5174 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> DISABLING it!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5175 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5176 if (rc == VWRN_PATCH_REMOVED)
5177 return VINF_SUCCESS;
5178 }
5179
5180 /* No need to monitor the code anymore. */
5181 if (pConflictPatch->flags & PATMFL_CODE_MONITORED)
5182 {
5183 int rc = patmRemovePatchPages(pVM, pConflictPatch);
5184 AssertRC(rc);
5185 }
5186 pConflictPatch->uState = PATCH_UNUSABLE;
5187 STAM_COUNTER_INC(&pVM->patm.s.StatUnusable);
5188 return VERR_PATCH_DISABLED;
5189}
5190
5191/**
5192 * Enable patch for privileged instruction at specified location
5193 *
5194 * @returns VBox status code.
5195 * @param pVM The VM to operate on.
5196 * @param pInstrGC Guest context pointer to the privileged instruction
5197 *
5198 * @note returns failure if patching is not allowed or possible
5199 *
5200 */
5201VMMR3DECL(int) PATMR3EnablePatch(PVM pVM, RTRCPTR pInstrGC)
5202{
5203 PPATMPATCHREC pPatchRec;
5204 PPATCHINFO pPatch;
5205
5206 Log(("PATMR3EnablePatch %RRv\n", pInstrGC));
5207 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5208 if (pPatchRec)
5209 {
5210 int rc = VINF_SUCCESS;
5211
5212 pPatch = &pPatchRec->patch;
5213
5214 if (pPatch->uState == PATCH_DISABLED)
5215 {
5216 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
5217 {
5218 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
5219 /** @todo -> pPrivInstrHC is probably not valid anymore */
5220 rc = PGMPhysGCPtr2HCPtr(pVM, pPatchRec->patch.pPrivInstrGC, (PRTHCPTR)&pPatchRec->patch.pPrivInstrHC);
5221 if (rc == VINF_SUCCESS)
5222 {
5223#ifdef DEBUG
5224 DISCPUSTATE cpu;
5225 char szOutput[256];
5226 uint32_t opsize, i = 0;
5227#endif
5228 uint8_t temp[16];
5229
5230 Assert(pPatch->cbPatchJump < sizeof(temp));
5231
5232 // let's first check if the guest code is still the same
5233 int rc = PGMPhysSimpleReadGCPtr(pVM, temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5234 AssertRC(rc);
5235
5236 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5237 {
5238                        Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5239 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5240 /* Remove it completely */
5241 rc = PATMR3RemovePatch(pVM, pInstrGC);
5242 AssertRC(rc);
5243 return VERR_PATCH_NOT_FOUND;
5244 }
5245
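                    /* Write the jump to the patch code back into the guest instruction (the opposite of the patmRemoveJumpToPatch done on disable). */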
5246 rc = patmGenJumpToPatch(pVM, pPatch, false);
5247 AssertRC(rc);
5248 if (RT_FAILURE(rc))
5249 return rc;
5250
5251#ifdef DEBUG
5252 bool disret;
5253 i = 0;
5254 while(i < pPatch->cbPatchJump)
5255 {
5256 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
5257 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, &pPatch->pPrivInstrHC[i], &opsize, szOutput);
5258 Log(("Renewed patch instr: %s", szOutput));
5259 i += opsize;
5260 }
5261#endif
5262 }
5263 }
5264 else
5265 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5266 {
5267 uint8_t temp[16];
5268
5269 Assert(pPatch->cbPatchJump < sizeof(temp));
5270
5271 /* Let's first check if the guest code is still the same. */
5272 int rc = PGMPhysSimpleReadGCPtr(pVM, temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5273 AssertRC(rc);
5274
5275 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5276 {
5277                    Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5278 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5279 rc = PATMR3RemovePatch(pVM, pInstrGC);
5280 AssertRC(rc);
5281 return VERR_PATCH_NOT_FOUND;
5282 }
5283
5284 rc = patmActivateInt3Patch(pVM, pPatch);
5285 if (RT_FAILURE(rc))
5286 return rc;
5287 }
5288
5289 pPatch->uState = pPatch->uOldState; //restore state
5290
5291 /* Restore the entry breakpoint with the original opcode (see PATMR3DisablePatch). */
5292 if (pPatch->pPatchBlockOffset)
5293 {
5294 *PATCHCODE_PTR_HC(pPatch) = pPatch->bDirtyOpcode;
5295 }
5296
5297 STAM_COUNTER_ADD(&pVM->patm.s.StatEnabled, 1);
5298 }
5299 else
5300 Log(("PATMR3EnablePatch: Unable to enable patch %RRv with state %d\n", pInstrGC, pPatch->uState));
5301
5302 return rc;
5303 }
5304 return VERR_PATCH_NOT_FOUND;
5305}
5306
5307/**
5308 * Remove patch for privileged instruction at specified location
5309 *
5310 * @returns VBox status code.
5311 * @param pVM The VM to operate on.
5312 * @param pPatchRec Patch record
5313 * @param fForceRemove Remove *all* patches
5314 */
5315int PATMRemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove)
5316{
5317 PPATCHINFO pPatch;
5318
5319 pPatch = &pPatchRec->patch;
5320
5321 /* Strictly forbidden to remove such patches. There can be dependencies!! */
5322 if (!fForceRemove && (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED)))
5323 {
5324 Log(("PATMRemovePatch %RRv REFUSED!\n", pPatch->pPrivInstrGC));
5325 return VERR_ACCESS_DENIED;
5326 }
5327 Log(("PATMRemovePatch %RRv\n", pPatch->pPrivInstrGC));
5328
5329 /** @note NEVER EVER REUSE PATCH MEMORY */
5330 /** @note PATMR3DisablePatch put a breakpoint (0xCC) at the entry of this patch */
5331
5332 if (pPatchRec->patch.pPatchBlockOffset)
5333 {
5334 PAVLOU32NODECORE pNode;
5335
5336 pNode = RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->patch.pPatchBlockOffset);
5337 Assert(pNode);
5338 }
5339
5340 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
5341 {
5342 int rc = patmRemovePatchPages(pVM, &pPatchRec->patch);
5343 AssertRC(rc);
5344 }
5345
5346#ifdef VBOX_WITH_STATISTICS
5347 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5348 {
5349 STAMR3Deregister(pVM, &pPatchRec->patch);
5350#ifndef DEBUG_sandervl
5351 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5352 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5353 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5354 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5355 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5356 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5357 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5358 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5359 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5360 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5361 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5362 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5363 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5364 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5365#endif
5366 }
5367#endif
5368
5369 /** @note no need to free Guest2PatchAddrTree as those records share memory with Patch2GuestAddrTree records. */
5370 patmEmptyTreeU32(pVM, &pPatch->Patch2GuestAddrTree);
5371 pPatch->nrPatch2GuestRecs = 0;
5372 Assert(pPatch->Patch2GuestAddrTree == 0);
5373
5374 patmEmptyTree(pVM, &pPatch->FixupTree);
5375 pPatch->nrFixups = 0;
5376 Assert(pPatch->FixupTree == 0);
5377
5378 if (pPatchRec->patch.pTempInfo)
5379 MMR3HeapFree(pPatchRec->patch.pTempInfo);
5380
5381 /** @note might fail, because it has already been removed (e.g. during reset). */
5382 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5383
5384 /* Free the patch record */
5385 MMHyperFree(pVM, pPatchRec);
5386 return VINF_SUCCESS;
5387}
5388
5389/**
5390 * Attempt to refresh the patch by recompiling its entire code block
5391 *
5392 * @returns VBox status code.
5393 * @param pVM The VM to operate on.
5394 * @param pPatchRec Patch record
5395 */
5396int patmR3RefreshPatch(PVM pVM, PPATMPATCHREC pPatchRec)
5397{
5398 PPATCHINFO pPatch;
5399 int rc;
5400 RTRCPTR pInstrGC = pPatchRec->patch.pPrivInstrGC;
5401
5402 Log(("patmR3RefreshPatch: attempt to refresh patch at %RRv\n", pInstrGC));
5403
5404 pPatch = &pPatchRec->patch;
5405 AssertReturn(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER), VERR_PATCHING_REFUSED);
5406 if (pPatch->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
5407 {
5408 Log(("patmR3RefreshPatch: refused because external jumps to this patch exist\n"));
5409 return VERR_PATCHING_REFUSED;
5410 }
5411
5412 /** Note: quite ugly to enable/disable/remove/insert old and new patches, but there's no easy way around it. */
5413
5414 rc = PATMR3DisablePatch(pVM, pInstrGC);
5415 AssertRC(rc);
5416
5417 /** Kick it out of the lookup tree to make sure PATMR3InstallPatch doesn't fail (hack alert) */
5418 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5419#ifdef VBOX_WITH_STATISTICS
5420 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5421 {
5422 STAMR3Deregister(pVM, &pPatchRec->patch);
5423#ifndef DEBUG_sandervl
5424 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5425 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5426 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5427 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5428 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5429 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5430 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5431 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5432 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5433 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5434 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5435 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5436 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5437 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5438#endif
5439 }
5440#endif
5441
5442 /** Note: We don't attempt to reuse patch memory here as it's quite common that the new code block requires more memory. */
5443
5444 /* Attempt to install a new patch. */
5445 rc = PATMR3InstallPatch(pVM, pInstrGC, pPatch->flags & (PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT));
5446 if (RT_SUCCESS(rc))
5447 {
5448 RTRCPTR pPatchTargetGC;
5449 PPATMPATCHREC pNewPatchRec;
5450
5451 /* Determine target address in new patch */
5452 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pInstrGC);
5453 Assert(pPatchTargetGC);
5454 if (!pPatchTargetGC)
5455 {
5456 rc = VERR_PATCHING_REFUSED;
5457 goto failure;
5458 }
5459
5460 /* Reset offset into patch memory to put the next code blocks right at the beginning. */
5461 pPatch->uCurPatchOffset = 0;
5462
5463 /* insert jump to new patch in old patch block */
5464 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC, false /* no lookup record */);
5465 if (RT_FAILURE(rc))
5466 goto failure;
5467
5468 pNewPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5469 Assert(pNewPatchRec); /* can't fail */
5470
5471 /* Remove old patch (only do that when everything is finished) */
5472 int rc2 = PATMRemovePatch(pVM, pPatchRec, true /* force removal */);
5473 AssertRC(rc2);
5474
5475 /* Put the new patch back into the tree, because removing the old one kicked this one out. (hack alert) */
5476 RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pNewPatchRec->Core);
5477
5478 LogRel(("PATM: patmR3RefreshPatch: succeeded to refresh patch at %RRv \n", pInstrGC));
5479 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshSuccess);
5480
5481 /* Used by another patch, so don't remove it! */
5482 pNewPatchRec->patch.flags |= PATMFL_CODE_REFERENCED;
5483 }
5484
5485failure:
5486 if (RT_FAILURE(rc))
5487 {
5488        LogRel(("PATM: patmR3RefreshPatch: failed to refresh patch at %RRv. Reactivating the old one.\n", pInstrGC));
5489
5490 /* Remove the new inactive patch */
5491 rc = PATMR3RemovePatch(pVM, pInstrGC);
5492 AssertRC(rc);
5493
5494 /* Put the old patch back into the tree (or else it won't be saved) (hack alert) */
5495 RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
5496
5497 /* Enable again in case the dirty instruction is near the end and there are safe code paths. */
5498 int rc2 = PATMR3EnablePatch(pVM, pInstrGC);
5499 AssertRC(rc2);
5500
5501 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshFailed);
5502 }
5503 return rc;
5504}
5505
5506/**
5507 * Find patch for privileged instruction at specified location
5508 *
5509 * @returns Patch structure pointer if found; else NULL
5510 * @param pVM The VM to operate on.
5511 * @param pInstrGC Guest context pointer to an instruction that might lie within 5 bytes of an existing patch jump
5512 * @param fIncludeHints Include hinted patches or not
5513 *
5514 */
5515PPATCHINFO PATMFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncludeHints)
5516{
5517 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5518    /* if the patch is enabled, the pointer is not identical to the privileged patch ptr and it lies within 5 bytes of this priv instr ptr, then we've got a hit! */
5519 if (pPatchRec)
5520 {
5521 if ( pPatchRec->patch.uState == PATCH_ENABLED
5522 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE)
5523 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5524 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5525 {
5526 Log(("Found active patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5527 return &pPatchRec->patch;
5528 }
5529 else
5530 if ( fIncludeHints
5531 && pPatchRec->patch.uState == PATCH_DISABLED
5532 && (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
5533 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5534 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5535 {
5536 Log(("Found HINT patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5537 return &pPatchRec->patch;
5538 }
5539 }
5540 return NULL;
5541}
5542
5543/**
5544 * Checks whether the GC address is inside a generated patch jump
5545 *
5546 * @returns true -> yes, false -> no
5547 * @param pVM The VM to operate on.
5548 * @param pAddr Guest context address
5549 * @param pPatchAddr Guest context patch address (if true)
5550 */
5551VMMR3DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatchAddr)
5552{
5553 RTRCPTR addr;
5554 PPATCHINFO pPatch;
5555
5556 if (PATMIsEnabled(pVM) == false)
5557 return false;
5558
5559 if (pPatchAddr == NULL)
5560 pPatchAddr = &addr;
5561
5562 *pPatchAddr = 0;
5563
5564 pPatch = PATMFindActivePatchByEntrypoint(pVM, pAddr);
5565 if (pPatch)
5566 {
5567 *pPatchAddr = pPatch->pPrivInstrGC;
5568 }
5569 return *pPatchAddr == 0 ? false : true;
5570}
5571
5572/**
5573 * Remove patch for privileged instruction at specified location
5574 *
5575 * @returns VBox status code.
5576 * @param pVM The VM to operate on.
5577 * @param pInstrGC Guest context pointer to the privileged instruction
5578 *
5579 * @note returns failure if patching is not allowed or possible
5580 *
5581 */
5582VMMR3DECL(int) PATMR3RemovePatch(PVM pVM, RTRCPTR pInstrGC)
5583{
5584 PPATMPATCHREC pPatchRec;
5585
5586 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5587 if (pPatchRec)
5588 {
5589 int rc = PATMR3DisablePatch(pVM, pInstrGC);
5590 if (rc == VWRN_PATCH_REMOVED)
5591 return VINF_SUCCESS;
5592 return PATMRemovePatch(pVM, pPatchRec, false);
5593 }
5594 AssertFailed();
5595 return VERR_PATCH_NOT_FOUND;
5596}
5597
5598/**
5599 * Mark patch as dirty
5600 *
5601 * @returns VBox status code.
5602 * @param pVM The VM to operate on.
5603 * @param pPatch Patch record
5604 *
5605 * @note returns failure if patching is not allowed or possible
5606 *
5607 */
5608VMMR3DECL(int) PATMR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch)
5609{
5610 if (pPatch->pPatchBlockOffset)
5611 {
5612 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5613 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5614 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5615 }
5616
5617 STAM_COUNTER_INC(&pVM->patm.s.StatDirty);
5618 /* Put back the replaced instruction. */
5619 int rc = PATMR3DisablePatch(pVM, pPatch->pPrivInstrGC);
5620 if (rc == VWRN_PATCH_REMOVED)
5621 return VINF_SUCCESS;
5622
5623 /** @note we don't restore patch pages for patches that are not enabled! */
5624 /** @note be careful when changing this behaviour!! */
5625
5626 /* The patch pages are no longer marked for self-modifying code detection */
5627 if (pPatch->flags & PATMFL_CODE_MONITORED)
5628 {
5629 int rc = patmRemovePatchPages(pVM, pPatch);
5630 AssertRCReturn(rc, rc);
5631 }
5632 pPatch->uState = PATCH_DIRTY;
5633
5634    /* Paranoia; in case this patch is still somewhere in the call chain, reset the PATM stack so pending ret instructions won't succeed. */
5635 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
5636
5637 return VINF_SUCCESS;
5638}
5639
5640/**
5641 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5642 *
5643 * @returns Corresponding guest context instruction pointer, or 0 if not found.
5644 * @param pVM The VM to operate on.
5645 * @param pPatch Patch block structure pointer
5646 * @param pPatchGC GC address in patch block
5647 */
5648RTRCPTR patmPatchGCPtr2GuestGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pPatchGC)
5649{
5650 Assert(pPatch->Patch2GuestAddrTree);
5651 /* Get the closest record from below. */
5652 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->Patch2GuestAddrTree, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5653 if (pPatchToGuestRec)
5654 return pPatchToGuestRec->pOrgInstrGC;
5655
5656 return 0;
5657}
5658
5659/** Converts Guest code GC ptr to Patch code GC ptr (if found)
5660 *
5661 * @returns corresponding GC pointer in patch block
5662 * @param pVM The VM to operate on.
5663 * @param pPatch Current patch block pointer
5664 * @param pInstrGC Guest context pointer to privileged instruction
5665 *
5666 */
5667RTRCPTR patmGuestGCPtrToPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5668{
5669 if (pPatch->Guest2PatchAddrTree)
5670 {
5671 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
5672 if (pGuestToPatchRec)
5673 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
5674 }
5675
5676 return 0;
5677}
5678
5679/** Converts Guest code GC ptr to Patch code GC ptr (or nearest from below if no identical match)
5680 *
5681 * @returns corresponding GC pointer in patch block
5682 * @param pVM The VM to operate on.
5683 * @param pPatch Current patch block pointer
5684 * @param pInstrGC Guest context pointer to privileged instruction
5685 *
5686 */
5687RTRCPTR patmGuestGCPtrToClosestPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5688{
5689 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pInstrGC, false);
5690 if (pGuestToPatchRec)
5691 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
5692
5693 return 0;
5694}
5695
5696/** Converts Guest code GC ptr to Patch code GC ptr (if found)
5697 *
5698 * @returns corresponding GC pointer in patch block
5699 * @param pVM The VM to operate on.
5700 * @param pInstrGC Guest context pointer to privileged instruction
5701 *
5702 */
5703VMMR3DECL(RTRCPTR) PATMR3GuestGCPtrToPatchGCPtr(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC)
5704{
5705 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5706 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && pInstrGC >= pPatchRec->patch.pPrivInstrGC)
5707 {
5708 return patmGuestGCPtrToPatchGCPtr(pVM, &pPatchRec->patch, pInstrGC);
5709 }
5710 return 0;
5711}
5712
5713/**
5714 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5715 *
5716 * @returns original GC instruction pointer or 0 if not found
5717 * @param pVM The VM to operate on.
5718 * @param pPatchGC GC address in patch block
5719 * @param pEnmState State of the translated address (out)
5720 *
5721 */
5722VMMR3DECL(RTRCPTR) PATMR3PatchToGCPtr(PVM pVM, RTRCPTR pPatchGC, PATMTRANSSTATE *pEnmState)
5723{
5724 PPATMPATCHREC pPatchRec;
5725 void *pvPatchCoreOffset;
5726 RTRCPTR pPrivInstrGC;
5727
5728 Assert(PATMIsPatchGCAddr(pVM, pPatchGC));
5729 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5730 if (pvPatchCoreOffset == 0)
5731 {
5732 Log(("PATMR3PatchToGCPtr failed for %RRv offset %x\n", pPatchGC, pPatchGC - pVM->patm.s.pPatchMemGC));
5733 return 0;
5734 }
5735 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
5736 pPrivInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, &pPatchRec->patch, pPatchGC);
5737 if (pEnmState)
5738 {
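        /* Classify the translation so the caller knows whether it's safe to resume at the original guest instruction. */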
5739 AssertMsg(pPrivInstrGC && ( pPatchRec->patch.uState == PATCH_ENABLED
5740 || pPatchRec->patch.uState == PATCH_DIRTY
5741 || pPatchRec->patch.uState == PATCH_DISABLE_PENDING
5742 || pPatchRec->patch.uState == PATCH_UNUSABLE),
5743 ("pPrivInstrGC=%RRv uState=%d\n", pPrivInstrGC, pPatchRec->patch.uState));
5744
5745 if ( !pPrivInstrGC
5746 || pPatchRec->patch.uState == PATCH_UNUSABLE
5747 || pPatchRec->patch.uState == PATCH_REFUSED)
5748 {
5749 pPrivInstrGC = 0;
5750 *pEnmState = PATMTRANS_FAILED;
5751 }
5752 else
5753 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pPrivInstrGC)
5754 {
5755 *pEnmState = PATMTRANS_INHIBITIRQ;
5756 }
5757 else
5758 if ( pPatchRec->patch.uState == PATCH_ENABLED
5759 && !(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE))
5760 && pPrivInstrGC > pPatchRec->patch.pPrivInstrGC
5761 && pPrivInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5762 {
5763 *pEnmState = PATMTRANS_OVERWRITTEN;
5764 }
5765 else
5766 if (PATMFindActivePatchByEntrypoint(pVM, pPrivInstrGC))
5767 {
5768 *pEnmState = PATMTRANS_OVERWRITTEN;
5769 }
5770 else
5771 if (pPrivInstrGC == pPatchRec->patch.pPrivInstrGC)
5772 {
5773 *pEnmState = PATMTRANS_PATCHSTART;
5774 }
5775 else
5776 *pEnmState = PATMTRANS_SAFE;
5777 }
5778 return pPrivInstrGC;
5779}
5780
5781/**
5782 * Returns the GC pointer of the patch for the specified GC address
5783 *
5784 * @returns GC pointer of the patch code, or 0 if not found.
5785 * @param pVM The VM to operate on.
5786 * @param pAddrGC Guest context address
5787 */
5788VMMR3DECL(RTRCPTR) PATMR3QueryPatchGCPtr(PVM pVM, RTRCPTR pAddrGC)
5789{
5790 PPATMPATCHREC pPatchRec;
5791
5792 // Find the patch record
5793 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pAddrGC);
5794    /** @todo we should only use patches that are enabled! (it has always been done this way, but it's incorrect) */
5795 if (pPatchRec && (pPatchRec->patch.uState == PATCH_ENABLED || pPatchRec->patch.uState == PATCH_DIRTY))
5796 return PATCHCODE_PTR_GC(&pPatchRec->patch);
5797
5798 return 0;
5799}
5800
5801/**
5802 * Attempt to recover dirty instructions
5803 *
5804 * @returns VBox status code.
5805 * @param pVM The VM to operate on.
5806 * @param pCtx CPU context
5807 * @param pPatch Patch record
5808 * @param pPatchToGuestRec Patch to guest address record
5809 * @param pEip GC pointer of trapping instruction
5810 */
5811static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch, PRECPATCHTOGUEST pPatchToGuestRec, RTRCPTR pEip)
5812{
5813 DISCPUSTATE CpuOld, CpuNew;
5814 uint8_t *pPatchInstrHC, *pCurPatchInstrHC;
5815 int rc;
5816 RTRCPTR pCurInstrGC, pCurPatchInstrGC;
5817 uint32_t cbDirty;
5818 PRECPATCHTOGUEST pRec;
5819
5820 Log(("patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv)\n", pEip, pPatchToGuestRec->pOrgInstrGC));
5821
5822 pRec = pPatchToGuestRec;
5823 pCurInstrGC = pPatchToGuestRec->pOrgInstrGC;
5824 pCurPatchInstrGC = pEip;
5825 cbDirty = 0;
5826 pPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
5827
5828 /* Find all adjacent dirty instructions */
5829 while (true)
5830 {
5831 if (pRec->fJumpTarget)
5832 {
5833 LogRel(("PATM: patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv) ignored, because instruction in function was reused as target of jump\n", pEip, pPatchToGuestRec->pOrgInstrGC));
5834 pRec->fDirty = false;
5835 return VERR_PATCHING_REFUSED;
5836 }
5837
5838 /* Restore original instruction opcode byte so we can check if the write was indeed safe. */
5839 pCurPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
5840 *pCurPatchInstrHC = pRec->u8DirtyOpcode;
5841
5842 /* Only harmless instructions are acceptable. */
5843 rc = CPUMR3DisasmInstrCPU(pVM, pCtx, pCurPatchInstrGC, &CpuOld, 0);
5844 if ( RT_FAILURE(rc)
5845 || !(CpuOld.pCurInstr->optype & OPTYPE_HARMLESS))
5846 break;
5847
5848#ifdef DEBUG
5849 char szBuf[256];
5850 szBuf[0] = '\0';
5851 DBGFR3DisasInstr(pVM, pCtx->cs, pCurPatchInstrGC, szBuf, sizeof(szBuf));
5852 Log(("DIRTY: %s\n", szBuf));
5853#endif
5854 /** Remove old lookup record. */
5855 patmr3RemoveP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrGC);
5856
5857 pCurPatchInstrGC += CpuOld.opsize;
5858 cbDirty += CpuOld.opsize;
5859
5860 /* Mark as clean; if we fail we'll let it always fault. */
5861 pRec->fDirty = false;
5862
5863 /* Let's see if there's another dirty instruction right after. */
5864 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
5865 if (!pRec || !pRec->fDirty)
5866 break; /* no more dirty instructions */
5867
5868 /* In case of complex instructions the next guest instruction could be quite far off. */
5869 pCurPatchInstrGC = pRec->Core.Key + pVM->patm.s.pPatchMemGC;
5870 }
5871
5872 if ( RT_SUCCESS(rc)
5873 && (CpuOld.pCurInstr->optype & OPTYPE_HARMLESS)
5874 )
5875 {
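        /* Second pass: copy the new guest instructions over the dirty block in patch memory, provided each one is still harmless and fits. */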
5876 uint32_t cbLeft;
5877
5878 pCurPatchInstrHC = pPatchInstrHC;
5879 pCurPatchInstrGC = pEip;
5880 cbLeft = cbDirty;
5881
5882 while (cbLeft && RT_SUCCESS(rc))
5883 {
5884 bool fValidInstr;
5885
5886 rc = CPUMR3DisasmInstrCPU(pVM, pCtx, pCurInstrGC, &CpuNew, 0);
5887
5888 fValidInstr = !!(CpuNew.pCurInstr->optype & OPTYPE_HARMLESS);
5889 if ( !fValidInstr
5890 && (CpuNew.pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW)
5891 )
5892 {
5893 RTRCPTR pTargetGC = PATMResolveBranch(&CpuNew, pCurInstrGC);
5894
5895 if ( pTargetGC >= pPatchToGuestRec->pOrgInstrGC
5896 && pTargetGC <= pPatchToGuestRec->pOrgInstrGC + cbDirty
5897 )
5898 {
5899 /* A relative jump to an instruction inside or to the end of the dirty block is acceptable. */
5900 fValidInstr = true;
5901 }
5902 }
5903
5904                /* Copy the instruction only if it's completely harmless (which implies a 1:1 patch copy) and still fits. */
5905 if ( rc == VINF_SUCCESS
5906 && CpuNew.opsize <= cbLeft /* must still fit */
5907 && fValidInstr
5908 )
5909 {
5910#ifdef DEBUG
5911 char szBuf[256];
5912 szBuf[0] = '\0';
5913 DBGFR3DisasInstr(pVM, pCtx->cs, pCurInstrGC, szBuf, sizeof(szBuf));
5914 Log(("NEW: %s\n", szBuf));
5915#endif
5916
5917 /* Copy the new instruction. */
5918 rc = PGMPhysSimpleReadGCPtr(pVM, pCurPatchInstrHC, pCurInstrGC, CpuNew.opsize);
5919 AssertRC(rc);
5920
5921 /* Add a new lookup record for the duplicated instruction. */
5922 patmr3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
5923 }
5924 else
5925 {
5926#ifdef DEBUG
5927 char szBuf[256];
5928 szBuf[0] = '\0';
5929 DBGFR3DisasInstr(pVM, pCtx->cs, pCurInstrGC, szBuf, sizeof(szBuf));
5930 Log(("NEW: %s (FAILED)\n", szBuf));
5931#endif
5932 /* Restore the old lookup record for the duplicated instruction. */
5933 patmr3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
5934
5935 /** @todo in theory we need to restore the lookup records for the remaining dirty instructions too! */
5936 rc = VERR_PATCHING_REFUSED;
5937 break;
5938 }
5939 pCurInstrGC += CpuNew.opsize;
5940 pCurPatchInstrHC += CpuNew.opsize;
5941 pCurPatchInstrGC += CpuNew.opsize;
5942 cbLeft -= CpuNew.opsize;
5943 }
5944 }
5945 else
5946 rc = VERR_PATCHING_REFUSED;
5947
5948 if (RT_SUCCESS(rc))
5949 {
5950 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyGood);
5951 }
5952 else
5953 {
5954 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyBad);
5955 /* Mark the whole instruction stream with breakpoints. */
5956 memset(pPatchInstrHC, 0xCC, cbDirty);
5957
5958 if ( pVM->patm.s.fOutOfMemory == false
5959 && (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER)))
5960 {
5961 rc = patmR3RefreshPatch(pVM, pPatch);
5962 if (RT_FAILURE(rc))
5963 {
5964 LogRel(("PATM: Failed to refresh dirty patch at %RRv. Disabling it.\n", pPatch->patch.pPrivInstrGC));
5965 }
5966 /* Even if we succeed, we must go back to the original instruction as the patched one could be invalid. */
5967 rc = VERR_PATCHING_REFUSED;
5968 }
5969 }
5970 return rc;
5971}
5972
5973/**
5974 * Handle trap inside patch code
5975 *
5976 * @returns VBox status code.
5977 * @param pVM The VM to operate on.
5978 * @param pCtx CPU context
5979 * @param pEip GC pointer of trapping instruction
5980 * @param ppNewEip GC pointer to new instruction
5981 */
5982VMMR3DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *ppNewEip)
5983{
5984 PPATMPATCHREC pPatch = 0;
5985 void *pvPatchCoreOffset;
5986 RTRCUINTPTR offset;
5987 RTRCPTR pNewEip;
5988    int rc;
5989 PRECPATCHTOGUEST pPatchToGuestRec = 0;
5990
5991 pNewEip = 0;
5992 *ppNewEip = 0;
5993
5994 STAM_PROFILE_ADV_START(&pVM->patm.s.StatHandleTrap, a);
5995
5996 /* Find the patch record. */
5997 /** @note there might not be a patch to guest translation record (global function) */
5998 offset = pEip - pVM->patm.s.pPatchMemGC;
5999 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
6000 if (pvPatchCoreOffset)
6001 {
6002 pPatch = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6003
6004 Assert(offset >= pPatch->patch.pPatchBlockOffset && offset < pPatch->patch.pPatchBlockOffset + pPatch->patch.cbPatchBlockSize);
6005
6006 if (pPatch->patch.uState == PATCH_DIRTY)
6007 {
6008 Log(("PATMR3HandleTrap: trap in dirty patch at %RRv\n", pEip));
6009 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6010 {
6011 /* Function duplication patches set fPIF to 1 on entry */
6012 pVM->patm.s.pGCStateHC->fPIF = 1;
6013 }
6014 }
6015 else
6016 if (pPatch->patch.uState == PATCH_DISABLED)
6017 {
6018 Log(("PATMR3HandleTrap: trap in disabled patch at %RRv\n", pEip));
6019 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6020 {
6021 /* Function duplication patches set fPIF to 1 on entry */
6022 pVM->patm.s.pGCStateHC->fPIF = 1;
6023 }
6024 }
6025 else
6026 if (pPatch->patch.uState == PATCH_DISABLE_PENDING)
6027 {
6028 RTRCPTR pPrivInstrGC = pPatch->patch.pPrivInstrGC;
6029
6030 Log(("PATMR3HandleTrap: disable operation is pending for patch at %RRv\n", pPatch->patch.pPrivInstrGC));
6031 rc = PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6032 AssertReleaseMsg(rc != VWRN_PATCH_REMOVED, ("PATMR3DisablePatch removed patch at %RRv\n", pPrivInstrGC));
6033 AssertMsg(pPatch->patch.uState == PATCH_DISABLED || pPatch->patch.uState == PATCH_UNUSABLE, ("Unexpected failure to disable patch state=%d rc=%Rrc\n", pPatch->patch.uState, rc));
6034 }
6035
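        /* Look up the closest patch-to-guest record at or below the faulting offset; it yields the original guest instruction to resume at. */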
6036 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, offset, false);
6037 AssertReleaseMsg(pPatchToGuestRec, ("PATMR3HandleTrap: Unable to find corresponding guest address for %RRv (offset %x)\n", pEip, offset));
6038
6039 pNewEip = pPatchToGuestRec->pOrgInstrGC;
6040 pPatch->patch.cTraps++;
6041 PATM_STAT_FAULT_INC(&pPatch->patch);
6042 }
6043 else
6044 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 0, ("PATMR3HandleTrap: Unable to find translation record for %RRv (PIF=0)\n", pEip));
6045
6046 /* Check if we were interrupted in PATM generated instruction code. */
6047 if (pVM->patm.s.pGCStateHC->fPIF == 0)
6048 {
6049 DISCPUSTATE Cpu;
6050 rc = CPUMR3DisasmInstrCPU(pVM, pCtx, pEip, &Cpu, "PIF Trap: ");
6051 AssertRC(rc);
6052
6053 if ( rc == VINF_SUCCESS
6054 && ( Cpu.pCurInstr->opcode == OP_PUSHF
6055 || Cpu.pCurInstr->opcode == OP_PUSH
6056 || Cpu.pCurInstr->opcode == OP_CALL)
6057 )
6058 {
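            /* Patch code saves state on the guest stack with pushf/push/call; faulting here typically means the stack page is monitored or not present. */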
6059 uint64_t fFlags;
6060
6061 STAM_COUNTER_INC(&pVM->patm.s.StatPushTrap);
6062
6063 if (Cpu.pCurInstr->opcode == OP_PUSH)
6064 {
6065 rc = PGMShwGetPage(pVM, pCtx->esp, &fFlags, NULL);
6066 if ( rc == VINF_SUCCESS
6067 && ((fFlags & (X86_PTE_P|X86_PTE_RW)) == (X86_PTE_P|X86_PTE_RW)) )
6068 {
6069 /* The stack address is fine, so the push argument is a pointer -> emulate this instruction */
6070
6071 /* Reset the PATM stack. */
6072 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6073
6074 pVM->patm.s.pGCStateHC->fPIF = 1;
6075
6076 Log(("Faulting push -> go back to the original instruction\n"));
6077
6078 /* continue at the original instruction */
6079 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6080 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6081 return VINF_SUCCESS;
6082 }
6083 }
6084
6085 /* Typical pushf (most patches)/push (call patch) trap because of a monitored page. */
6086 rc = PGMShwModifyPage(pVM, pCtx->esp, 1, X86_PTE_RW, ~(uint64_t)X86_PTE_RW);
6087 AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
6088 if (rc == VINF_SUCCESS)
6089 {
6090
6091 /* The guest page *must* be present. */
6092 rc = PGMGstGetPage(pVM, pCtx->esp, &fFlags, NULL);
6093 if (rc == VINF_SUCCESS && (fFlags & X86_PTE_P))
6094 {
6095 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6096 return VINF_PATCH_CONTINUE;
6097 }
6098 }
6099 }
6100 else
6101 if (pPatch->patch.pPrivInstrGC == pNewEip)
6102 {
6103 /* Invalidated patch or first instruction overwritten.
6104 * We can ignore the fPIF state in this case.
6105 */
6106 /* Reset the PATM stack. */
6107 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6108
6109 Log(("Call to invalidated patch -> go back to the original instruction\n"));
6110
6111 pVM->patm.s.pGCStateHC->fPIF = 1;
6112
6113 /* continue at the original instruction */
6114 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6115 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6116 return VINF_SUCCESS;
6117 }
6118
6119 char szBuf[256];
6120 szBuf[0] = '\0';
6121 DBGFR3DisasInstr(pVM, pCtx->cs, pEip, szBuf, sizeof(szBuf));
6122
6123 /* Very bad. We crashed in emitted code. Probably stack? */
6124 if (pPatch)
6125 {
6126 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6127 ("Crash in patch code %RRv (%RRv) esp=%RX32\nPatch state=%x flags=%x fDirty=%d\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVM), pPatch->patch.uState, pPatch->patch.flags, pPatchToGuestRec->fDirty, szBuf));
6128 }
6129 else
6130 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6131 ("Crash in patch code %RRv (%RRv) esp=%RX32\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVM), szBuf));
6132 EMR3FatalError(pVM, VERR_INTERNAL_ERROR);
6133 }
6134
6135 /* From here on, we must have a valid patch to guest translation. */
6136 if (pvPatchCoreOffset == 0)
6137 {
6138 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6139 AssertMsgFailed(("PATMR3HandleTrap: patch not found at address %RRv!!\n", pEip));
6140 return VERR_PATCH_NOT_FOUND; //fatal error
6141 }
6142
6143 /* Take care of dirty/changed instructions. */
6144 if (pPatchToGuestRec->fDirty)
6145 {
6146 Assert(pPatchToGuestRec->Core.Key == offset);
6147 Assert(pVM->patm.s.pGCStateHC->fPIF == 1);
6148
6149 rc = patmR3HandleDirtyInstr(pVM, pCtx, pPatch, pPatchToGuestRec, pEip);
6150 if (RT_SUCCESS(rc))
6151 {
6152 /* Retry the current instruction. */
6153 pNewEip = pEip;
6154 rc = VINF_PATCH_CONTINUE; /* Continue at current patch instruction. */
6155 }
6156 else
6157 {
6158 /* Reset the PATM stack. */
6159 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6160
6161 rc = VINF_SUCCESS; /* Continue at original instruction. */
6162 }
6163
6164 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6165 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6166 return rc;
6167 }
6168
6169#ifdef VBOX_STRICT
6170 if (pPatch->patch.flags & PATMFL_DUPLICATE_FUNCTION)
6171 {
6172 DISCPUSTATE cpu;
6173 bool disret;
6174 uint32_t opsize;
6175
6176 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6177 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &pPatch->patch, pNewEip), &opsize, NULL, PATMREAD_RAWCODE);
6178 if (disret && cpu.pCurInstr->opcode == OP_RETN)
6179 {
6180 RTRCPTR retaddr;
6181 PCPUMCTX pCtx;
6182
6183 pCtx = CPUMQueryGuestCtxPtr(pVM);
6184
6185 rc = PGMPhysSimpleReadGCPtr(pVM, &retaddr, pCtx->esp, sizeof(retaddr));
6186 AssertRC(rc);
6187
6188 Log(("Return failed at %RRv (%RRv)\n", pEip, pNewEip));
6189 Log(("Expected return address %RRv found address %RRv Psp=%x\n", pVM->patm.s.pGCStackHC[(pVM->patm.s.pGCStateHC->Psp+PATM_STACK_SIZE)/sizeof(RTRCPTR)], retaddr, pVM->patm.s.pGCStateHC->Psp));
6190 }
6191 }
6192#endif
6193
6194    /* Return the original address, corrected by subtracting the CS base address. */
6195 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6196
6197 /* Reset the PATM stack. */
6198 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6199
6200 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pNewEip)
6201 {
6202 /* Must be a faulting instruction after sti; currently only sysexit, hlt or iret */
6203 Log(("PATMR3HandleTrap %RRv -> inhibit irqs set!\n", pEip));
6204#ifdef VBOX_STRICT
6205 DISCPUSTATE cpu;
6206 bool disret;
6207 uint32_t opsize;
6208
6209 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6210 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &pPatch->patch, pNewEip), &opsize, NULL, PATMREAD_ORGCODE);
6211
6212 if (disret && (cpu.pCurInstr->opcode == OP_SYSEXIT || cpu.pCurInstr->opcode == OP_HLT || cpu.pCurInstr->opcode == OP_INT3))
6213 {
6214 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6215 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &pPatch->patch, pNewEip), &opsize, NULL, PATMREAD_RAWCODE);
6216
6217 Assert(cpu.pCurInstr->opcode == OP_SYSEXIT || cpu.pCurInstr->opcode == OP_HLT || cpu.pCurInstr->opcode == OP_IRET);
6218 }
6219#endif
6220 EMSetInhibitInterruptsPC(pVM, pNewEip);
6221 pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts = 0;
6222 }
6223
6224 Log2(("pPatchBlockGC %RRv - pEip %RRv corresponding GC address %RRv\n", PATCHCODE_PTR_GC(&pPatch->patch), pEip, pNewEip));
6225#ifdef LOG_ENABLED
6226 CPUMR3DisasmInstr(pVM, pCtx, pNewEip, "PATCHRET: ");
6227#endif
6228 if (pNewEip >= pPatch->patch.pPrivInstrGC && pNewEip < pPatch->patch.pPrivInstrGC + pPatch->patch.cbPatchJump)
6229 {
6230 /* We can't jump back to code that we've overwritten with a 5 byte jump! */
6231 Log(("Disabling patch at location %RRv due to trap too close to the privileged instruction \n", pPatch->patch.pPrivInstrGC));
6232 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6233 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6234 return VERR_PATCH_DISABLED;
6235 }
6236
6237#ifdef PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
6238 /** @todo compare to nr of successful runs. add some aging algorithm and determine the best time to disable the patch */
6239 if (pPatch->patch.cTraps > MAX_PATCH_TRAPS)
6240 {
6241 Log(("Disabling patch at location %RRv due to too many traps inside patch code\n", pPatch->patch.pPrivInstrGC));
6242 //we are only wasting time, back out the patch
6243 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6244 pTrapRec->pNextPatchInstr = 0;
6245 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6246 return VERR_PATCH_DISABLED;
6247 }
6248#endif
6249
6250 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6251 return VINF_SUCCESS;
6252}
6253
6254
6255/**
6256 * Handle a page fault in a monitored page.
6257 *
6258 * @returns VBox status code.
6259 * @param pVM The VM to operate on.
6260 */
6261VMMR3DECL(int) PATMR3HandleMonitoredPage(PVM pVM)
6262{
6263 RTRCPTR addr = pVM->patm.s.pvFaultMonitor;
6264
6265 addr &= PAGE_BASE_GC_MASK;
6266
6267 int rc = PGMHandlerVirtualDeregister(pVM, addr);
6268 AssertRC(rc); NOREF(rc);
6269
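 /* Renew the patch at or below the faulting address (disable, then re-enable), provided
  * its privileged instruction lives in the same page. */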
6270 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, false);
6271 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) == PAGE_ADDRESS(addr))
6272 {
6273 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6274 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6275 rc = PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6276 if (rc == VWRN_PATCH_REMOVED)
6277 return VINF_SUCCESS;
6278
6279 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6280
6281 if (addr == pPatchRec->patch.pPrivInstrGC)
6282 addr++;
6283 }
6284
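 /* Walk upwards through the patch tree and renew every other enabled patch whose
  * privileged instruction lies in the same page. */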
6285 for(;;)
6286 {
6287 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, true);
6288
6289 if (!pPatchRec || PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) != PAGE_ADDRESS(addr))
6290 break;
6291
6292 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
6293 {
6294 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6295 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6296 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6297 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6298 }
6299 addr = pPatchRec->patch.pPrivInstrGC + 1;
6300 }
6301
6302 pVM->patm.s.pvFaultMonitor = 0;
6303 return VINF_SUCCESS;
6304}
6305
6306
6307#ifdef VBOX_WITH_STATISTICS
6308
6309static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch)
6310{
6311 if (pPatch->flags & PATMFL_SYSENTER)
6312 {
6313 return "SYSENT";
6314 }
6315 else
6316 if (pPatch->flags & (PATMFL_TRAPHANDLER|PATMFL_INTHANDLER))
6317 {
6318 static char szTrap[16];
6319 uint32_t iGate;
6320
6321 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
6322 if (iGate < 256)
6323 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-%2X" : "TRAP-%2X", iGate);
6324 else
6325 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-??" : "TRAP-??");
6326 return szTrap;
6327 }
6328 else
6329 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
6330 return "DUPFUNC";
6331 else
6332 if (pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL)
6333 return "FUNCCALL";
6334 else
6335 if (pPatch->flags & PATMFL_TRAMPOLINE)
6336 return "TRAMP";
6337 else
6338 return patmGetInstructionString(pPatch->opcode, pPatch->flags);
6339}
6340
6341static const char *PATMPatchState(PVM pVM, PPATCHINFO pPatch)
6342{
6343 switch(pPatch->uState)
6344 {
6345 case PATCH_ENABLED:
6346 return "ENA";
6347 case PATCH_DISABLED:
6348 return "DIS";
6349 case PATCH_DIRTY:
6350 return "DIR";
6351 case PATCH_UNUSABLE:
6352 return "UNU";
6353 case PATCH_REFUSED:
6354 return "REF";
6355 case PATCH_DISABLE_PENDING:
6356 return "DIP";
6357 default:
6358 AssertFailed();
6359 return " ";
6360 }
6361}
6362
6363/**
6364 * Resets the sample.
6365 * @param pVM The VM handle.
6366 * @param pvSample The sample registered using STAMR3RegisterCallback.
6367 */
6368static void patmResetStat(PVM pVM, void *pvSample)
6369{
6370 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6371 Assert(pPatch);
6372
6373 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A = 0;
6374 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B = 0;
6375}
6376
6377/**
6378 * Prints the sample into the buffer.
6379 *
6380 * @param pVM The VM handle.
6381 * @param pvSample The sample registered using STAMR3RegisterCallback.
6382 * @param pszBuf The buffer to print into.
6383 * @param cchBuf The size of the buffer.
6384 */
6385static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf)
6386{
6387 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6388 Assert(pPatch);
6389
6390 Assert(pPatch->uState != PATCH_REFUSED);
6391 Assert(!(pPatch->flags & (PATMFL_REPLACE_FUNCTION_CALL|PATMFL_MMIO_ACCESS)));
6392
6393 RTStrPrintf(pszBuf, cchBuf, "size %04x ->%3s %8s - %08d - %08d",
6394 pPatch->cbPatchBlockSize, PATMPatchState(pVM, pPatch), PATMPatchType(pVM, pPatch),
6395 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A, pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B);
6396}
6397
6398/**
6399 * Returns the GC address of the corresponding patch statistics counter
6400 *
6401 * @returns Stat address
6402 * @param pVM The VM to operate on.
6403 * @param pPatch Patch structure
6404 */
6405RTRCPTR patmPatchQueryStatAddress(PVM pVM, PPATCHINFO pPatch)
6406{
6407 Assert(pPatch->uPatchIdx != PATM_STAT_INDEX_NONE);
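 /* The per-patch counters live in a GC array of STAMRATIOU32; return the address of this
  * patch's u32A member. */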
6408 return pVM->patm.s.pStatsGC + sizeof(STAMRATIOU32) * pPatch->uPatchIdx + RT_OFFSETOF(STAMRATIOU32, u32A);
6409}
6410
6411#endif /* VBOX_WITH_STATISTICS */
6412
6413#ifdef VBOX_WITH_DEBUGGER
6414/**
6415 * The '.patmoff' command.
6416 *
6417 * @returns VBox status.
6418 * @param pCmd Pointer to the command descriptor (as registered).
6419 * @param pCmdHlp Pointer to command helper functions.
6420 * @param pVM Pointer to the current VM (if any).
6421 * @param paArgs Pointer to (readonly) array of arguments.
6422 * @param cArgs Number of arguments in the array.
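 * @param pResult Where to return the command result (not used here).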
6423 */
6424static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
6425{
6426 /*
6427 * Validate input.
6428 */
6429 if (!pVM)
6430 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
6431
6432 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, DisableAllPatches, pVM);
6433 PATMR3AllowPatching(pVM, false);
6434 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching disabled\n");
6435}
6436
6437/**
6438 * The '.patmon' command.
6439 *
6440 * @returns VBox status.
6441 * @param pCmd Pointer to the command descriptor (as registered).
6442 * @param pCmdHlp Pointer to command helper functions.
6443 * @param pVM Pointer to the current VM (if any).
6444 * @param paArgs Pointer to (readonly) array of arguments.
6445 * @param cArgs Number of arguments in the array.
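 * @param pResult Where to return the command result (not used here).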
6446 */
6447static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
6448{
6449 /*
6450 * Validate input.
6451 */
6452 if (!pVM)
6453 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
6454
6455 PATMR3AllowPatching(pVM, true);
6456 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, EnableAllPatches, pVM);
6457 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching enabled\n");
6458}
6459#endif