VirtualBox

source: vbox/trunk/src/VBox/VMM/PATM/PATM.cpp@22147

Last change on this file was revision 22147, checked in by vboxsync, 15 years ago

PATM: fixed assertion message

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 243.4 KB
1/* $Id: PATM.cpp 22147 2009-08-11 09:00:08Z vboxsync $ */
2/** @file
3 * PATM - Dynamic Guest OS Patching Manager
4 *
5 * NOTE: Never ever reuse patch memory!!
6 */
7
8/*
9 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
20 * Clara, CA 95054 USA or visit http://www.sun.com if you need
21 * additional information or have any questions.
22 */
23
24/*******************************************************************************
25* Header Files *
26*******************************************************************************/
27#define LOG_GROUP LOG_GROUP_PATM
28#include <VBox/patm.h>
29#include <VBox/stam.h>
30#include <VBox/pgm.h>
31#include <VBox/cpum.h>
32#include <VBox/cpumdis.h>
33#include <VBox/iom.h>
34#include <VBox/sup.h>
35#include <VBox/mm.h>
36#include <VBox/ssm.h>
37#include <VBox/pdm.h>
38#include <VBox/trpm.h>
39#include <VBox/cfgm.h>
40#include <VBox/param.h>
41#include <VBox/selm.h>
42#include <iprt/avl.h>
43#include "PATMInternal.h"
44#include "PATMPatch.h"
45#include <VBox/vm.h>
46#include <VBox/csam.h>
47
48#include <VBox/dbg.h>
49#include <VBox/err.h>
50#include <VBox/log.h>
51#include <iprt/assert.h>
52#include <iprt/asm.h>
53#include <VBox/dis.h>
54#include <VBox/disopcode.h>
55
56#include <iprt/string.h>
57#include "PATMA.h"
58
59//#define PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
60//#define PATM_DISABLE_ALL
61
62/*******************************************************************************
63* Internal Functions *
64*******************************************************************************/
65
66static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pPatch);
67static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
68static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
69
70#ifdef LOG_ENABLED // keep gcc quiet
71static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC);
72#endif
73#ifdef VBOX_WITH_STATISTICS
74static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch);
75static void patmResetStat(PVM pVM, void *pvSample);
76static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf);
77#endif
78
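/* The patch memory block is mapped into both ring-3 (HC) and the raw-mode guest context (GC)
 * with an identical layout, so these macros convert a pointer from one view to the other by
 * rebasing it on the other context's base address. */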
79#define patmPatchHCPtr2PatchGCPtr(pVM, pHC) (pVM->patm.s.pPatchMemGC + (pHC - pVM->patm.s.pPatchMemHC))
80#define patmPatchGCPtr2PatchHCPtr(pVM, pGC) (pVM->patm.s.pPatchMemHC + (pGC - pVM->patm.s.pPatchMemGC))
81
82static int patmReinit(PVM pVM);
83static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam);
84
85#ifdef VBOX_WITH_DEBUGGER
86static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM);
87static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
88static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
89
90/** Command descriptors. */
91static const DBGCCMD g_aCmds[] =
92{
93 /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, pResultDesc, fFlags, pfnHandler, pszSyntax, pszDescription */
94 { "patmon", 0, 0, NULL, 0, NULL, 0, patmr3CmdOn, "", "Enable patching." },
95 { "patmoff", 0, 0, NULL, 0, NULL, 0, patmr3CmdOff, "", "Disable patching." },
96};
97#endif
98
99/* Don't want to break saved states, so put it here as a global variable. */
100static unsigned int cIDTHandlersDisabled = 0;
101
102/**
103 * Initializes the PATM.
104 *
105 * @returns VBox status code.
106 * @param pVM The VM to operate on.
107 */
108VMMR3DECL(int) PATMR3Init(PVM pVM)
109{
110 int rc;
111
112 Log(("PATMR3Init: Patch record size %d\n", sizeof(PATCHINFO)));
113
114 /* These values can't change as they are hardcoded in patch code (old saved states!) */
115 AssertCompile(VMCPU_FF_TIMER == RT_BIT_32(2));
116 AssertCompile(VM_FF_REQUEST == VMCPU_FF_REQUEST);
117 AssertCompile(VMCPU_FF_INTERRUPT_APIC == RT_BIT_32(0));
118 AssertCompile(VMCPU_FF_INTERRUPT_PIC == RT_BIT_32(1));
119
120 AssertReleaseMsg(PATMInterruptFlag == (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST),
121 ("Interrupt flags out of sync!! PATMInterruptFlag=%#x expected %#x. broken assembler?\n", PATMInterruptFlag, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST));
122
123 /* Allocate patch memory and GC patch state memory. */
124 pVM->patm.s.cbPatchMem = PATCH_MEMORY_SIZE;
125 /* Add another page in case the generated code is much larger than expected. */
126 /** @todo bad safety precaution */
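/* The single allocation below is carved up further down as
 * [patch code | spare page | PATM stack | GC state page | statistics],
 * which is what the pointer arithmetic for pGCStackHC, pGCStateHC and pStatsHC relies on. */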
127 rc = MMR3HyperAllocOnceNoRel(pVM, PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE + PATM_STAT_MEMSIZE, PAGE_SIZE, MM_TAG_PATM, (void **)&pVM->patm.s.pPatchMemHC);
128 if (RT_FAILURE(rc))
129 {
130 Log(("MMHyperAlloc failed with %Rrc\n", rc));
131 return rc;
132 }
133 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
134
135 /* PATM stack page for call instruction execution (2 parts: one for our private stack and one to store the original return address). */
136 pVM->patm.s.pGCStackHC = (RTRCPTR *)(pVM->patm.s.pPatchMemHC + PATCH_MEMORY_SIZE + PAGE_SIZE);
137 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
138
139 /*
140 * Hypervisor memory for GC status data (read/write)
141 *
142 * Note1: This is non-critical data; if trashed by the guest, then it will only cause problems for itself
143 * Note2: This doesn't really belong here, but we need access to it for relocation purposes
144 *
145 */
146 Assert(sizeof(PATMGCSTATE) < PAGE_SIZE); /** @note hardcoded dependencies on this exist. */
147 pVM->patm.s.pGCStateHC = (PPATMGCSTATE)((uint8_t *)pVM->patm.s.pGCStackHC + PATM_STACK_TOTAL_SIZE);
148 pVM->patm.s.pGCStateGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
149
150 /* Hypervisor memory for patch statistics */
151 pVM->patm.s.pStatsHC = (PSTAMRATIOU32)((uint8_t *)pVM->patm.s.pGCStateHC + PAGE_SIZE);
152 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
153
154 /* Memory for patch lookup trees. */
155 rc = MMHyperAlloc(pVM, sizeof(*pVM->patm.s.PatchLookupTreeHC), 0, MM_TAG_PATM, (void **)&pVM->patm.s.PatchLookupTreeHC);
156 AssertRCReturn(rc, rc);
157 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
158
159#ifdef RT_ARCH_AMD64 /* see patmReinit(). */
160 /* Check CFGM option. */
161 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "PATMEnabled", &pVM->fPATMEnabled);
162 if (RT_FAILURE(rc))
163# ifdef PATM_DISABLE_ALL
164 pVM->fPATMEnabled = false;
165# else
166 pVM->fPATMEnabled = true;
167# endif
168#endif
169
170 rc = patmReinit(pVM);
171 AssertRC(rc);
172 if (RT_FAILURE(rc))
173 return rc;
174
175 /*
176 * Register save and load state notificators.
177 */
178 rc = SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SSM_VERSION, sizeof(pVM->patm.s) + PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE,
179 NULL, patmr3Save, NULL,
180 NULL, patmr3Load, NULL);
181 if (RT_FAILURE(rc))
182 {
183 AssertRC(rc);
184 return rc;
185 }
186
187#ifdef VBOX_WITH_DEBUGGER
188 /*
189 * Debugger commands.
190 */
191 static bool fRegisteredCmds = false;
192 if (!fRegisteredCmds)
193 {
194 int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
195 if (RT_SUCCESS(rc))
196 fRegisteredCmds = true;
197 }
198#endif
199
200#ifdef VBOX_WITH_STATISTICS
201 STAM_REG(pVM, &pVM->patm.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/PATM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
202 STAM_REG(pVM, &pVM->patm.s.StatPATMMemoryUsed,STAMTYPE_COUNTER, "/PATM/MemoryUsed", STAMUNIT_OCCURENCES, "The amount of hypervisor heap used for patches.");
203 STAM_REG(pVM, &pVM->patm.s.StatDisabled, STAMTYPE_COUNTER, "/PATM/Patch/Disabled", STAMUNIT_OCCURENCES, "Number of times patches were disabled.");
204 STAM_REG(pVM, &pVM->patm.s.StatEnabled, STAMTYPE_COUNTER, "/PATM/Patch/Enabled", STAMUNIT_OCCURENCES, "Number of times patches were enabled.");
205 STAM_REG(pVM, &pVM->patm.s.StatDirty, STAMTYPE_COUNTER, "/PATM/Patch/Dirty", STAMUNIT_OCCURENCES, "Number of times patches were marked dirty.");
206 STAM_REG(pVM, &pVM->patm.s.StatUnusable, STAMTYPE_COUNTER, "/PATM/Patch/Unusable", STAMUNIT_OCCURENCES, "Number of unusable patches (conflicts).");
207 STAM_REG(pVM, &pVM->patm.s.StatInstalled, STAMTYPE_COUNTER, "/PATM/Patch/Installed", STAMUNIT_OCCURENCES, "Number of installed patches.");
208 STAM_REG(pVM, &pVM->patm.s.StatInt3Callable, STAMTYPE_COUNTER, "/PATM/Patch/Int3Callable", STAMUNIT_OCCURENCES, "Number of cli patches turned into int3 patches.");
209
210 STAM_REG(pVM, &pVM->patm.s.StatInt3BlockRun, STAMTYPE_COUNTER, "/PATM/Patch/Run/Int3", STAMUNIT_OCCURENCES, "Number of times an int3 block patch was executed.");
211 STAMR3RegisterF(pVM, &pVM->patm.s.pGCStateHC->uPatchCalls, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Patch/Run/Normal");
212
213 STAM_REG(pVM, &pVM->patm.s.StatInstalledFunctionPatches, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Function", STAMUNIT_OCCURENCES, "Number of installed function duplication patches.");
214 STAM_REG(pVM, &pVM->patm.s.StatInstalledTrampoline, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Trampoline", STAMUNIT_OCCURENCES, "Number of installed trampoline patches.");
215 STAM_REG(pVM, &pVM->patm.s.StatInstalledJump, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Jump", STAMUNIT_OCCURENCES, "Number of installed jump patches.");
216
217 STAM_REG(pVM, &pVM->patm.s.StatOverwritten, STAMTYPE_COUNTER, "/PATM/Patch/Overwritten", STAMUNIT_OCCURENCES, "Number of overwritten patches.");
218 STAM_REG(pVM, &pVM->patm.s.StatFixedConflicts,STAMTYPE_COUNTER, "/PATM/Patch/ConflictFixed", STAMUNIT_OCCURENCES, "Number of fixed conflicts.");
219 STAM_REG(pVM, &pVM->patm.s.StatFlushed, STAMTYPE_COUNTER, "/PATM/Patch/Flushed", STAMUNIT_OCCURENCES, "Number of flushes of pages with patch jumps.");
220 STAM_REG(pVM, &pVM->patm.s.StatMonitored, STAMTYPE_COUNTER, "/PATM/Patch/Monitored", STAMUNIT_OCCURENCES, "Number of patches in monitored patch pages.");
221 STAM_REG(pVM, &pVM->patm.s.StatPageBoundaryCrossed, STAMTYPE_COUNTER, "/PATM/Patch/BoundaryCross", STAMUNIT_OCCURENCES, "Number of refused patches due to patch jump crossing page boundary.");
222
223 STAM_REG(pVM, &pVM->patm.s.StatHandleTrap, STAMTYPE_PROFILE, "/PATM/HandleTrap", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3HandleTrap");
224 STAM_REG(pVM, &pVM->patm.s.StatPushTrap, STAMTYPE_COUNTER, "/PATM/HandleTrap/PushWP", STAMUNIT_OCCURENCES, "Number of traps due to monitored stack pages.");
225
226 STAM_REG(pVM, &pVM->patm.s.StatSwitchBack, STAMTYPE_COUNTER, "/PATM/SwitchBack", STAMUNIT_OCCURENCES, "Switch back to original guest code when IF=1 & executing PATM instructions");
227 STAM_REG(pVM, &pVM->patm.s.StatSwitchBackFail,STAMTYPE_COUNTER, "/PATM/SwitchBackFail", STAMUNIT_OCCURENCES, "Failed switch back to original guest code when IF=1 & executing PATM instructions");
228
229 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQFailed, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Failed", STAMUNIT_OCCURENCES, "Nr of failed PATMR3DuplicateFunctionRequest calls");
230 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQSuccess, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Success", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls");
231 STAM_REG(pVM, &pVM->patm.s.StatDuplicateUseExisting,STAMTYPE_COUNTER, "/PATM/Function/DupREQ/UseExist", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls when using an existing patch");
232
233 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupInsert, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Insert", STAMUNIT_OCCURENCES, "Nr of successful function address insertions");
234 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupReplace, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Replace", STAMUNIT_OCCURENCES, "Nr of successful function address replacements");
235 STAM_REG(pVM, &pVM->patm.s.StatU32FunctionMaxSlotsUsed, STAMTYPE_U32_RESET,"/PATM/Function/Lookup/MaxSlots", STAMUNIT_OCCURENCES, "Maximum nr of lookup slots used in all call patches");
236
237 STAM_REG(pVM, &pVM->patm.s.StatFunctionFound, STAMTYPE_COUNTER, "/PATM/Function/Found", STAMUNIT_OCCURENCES, "Nr of successful function patch lookups in GC");
238 STAM_REG(pVM, &pVM->patm.s.StatFunctionNotFound, STAMTYPE_COUNTER, "/PATM/Function/NotFound", STAMUNIT_OCCURENCES, "Nr of failed function patch lookups in GC");
239
240 STAM_REG(pVM, &pVM->patm.s.StatPatchWrite, STAMTYPE_PROFILE, "/PATM/Write/Handle", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3PatchWrite");
241 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteDetect, STAMTYPE_PROFILE, "/PATM/Write/Detect", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMIsWriteToPatchPage");
242 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpreted, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Success", STAMUNIT_OCCURENCES, "Nr of interpreted patch writes.");
243 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpretedFailed, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Failed", STAMUNIT_OCCURENCES, "Nr of failed interpreted patch writes.");
244
245 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshSuccess, STAMTYPE_COUNTER, "/PATM/Refresh/Success", STAMUNIT_OCCURENCES, "Successful patch refreshes");
246 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshFailed, STAMTYPE_COUNTER, "/PATM/Refresh/Failure", STAMUNIT_OCCURENCES, "Failed patch refreshes");
247
248 STAM_REG(pVM, &pVM->patm.s.StatPatchPageInserted, STAMTYPE_COUNTER, "/PATM/Page/Inserted", STAMUNIT_OCCURENCES, "Nr of inserted guest pages that were patched");
249 STAM_REG(pVM, &pVM->patm.s.StatPatchPageRemoved, STAMTYPE_COUNTER, "/PATM/Page/Removed", STAMUNIT_OCCURENCES, "Nr of removed guest pages that were patched");
250
251 STAM_REG(pVM, &pVM->patm.s.StatInstrDirty, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Detected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty.");
252 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyGood, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Corrected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and corrected later on.");
253 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyBad, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Failed", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and we were not able to correct them.");
254
255 STAM_REG(pVM, &pVM->patm.s.StatSysEnter, STAMTYPE_COUNTER, "/PATM/Emul/SysEnter", STAMUNIT_OCCURENCES, "Number of times sysenter was emulated.");
256 STAM_REG(pVM, &pVM->patm.s.StatSysExit, STAMTYPE_COUNTER, "/PATM/Emul/SysExit" , STAMUNIT_OCCURENCES, "Number of times sysexit was emulated.");
257 STAM_REG(pVM, &pVM->patm.s.StatEmulIret, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Success", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
258 STAM_REG(pVM, &pVM->patm.s.StatEmulIretFailed, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Failed", STAMUNIT_OCCURENCES, "Number of times iret emulation failed.");
259
260 STAM_REG(pVM, &pVM->patm.s.StatGenRet, STAMTYPE_COUNTER, "/PATM/Gen/Ret" , STAMUNIT_OCCURENCES, "Number of generated ret instructions.");
261 STAM_REG(pVM, &pVM->patm.s.StatGenRetReused, STAMTYPE_COUNTER, "/PATM/Gen/RetReused" , STAMUNIT_OCCURENCES, "Number of reused ret instructions.");
262 STAM_REG(pVM, &pVM->patm.s.StatGenCall, STAMTYPE_COUNTER, "/PATM/Gen/Call", STAMUNIT_OCCURENCES, "Number of generated call instructions.");
263 STAM_REG(pVM, &pVM->patm.s.StatGenJump, STAMTYPE_COUNTER, "/PATM/Gen/Jmp" , STAMUNIT_OCCURENCES, "Number of generated indirect jump instructions.");
264 STAM_REG(pVM, &pVM->patm.s.StatGenPopf, STAMTYPE_COUNTER, "/PATM/Gen/Popf" , STAMUNIT_OCCURENCES, "Number of generated popf instructions.");
265
266 STAM_REG(pVM, &pVM->patm.s.StatCheckPendingIRQ, STAMTYPE_COUNTER, "/PATM/GC/CheckIRQ" , STAMUNIT_OCCURENCES, "Number of traps that ask to check for pending irqs.");
267#endif /* VBOX_WITH_STATISTICS */
268
269 Log(("PATMCallRecord.size %d\n", PATMCallRecord.size));
270 Log(("PATMCallIndirectRecord.size %d\n", PATMCallIndirectRecord.size));
271 Log(("PATMRetRecord.size %d\n", PATMRetRecord.size));
272 Log(("PATMJumpIndirectRecord.size %d\n", PATMJumpIndirectRecord.size));
273 Log(("PATMPopf32Record.size %d\n", PATMPopf32Record.size));
274 Log(("PATMIretRecord.size %d\n", PATMIretRecord.size));
275 Log(("PATMStiRecord.size %d\n", PATMStiRecord.size));
276 Log(("PATMCheckIFRecord.size %d\n", PATMCheckIFRecord.size));
277
278 return rc;
279}
280
281/**
282 * Finalizes HMA page attributes.
283 *
284 * @returns VBox status code.
285 * @param pVM The VM handle.
286 */
287VMMR3DECL(int) PATMR3InitFinalize(PVM pVM)
288{
289 /* The GC state, stack and statistics must be read/write for the guest (supervisor only of course). */
290 int rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStateGC, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
291 if (RT_FAILURE(rc))
292 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
293
294 rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStackGC, PATM_STACK_TOTAL_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
295 if (RT_FAILURE(rc))
296 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
297
298 rc = PGMMapSetPage(pVM, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
299 if (RT_FAILURE(rc))
300 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
301
302 return rc;
303}
304
305/**
306 * (Re)initializes PATM
307 *
308 * @param pVM The VM.
309 */
310static int patmReinit(PVM pVM)
311{
312 int rc;
313
314 /*
315 * Assert alignment and sizes.
316 */
317 AssertRelease(!(RT_OFFSETOF(VM, patm.s) & 31));
318 AssertRelease(sizeof(pVM->patm.s) <= sizeof(pVM->patm.padding));
319
320 /*
321 * Setup any fixed pointers and offsets.
322 */
323 pVM->patm.s.offVM = RT_OFFSETOF(VM, patm);
324
325#ifndef RT_ARCH_AMD64 /* would be nice if this was changed everywhere. was driving me crazy on AMD64. */
326#ifndef PATM_DISABLE_ALL
327 pVM->fPATMEnabled = true;
328#endif
329#endif
330
331 Assert(pVM->patm.s.pGCStateHC);
332 memset(pVM->patm.s.pGCStateHC, 0, PAGE_SIZE);
333 AssertReleaseMsg(pVM->patm.s.pGCStateGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStateGC));
334
335 Log(("Patch memory allocated at %p - %RRv\n", pVM->patm.s.pPatchMemHC, pVM->patm.s.pPatchMemGC));
336 pVM->patm.s.pGCStateHC->uVMFlags = X86_EFL_IF;
337
338 Assert(pVM->patm.s.pGCStackHC);
339 memset(pVM->patm.s.pGCStackHC, 0, PAGE_SIZE);
340 AssertReleaseMsg(pVM->patm.s.pGCStackGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStackGC));
341 pVM->patm.s.pGCStateHC->Psp = PATM_STACK_SIZE;
342 pVM->patm.s.pGCStateHC->fPIF = 1; /* PATM Interrupt Flag */
343
344 Assert(pVM->patm.s.pStatsHC);
345 memset(pVM->patm.s.pStatsHC, 0, PATM_STAT_MEMSIZE);
346 AssertReleaseMsg(pVM->patm.s.pStatsGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pStatsGC));
347
348 Assert(pVM->patm.s.pPatchMemHC);
349 Assert(pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC));
350 memset(pVM->patm.s.pPatchMemHC, 0, PATCH_MEMORY_SIZE);
351 AssertReleaseMsg(pVM->patm.s.pPatchMemGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pPatchMemHC));
352
353 /* Needed for future patching of sldt/sgdt/sidt/str etc. */
354 pVM->patm.s.pCPUMCtxGC = VM_RC_ADDR(pVM, CPUMQueryGuestCtxPtr(VMMGetCpu(pVM)));
355
356 Assert(pVM->patm.s.PatchLookupTreeHC);
357 Assert(pVM->patm.s.PatchLookupTreeGC == MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC));
358
359 /*
360 * (Re)Initialize PATM structure
361 */
362 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
363 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr);
364 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
365 pVM->patm.s.offPatchMem = 16; /* don't start with zero here */
366 pVM->patm.s.uCurrentPatchIdx = 1; /* Index zero is a dummy */
367 pVM->patm.s.pvFaultMonitor = 0;
368 pVM->patm.s.deltaReloc = 0;
369
370 /* Lowest and highest patched instruction */
371 pVM->patm.s.pPatchedInstrGCLowest = ~0;
372 pVM->patm.s.pPatchedInstrGCHighest = 0;
373
374 pVM->patm.s.PatchLookupTreeHC->PatchTree = 0;
375 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
376 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
377
378 pVM->patm.s.pfnSysEnterPatchGC = 0;
379 pVM->patm.s.pfnSysEnterGC = 0;
380
381 pVM->patm.s.fOutOfMemory = false;
382
383 pVM->patm.s.pfnHelperCallGC = 0;
384
385 /* Generate all global functions to be used by future patches. */
386 /* We generate a fake patch in order to use the existing code for relocation. */
387 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pVM->patm.s.pGlobalPatchRec);
388 if (RT_FAILURE(rc))
389 {
390 Log(("Out of memory!!!!\n"));
391 return VERR_NO_MEMORY;
392 }
393 pVM->patm.s.pGlobalPatchRec->patch.flags = PATMFL_GLOBAL_FUNCTIONS;
394 pVM->patm.s.pGlobalPatchRec->patch.uState = PATCH_ENABLED;
395 pVM->patm.s.pGlobalPatchRec->patch.pPatchBlockOffset = pVM->patm.s.offPatchMem;
396
397 rc = patmPatchGenGlobalFunctions(pVM, &pVM->patm.s.pGlobalPatchRec->patch);
398 AssertRC(rc);
399
400 /* Update free pointer in patch memory. */
401 pVM->patm.s.offPatchMem += pVM->patm.s.pGlobalPatchRec->patch.uCurPatchOffset;
402 /* Round to next 8 byte boundary. */
403 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
404 return rc;
405}
406
407
408/**
409 * Applies relocations to data and code managed by this
410 * component. This function will be called at init and
411 * whenever the VMM needs to relocate itself inside the GC.
412 *
413 * The PATM will update the addresses used by the switcher.
414 *
415 * @param pVM The VM.
416 */
417VMMR3DECL(void) PATMR3Relocate(PVM pVM)
418{
419 RTRCPTR GCPtrNew = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
420 RTRCINTPTR delta = GCPtrNew - pVM->patm.s.pGCStateGC;
421
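/* Everything PATM exposes to the guest context lives in hypervisor memory, so after a
 * relocation all GC pointers (state, stack, statistics, lookup trees, helper entry points
 * and even EIP while executing inside patch code) must be shifted by the same delta. */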
422 Log(("PATMR3Relocate from %RRv to %RRv - delta %08X\n", pVM->patm.s.pGCStateGC, GCPtrNew, delta));
423 if (delta)
424 {
425 PCPUMCTX pCtx;
426
427 /* Update CPUMCTX guest context pointer. */
428 pVM->patm.s.pCPUMCtxGC += delta;
429
430 pVM->patm.s.deltaReloc = delta;
431
432 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, RelocatePatches, (void *)pVM);
433
434 pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));
435
436 /* If we are running patch code right now, then also adjust EIP. */
437 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
438 pCtx->eip += delta;
439
440 pVM->patm.s.pGCStateGC = GCPtrNew;
441 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
442
443 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
444
445 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
446
447 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
448
449 if (pVM->patm.s.pfnSysEnterPatchGC)
450 pVM->patm.s.pfnSysEnterPatchGC += delta;
451
452 /* Deal with the global patch functions. */
453 pVM->patm.s.pfnHelperCallGC += delta;
454 pVM->patm.s.pfnHelperRetGC += delta;
455 pVM->patm.s.pfnHelperIretGC += delta;
456 pVM->patm.s.pfnHelperJumpGC += delta;
457
458 RelocatePatches(&pVM->patm.s.pGlobalPatchRec->Core, (void *)pVM);
459 }
460}
461
462
463/**
464 * Terminates the PATM.
465 *
466 * Termination means cleaning up and freeing all resources;
467 * the VM itself is at this point powered off or suspended.
468 *
469 * @returns VBox status code.
470 * @param pVM The VM to operate on.
471 */
472VMMR3DECL(int) PATMR3Term(PVM pVM)
473{
474 /* Memory was all allocated from the two MM heaps and requires no freeing. */
475 return VINF_SUCCESS;
476}
477
478
479/**
480 * PATM reset callback.
481 *
482 * @returns VBox status code.
483 * @param pVM The VM which is reset.
484 */
485VMMR3DECL(int) PATMR3Reset(PVM pVM)
486{
487 Log(("PATMR3Reset\n"));
488
489 /* Free all patches. */
490 while (true)
491 {
492 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32RemoveBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, 0, true);
493 if (pPatchRec)
494 {
495 PATMRemovePatch(pVM, pPatchRec, true);
496 }
497 else
498 break;
499 }
500 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
501 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
502 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
503 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
504
505 int rc = patmReinit(pVM);
506 if (RT_SUCCESS(rc))
507 rc = PATMR3InitFinalize(pVM); /* paranoia */
508
509 return rc;
510}
511
512/**
513 * Read callback for disassembly function; supports reading bytes that cross a page boundary
514 *
515 * @returns VBox status code.
516 * @param pSrc GC source pointer
517 * @param pDest HC destination pointer
518 * @param size Number of bytes to read
519 * @param pvUserdata Callback specific user data (pCpu)
520 *
521 */
522int patmReadBytes(RTUINTPTR pSrc, uint8_t *pDest, unsigned size, void *pvUserdata)
523{
524 DISCPUSTATE *pCpu = (DISCPUSTATE *)pvUserdata;
525 PATMDISASM *pDisInfo = (PATMDISASM *)pCpu->apvUserData[0];
526 int orgsize = size;
527
528 Assert(size);
529 if (size == 0)
530 return VERR_INVALID_PARAMETER;
531
532 /*
533 * Trap/interrupt handlers typically call common code on entry, which might already have patches inserted.
534 * As we currently don't support calling patch code from patch code, we'll let it read the original opcode bytes instead.
535 */
536 /** @todo could change in the future! */
537 if (pDisInfo->fReadFlags & PATMREAD_ORGCODE)
538 {
539 for (int i=0;i<orgsize;i++)
540 {
541 int rc = PATMR3QueryOpcode(pDisInfo->pVM, (RTRCPTR)pSrc, pDest);
542 if (RT_SUCCESS(rc))
543 {
544 pSrc++;
545 pDest++;
546 size--;
547 }
548 else break;
549 }
550 if (size == 0)
551 return VINF_SUCCESS;
552#ifdef VBOX_STRICT
553 if ( !(pDisInfo->pPatchInfo->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER))
554 && !(pDisInfo->fReadFlags & PATMREAD_NOCHECK))
555 {
556 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pSrc, NULL) == false);
557 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pSrc+size-1, NULL) == false);
558 }
559#endif
560 }
561
562
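/* The cached HC mapping (pInstrHC) only covers the instruction's own guest page, so a read
 * that crosses into another page has to go through PGM instead. */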
563 if (PAGE_ADDRESS(pDisInfo->pInstrGC) != PAGE_ADDRESS(pSrc + size - 1) && !PATMIsPatchGCAddr(pDisInfo->pVM, pSrc))
564 {
565 return PGMPhysSimpleReadGCPtr(&pDisInfo->pVM->aCpus[0], pDest, pSrc, size);
566 }
567 else
568 {
569 uint8_t *pInstrHC = pDisInfo->pInstrHC;
570
571 Assert(pInstrHC);
572
573 /* pInstrHC is the base address; adjust according to the GC pointer. */
574 pInstrHC = pInstrHC + (pSrc - pDisInfo->pInstrGC);
575
576 memcpy(pDest, (void *)pInstrHC, size);
577 }
578
579 return VINF_SUCCESS;
580}
581
582/**
583 * Callback function for RTAvloU32DoWithAll
584 *
585 * Updates all fixups in the patches
586 *
587 * @returns VBox status code.
588 * @param pNode Current node
589 * @param pParam The VM to operate on.
590 */
591static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam)
592{
593 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
594 PVM pVM = (PVM)pParam;
595 RTRCINTPTR delta;
596#ifdef LOG_ENABLED
597 DISCPUSTATE cpu;
598 char szOutput[256];
599 uint32_t opsize;
600 bool disret;
601#endif
602 int rc;
603
604 /* Nothing to do if the patch is not active. */
605 if (pPatch->patch.uState == PATCH_REFUSED)
606 return 0;
607
608#ifdef LOG_ENABLED
609 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
610 {
611 /** @note pPrivInstrHC is probably not valid anymore */
612 rc = PGMPhysGCPtr2R3Ptr(VMMGetCpu0(pVM), pPatch->patch.pPrivInstrGC, (PRTR3PTR)&pPatch->patch.pPrivInstrHC);
613 if (rc == VINF_SUCCESS)
614 {
615 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
616 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pPatch->patch.pPrivInstrGC, pPatch->patch.pPrivInstrHC, &opsize, szOutput, PATMREAD_RAWCODE);
617 Log(("Org patch jump: %s", szOutput));
618 }
619 }
620#endif
621
622 Log(("Nr of fixups %d\n", pPatch->patch.nrFixups));
623 delta = (RTRCINTPTR)pVM->patm.s.deltaReloc;
624
625 /*
626 * Apply fixups
627 */
628 PRELOCREC pRec = 0;
629 AVLPVKEY key = 0;
630
631 while (true)
632 {
633 /* Get the record that's closest from above */
634 pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true);
635 if (pRec == 0)
636 break;
637
638 key = (AVLPVKEY)(pRec->pRelocPos + 1); /* search for the next record during the next round. */
639
640 switch (pRec->uType)
641 {
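/* Absolute fixups whose source is in patch memory (or which have no source) are simply
 * shifted by the relocation delta. Fixups whose source is patched guest code carry an
 * absolute address inside the patched instruction; that instruction is re-read, compared
 * against the bytes we expect, and only then is the embedded address rewritten. */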
642 case FIXUP_ABSOLUTE:
643 Log(("Absolute fixup at %RRv %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
644 if (!pRec->pSource || PATMIsPatchGCAddr(pVM, pRec->pSource))
645 {
646 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
647 }
648 else
649 {
650 uint8_t curInstr[15];
651 uint8_t oldInstr[15];
652 Assert(pRec->pSource && pPatch->patch.cbPrivInstr <= 15);
653
654 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
655
656 memcpy(oldInstr, pPatch->patch.aPrivInstr, pPatch->patch.cbPrivInstr);
657 *(RTRCPTR *)&oldInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
658
659 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), curInstr, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPrivInstr);
660 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
661
662 pRec->pDest = (RTRCPTR)((RTRCUINTPTR)pRec->pDest + delta);
663
664 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
665 {
666 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
667
668 Log(("PATM: Patch page not present -> check later!\n"));
669 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
670 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
671 }
672 else
673 if (memcmp(curInstr, oldInstr, pPatch->patch.cbPrivInstr))
674 {
675 Log(("PATM: Patch was overwritten -> disabling patch!!\n"));
676 /*
677 * Disable patch; this is not a good solution
678 */
679 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
680 pPatch->patch.uState = PATCH_DISABLED;
681 }
682 else
683 if (RT_SUCCESS(rc))
684 {
685 *(RTRCPTR *)&curInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
686 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pRec->pSource, curInstr, pPatch->patch.cbPrivInstr);
687 AssertRC(rc);
688 }
689 }
690 break;
691
692 case FIXUP_REL_JMPTOPATCH:
693 {
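/* This is the relative jump in guest code that enters the patch block. The patch block
 * moved by 'delta', so the displacement stored in the guest jump must be recomputed and
 * written back, after verifying that our old jump is still in place. */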
694 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
695
696 if ( pPatch->patch.uState == PATCH_ENABLED
697 && (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE))
698 {
699 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
700 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
701 RTRCPTR pJumpOffGC;
702 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
703 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
704
705 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
706
707 Assert(pRec->pSource - pPatch->patch.cbPatchJump == pPatch->patch.pPrivInstrGC);
708#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
709 if (pPatch->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
710 {
711 Assert(pPatch->patch.flags & PATMFL_JUMP_CONFLICT);
712
713 pJumpOffGC = pPatch->patch.pPrivInstrGC + 2; //two byte opcode
714 oldJump[0] = pPatch->patch.aPrivInstr[0];
715 oldJump[1] = pPatch->patch.aPrivInstr[1];
716 *(RTRCUINTPTR *)&oldJump[2] = displOld;
717 }
718 else
719#endif
720 if (pPatch->patch.cbPatchJump == SIZEOF_NEARJUMP32)
721 {
722 pJumpOffGC = pPatch->patch.pPrivInstrGC + 1; //one byte opcode
723 oldJump[0] = 0xE9;
724 *(RTRCUINTPTR *)&oldJump[1] = displOld;
725 }
726 else
727 {
728 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->patch.cbPatchJump));
729 continue; //this should never happen!!
730 }
731 Assert(pPatch->patch.cbPatchJump <= sizeof(temp));
732
733 /*
734 * Read old patch jump and compare it to the one we previously installed
735 */
736 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPatchJump);
737 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
738
739 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
740 {
741 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
742
743 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
744 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
745 }
746 else
747 if (memcmp(temp, oldJump, pPatch->patch.cbPatchJump))
748 {
749 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
750 /*
751 * Disable patch; this is not a good solution
752 */
753 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
754 pPatch->patch.uState = PATCH_DISABLED;
755 }
756 else
757 if (RT_SUCCESS(rc))
758 {
759 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
760 AssertRC(rc);
761 }
762 else
763 {
764 AssertMsgFailed(("Unexpected error %d from MMR3PhysReadGCVirt\n", rc));
765 }
766 }
767 else
768 {
769 Log(("Skip the guest jump to patch code for this disabled patch %08X - %08X\n", pPatch->patch.pPrivInstrHC, pRec->pRelocPos));
770 }
771
772 pRec->pDest = pTarget;
773 break;
774 }
775
776 case FIXUP_REL_JMPTOGUEST:
777 {
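/* The mirror case of the one above: a relative jump inside patch code back to (unmoved)
 * guest code. Only the jump's own location changed, so just the displacement stored in
 * patch memory needs updating. */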
778 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
779 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
780
781 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
782 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
783 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
784 pRec->pSource = pSource;
785 break;
786 }
787
788 default:
789 AssertMsg(0, ("Invalid fixup type!!\n"));
790 return VERR_INVALID_PARAMETER;
791 }
792 }
793
794#ifdef LOG_ENABLED
795 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
796 {
797 /** @note pPrivInstrHC is probably not valid anymore */
798 rc = PGMPhysGCPtr2R3Ptr(VMMGetCpu0(pVM), pPatch->patch.pPrivInstrGC, (PRTR3PTR)&pPatch->patch.pPrivInstrHC);
799 if (rc == VINF_SUCCESS)
800 {
801 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
802 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pPatch->patch.pPrivInstrGC, pPatch->patch.pPrivInstrHC, &opsize, szOutput, PATMREAD_RAWCODE);
803 Log(("Rel patch jump: %s", szOutput));
804 }
805 }
806#endif
807 return 0;
808}
809
810/**
811 * #PF Handler callback for virtual access handler ranges.
812 *
813 * Important to realize that a physical page in a range can have aliases, and
814 * for ALL and WRITE handlers these will also trigger.
815 *
816 * @returns VINF_SUCCESS if the handler have carried out the operation.
817 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
818 * @param pVM VM Handle.
819 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
820 * @param pvPtr The HC mapping of that address.
821 * @param pvBuf What the guest is reading/writing.
822 * @param cbBuf How much it's reading/writing.
823 * @param enmAccessType The access type.
824 * @param pvUser User argument.
825 */
826DECLCALLBACK(int) patmVirtPageHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
827{
828 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
829 /** @todo could be the wrong virtual address (alias) */
830 pVM->patm.s.pvFaultMonitor = GCPtr;
831 PATMR3HandleMonitoredPage(pVM);
832 return VINF_PGM_HANDLER_DO_DEFAULT;
833}
834
835
836#ifdef VBOX_WITH_DEBUGGER
837/**
838 * Callback function for RTAvloU32DoWithAll
839 *
840 * Enables the patch that's being enumerated
841 *
842 * @returns 0 (continue enumeration).
843 * @param pNode Current node
844 * @param pVM The VM to operate on.
845 */
846static DECLCALLBACK(int) EnableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
847{
848 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
849
850 PATMR3EnablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
851 return 0;
852}
853#endif /* VBOX_WITH_DEBUGGER */
854
855
856#ifdef VBOX_WITH_DEBUGGER
857/**
858 * Callback function for RTAvloU32DoWithAll
859 *
860 * Disables the patch that's being enumerated
861 *
862 * @returns 0 (continue enumeration).
863 * @param pNode Current node
864 * @param pVM The VM to operate on.
865 */
866static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
867{
868 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
869
870 PATMR3DisablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
871 return 0;
872}
873#endif
874
875/**
876 * Returns the host context pointer and size of the patch memory block
877 *
878 * @returns Host context pointer to the patch memory block.
879 * @param pVM The VM to operate on.
880 * @param pcb Size of the patch memory block
881 */
882VMMR3DECL(void *) PATMR3QueryPatchMemHC(PVM pVM, uint32_t *pcb)
883{
884 if (pcb)
885 {
886 *pcb = pVM->patm.s.cbPatchMem;
887 }
888 return pVM->patm.s.pPatchMemHC;
889}
890
891
892/**
893 * Returns the guest context pointer and size of the patch memory block
894 *
895 * @returns Guest context (RC) pointer to the patch memory block.
896 * @param pVM The VM to operate on.
897 * @param pcb Size of the patch memory block
898 */
899VMMR3DECL(RTRCPTR) PATMR3QueryPatchMemGC(PVM pVM, uint32_t *pcb)
900{
901 if (pcb)
902 {
903 *pcb = pVM->patm.s.cbPatchMem;
904 }
905 return pVM->patm.s.pPatchMemGC;
906}
907
908
909/**
910 * Returns the host context pointer of the GC context structure
911 *
912 * @returns Host context pointer to the GC state structure.
913 * @param pVM The VM to operate on.
914 */
915VMMR3DECL(PPATMGCSTATE) PATMR3QueryGCStateHC(PVM pVM)
916{
917 return pVM->patm.s.pGCStateHC;
918}
919
920
921/**
922 * Checks whether the HC address is part of our patch region
923 *
924 * @returns true if the address lies within the patch memory block, false otherwise.
925 * @param pVM The VM to operate on.
926 * @param pAddrHC Host context address to check
927 */
928VMMR3DECL(bool) PATMR3IsPatchHCAddr(PVM pVM, R3PTRTYPE(uint8_t *) pAddrHC)
929{
930 return (pAddrHC >= pVM->patm.s.pPatchMemHC && pAddrHC < pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem) ? true : false;
931}
932
933
934/**
935 * Allows or disallows patching of privileged instructions executed by the guest OS
936 *
937 * @returns VBox status code.
938 * @param pVM The VM to operate on.
939 * @param fAllowPatching Allow/disallow patching
940 */
941VMMR3DECL(int) PATMR3AllowPatching(PVM pVM, uint32_t fAllowPatching)
942{
943 pVM->fPATMEnabled = (fAllowPatching) ? true : false;
944 return VINF_SUCCESS;
945}
946
947/**
948 * Convert a GC patch block pointer to a HC patch pointer
949 *
950 * @returns HC pointer or NULL if it's not a GC patch pointer
951 * @param pVM The VM to operate on.
952 * @param pAddrGC GC pointer
953 */
954VMMR3DECL(R3PTRTYPE(void *)) PATMR3GCPtrToHCPtr(PVM pVM, RTRCPTR pAddrGC)
955{
956 if (pVM->patm.s.pPatchMemGC <= pAddrGC && pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem > pAddrGC)
957 {
958 return pVM->patm.s.pPatchMemHC + (pAddrGC - pVM->patm.s.pPatchMemGC);
959 }
960 return NULL;
961}
962
963/**
964 * Query PATM state (enabled/disabled)
965 *
966 * @returns 0 - disabled, 1 - enabled
967 * @param pVM The VM to operate on.
968 */
969VMMR3DECL(int) PATMR3IsEnabled(PVM pVM)
970{
971 return pVM->fPATMEnabled;
972}
973
974
975/**
976 * Convert guest context address to host context pointer
977 *
978 * @returns Host context pointer or NULL in case of an error.
979 * @param pVM The VM to operate on.
980 * @param pPatch Patch block structure pointer
981 * @param pGCPtr Guest context pointer
982 *
985 */
986R3PTRTYPE(uint8_t *) PATMGCVirtToHCVirt(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pGCPtr)
987{
988 int rc;
989 R3PTRTYPE(uint8_t *) pHCPtr;
990 uint32_t offset;
991
992 if (PATMIsPatchGCAddr(pVM, pGCPtr))
993 {
994 return PATCHCODE_PTR_HC(pPatch) + (pGCPtr - PATCHCODE_PTR_GC(pPatch));
995 }
996
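/* Each patch keeps a one-entry cache (cacheRec) of the last guest page it translated; if
 * pGCPtr falls on that page the HC address comes straight from the cached page mapping,
 * otherwise PGM resolves it and the cache is refreshed below. */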
997 offset = pGCPtr & PAGE_OFFSET_MASK;
998 if (pPatch->cacheRec.pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
999 {
1000 return pPatch->cacheRec.pPatchLocStartHC + offset;
1001 }
1002
1003 rc = PGMPhysGCPtr2R3Ptr(VMMGetCpu0(pVM), pGCPtr, (void **)&pHCPtr);
1004 if (rc != VINF_SUCCESS)
1005 {
1006 AssertMsg(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("MMR3PhysGCVirt2HCVirtEx failed for %08X\n", pGCPtr));
1007 return NULL;
1008 }
1009////invalid? Assert(sizeof(R3PTRTYPE(uint8_t*)) == sizeof(uint32_t));
1010
1011 pPatch->cacheRec.pPatchLocStartHC = (R3PTRTYPE(uint8_t*))((RTHCUINTPTR)pHCPtr & PAGE_BASE_HC_MASK);
1012 pPatch->cacheRec.pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
1013 return pHCPtr;
1014}
1015
1016
1017/* Calculates and fills in all branch targets
1018 *
1019 * @returns VBox status code.
1020 * @param pVM The VM to operate on.
1021 * @param pPatch Current patch block pointer
1022 *
1023 */
1024static int patmr3SetBranchTargets(PVM pVM, PPATCHINFO pPatch)
1025{
1026 int32_t displ;
1027
1028 PJUMPREC pRec = 0;
1029 int nrJumpRecs = 0;
1030
1031 /*
1032 * Set all branch targets inside the patch block.
1033 * We remove all jump records as they are no longer needed afterwards.
1034 */
1035 while (true)
1036 {
1037 RCPTRTYPE(uint8_t *) pInstrGC;
1038 RCPTRTYPE(uint8_t *) pBranchTargetGC = 0;
1039
1040 pRec = (PJUMPREC)RTAvlPVRemoveBestFit(&pPatch->JumpTree, 0, true);
1041 if (pRec == 0)
1042 break;
1043
1044 nrJumpRecs++;
1045
1046 /* HC in patch block to GC in patch block. */
1047 pInstrGC = patmPatchHCPtr2PatchGCPtr(pVM, pRec->pJumpHC);
1048
1049 if (pRec->opcode == OP_CALL)
1050 {
1051 /* Special case: call function replacement patch from this patch block.
1052 */
1053 PPATMPATCHREC pFunctionRec = PATMQueryFunctionPatch(pVM, pRec->pTargetGC);
1054 if (!pFunctionRec)
1055 {
1056 int rc;
1057
1058 if (PATMR3HasBeenPatched(pVM, pRec->pTargetGC) == false)
1059 rc = PATMR3InstallPatch(pVM, pRec->pTargetGC, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
1060 else
1061 rc = VERR_PATCHING_REFUSED; /* exists as a normal patch; can't use it */
1062
1063 if (RT_FAILURE(rc))
1064 {
1065 uint8_t *pPatchHC;
1066 RTRCPTR pPatchGC;
1067 RTRCPTR pOrgInstrGC;
1068
1069 pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pInstrGC, 0);
1070 Assert(pOrgInstrGC);
1071
1072 /* Failure for some reason -> mark exit point with int 3. */
1073 Log(("Failed to install function replacement patch (at %x) for reason %Rrc\n", pOrgInstrGC, rc));
1074
1075 pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pOrgInstrGC);
1076 Assert(pPatchGC);
1077
1078 pPatchHC = pVM->patm.s.pPatchMemHC + (pPatchGC - pVM->patm.s.pPatchMemGC);
1079
1080 /* Set a breakpoint at the very beginning of the recompiled instruction */
1081 *pPatchHC = 0xCC;
1082
1083 continue;
1084 }
1085 }
1086 else
1087 {
1088 Log(("Patch block %RRv called as function\n", pFunctionRec->patch.pPrivInstrGC));
1089 pFunctionRec->patch.flags |= PATMFL_CODE_REFERENCED;
1090 }
1091
1092 pBranchTargetGC = PATMR3QueryPatchGCPtr(pVM, pRec->pTargetGC);
1093 }
1094 else
1095 {
1096 pBranchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pRec->pTargetGC);
1097 }
1098
1099 if (pBranchTargetGC == 0)
1100 {
1101 AssertMsgFailed(("patmr3SetBranchTargets: patmGuestGCPtrToPatchGCPtr failed for %08X\n", pRec->pTargetGC));
1102 return VERR_PATCHING_REFUSED;
1103 }
1104 /* Our jumps *always* have a dword displacement (to make things easier). */
1105 Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
1106 displ = pBranchTargetGC - (pInstrGC + pRec->offDispl + sizeof(RTRCPTR));
1107 *(RTRCPTR *)(pRec->pJumpHC + pRec->offDispl) = displ;
1108 Log(("Set branch target %d to %08X : %08x - (%08x + %d + %d)\n", nrJumpRecs, displ, pBranchTargetGC, pInstrGC, pRec->offDispl, sizeof(RTRCPTR)));
1109 }
1110 Assert(nrJumpRecs == pPatch->nrJumpRecs);
1111 Assert(pPatch->JumpTree == 0);
1112 return VINF_SUCCESS;
1113}
1114
1115/* Add an illegal instruction record
1116 *
1117 * @param pVM The VM to operate on.
1118 * @param pPatch Patch structure ptr
1119 * @param pInstrGC Guest context pointer to privileged instruction
1120 *
1121 */
1122static void patmAddIllegalInstrRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1123{
1124 PAVLPVNODECORE pRec;
1125
1126 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1127 Assert(pRec);
1128 pRec->Key = (AVLPVKEY)pInstrGC;
1129
1130 bool ret = RTAvlPVInsert(&pPatch->pTempInfo->IllegalInstrTree, pRec);
1131 Assert(ret); NOREF(ret);
1132 pPatch->pTempInfo->nrIllegalInstr++;
1133}
1134
1135static bool patmIsIllegalInstr(PPATCHINFO pPatch, RTRCPTR pInstrGC)
1136{
1137 PAVLPVNODECORE pRec;
1138
1139 pRec = RTAvlPVGet(&pPatch->pTempInfo->IllegalInstrTree, (AVLPVKEY)pInstrGC);
1140 if (pRec)
1141 return true;
1142 return false;
1143}
1144
1145/**
1146 * Add a patch to guest lookup record
1147 *
1148 * @param pVM The VM to operate on.
1149 * @param pPatch Patch structure ptr
1150 * @param pPatchInstrHC Host context pointer into the patch block
1151 * @param pInstrGC Guest context pointer to privileged instruction
1152 * @param enmType Lookup type
1153 * @param fDirty Dirty flag
1154 *
1155 */
1156 /** @note Be extremely careful with this function. Make absolutely sure the guest address is correct! (to avoid executing instructions twice!) */
1157void patmr3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC, PATM_LOOKUP_TYPE enmType, bool fDirty)
1158{
1159 bool ret;
1160 PRECPATCHTOGUEST pPatchToGuestRec;
1161 PRECGUESTTOPATCH pGuestToPatchRec;
1162 uint32_t PatchOffset = pPatchInstrHC - pVM->patm.s.pPatchMemHC; /* Offset in memory reserved for PATM. */
1163
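/* The mapping is kept in two AVL trees: Patch2GuestAddrTree keyed by the offset into patch
 * memory and, for PATM_LOOKUP_BOTHDIR records, Guest2PatchAddrTree keyed by the guest
 * address. Both record types are allocated together in a single heap block below. */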
1164 if (enmType == PATM_LOOKUP_PATCH2GUEST)
1165 {
1166 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1167 if (pPatchToGuestRec && pPatchToGuestRec->Core.Key == PatchOffset)
1168 return; /* already there */
1169
1170 Assert(!pPatchToGuestRec);
1171 }
1172#ifdef VBOX_STRICT
1173 else
1174 {
1175 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1176 Assert(!pPatchToGuestRec);
1177 }
1178#endif
1179
1180 pPatchToGuestRec = (PRECPATCHTOGUEST)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(RECPATCHTOGUEST) + sizeof(RECGUESTTOPATCH));
1181 Assert(pPatchToGuestRec);
1182 pPatchToGuestRec->Core.Key = PatchOffset;
1183 pPatchToGuestRec->pOrgInstrGC = pInstrGC;
1184 pPatchToGuestRec->enmType = enmType;
1185 pPatchToGuestRec->fDirty = fDirty;
1186
1187 ret = RTAvlU32Insert(&pPatch->Patch2GuestAddrTree, &pPatchToGuestRec->Core);
1188 Assert(ret);
1189
1190 /* GC to patch address */
1191 if (enmType == PATM_LOOKUP_BOTHDIR)
1192 {
1193 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
1194 if (!pGuestToPatchRec)
1195 {
1196 pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1197 pGuestToPatchRec->Core.Key = pInstrGC;
1198 pGuestToPatchRec->PatchOffset = PatchOffset;
1199
1200 ret = RTAvlU32Insert(&pPatch->Guest2PatchAddrTree, &pGuestToPatchRec->Core);
1201 Assert(ret);
1202 }
1203 }
1204
1205 pPatch->nrPatch2GuestRecs++;
1206}
1207
1208
1209/**
1210 * Removes a patch to guest lookup record
1211 *
1212 * @param pVM The VM to operate on.
1213 * @param pPatch Patch structure ptr
1214 * @param pPatchInstrGC Guest context pointer to patch block
1215 */
1216void patmr3RemoveP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pPatchInstrGC)
1217{
1218 PAVLU32NODECORE pNode;
1219 PAVLU32NODECORE pNode2;
1220 PRECPATCHTOGUEST pPatchToGuestRec;
1221 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
1222
1223 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1224 Assert(pPatchToGuestRec);
1225 if (pPatchToGuestRec)
1226 {
1227 if (pPatchToGuestRec->enmType == PATM_LOOKUP_BOTHDIR)
1228 {
1229 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1230
1231 Assert(pGuestToPatchRec->Core.Key);
1232 pNode2 = RTAvlU32Remove(&pPatch->Guest2PatchAddrTree, pGuestToPatchRec->Core.Key);
1233 Assert(pNode2);
1234 }
1235 pNode = RTAvlU32Remove(&pPatch->Patch2GuestAddrTree, pPatchToGuestRec->Core.Key);
1236 Assert(pNode);
1237
1238 MMR3HeapFree(pPatchToGuestRec);
1239 pPatch->nrPatch2GuestRecs--;
1240 }
1241}
1242
1243
1244/**
1245 * RTAvlPVDestroy callback.
1246 */
1247static DECLCALLBACK(int) patmEmptyTreePVCallback(PAVLPVNODECORE pNode, void *)
1248{
1249 MMR3HeapFree(pNode);
1250 return 0;
1251}
1252
1253/**
1254 * Empty the specified tree (PV tree, MMR3 heap)
1255 *
1256 * @param pVM The VM to operate on.
1257 * @param ppTree Tree to empty
1258 */
1259void patmEmptyTree(PVM pVM, PAVLPVNODECORE *ppTree)
1260{
1261 RTAvlPVDestroy(ppTree, patmEmptyTreePVCallback, NULL);
1262}
1263
1264
1265/**
1266 * RTAvlU32Destroy callback.
1267 */
1268static DECLCALLBACK(int) patmEmptyTreeU32Callback(PAVLU32NODECORE pNode, void *)
1269{
1270 MMR3HeapFree(pNode);
1271 return 0;
1272}
1273
1274/**
1275 * Empty the specified tree (U32 tree, MMR3 heap)
1276 *
1277 * @param pVM The VM to operate on.
1278 * @param ppTree Tree to empty
1279 */
1280void patmEmptyTreeU32(PVM pVM, PPAVLU32NODECORE ppTree)
1281{
1282 RTAvlU32Destroy(ppTree, patmEmptyTreeU32Callback, NULL);
1283}
1284
1285
1286/**
1287 * Analyses the instructions following the cli for compliance with our heuristics for cli & pushf
1288 *
1289 * @returns VBox status code.
1290 * @param pVM The VM to operate on.
1291 * @param pCpu CPU disassembly state
1292 * @param pInstrGC Guest context pointer to privileged instruction
1293 * @param pCurInstrGC Guest context pointer to the current instruction
1294 * @param pUserData User pointer (callback specific)
1295 *
1296 */
1297static int patmAnalyseBlockCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, void *pUserData)
1298{
1299 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
1300 bool fIllegalInstr = false;
1301
1302 //Preliminary heuristics:
1303 //- no call instructions without a fixed displacement between cli and sti/popf
1304 //- no jumps in the instructions following cli (4+ bytes; enough for the replacement jump (5 bytes))
1305 //- no nested pushf/cli
1306 //- sti/popf should be the (eventual) target of all branches
1307 //- no near or far returns; no int xx, no into
1308 //
1309 // Note: Later on we can impose less strict guidelines if the need arises
1310
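/* In this callback VINF_SUCCESS means "stop analysing this code path", VWRN_CONTINUE_ANALYSIS
 * means "keep disassembling", and VERR_PATCHING_REFUSED aborts the whole patch attempt;
 * the analysis driver invoking the callback presumably acts on these codes. */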
1311 /* Bail out if the patch gets too big. */
1312 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1313 {
1314 Log(("Code block too big (%x) for patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1315 fIllegalInstr = true;
1316 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1317 }
1318 else
1319 {
1320 /* No unconditional jumps or calls without fixed displacements. */
1321 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1322 && (pCpu->pCurInstr->opcode == OP_JMP || pCpu->pCurInstr->opcode == OP_CALL)
1323 )
1324 {
1325 Assert(pCpu->param1.size <= 4 || pCpu->param1.size == 6);
1326 if ( pCpu->param1.size == 6 /* far call/jmp */
1327 || (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1328 || (OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1329 )
1330 {
1331 fIllegalInstr = true;
1332 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1333 }
1334 }
1335
1336 /* An unconditional (short) jump right after a cli is a potential problem; we will overwrite whichever function comes afterwards */
1337 if (pPatch->opcode == OP_CLI && pCpu->pCurInstr->opcode == OP_JMP)
1338 {
1339 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC + pCpu->opsize < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1340 {
1341 Log(("Dangerous unconditional jump ends in our generated patch jump!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1342 /* We turn this one into a int 3 callable patch. */
1343 pPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
1344 }
1345 }
1346 else
1347 /* no nested pushfs just yet; nested cli is allowed for cli patches though. */
1348 if (pPatch->opcode == OP_PUSHF)
1349 {
1350 if (pCurInstrGC != pInstrGC && pCpu->pCurInstr->opcode == OP_PUSHF)
1351 {
1352 fIllegalInstr = true;
1353 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1354 }
1355 }
1356
1357 // no far returns
1358 if (pCpu->pCurInstr->opcode == OP_RETF)
1359 {
1360 pPatch->pTempInfo->nrRetInstr++;
1361 fIllegalInstr = true;
1362 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1363 }
1364 else
1365 // no int xx or into either
1366 if (pCpu->pCurInstr->opcode == OP_INT3 || pCpu->pCurInstr->opcode == OP_INT || pCpu->pCurInstr->opcode == OP_INTO)
1367 {
1368 fIllegalInstr = true;
1369 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1370 }
1371 }
1372
1373 pPatch->cbPatchBlockSize += pCpu->opsize;
1374
1375 /* Illegal instruction -> end of analysis phase for this code block */
1376 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1377 return VINF_SUCCESS;
1378
1379 /* Check for exit points. */
1380 switch (pCpu->pCurInstr->opcode)
1381 {
1382 case OP_SYSEXIT:
1383 return VINF_SUCCESS; /* duplicate it; will fault or emulated in GC. */
1384
1385 case OP_SYSENTER:
1386 case OP_ILLUD2:
1387 //This appears to be some kind of kernel panic in Linux 2.4; no point to analyse more
1388 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1389 return VINF_SUCCESS;
1390
1391 case OP_STI:
1392 case OP_POPF:
1393 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION)));
1394 /* If our exit point lies within the generated patch jump, then we have to refuse!! */
1395 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1396 {
1397 Log(("Exit point within patch jump itself!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1398 return VERR_PATCHING_REFUSED;
1399 }
1400 if (pPatch->opcode == OP_PUSHF)
1401 {
1402 if (pCpu->pCurInstr->opcode == OP_POPF)
1403 {
1404 if (pPatch->cbPatchBlockSize >= SIZEOF_NEARJUMP32)
1405 return VINF_SUCCESS;
1406
1407 /* Or else we need to duplicate more instructions, because we can't jump back yet! */
1408 Log(("WARNING: End of block reached, but we need to duplicate some extra instruction to avoid a conflict with the patch jump\n"));
1409 pPatch->flags |= PATMFL_CHECK_SIZE;
1410 }
1411 break; //sti doesn't mark the end of a pushf block; only popf does
1412 }
1413 //else no break
1414 case OP_RETN: /* exit point for function replacement */
1415 return VINF_SUCCESS;
1416
1417 case OP_IRET:
1418 return VINF_SUCCESS; /* exit point */
1419
1420 case OP_CPUID:
1421 case OP_CALL:
1422 case OP_JMP:
1423 break;
1424
1425 default:
1426 if (pCpu->pCurInstr->optype & (OPTYPE_PRIVILEGED_NOTRAP))
1427 {
1428 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1429 return VINF_SUCCESS; /* exit point */
1430 }
1431 break;
1432 }
1433
1434 // If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
1435 if ((pPatch->flags & PATMFL_CHECK_SIZE) && pPatch->cbPatchBlockSize > SIZEOF_NEARJUMP32 && !(pCpu->pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW))
1436 {
1437 // The end marker for this kind of patch is any instruction at a location outside our patch jump
1438 Log(("End of block at %RRv size %d\n", pCurInstrGC, pCpu->opsize));
1439 return VINF_SUCCESS;
1440 }
1441
1442 return VWRN_CONTINUE_ANALYSIS;
1443}
1444
1445/**
1446 * Analyses the instructions inside a function for compliance
1447 *
1448 * @returns VBox status code.
1449 * @param pVM The VM to operate on.
1450 * @param pCpu CPU disassembly state
1451 * @param pInstrGC Guest context pointer to privileged instruction
1452 * @param pCurInstrGC Guest context pointer to the current instruction
1453 * @param pUserData User pointer (callback specific)
1454 *
1455 */
1456static int patmAnalyseFunctionCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, void *pUserData)
1457{
1458 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
1459 bool fIllegalInstr = false;
1460
1461 //Preliminary heuristics:
1462 //- no call instructions
1463 //- ret ends a block
1464
1465 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
1466
1467 // bail out if the patch gets too big
1468 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1469 {
1470 Log(("Code block too big (%x) for function patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1471 fIllegalInstr = true;
1472 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1473 }
1474 else
1475 {
1476        // no unconditional jumps or calls without fixed displacements
1477 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1478 && (pCpu->pCurInstr->opcode == OP_JMP || pCpu->pCurInstr->opcode == OP_CALL)
1479 )
1480 {
1481 Assert(pCpu->param1.size <= 4 || pCpu->param1.size == 6);
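            /* Operand size 6 indicates a far 16:32 pointer (selector + offset). Far branches, plain
               calls without PATMFL_SUPPORT_CALLS and indirect forms without PATMFL_SUPPORT_INDIRECT_CALLS
               are all treated as illegal for function patches (see the check below). */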
1482 if ( pCpu->param1.size == 6 /* far call/jmp */
1483 || (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1484 || (OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1485 )
1486 {
1487 fIllegalInstr = true;
1488 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1489 }
1490 }
1491 else /* no far returns */
1492 if (pCpu->pCurInstr->opcode == OP_RETF)
1493 {
1494 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1495 fIllegalInstr = true;
1496 }
1497 else /* no int xx or into either */
1498 if (pCpu->pCurInstr->opcode == OP_INT3 || pCpu->pCurInstr->opcode == OP_INT || pCpu->pCurInstr->opcode == OP_INTO)
1499 {
1500 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1501 fIllegalInstr = true;
1502 }
1503
1504 #if 0
1505 ///@todo we can handle certain in/out and privileged instructions in the guest context
1506 if (pCpu->pCurInstr->optype & OPTYPE_PRIVILEGED && pCpu->pCurInstr->opcode != OP_STI)
1507 {
1508 Log(("Illegal instructions for function patch!!\n"));
1509 return VERR_PATCHING_REFUSED;
1510 }
1511 #endif
1512 }
1513
1514 pPatch->cbPatchBlockSize += pCpu->opsize;
1515
1516 /* Illegal instruction -> end of analysis phase for this code block */
1517 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1518 {
1519 return VINF_SUCCESS;
1520 }
1521
1522 // Check for exit points
1523 switch (pCpu->pCurInstr->opcode)
1524 {
1525 case OP_ILLUD2:
1526            //This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further
1527 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1528 return VINF_SUCCESS;
1529
1530 case OP_IRET:
1531 case OP_SYSEXIT: /* will fault or emulated in GC */
1532 case OP_RETN:
1533 return VINF_SUCCESS;
1534
1535 case OP_POPF:
1536 case OP_STI:
1537 return VWRN_CONTINUE_ANALYSIS;
1538 default:
1539 if (pCpu->pCurInstr->optype & (OPTYPE_PRIVILEGED_NOTRAP))
1540 {
1541 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1542 return VINF_SUCCESS; /* exit point */
1543 }
1544 return VWRN_CONTINUE_ANALYSIS;
1545 }
1546
1547 return VWRN_CONTINUE_ANALYSIS;
1548}
1549
1550/**
1551 * Recompiles the instructions in a code block
1552 *
1553 * @returns VBox status code.
1554 * @param pVM The VM to operate on.
1555 * @param pCpu CPU disassembly state
1556 * @param pInstrGC Guest context pointer to privileged instruction
1557 * @param pCurInstrGC Guest context pointer to the current instruction
1558 * @param pUserData User pointer (callback specific)
1559 *
1560 */
1561static int patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, void *pUserData)
1562{
1563 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
1564 int rc = VINF_SUCCESS;
1565 bool fInhibitIRQInstr = false; /* did the instruction cause PATMFL_INHIBITIRQS to be set? */
1566
1567 LogFlow(("patmRecompileCallback %RRv %RRv\n", pInstrGC, pCurInstrGC));
1568
1569 if ( patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pCurInstrGC) != 0
1570 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT)) /* do not do this when the next instruction *must* be executed! */
1571 {
1572 /*
1573 * Been there, done that; so insert a jump (we don't want to duplicate code)
1574 * no need to record this instruction as it's glue code that never crashes (it had better not!)
1575 */
1576 Log(("patmRecompileCallback: jump to code we've recompiled before %RRv!\n", pCurInstrGC));
1577 return patmPatchGenRelJump(pVM, pPatch, pCurInstrGC, OP_JMP, !!(pCpu->prefix & PREFIX_OPSIZE));
1578 }
1579
1580 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1581 {
1582 rc = patmAnalyseFunctionCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pUserData);
1583 }
1584 else
1585 rc = patmAnalyseBlockCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pUserData);
1586
1587 if (RT_FAILURE(rc))
1588 return rc;
1589
1590 /** @note Never do a direct return unless a failure is encountered! */
1591
1592 /* Clear recompilation of next instruction flag; we are doing that right here. */
1593 if (pPatch->flags & PATMFL_RECOMPILE_NEXT)
1594 pPatch->flags &= ~PATMFL_RECOMPILE_NEXT;
1595
1596 /* Add lookup record for patch to guest address translation */
1597 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
1598
1599 /* Update lowest and highest instruction address for this patch */
1600 if (pCurInstrGC < pPatch->pInstrGCLowest)
1601 pPatch->pInstrGCLowest = pCurInstrGC;
1602 else
1603 if (pCurInstrGC > pPatch->pInstrGCHighest)
1604 pPatch->pInstrGCHighest = pCurInstrGC + pCpu->opsize;
1605
1606 /* Illegal instruction -> end of recompile phase for this code block. */
1607 if (patmIsIllegalInstr(pPatch, pCurInstrGC))
1608 {
1609 Log(("Illegal instruction at %RRv -> mark with int 3\n", pCurInstrGC));
1610 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1611 goto end;
1612 }
1613
1614 /* For our first attempt, we'll handle only simple relative jumps (immediate offset coded in instruction).
1615 * Indirect calls are handled below.
1616 */
1617 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1618 && (pCpu->pCurInstr->opcode != OP_CALL || (pPatch->flags & PATMFL_SUPPORT_CALLS))
1619 && (OP_PARM_VTYPE(pCpu->pCurInstr->param1) == OP_PARM_J))
1620 {
1621 RCPTRTYPE(uint8_t *) pTargetGC = PATMResolveBranch(pCpu, pCurInstrGC);
1622 if (pTargetGC == 0)
1623 {
1624 Log(("We don't support far jumps here!! (%08X)\n", pCpu->param1.flags));
1625 return VERR_PATCHING_REFUSED;
1626 }
1627
1628 if (pCpu->pCurInstr->opcode == OP_CALL)
1629 {
1630 Assert(!PATMIsPatchGCAddr(pVM, pTargetGC));
1631 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, pTargetGC, false);
1632 if (RT_FAILURE(rc))
1633 goto end;
1634 }
1635 else
1636 rc = patmPatchGenRelJump(pVM, pPatch, pTargetGC, pCpu->pCurInstr->opcode, !!(pCpu->prefix & PREFIX_OPSIZE));
1637
1638 if (RT_SUCCESS(rc))
1639 rc = VWRN_CONTINUE_RECOMPILE;
1640
1641 goto end;
1642 }
1643
1644 switch (pCpu->pCurInstr->opcode)
1645 {
1646 case OP_CLI:
1647 {
1648 /* If a cli is found while duplicating instructions for another patch, then it's of vital importance to continue
1649 * until we've found the proper exit point(s).
1650 */
1651 if ( pCurInstrGC != pInstrGC
1652 && !(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1653 )
1654 {
1655 Log(("cli instruction found in other instruction patch block; force it to continue & find an exit point\n"));
1656 pPatch->flags &= ~(PATMFL_CHECK_SIZE | PATMFL_SINGLE_INSTRUCTION);
1657 }
1658 /* Set by irq inhibition; no longer valid now. */
1659 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1660
1661 rc = patmPatchGenCli(pVM, pPatch);
1662 if (RT_SUCCESS(rc))
1663 rc = VWRN_CONTINUE_RECOMPILE;
1664 break;
1665 }
1666
1667 case OP_MOV:
1668 if (pCpu->pCurInstr->optype & OPTYPE_POTENTIALLY_DANGEROUS)
1669 {
1670 /* mov ss, src? */
1671 if ( (pCpu->param1.flags & USE_REG_SEG)
1672 && (pCpu->param1.base.reg_seg == DIS_SELREG_SS))
1673 {
1674 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1675 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1676 /** @todo this could cause a fault (ring 0 selector being loaded in ring 1) */
1677 }
1678#if 0 /* necessary for Haiku */
1679 else
1680 if ( (pCpu->param2.flags & USE_REG_SEG)
1681 && (pCpu->param2.base.reg_seg == USE_REG_SS)
1682 && (pCpu->param1.flags & (USE_REG_GEN32|USE_REG_GEN16))) /** @todo memory operand must in theory be handled too */
1683 {
1684 /* mov GPR, ss */
1685 rc = patmPatchGenMovFromSS(pVM, pPatch, pCpu, pCurInstrGC);
1686 if (RT_SUCCESS(rc))
1687 rc = VWRN_CONTINUE_RECOMPILE;
1688 break;
1689 }
1690#endif
1691 }
1692 goto duplicate_instr;
1693
1694 case OP_POP:
1695 if (pCpu->pCurInstr->param1 == OP_PARM_REG_SS)
1696 {
1697 Assert(pCpu->pCurInstr->optype & OPTYPE_INHIBIT_IRQS);
1698
1699            Log(("Force recompilation of next instruction for OP_POP at %RRv\n", pCurInstrGC));
1700 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1701 }
1702 goto duplicate_instr;
1703
1704 case OP_STI:
1705 {
1706 RTRCPTR pNextInstrGC = 0; /* by default no inhibit irq */
1707
1708 /** In a sequence of instructions that inhibit irqs, only the first one actually inhibits irqs. */
1709 if (!(pPatch->flags & PATMFL_INHIBIT_IRQS))
1710 {
1711 pPatch->flags |= PATMFL_INHIBIT_IRQS | PATMFL_GENERATE_JUMPTOGUEST;
1712 fInhibitIRQInstr = true;
1713 pNextInstrGC = pCurInstrGC + pCpu->opsize;
1714 Log(("Inhibit irqs for instruction OP_STI at %RRv\n", pCurInstrGC));
1715 }
1716 rc = patmPatchGenSti(pVM, pPatch, pCurInstrGC, pNextInstrGC);
1717
1718 if (RT_SUCCESS(rc))
1719 {
1720 DISCPUSTATE cpu = *pCpu;
1721 unsigned opsize;
1722 int disret;
1723 RCPTRTYPE(uint8_t *) pNextInstrGC, pReturnInstrGC;
1724 R3PTRTYPE(uint8_t *) pNextInstrHC;
1725
1726 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1727
1728 pNextInstrGC = pCurInstrGC + pCpu->opsize;
1729 pNextInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pNextInstrGC);
1730 if (pNextInstrHC == NULL)
1731 {
1732 AssertFailed();
1733 return VERR_PATCHING_REFUSED;
1734 }
1735
1736 // Disassemble the next instruction
1737 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pNextInstrGC, pNextInstrHC, &opsize, NULL);
1738 if (disret == false)
1739 {
1740 AssertMsgFailed(("STI: Disassembly failed (probably page not present) -> return to caller\n"));
1741 return VERR_PATCHING_REFUSED;
1742 }
1743 pReturnInstrGC = pNextInstrGC + opsize;
1744
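                /* The instruction following the sti must lie either before the patched instruction or
                   beyond the 5-byte patch jump; otherwise the jump back to guest code would land inside
                   the bytes overwritten by that jump. Function duplication patches install no guest
                   jump, so they are exempt from this check. */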
1745 if ( (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1746 || pReturnInstrGC <= pInstrGC
1747 || pReturnInstrGC - pInstrGC >= SIZEOF_NEARJUMP32
1748 )
1749 {
1750 /* Not an exit point for function duplication patches */
1751 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
1752 && RT_SUCCESS(rc))
1753 {
1754 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST; /* Don't generate a jump back */
1755 rc = VWRN_CONTINUE_RECOMPILE;
1756 }
1757 else
1758 rc = VINF_SUCCESS; //exit point
1759 }
1760 else {
1761 Log(("PATM: sti occurred too soon; refusing patch!\n"));
1762 rc = VERR_PATCHING_REFUSED; //not allowed!!
1763 }
1764 }
1765 break;
1766 }
1767
1768 case OP_POPF:
1769 {
1770 bool fGenerateJmpBack = (pCurInstrGC + pCpu->opsize - pInstrGC >= SIZEOF_NEARJUMP32);
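            /* Only generate a jump back to guest code if the instruction after the popf lies beyond
               the 5 bytes that the patch jump overwrites in the guest. */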
1771
1772 /* Not an exit point for IDT handler or function replacement patches */
1773 /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
1774 if (pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_DUPLICATE_FUNCTION))
1775 fGenerateJmpBack = false;
1776
1777 rc = patmPatchGenPopf(pVM, pPatch, pCurInstrGC + pCpu->opsize, !!(pCpu->prefix & PREFIX_OPSIZE), fGenerateJmpBack);
1778 if (RT_SUCCESS(rc))
1779 {
1780 if (fGenerateJmpBack == false)
1781 {
1782 /* Not an exit point for IDT handler or function replacement patches */
1783 rc = VWRN_CONTINUE_RECOMPILE;
1784 }
1785 else
1786 {
1787 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1788 rc = VINF_SUCCESS; /* exit point! */
1789 }
1790 }
1791 break;
1792 }
1793
1794 case OP_PUSHF:
1795 rc = patmPatchGenPushf(pVM, pPatch, !!(pCpu->prefix & PREFIX_OPSIZE));
1796 if (RT_SUCCESS(rc))
1797 rc = VWRN_CONTINUE_RECOMPILE;
1798 break;
1799
1800 case OP_PUSH:
1801 if (pCpu->pCurInstr->param1 == OP_PARM_REG_CS)
1802 {
1803 rc = patmPatchGenPushCS(pVM, pPatch);
1804 if (RT_SUCCESS(rc))
1805 rc = VWRN_CONTINUE_RECOMPILE;
1806 break;
1807 }
1808 goto duplicate_instr;
1809
1810 case OP_IRET:
1811 Log(("IRET at %RRv\n", pCurInstrGC));
1812 rc = patmPatchGenIret(pVM, pPatch, pCurInstrGC, !!(pCpu->prefix & PREFIX_OPSIZE));
1813 if (RT_SUCCESS(rc))
1814 {
1815 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1816 rc = VINF_SUCCESS; /* exit point by definition */
1817 }
1818 break;
1819
1820 case OP_ILLUD2:
1821 /* This appears to be some kind of kernel panic in Linux 2.4; no point to continue */
1822 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1823 if (RT_SUCCESS(rc))
1824 rc = VINF_SUCCESS; /* exit point by definition */
1825 Log(("Illegal opcode (0xf 0xb)\n"));
1826 break;
1827
1828 case OP_CPUID:
1829 rc = patmPatchGenCpuid(pVM, pPatch, pCurInstrGC);
1830 if (RT_SUCCESS(rc))
1831 rc = VWRN_CONTINUE_RECOMPILE;
1832 break;
1833
1834 case OP_STR:
1835 case OP_SLDT:
1836 rc = patmPatchGenSldtStr(pVM, pPatch, pCpu, pCurInstrGC);
1837 if (RT_SUCCESS(rc))
1838 rc = VWRN_CONTINUE_RECOMPILE;
1839 break;
1840
1841 case OP_SGDT:
1842 case OP_SIDT:
1843 rc = patmPatchGenSxDT(pVM, pPatch, pCpu, pCurInstrGC);
1844 if (RT_SUCCESS(rc))
1845 rc = VWRN_CONTINUE_RECOMPILE;
1846 break;
1847
1848 case OP_RETN:
1849 /* retn is an exit point for function patches */
1850 rc = patmPatchGenRet(pVM, pPatch, pCpu, pCurInstrGC);
1851 if (RT_SUCCESS(rc))
1852 rc = VINF_SUCCESS; /* exit point by definition */
1853 break;
1854
1855 case OP_SYSEXIT:
1856 /* Duplicate it, so it can be emulated in GC (or fault). */
1857 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
1858 if (RT_SUCCESS(rc))
1859 rc = VINF_SUCCESS; /* exit point by definition */
1860 break;
1861
1862 case OP_CALL:
1863 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
1864 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
1865 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
1866 */
1867 Assert(pCpu->param1.size == 4 || pCpu->param1.size == 6);
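            /* Only near calls (32-bit operand, param1.size == 4) are handled here; a far 16:32 call
               (operand size 6) falls through to gen_illegal_instr. */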
1868 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->param1.size == 4 /* no far calls! */)
1869 {
1870 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, (RTRCPTR)0xDEADBEEF, true);
1871 if (RT_SUCCESS(rc))
1872 {
1873 rc = VWRN_CONTINUE_RECOMPILE;
1874 }
1875 break;
1876 }
1877 goto gen_illegal_instr;
1878
1879 case OP_JMP:
1880 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
1881 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
1882 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
1883 */
1884 Assert(pCpu->param1.size == 4 || pCpu->param1.size == 6);
1885 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->param1.size == 4 /* no far jumps! */)
1886 {
1887 rc = patmPatchGenJump(pVM, pPatch, pCpu, pCurInstrGC);
1888 if (RT_SUCCESS(rc))
1889 rc = VINF_SUCCESS; /* end of branch */
1890 break;
1891 }
1892 goto gen_illegal_instr;
1893
1894 case OP_INT3:
1895 case OP_INT:
1896 case OP_INTO:
1897 goto gen_illegal_instr;
1898
1899 case OP_MOV_DR:
1900 /** @note: currently we let DRx writes cause a trap d; our trap handler will decide to interpret it or not. */
1901 if (pCpu->pCurInstr->param2 == OP_PARM_Dd)
1902 {
1903 rc = patmPatchGenMovDebug(pVM, pPatch, pCpu);
1904 if (RT_SUCCESS(rc))
1905 rc = VWRN_CONTINUE_RECOMPILE;
1906 break;
1907 }
1908 goto duplicate_instr;
1909
1910 case OP_MOV_CR:
1911 /** @note: currently we let CRx writes cause a trap d; our trap handler will decide to interpret it or not. */
1912 if (pCpu->pCurInstr->param2 == OP_PARM_Cd)
1913 {
1914 rc = patmPatchGenMovControl(pVM, pPatch, pCpu);
1915 if (RT_SUCCESS(rc))
1916 rc = VWRN_CONTINUE_RECOMPILE;
1917 break;
1918 }
1919 goto duplicate_instr;
1920
1921 default:
1922 if (pCpu->pCurInstr->optype & (OPTYPE_CONTROLFLOW | OPTYPE_PRIVILEGED_NOTRAP))
1923 {
1924gen_illegal_instr:
1925 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1926 if (RT_SUCCESS(rc))
1927 rc = VINF_SUCCESS; /* exit point by definition */
1928 }
1929 else
1930 {
1931duplicate_instr:
1932 Log(("patmPatchGenDuplicate\n"));
1933 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
1934 if (RT_SUCCESS(rc))
1935 rc = VWRN_CONTINUE_RECOMPILE;
1936 }
1937 break;
1938 }
1939
1940end:
1941
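    /* If a *previous* instruction (e.g. sti) set PATMFL_INHIBIT_IRQS, the current instruction is the
       one executing in its interrupt shadow; clear the flag now and, if requested, generate the jump
       back to the guest. */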
1942 if ( !fInhibitIRQInstr
1943 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
1944 {
1945 int rc2;
1946 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->opsize;
1947
1948 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
1949 Log(("Clear inhibit IRQ flag at %RRv\n", pCurInstrGC));
1950 if (pPatch->flags & PATMFL_GENERATE_JUMPTOGUEST)
1951 {
1952 Log(("patmRecompileCallback: generate jump back to guest (%RRv) after fused instruction\n", pNextInstrGC));
1953
1954 rc2 = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
1955 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1956 rc = VINF_SUCCESS; /* end of the line */
1957 }
1958 else
1959 {
1960 rc2 = patmPatchGenClearInhibitIRQ(pVM, pPatch, pNextInstrGC);
1961 }
1962 if (RT_FAILURE(rc2))
1963 rc = rc2;
1964 }
1965
1966 if (RT_SUCCESS(rc))
1967 {
1968        // If this is a single-instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
1969 if ( (pPatch->flags & PATMFL_CHECK_SIZE)
1970 && pCurInstrGC + pCpu->opsize - pInstrGC >= SIZEOF_NEARJUMP32
1971 && !(pCpu->pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW)
1972 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT) /* do not do this when the next instruction *must* be executed! */
1973 )
1974 {
1975 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->opsize;
1976
1977 // The end marker for this kind of patch is any instruction at a location outside our patch jump
1978 Log(("patmRecompileCallback: end found for single instruction patch at %RRv opsize %d\n", pNextInstrGC, pCpu->opsize));
1979
1980 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC);
1981 AssertRC(rc);
1982 }
1983 }
1984 return rc;
1985}
1986
1987
1988#ifdef LOG_ENABLED
1989
1990/** Adds a disasm jump record (temporary; prevents duplicate analysis)
1991 *
1992 * @param pVM The VM to operate on.
1993 * @param pPatch Patch structure ptr
1994 * @param pInstrGC Guest context pointer to privileged instruction
1995 *
1996 */
1997static void patmPatchAddDisasmJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1998{
1999 PAVLPVNODECORE pRec;
2000
2001 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
2002 Assert(pRec);
2003 pRec->Key = (AVLPVKEY)pInstrGC;
2004
2005 int ret = RTAvlPVInsert(&pPatch->pTempInfo->DisasmJumpTree, pRec);
2006 Assert(ret);
2007}
2008
2009/**
2010 * Checks if jump target has been analysed before.
2011 *
2012 * @returns true if the jump target was analysed before, false otherwise.
2013 * @param pPatch Patch struct
2014 * @param pInstrGC Jump target
2015 *
2016 */
2017static bool patmIsKnownDisasmJump(PPATCHINFO pPatch, RTRCPTR pInstrGC)
2018{
2019 PAVLPVNODECORE pRec;
2020
2021 pRec = RTAvlPVGet(&pPatch->pTempInfo->DisasmJumpTree, (AVLPVKEY)pInstrGC);
2022 if (pRec)
2023 return true;
2024 return false;
2025}
2026
2027/**
2028 * For proper disassembly of the final patch block
2029 *
2030 * @returns VBox status code.
2031 * @param pVM The VM to operate on.
2032 * @param pCpu CPU disassembly state
2033 * @param pInstrGC Guest context pointer to privileged instruction
2034 * @param pCurInstrGC Guest context pointer to the current instruction
2035 * @param pUserData User pointer (callback specific)
2036 *
2037 */
2038int patmr3DisasmCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, void *pUserData)
2039{
2040 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
2041
2042 if (pCpu->pCurInstr->opcode == OP_INT3)
2043 {
2044 /* Could be an int3 inserted in a call patch. Check to be sure */
2045 DISCPUSTATE cpu;
2046 uint8_t *pOrgJumpHC;
2047 RTRCPTR pOrgJumpGC;
2048 uint32_t dummy;
2049
2050 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2051 pOrgJumpGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2052 pOrgJumpHC = PATMGCVirtToHCVirt(pVM, pPatch, pOrgJumpGC);
2053
2054 bool disret = PATMR3DISInstr(pVM, pPatch, &cpu, pOrgJumpGC, pOrgJumpHC, &dummy, NULL);
2055 if (!disret || cpu.pCurInstr->opcode != OP_CALL || cpu.param1.size != 4 /* only near calls */)
2056 return VINF_SUCCESS;
2057
2058 return VWRN_CONTINUE_ANALYSIS;
2059 }
2060
2061 if ( pCpu->pCurInstr->opcode == OP_ILLUD2
2062 && PATMIsPatchGCAddr(pVM, pCurInstrGC))
2063 {
2064        /* the indirect call patch contains a 0xF 0xB illegal instruction to call for assistance; check for this and continue */
2065 return VWRN_CONTINUE_ANALYSIS;
2066 }
2067
2068 if ( (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
2069 || pCpu->pCurInstr->opcode == OP_INT
2070 || pCpu->pCurInstr->opcode == OP_IRET
2071 || pCpu->pCurInstr->opcode == OP_RETN
2072 || pCpu->pCurInstr->opcode == OP_RETF
2073 )
2074 {
2075 return VINF_SUCCESS;
2076 }
2077
2078 if (pCpu->pCurInstr->opcode == OP_ILLUD2)
2079 return VINF_SUCCESS;
2080
2081 return VWRN_CONTINUE_ANALYSIS;
2082}
2083
2084
2085/**
2086 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2087 *
2088 * @returns VBox status code.
2089 * @param pVM The VM to operate on.
2090 * @param pInstrGC Guest context pointer to the initial privileged instruction
2091 * @param pCurInstrGC Guest context pointer to the current instruction
2092 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2093 * @param pUserData User pointer (callback specific)
2094 *
2095 */
2096int patmr3DisasmCode(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, void *pUserData)
2097{
2098 DISCPUSTATE cpu;
2099 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
2100 int rc = VWRN_CONTINUE_ANALYSIS;
2101 uint32_t opsize, delta;
2102 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2103 bool disret;
2104 char szOutput[256];
2105
2106 Assert(pCurInstrHC != PATCHCODE_PTR_HC(pPatch) || pPatch->pTempInfo->DisasmJumpTree == 0);
2107
2108 /* We need this to determine branch targets (and for disassembling). */
2109 delta = pVM->patm.s.pPatchMemGC - (uintptr_t)pVM->patm.s.pPatchMemHC;
2110
2111 while(rc == VWRN_CONTINUE_ANALYSIS)
2112 {
2113 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2114
2115 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pCurInstrGC);
2116 if (pCurInstrHC == NULL)
2117 {
2118 rc = VERR_PATCHING_REFUSED;
2119 goto end;
2120 }
2121
2122 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, szOutput, PATMREAD_RAWCODE);
2123 if (PATMIsPatchGCAddr(pVM, pCurInstrGC))
2124 {
2125 RTRCPTR pOrgInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2126
2127 if (pOrgInstrGC != pPatch->pTempInfo->pLastDisasmInstrGC)
2128 Log(("DIS %RRv<-%s", pOrgInstrGC, szOutput));
2129 else
2130 Log(("DIS %s", szOutput));
2131
2132 pPatch->pTempInfo->pLastDisasmInstrGC = pOrgInstrGC;
2133 if (patmIsIllegalInstr(pPatch, pOrgInstrGC))
2134 {
2135 rc = VINF_SUCCESS;
2136 goto end;
2137 }
2138 }
2139 else
2140 Log(("DIS: %s", szOutput));
2141
2142 if (disret == false)
2143 {
2144 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2145 rc = VINF_SUCCESS;
2146 goto end;
2147 }
2148
2149 rc = pfnPATMR3Disasm(pVM, &cpu, pInstrGC, pCurInstrGC, pUserData);
2150 if (rc != VWRN_CONTINUE_ANALYSIS) {
2151 break; //done!
2152 }
2153
2154 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction) */
2155 if ( (cpu.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2156 && (OP_PARM_VTYPE(cpu.pCurInstr->param1) == OP_PARM_J)
2157 && cpu.pCurInstr->opcode != OP_CALL /* complete functions are replaced; don't bother here. */
2158 )
2159 {
2160 RTRCPTR pTargetGC = PATMResolveBranch(&cpu, pCurInstrGC);
2161 RTRCPTR pOrgTargetGC;
2162
2163 if (pTargetGC == 0)
2164 {
2165 Log(("We don't support far jumps here!! (%08X)\n", cpu.param1.flags));
2166 rc = VERR_PATCHING_REFUSED;
2167 break;
2168 }
2169
2170 if (!PATMIsPatchGCAddr(pVM, pTargetGC))
2171 {
2172 //jump back to guest code
2173 rc = VINF_SUCCESS;
2174 goto end;
2175 }
2176 pOrgTargetGC = PATMR3PatchToGCPtr(pVM, pTargetGC, 0);
2177
2178 if (patmIsCommonIDTHandlerPatch(pVM, pOrgTargetGC))
2179 {
2180 rc = VINF_SUCCESS;
2181 goto end;
2182 }
2183
2184 if (patmIsKnownDisasmJump(pPatch, pTargetGC) == false)
2185 {
2186 /* New jump, let's check it. */
2187 patmPatchAddDisasmJump(pVM, pPatch, pTargetGC);
2188
2189 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2190 rc = patmr3DisasmCode(pVM, pInstrGC, pTargetGC, pfnPATMR3Disasm, pUserData);
2191 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2192
2193 if (rc != VINF_SUCCESS) {
2194 break; //done!
2195 }
2196 }
2197 if (cpu.pCurInstr->opcode == OP_JMP)
2198 {
2199 /* Unconditional jump; return to caller. */
2200 rc = VINF_SUCCESS;
2201 goto end;
2202 }
2203
2204 rc = VWRN_CONTINUE_ANALYSIS;
2205 }
2206 pCurInstrGC += opsize;
2207 }
2208end:
2209 return rc;
2210}
2211
2212/**
2213 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2214 *
2215 * @returns VBox status code.
2216 * @param pVM The VM to operate on.
2217 * @param pInstrGC Guest context pointer to the initial privileged instruction
2218 * @param pCurInstrGC Guest context pointer to the current instruction
2219 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2220 * @param pUserData User pointer (callback specific)
2221 *
2222 */
2223int patmr3DisasmCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, void *pUserData)
2224{
2225 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
2226
2227 int rc = patmr3DisasmCode(pVM, pInstrGC, pCurInstrGC, pfnPATMR3Disasm, pUserData);
2228 /* Free all disasm jump records. */
2229 patmEmptyTree(pVM, &pPatch->pTempInfo->DisasmJumpTree);
2230 return rc;
2231}
2232
2233#endif /* LOG_ENABLED */
2234
2235/**
2236 * Detects if the specified address falls within a 5-byte jump generated for an active patch.
2237 * If so, this patch is permanently disabled.
2238 *
2239 * @param pVM The VM to operate on.
2240 * @param pInstrGC Guest context pointer to instruction
2241 * @param pConflictGC Guest context pointer to check
2242 *
2243 * @note also checks for patch hints to make sure they can never be enabled if a conflict is present.
2244 *
2245 */
2246VMMR3DECL(int) PATMR3DetectConflict(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictGC)
2247{
2248 PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, pConflictGC, true /* include patch hints */);
2249 if (pTargetPatch)
2250 {
2251 return patmDisableUnusablePatch(pVM, pInstrGC, pConflictGC, pTargetPatch);
2252 }
2253 return VERR_PATCH_NO_CONFLICT;
2254}
2255
2256/**
2257 * Recompile the code stream until the callback function detects a failure or decides everything is acceptable
2258 *
2259 * @returns VBox status code.
2260 * @param pVM The VM to operate on.
2261 * @param pInstrGC Guest context pointer to privileged instruction
2262 * @param pCurInstrGC Guest context pointer to the current instruction
2263 * @param pfnPATMR3Recompile Callback for testing the disassembled instruction
2264 * @param pUserData User pointer (callback specific)
2265 *
2266 */
2267static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Recompile, void *pUserData)
2268{
2269 DISCPUSTATE cpu;
2270 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
2271 int rc = VWRN_CONTINUE_ANALYSIS;
2272 uint32_t opsize;
2273 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2274 bool disret;
2275#ifdef LOG_ENABLED
2276 char szOutput[256];
2277#endif
2278
2279 while (rc == VWRN_CONTINUE_RECOMPILE)
2280 {
2281 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2282
2283 ////Log(("patmRecompileCodeStream %RRv %RRv\n", pInstrGC, pCurInstrGC));
2284
2285 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pCurInstrGC);
2286 if (pCurInstrHC == NULL)
2287 {
2288 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2289 goto end;
2290 }
2291#ifdef LOG_ENABLED
2292 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, szOutput);
2293 Log(("Recompile: %s", szOutput));
2294#else
2295 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2296#endif
2297 if (disret == false)
2298 {
2299 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2300
2301 /* Add lookup record for patch to guest address translation */
2302 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
2303 patmPatchGenIllegalInstr(pVM, pPatch);
2304 rc = VINF_SUCCESS; /* Note: don't fail here; we might refuse an important patch!! */
2305 goto end;
2306 }
2307
2308 rc = pfnPATMR3Recompile(pVM, &cpu, pInstrGC, pCurInstrGC, pUserData);
2309 if (rc != VWRN_CONTINUE_RECOMPILE)
2310 {
2311 /* If irqs are inhibited because of the current instruction, then we must make sure the next one is executed! */
2312 if ( rc == VINF_SUCCESS
2313 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2314 {
2315 DISCPUSTATE cpunext;
2316 uint32_t opsizenext;
2317 uint8_t *pNextInstrHC;
2318 RTRCPTR pNextInstrGC = pCurInstrGC + opsize;
2319
2320 Log(("patmRecompileCodeStream: irqs inhibited by instruction %RRv\n", pNextInstrGC));
2321
2322 /* Certain instructions (e.g. sti) force the next instruction to be executed before any interrupts can occur.
2323 * Recompile the next instruction as well
2324 */
2325 pNextInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pNextInstrGC);
2326 if (pNextInstrHC == NULL)
2327 {
2328 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2329 goto end;
2330 }
2331 cpunext.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2332 disret = PATMR3DISInstr(pVM, pPatch, &cpunext, pNextInstrGC, pNextInstrHC, &opsizenext, NULL);
2333 if (disret == false)
2334 {
2335 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2336 goto end;
2337 }
2338 switch(cpunext.pCurInstr->opcode)
2339 {
2340 case OP_IRET: /* inhibit cleared in generated code */
2341 case OP_SYSEXIT: /* faults; inhibit should be cleared in HC handling */
2342 case OP_HLT:
2343 break; /* recompile these */
2344
2345 default:
2346 if (cpunext.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2347 {
2348 Log(("Unexpected control flow instruction after inhibit irq instruction\n"));
2349
2350 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2351 AssertRC(rc);
2352 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2353 goto end; /** @todo should be ok to ignore instruction fusing in this case */
2354 }
2355 break;
2356 }
2357
2358 /** @note after a cli we must continue to a proper exit point */
2359 if (cpunext.pCurInstr->opcode != OP_CLI)
2360 {
2361 rc = pfnPATMR3Recompile(pVM, &cpunext, pInstrGC, pNextInstrGC, pUserData);
2362 if (RT_SUCCESS(rc))
2363 {
2364 rc = VINF_SUCCESS;
2365 goto end;
2366 }
2367 break;
2368 }
2369 else
2370 rc = VWRN_CONTINUE_RECOMPILE;
2371 }
2372 else
2373 break; /* done! */
2374 }
2375
2376 /** @todo continue with the instructions following the jump and then recompile the jump target code */
2377
2378
2379 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction). */
2380 if ( (cpu.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2381 && (OP_PARM_VTYPE(cpu.pCurInstr->param1) == OP_PARM_J)
2382 && cpu.pCurInstr->opcode != OP_CALL /* complete functions are replaced; don't bother here. */
2383 )
2384 {
2385 RCPTRTYPE(uint8_t *) addr = PATMResolveBranch(&cpu, pCurInstrGC);
2386 if (addr == 0)
2387 {
2388 Log(("We don't support far jumps here!! (%08X)\n", cpu.param1.flags));
2389 rc = VERR_PATCHING_REFUSED;
2390 break;
2391 }
2392
2393 Log(("Jump encountered target %RRv\n", addr));
2394
2395 /* We don't check if the branch target lies in a valid page as we've already done that in the analysis phase. */
2396 if (!(cpu.pCurInstr->optype & OPTYPE_UNCOND_CONTROLFLOW))
2397 {
2398 Log(("patmRecompileCodeStream continue passed conditional jump\n"));
2399 /* First we need to finish this linear code stream until the next exit point. */
2400 rc = patmRecompileCodeStream(pVM, pInstrGC, pCurInstrGC+opsize, pfnPATMR3Recompile, pUserData);
2401 if (RT_FAILURE(rc))
2402 {
2403 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2404 break; //fatal error
2405 }
2406 }
2407
2408 if (patmGuestGCPtrToPatchGCPtr(pVM, pPatch, addr) == 0)
2409 {
2410 /* New code; let's recompile it. */
2411 Log(("patmRecompileCodeStream continue with jump\n"));
2412
2413 /*
2414 * If we are jumping to an existing patch (or within 5 bytes of the entrypoint), then we must temporarily disable
2415 * this patch so we can continue our analysis
2416 *
2417 * We rely on CSAM to detect and resolve conflicts
2418 */
2419 PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, addr);
2420 if(pTargetPatch)
2421 {
2422 Log(("Found active patch at target %RRv (%RRv) -> temporarily disabling it!!\n", addr, pTargetPatch->pPrivInstrGC));
2423 PATMR3DisablePatch(pVM, pTargetPatch->pPrivInstrGC);
2424 }
2425
2426 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2427 rc = patmRecompileCodeStream(pVM, pInstrGC, addr, pfnPATMR3Recompile, pUserData);
2428 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2429
2430 if(pTargetPatch)
2431 {
2432 PATMR3EnablePatch(pVM, pTargetPatch->pPrivInstrGC);
2433 }
2434
2435 if (RT_FAILURE(rc))
2436 {
2437 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2438 break; //done!
2439 }
2440 }
2441 /* Always return to caller here; we're done! */
2442 rc = VINF_SUCCESS;
2443 goto end;
2444 }
2445 else
2446 if (cpu.pCurInstr->optype & OPTYPE_UNCOND_CONTROLFLOW)
2447 {
2448 rc = VINF_SUCCESS;
2449 goto end;
2450 }
2451 pCurInstrGC += opsize;
2452 }
2453end:
2454 Assert(!(pPatch->flags & PATMFL_RECOMPILE_NEXT));
2455 return rc;
2456}
2457
2458
2459/**
2460 * Generate the jump from guest to patch code
2461 *
2462 * @returns VBox status code.
2463 * @param pVM The VM to operate on.
2464 * @param pPatch Patch record
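 * @param fAddFixup Whether to add a relocation record for the jump target (defaults to true)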
2465 */
2466static int patmGenJumpToPatch(PVM pVM, PPATCHINFO pPatch, bool fAddFixup = true)
2467{
2468 uint8_t temp[8];
2469 uint8_t *pPB;
2470 int rc;
2471
2472 Assert(pPatch->cbPatchJump <= sizeof(temp));
2473 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
2474
2475 pPB = pPatch->pPrivInstrHC;
2476
2477#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
2478 if (pPatch->flags & PATMFL_JUMP_CONFLICT)
2479 {
2480 Assert(pPatch->pPatchJumpDestGC);
2481
2482 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
2483 {
2484 // jmp [PatchCode]
2485 if (fAddFixup)
2486 {
2487 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2488 {
2489 Log(("Relocation failed for the jump in the guest code!!\n"));
2490 return VERR_PATCHING_REFUSED;
2491 }
2492 }
2493
2494 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2495            *(uint32_t *)&temp[1] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //relative jump displacement
2496 }
2497 else
2498 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
2499 {
2500 // jmp [PatchCode]
2501 if (fAddFixup)
2502 {
2503 if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2504 {
2505 Log(("Relocation failed for the jump in the guest code!!\n"));
2506 return VERR_PATCHING_REFUSED;
2507 }
2508 }
2509
2510 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2511 temp[1] = pPatch->aPrivInstr[1]; //jump opcode copied from original instruction
2512            *(uint32_t *)&temp[2] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //relative jump displacement
2513 }
2514 else
2515 {
2516 Assert(0);
2517 return VERR_PATCHING_REFUSED;
2518 }
2519 }
2520 else
2521#endif
2522 {
2523 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2524
2525 // jmp [PatchCode]
2526 if (fAddFixup)
2527 {
2528 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
2529 {
2530 Log(("Relocation failed for the jump in the guest code!!\n"));
2531 return VERR_PATCHING_REFUSED;
2532 }
2533 }
2534 temp[0] = 0xE9; //jmp
2535        *(uint32_t *)&temp[1] = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //relative jump displacement
2536 }
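    /* In the common case the assembled sequence is a 5-byte near jump: 0xE9 followed by a 32-bit
       displacement (target - (source + 5)); with PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES it may
       instead reuse the original (conditional) jump opcode. It is written over the original guest
       bytes below; those bytes are expected to have been saved in pPatch->aPrivInstr so that
       patmRemoveJumpToPatch can restore them later. */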
2537 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2538 AssertRC(rc);
2539
2540 if (rc == VINF_SUCCESS)
2541 pPatch->flags |= PATMFL_PATCHED_GUEST_CODE;
2542
2543 return rc;
2544}
2545
2546/**
2547 * Remove the jump from guest to patch code
2548 *
2549 * @returns VBox status code.
2550 * @param pVM The VM to operate on.
2551 * @param pPatch Patch record
2552 */
2553static int patmRemoveJumpToPatch(PVM pVM, PPATCHINFO pPatch)
2554{
2555#ifdef DEBUG
2556 DISCPUSTATE cpu;
2557 char szOutput[256];
2558 uint32_t opsize, i = 0;
2559 bool disret;
2560
2561 while(i < pPatch->cbPrivInstr)
2562 {
2563 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2564 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, &pPatch->pPrivInstrHC[i], &opsize, szOutput);
2565 if (disret == false)
2566 break;
2567
2568 Log(("Org patch jump: %s", szOutput));
2569 Assert(opsize);
2570 i += opsize;
2571 }
2572#endif
2573
2574 /* Restore original code (privileged instruction + following instructions that were overwritten because of the 5/6 byte jmp). */
2575 int rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, pPatch->cbPatchJump);
2576#ifdef DEBUG
2577 if (rc == VINF_SUCCESS)
2578 {
2579 DISCPUSTATE cpu;
2580 char szOutput[256];
2581 uint32_t opsize, i = 0;
2582 bool disret;
2583
2584 while(i < pPatch->cbPrivInstr)
2585 {
2586 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2587 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, &pPatch->pPrivInstrHC[i], &opsize, szOutput);
2588 if (disret == false)
2589 break;
2590
2591 Log(("Org instr: %s", szOutput));
2592 Assert(opsize);
2593 i += opsize;
2594 }
2595 }
2596#endif
2597 pPatch->flags &= ~PATMFL_PATCHED_GUEST_CODE;
2598 return rc;
2599}
2600
2601/**
2602 * Generate the call from guest to patch code
2603 *
2604 * @returns VBox status code.
2605 * @param pVM The VM to operate on.
2606 * @param pPatch Patch record
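 * @param pTargetGC Guest context address of the patch code to branch to
 * @param fAddFixup Whether to add a relocation record for the branch target (defaults to true)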
2607 */
2608static int patmGenCallToPatch(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTargetGC, bool fAddFixup = true)
2609{
2610 uint8_t temp[8];
2611 uint8_t *pPB;
2612 int rc;
2613
2614 Assert(pPatch->cbPatchJump <= sizeof(temp));
2615
2616 pPB = pPatch->pPrivInstrHC;
2617
2618 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2619
2620    // call/jmp [PatchCode]
2621 if (fAddFixup)
2622 {
2623 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
2624 {
2625 Log(("Relocation failed for the jump in the guest code!!\n"));
2626 return VERR_PATCHING_REFUSED;
2627 }
2628 }
2629
2630 Assert(pPatch->aPrivInstr[0] == 0xE8 || pPatch->aPrivInstr[0] == 0xE9); /* call or jmp */
2631 temp[0] = pPatch->aPrivInstr[0];
2632    *(uint32_t *)&temp[1] = (uint32_t)pTargetGC - ((uint32_t)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //relative displacement
2633
2634 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2635 AssertRC(rc);
2636
2637 return rc;
2638}
2639
2640
2641/**
2642 * Patch cli/sti pushf/popf instruction block at specified location
2643 *
2644 * @returns VBox status code.
2645 * @param pVM The VM to operate on.
2646 * @param pInstrGC Guest context pointer to privileged instruction
2647 * @param pInstrHC Host context pointer to privileged instruction
2648 * @param uOpcode Instruction opcode
2649 * @param uOpSize Size of starting instruction
2650 * @param pPatchRec Patch record
2651 *
2652 * @note returns failure if patching is not allowed or not possible
2653 *
2654 */
2655VMMR3DECL(int) PATMR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2656 uint32_t uOpcode, uint32_t uOpSize, PPATMPATCHREC pPatchRec)
2657{
2658 PPATCHINFO pPatch = &pPatchRec->patch;
2659 int rc = VERR_PATCHING_REFUSED;
2660 DISCPUSTATE cpu;
2661 uint32_t orgOffsetPatchMem = ~0;
2662 RTRCPTR pInstrStart;
2663#ifdef LOG_ENABLED
2664 uint32_t opsize;
2665 char szOutput[256];
2666 bool disret;
2667#endif
2668
2669 /* Save original offset (in case of failures later on) */
2670 /** @todo use the hypervisor heap (that has quite a few consequences for save/restore though) */
2671 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2672
2673 Assert(!(pPatch->flags & (PATMFL_GUEST_SPECIFIC|PATMFL_USER_MODE|PATMFL_TRAPHANDLER)));
2674 switch (uOpcode)
2675 {
2676 case OP_MOV:
2677 break;
2678
2679 case OP_CLI:
2680 case OP_PUSHF:
2681 /* We can 'call' a cli or pushf patch. It will either return to the original guest code when IF is set again, or fault. */
2682 /** @note special precautions are taken when disabling and enabling such patches. */
2683 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
2684 break;
2685
2686 default:
2687 if (!(pPatch->flags & PATMFL_IDTHANDLER))
2688 {
2689 AssertMsg(0, ("PATMR3PatchBlock: Invalid opcode %x\n", uOpcode));
2690 return VERR_INVALID_PARAMETER;
2691 }
2692 }
2693
2694 if (!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)))
2695 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
2696
2697 /* If we're going to insert a patch jump, then the jump itself is not allowed to cross a page boundary. */
2698 if ( (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2699 && PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + SIZEOF_NEARJUMP32)
2700 )
2701 {
2702 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
2703#ifdef DEBUG_sandervl
2704//// AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
2705#endif
2706 rc = VERR_PATCHING_REFUSED;
2707 goto failure;
2708 }
2709
2710 pPatch->nrPatch2GuestRecs = 0;
2711 pInstrStart = pInstrGC;
2712
2713#ifdef PATM_ENABLE_CALL
2714 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
2715#endif
2716
2717 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2718 pPatch->uCurPatchOffset = 0;
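    /* The new patch block starts at the current free offset in patch memory; uCurPatchOffset tracks
       how many bytes the generators below have emitted into it so far. */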
2719
2720 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2721
2722 if ((pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER)) == PATMFL_IDTHANDLER)
2723 {
2724 Assert(pPatch->flags & PATMFL_INTHANDLER);
2725
2726 /* Install fake cli patch (to clear the virtual IF and check int xx parameters) */
2727 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2728 if (RT_FAILURE(rc))
2729 goto failure;
2730 }
2731
2732 /***************************************************************************************************************************/
2733 /** @note We can't insert *any* code before a sysenter handler; some linux guests have an invalid stack at this point!!!!! */
2734 /***************************************************************************************************************************/
2735#ifdef VBOX_WITH_STATISTICS
2736 if (!(pPatch->flags & PATMFL_SYSENTER))
2737 {
2738 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2739 if (RT_FAILURE(rc))
2740 goto failure;
2741 }
2742#endif
2743
2744 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pPatch);
2745 if (rc != VINF_SUCCESS)
2746 {
2747        Log(("PATMR3PatchBlock: patmRecompileCodeStream failed with %d\n", rc));
2748 goto failure;
2749 }
2750
2751 /* Calculated during analysis. */
2752 if (pPatch->cbPatchBlockSize < SIZEOF_NEARJUMP32)
2753 {
2754 /* Most likely cause: we encountered an illegal instruction very early on. */
2755 /** @todo could turn it into an int3 callable patch. */
2756 Log(("PATMR3PatchBlock: patch block too small -> refuse\n"));
2757 rc = VERR_PATCHING_REFUSED;
2758 goto failure;
2759 }
2760
2761 /* size of patch block */
2762 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2763
2764
2765 /* Update free pointer in patch memory. */
2766 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2767 /* Round to next 8 byte boundary. */
2768 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2769
2770 /*
2771 * Insert into patch to guest lookup tree
2772 */
2773 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2774 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2775 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2776    AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
2777 if (!rc)
2778 {
2779 rc = VERR_PATCHING_REFUSED;
2780 goto failure;
2781 }
2782
2783 /* Note that patmr3SetBranchTargets can install additional patches!! */
2784 rc = patmr3SetBranchTargets(pVM, pPatch);
2785 if (rc != VINF_SUCCESS)
2786 {
2787        Log(("PATMR3PatchBlock: patmr3SetBranchTargets failed with %d\n", rc));
2788 goto failure;
2789 }
2790
2791#ifdef LOG_ENABLED
2792 Log(("Patch code ----------------------------------------------------------\n"));
2793 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pPatch);
2794 Log(("Patch code ends -----------------------------------------------------\n"));
2795#endif
2796
2797 /* make a copy of the guest code bytes that will be overwritten */
2798 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
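    /* SIZEOF_NEARJUMP32 covers the jmp rel32 (opcode plus 32-bit displacement) that will replace
       these bytes; saving them in aPrivInstr allows the patch to be backed out again. */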
2799
2800 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
2801 AssertRC(rc);
2802
2803 if (pPatch->flags & PATMFL_INT3_REPLACEMENT_BLOCK)
2804 {
2805 /*uint8_t ASMInt3 = 0xCC; - unused */
2806
2807 Log(("PATMR3PatchBlock %RRv -> int 3 callable patch.\n", pPatch->pPrivInstrGC));
2808 /* Replace first opcode byte with 'int 3'. */
2809 rc = patmActivateInt3Patch(pVM, pPatch);
2810 if (RT_FAILURE(rc))
2811 goto failure;
2812
2813 /* normal patch can be turned into an int3 patch -> clear patch jump installation flag. */
2814 pPatch->flags &= ~PATMFL_MUST_INSTALL_PATCHJMP;
2815
2816 pPatch->flags &= ~PATMFL_INSTR_HINT;
2817 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
2818 }
2819 else
2820 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2821 {
2822 Assert(!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)));
2823 /* now insert a jump in the guest code */
2824 rc = patmGenJumpToPatch(pVM, pPatch, true);
2825 AssertRC(rc);
2826 if (RT_FAILURE(rc))
2827 goto failure;
2828
2829 }
2830
2831#ifdef LOG_ENABLED
2832 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2833 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput, PATMREAD_RAWCODE);
2834 Log(("%s patch: %s", patmGetInstructionString(pPatch->opcode, pPatch->flags), szOutput));
2835#endif
2836
2837 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2838 pPatch->pTempInfo->nrIllegalInstr = 0;
2839
2840 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
2841
2842 pPatch->uState = PATCH_ENABLED;
2843 return VINF_SUCCESS;
2844
2845failure:
2846 if (pPatchRec->CoreOffset.Key)
2847 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
2848
2849 patmEmptyTree(pVM, &pPatch->FixupTree);
2850 pPatch->nrFixups = 0;
2851
2852 patmEmptyTree(pVM, &pPatch->JumpTree);
2853 pPatch->nrJumpRecs = 0;
2854
2855 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2856 pPatch->pTempInfo->nrIllegalInstr = 0;
2857
2858 /* Turn this cli patch into a dummy. */
2859 pPatch->uState = PATCH_REFUSED;
2860 pPatch->pPatchBlockOffset = 0;
2861
2862 // Give back the patch memory we no longer need
2863 Assert(orgOffsetPatchMem != (uint32_t)~0);
2864 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
2865
2866 return rc;
2867}
2868
2869/**
2870 * Patch IDT handler
2871 *
2872 * @returns VBox status code.
2873 * @param pVM The VM to operate on.
2874 * @param pInstrGC Guest context pointer to privileged instruction
2875 * @param pInstrHC Host context pointer to privileged instruction
2876 * @param uOpSize Size of starting instruction
2877 * @param pPatchRec Patch record
2878 *
2879 * @note returns failure if patching is not allowed or not possible
2880 *
2881 */
2882static int patmIdtHandler(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2883 uint32_t uOpSize, PPATMPATCHREC pPatchRec)
2884{
2885 PPATCHINFO pPatch = &pPatchRec->patch;
2886 bool disret;
2887 DISCPUSTATE cpuPush, cpuJmp;
2888 uint32_t opsize;
2889 RTRCPTR pCurInstrGC = pInstrGC;
2890 uint8_t *pCurInstrHC = pInstrHC;
2891 uint32_t orgOffsetPatchMem = ~0;
2892
2893 /*
2894 * In Linux it's often the case that many interrupt handlers push a predefined value onto the stack
2895 * and then jump to a common entrypoint. In order not to waste a lot of memory, we will check for this
2896 * condition here and only patch the common entrypoint once.
2897 */
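    /* Typical guest pattern this targets (illustrative sketch, not taken from a specific guest):
     *     push  <vector-specific value>
     *     jmp   common_interrupt_entrypoint
     * Only the common entry point gets a full patch; each stub merely duplicates its push and then
     * jumps into that existing patch.
     */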
2898 cpuPush.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2899 disret = PATMR3DISInstr(pVM, pPatch, &cpuPush, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2900 Assert(disret);
2901 if (disret && cpuPush.pCurInstr->opcode == OP_PUSH)
2902 {
2903 RTRCPTR pJmpInstrGC;
2904 int rc;
2905
2906 pCurInstrGC += opsize;
2907 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pCurInstrGC);
2908
2909 cpuJmp.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2910 disret = PATMR3DISInstr(pVM, pPatch, &cpuJmp, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2911 if ( disret
2912 && cpuJmp.pCurInstr->opcode == OP_JMP
2913 && (pJmpInstrGC = PATMResolveBranch(&cpuJmp, pCurInstrGC))
2914 )
2915 {
2916 PPATMPATCHREC pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
2917 if (pJmpPatch == 0)
2918 {
2919 /* Patch it first! */
2920 rc = PATMR3InstallPatch(pVM, pJmpInstrGC, pPatch->flags | PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT);
2921 if (rc != VINF_SUCCESS)
2922 goto failure;
2923 pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
2924 Assert(pJmpPatch);
2925 }
2926 if (pJmpPatch->patch.uState != PATCH_ENABLED)
2927 goto failure;
2928
2929 /* save original offset (in case of failures later on) */
2930 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2931
2932 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2933 pPatch->uCurPatchOffset = 0;
2934 pPatch->nrPatch2GuestRecs = 0;
2935
2936#ifdef VBOX_WITH_STATISTICS
2937 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2938 if (RT_FAILURE(rc))
2939 goto failure;
2940#endif
2941
2942 /* Install fake cli patch (to clear the virtual IF) */
2943 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2944 if (RT_FAILURE(rc))
2945 goto failure;
2946
2947 /* Add lookup record for patch to guest address translation (for the push) */
2948 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pInstrGC, PATM_LOOKUP_BOTHDIR);
2949
2950 /* Duplicate push. */
2951 rc = patmPatchGenDuplicate(pVM, pPatch, &cpuPush, pInstrGC);
2952 if (RT_FAILURE(rc))
2953 goto failure;
2954
2955 /* Generate jump to common entrypoint. */
2956 rc = patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, PATCHCODE_PTR_GC(&pJmpPatch->patch));
2957 if (RT_FAILURE(rc))
2958 goto failure;
2959
2960 /* size of patch block */
2961 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2962
2963 /* Update free pointer in patch memory. */
2964 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2965 /* Round to next 8 byte boundary */
2966 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2967
2968 /* There's no jump from guest to patch code. */
2969 pPatch->cbPatchJump = 0;
2970
2971
2972#ifdef LOG_ENABLED
2973 Log(("Patch code ----------------------------------------------------------\n"));
2974 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pPatch);
2975 Log(("Patch code ends -----------------------------------------------------\n"));
2976#endif
2977 Log(("Successfully installed IDT handler patch at %RRv\n", pInstrGC));
2978
2979 /*
2980 * Insert into patch to guest lookup tree
2981 */
2982 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2983 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2984 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2985    AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
2986
2987 pPatch->uState = PATCH_ENABLED;
2988
2989 return VINF_SUCCESS;
2990 }
2991 }
2992failure:
2993 /* Give back the patch memory we no longer need */
2994 if (orgOffsetPatchMem != (uint32_t)~0)
2995 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
2996
2997 return PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, OP_CLI, uOpSize, pPatchRec);
2998}
2999
3000/**
3001 * Install a trampoline to call a guest trap handler directly
3002 *
3003 * @returns VBox status code.
3004 * @param pVM The VM to operate on.
3005 * @param pInstrGC Guest context pointer to privileged instruction
3006 * @param pPatchRec Patch record
3007 *
3008 */
3009static int patmInstallTrapTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3010{
3011 PPATCHINFO pPatch = &pPatchRec->patch;
3012 int rc = VERR_PATCHING_REFUSED;
3013 uint32_t orgOffsetPatchMem = ~0;
3014#ifdef LOG_ENABLED
3015 bool disret;
3016 DISCPUSTATE cpu;
3017 uint32_t opsize;
3018 char szOutput[256];
3019#endif
3020
3021 // save original offset (in case of failures later on)
3022 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3023
3024 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3025 pPatch->uCurPatchOffset = 0;
3026 pPatch->nrPatch2GuestRecs = 0;
3027
3028#ifdef VBOX_WITH_STATISTICS
3029 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3030 if (RT_FAILURE(rc))
3031 goto failure;
3032#endif
3033
3034 rc = patmPatchGenTrapEntry(pVM, pPatch, pInstrGC);
3035 if (RT_FAILURE(rc))
3036 goto failure;
3037
3038 /* size of patch block */
3039 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3040
3041 /* Update free pointer in patch memory. */
3042 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3043 /* Round to next 8 byte boundary */
3044 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3045
3046 /* There's no jump from guest to patch code. */
3047 pPatch->cbPatchJump = 0;
3048
3049#ifdef LOG_ENABLED
3050 Log(("Patch code ----------------------------------------------------------\n"));
3051 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pPatch);
3052 Log(("Patch code ends -----------------------------------------------------\n"));
3053#endif
3054
3055#ifdef LOG_ENABLED
3056 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3057 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput);
3058 Log(("TRAP handler patch: %s", szOutput));
3059#endif
3060 Log(("Successfully installed Trap Trampoline patch at %RRv\n", pInstrGC));
3061
3062 /*
3063 * Insert into patch to guest lookup tree
3064 */
3065 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3066 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3067 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3068    AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3069
3070 pPatch->uState = PATCH_ENABLED;
3071 return VINF_SUCCESS;
3072
3073failure:
3074 AssertMsgFailed(("Failed to install trap handler trampoline!!\n"));
3075
3076 /* Turn this cli patch into a dummy. */
3077 pPatch->uState = PATCH_REFUSED;
3078 pPatch->pPatchBlockOffset = 0;
3079
3080 /* Give back the patch memory we no longer need */
3081 Assert(orgOffsetPatchMem != (uint32_t)~0);
3082 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3083
3084 return rc;
3085}
3086
3087
3088#ifdef LOG_ENABLED
3089/**
3090 * Check if the instruction is patched as a common idt handler
3091 *
3092 * @returns true or false
3093 * @param pVM The VM to operate on.
3094 * @param pInstrGC Guest context pointer to the instruction
3095 *
3096 */
3097static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC)
3098{
3099 PPATMPATCHREC pRec;
3100
3101 pRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
3102 if (pRec && pRec->patch.flags & PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT)
3103 return true;
3104 return false;
3105}
3106#endif //LOG_ENABLED
3107
3108
3109/**
3110 * Duplicates a complete function
3111 *
3112 * @returns VBox status code.
3113 * @param pVM The VM to operate on.
3114 * @param pInstrGC Guest context pointer to the privileged instruction
3115 * @param pPatchRec Patch record
3116 *
3117 */
3118static int patmDuplicateFunction(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3119{
3120 PPATCHINFO pPatch = &pPatchRec->patch;
3121 int rc = VERR_PATCHING_REFUSED;
3122 DISCPUSTATE cpu;
3123 uint32_t orgOffsetPatchMem = ~0;
3124
3125 Log(("patmDuplicateFunction %RRv\n", pInstrGC));
3126 /* Save original offset (in case of failures later on). */
3127 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3128
3129 /* We will not go on indefinitely with call instruction handling. */
3130 if (pVM->patm.s.ulCallDepth > PATM_MAX_CALL_DEPTH)
3131 {
3132 Log(("patmDuplicateFunction: maximum call depth reached!!\n"));
3133 return VERR_PATCHING_REFUSED;
3134 }
3135
3136 pVM->patm.s.ulCallDepth++;
3137
3138#ifdef PATM_ENABLE_CALL
3139 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
3140#endif
3141
3142 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
3143
3144 pPatch->nrPatch2GuestRecs = 0;
3145 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3146 pPatch->uCurPatchOffset = 0;
3147
3148 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3149
3150 /** @note Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3151 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3152 if (RT_FAILURE(rc))
3153 goto failure;
3154
3155#ifdef VBOX_WITH_STATISTICS
3156 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3157 if (RT_FAILURE(rc))
3158 goto failure;
3159#endif
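    /* Recompile the guest code stream into patch memory; branch targets discovered along the way are resolved later by patmr3SetBranchTargets. */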
3160 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pPatch);
3161 if (rc != VINF_SUCCESS)
3162 {
3163 Log(("patmDuplicateFunction: patmRecompileCodeStream failed with %d\n", rc));
3164 goto failure;
3165 }
3166
3167 //size of patch block
3168 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3169
3170 //update free pointer in patch memory
3171 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3172 /* Round to next 8 byte boundary. */
3173 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3174
3175 pPatch->uState = PATCH_ENABLED;
3176
3177 /*
3178 * Insert into patch to guest lookup tree
3179 */
3180 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3181 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3182 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3183 AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3184 if (!rc)
3185 {
3186 rc = VERR_PATCHING_REFUSED;
3187 goto failure;
3188 }
3189
3190 /* Note that patmr3SetBranchTargets can install additional patches!! */
3191 rc = patmr3SetBranchTargets(pVM, pPatch);
3192 if (rc != VINF_SUCCESS)
3193 {
3194 Log(("patmDuplicateFunction: patmr3SetBranchTargets failed with %d\n", rc));
3195 goto failure;
3196 }
3197
3198#ifdef LOG_ENABLED
3199 Log(("Patch code ----------------------------------------------------------\n"));
3200 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pPatch);
3201 Log(("Patch code ends -----------------------------------------------------\n"));
3202#endif
3203
3204 Log(("Successfully installed function duplication patch at %RRv\n", pInstrGC));
3205
3206 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3207 pPatch->pTempInfo->nrIllegalInstr = 0;
3208
3209 pVM->patm.s.ulCallDepth--;
3210 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledFunctionPatches);
3211 return VINF_SUCCESS;
3212
3213failure:
3214 if (pPatchRec->CoreOffset.Key)
3215 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3216
3217 patmEmptyTree(pVM, &pPatch->FixupTree);
3218 pPatch->nrFixups = 0;
3219
3220 patmEmptyTree(pVM, &pPatch->JumpTree);
3221 pPatch->nrJumpRecs = 0;
3222
3223 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3224 pPatch->pTempInfo->nrIllegalInstr = 0;
3225
3226 /* Turn this patch into a dummy. */
3227 pPatch->uState = PATCH_REFUSED;
3228 pPatch->pPatchBlockOffset = 0;
3229
3230 // Give back the patch memory we no longer need
3231 Assert(orgOffsetPatchMem != (uint32_t)~0);
3232 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3233
3234 pVM->patm.s.ulCallDepth--;
3235 Log(("patmDuplicateFunction %RRv failed!!\n", pInstrGC));
3236 return rc;
3237}
3238
3239/**
3240 * Creates trampoline code to jump inside an existing patch
3241 *
3242 * @returns VBox status code.
3243 * @param pVM The VM to operate on.
3244 * @param pInstrGC Guest context pointer to the privileged instruction
3245 * @param pPatchRec Patch record
3246 *
3247 */
3248static int patmCreateTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3249{
3250 PPATCHINFO pPatch = &pPatchRec->patch;
3251 RTRCPTR pPage, pPatchTargetGC = 0;
3252 uint32_t orgOffsetPatchMem = ~0;
3253 int rc = VERR_PATCHING_REFUSED;
3254
3255 Log(("patmCreateTrampoline %RRv\n", pInstrGC));
3256 /* Save original offset (in case of failures later on). */
3257 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3258
3259 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3260 /** @todo we already checked this before */
3261 pPage = pInstrGC & PAGE_BASE_GC_MASK;
3262
3263 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3264 if (pPatchPage)
3265 {
3266 uint32_t i;
3267
3268 for (i=0;i<pPatchPage->cCount;i++)
3269 {
3270 if (pPatchPage->aPatch[i])
3271 {
3272 PPATCHINFO pPatch = pPatchPage->aPatch[i];
3273
3274 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
3275 && pPatch->uState == PATCH_ENABLED)
3276 {
3277 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pInstrGC);
3278 if (pPatchTargetGC)
3279 {
3280 uint32_t offsetPatch = pPatchTargetGC - pVM->patm.s.pPatchMemGC;
3281 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->Patch2GuestAddrTree, offsetPatch, false);
3282 Assert(pPatchToGuestRec);
3283
3284 pPatchToGuestRec->fJumpTarget = true;
3285 Assert(pPatchTargetGC != pPatch->pPrivInstrGC);
3286 Log(("patmCreateTrampoline: generating jump to code inside patch at %RRv\n", pPatch->pPrivInstrGC));
3287 pPatch->flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
3288 break;
3289 }
3290 }
3291 }
3292 }
3293 }
3294 AssertReturn(pPatchPage && pPatchTargetGC, VERR_PATCHING_REFUSED);
3295
3296 pPatch->nrPatch2GuestRecs = 0;
3297 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3298 pPatch->uCurPatchOffset = 0;
3299
3300 /** @note Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3301 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3302 if (RT_FAILURE(rc))
3303 goto failure;
3304
3305#ifdef VBOX_WITH_STATISTICS
3306 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3307 if (RT_FAILURE(rc))
3308 goto failure;
3309#endif
3310
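    /* Emit the jump into the existing function patch at pPatchTargetGC. */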
3311 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC);
3312 if (RT_FAILURE(rc))
3313 goto failure;
3314
3315 /*
3316 * Insert into patch to guest lookup tree
3317 */
3318 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3319 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3320 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3321 AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3322 if (!rc)
3323 {
3324 rc = VERR_PATCHING_REFUSED;
3325 goto failure;
3326 }
3327
3328 /* size of patch block */
3329 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3330
3331 /* Update free pointer in patch memory. */
3332 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3333 /* Round to next 8 byte boundary */
3334 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3335
3336 /* There's no jump from guest to patch code. */
3337 pPatch->cbPatchJump = 0;
3338
3339 /* Enable the patch. */
3340 pPatch->uState = PATCH_ENABLED;
3341 /* We allow this patch to be called as a function. */
3342 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
3343 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledTrampoline);
3344 return VINF_SUCCESS;
3345
3346failure:
3347 if (pPatchRec->CoreOffset.Key)
3348 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3349
3350 patmEmptyTree(pVM, &pPatch->FixupTree);
3351 pPatch->nrFixups = 0;
3352
3353 patmEmptyTree(pVM, &pPatch->JumpTree);
3354 pPatch->nrJumpRecs = 0;
3355
3356 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3357 pPatch->pTempInfo->nrIllegalInstr = 0;
3358
3359 /* Turn this patch into a dummy. */
3360 pPatch->uState = PATCH_REFUSED;
3361 pPatch->pPatchBlockOffset = 0;
3362
3363 // Give back the patch memory we no longer need
3364 Assert(orgOffsetPatchMem != (uint32_t)~0);
3365 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3366
3367 return rc;
3368}
3369
3370
3371/**
3372 * Patch branch target function for call/jump at specified location.
3373 * (in response to a VINF_PATM_DUPLICATE_FUNCTION GC exit reason)
3374 *
3375 * @returns VBox status code.
3376 * @param pVM The VM to operate on.
3377 * @param pCtx Guest context
3378 *
3379 */
3380VMMR3DECL(int) PATMR3DuplicateFunctionRequest(PVM pVM, PCPUMCTX pCtx)
3381{
3382 RTRCPTR pBranchTarget, pPage;
3383 int rc;
3384 RTRCPTR pPatchTargetGC = 0;
3385
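    /* The branch target is passed in EDX; convert it to a flat address using the guest's CS. */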
3386 pBranchTarget = pCtx->edx;
3387 pBranchTarget = SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pBranchTarget);
3388
3389 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3390 pPage = pBranchTarget & PAGE_BASE_GC_MASK;
3391
3392 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3393 if (pPatchPage)
3394 {
3395 uint32_t i;
3396
3397 for (i=0;i<pPatchPage->cCount;i++)
3398 {
3399 if (pPatchPage->aPatch[i])
3400 {
3401 PPATCHINFO pPatch = pPatchPage->aPatch[i];
3402
3403 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
3404 && pPatch->uState == PATCH_ENABLED)
3405 {
3406 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pBranchTarget);
3407 if (pPatchTargetGC)
3408 {
3409 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateUseExisting);
3410 break;
3411 }
3412 }
3413 }
3414 }
3415 }
3416
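    /* If the target already lies inside an existing function patch a trampoline suffices; otherwise the whole function is duplicated. */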
3417 if (pPatchTargetGC)
3418 {
3419 /* Create a trampoline that also sets PATM_INTERRUPTFLAG. */
3420 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_TRAMPOLINE);
3421 }
3422 else
3423 {
3424 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
3425 }
3426
3427 if (rc == VINF_SUCCESS)
3428 {
3429 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pBranchTarget);
3430 Assert(pPatchTargetGC);
3431 }
3432
3433 if (pPatchTargetGC)
3434 {
3435 pCtx->eax = pPatchTargetGC;
3436 pCtx->eax = pCtx->eax - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC; /* make it relative */
3437 }
3438 else
3439 {
3440 /* We add a dummy entry into the lookup cache so we won't get bombarded with the same requests over and over again. */
3441 pCtx->eax = 0;
3442 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQFailed);
3443 }
3444 Assert(PATMIsPatchGCAddr(pVM, pCtx->edi));
3445 rc = PATMAddBranchToLookupCache(pVM, pCtx->edi, pBranchTarget, pCtx->eax);
3446 AssertRC(rc);
3447
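    /* Advance EIP past the (PATM_ILLEGAL_INSTR_SIZE byte) instruction that triggered this request. */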
3448 pCtx->eip += PATM_ILLEGAL_INSTR_SIZE;
3449 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQSuccess);
3450 return VINF_SUCCESS;
3451}
3452
3453/**
3454 * Replaces a function call by a call to an existing function duplicate (or jmp -> jmp)
3455 *
3456 * @returns VBox status code.
3457 * @param pVM The VM to operate on.
3458 * @param pCpu Disassembly CPU structure ptr
3459 * @param pInstrGC Guest context pointer to the privileged instruction
3460 * @param pPatch Patch record
3461 *
3462 */
3463static int patmReplaceFunctionCall(PVM pVM, DISCPUSTATE *pCpu, RTRCPTR pInstrGC, PPATCHINFO pPatch)
3464{
3465 int rc = VERR_PATCHING_REFUSED;
3466 DISCPUSTATE cpu;
3467 RTRCPTR pTargetGC;
3468 PPATMPATCHREC pPatchFunction;
3469 uint32_t opsize;
3470 bool disret;
3471#ifdef LOG_ENABLED
3472 char szOutput[256];
3473#endif
3474
3475 Assert(pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL);
3476 Assert((pCpu->pCurInstr->opcode == OP_CALL || pCpu->pCurInstr->opcode == OP_JMP) && pCpu->opsize == SIZEOF_NEARJUMP32);
3477
3478 if ((pCpu->pCurInstr->opcode != OP_CALL && pCpu->pCurInstr->opcode != OP_JMP) || pCpu->opsize != SIZEOF_NEARJUMP32)
3479 {
3480 rc = VERR_PATCHING_REFUSED;
3481 goto failure;
3482 }
3483
3484 pTargetGC = PATMResolveBranch(pCpu, pInstrGC);
3485 if (pTargetGC == 0)
3486 {
3487 Log(("We don't support far jumps here!! (%08X)\n", pCpu->param1.flags));
3488 rc = VERR_PATCHING_REFUSED;
3489 goto failure;
3490 }
3491
3492 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3493 if (pPatchFunction == NULL)
3494 {
3495 for(;;)
3496 {
3497 /* It could be an indirect call (call -> jmp dest).
3498 * Note that it's dangerous to assume the jump will never change...
3499 */
3500 uint8_t *pTmpInstrHC;
3501
3502 pTmpInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pTargetGC);
3503 Assert(pTmpInstrHC);
3504 if (pTmpInstrHC == 0)
3505 break;
3506
3507 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3508 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pTargetGC, pTmpInstrHC, &opsize, NULL);
3509 if (disret == false || cpu.pCurInstr->opcode != OP_JMP)
3510 break;
3511
3512 pTargetGC = PATMResolveBranch(&cpu, pTargetGC);
3513 if (pTargetGC == 0)
3514 {
3515 break;
3516 }
3517
3518 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3519 break;
3520 }
3521 if (pPatchFunction == 0)
3522 {
3523 AssertMsgFailed(("Unable to find duplicate function %RRv\n", pTargetGC));
3524 rc = VERR_PATCHING_REFUSED;
3525 goto failure;
3526 }
3527 }
3528
3529 // make a copy of the guest code bytes that will be overwritten
3530 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
3531
3532 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3533 AssertRC(rc);
3534
3535 /* Now replace the original call in the guest code */
3536 rc = patmGenCallToPatch(pVM, pPatch, PATCHCODE_PTR_GC(&pPatchFunction->patch), true);
3537 AssertRC(rc);
3538 if (RT_FAILURE(rc))
3539 goto failure;
3540
3541 /* Lowest and highest address for write monitoring. */
3542 pPatch->pInstrGCLowest = pInstrGC;
3543 pPatch->pInstrGCHighest = pInstrGC + pCpu->opsize;
3544
3545#ifdef LOG_ENABLED
3546 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3547 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput);
3548 Log(("Call patch: %s", szOutput));
3549#endif
3550
3551 Log(("Successfully installed function replacement patch at %RRv\n", pInstrGC));
3552
3553 pPatch->uState = PATCH_ENABLED;
3554 return VINF_SUCCESS;
3555
3556failure:
3557 /* Turn this patch into a dummy. */
3558 pPatch->uState = PATCH_REFUSED;
3559
3560 return rc;
3561}
3562
3563/**
3564 * Replace the address in an MMIO instruction with the cached version.
3565 *
3566 * @returns VBox status code.
3567 * @param pVM The VM to operate on.
3568 * @param pInstrGC Guest context pointer to the privileged instruction
3569 * @param pCpu Disassembly CPU structure ptr
3570 * @param pPatch Patch record
3571 *
3572 * @note returns failure if patching is not allowed or possible
3573 *
3574 */
3575static int patmPatchMMIOInstr(PVM pVM, RTRCPTR pInstrGC, DISCPUSTATE *pCpu, PPATCHINFO pPatch)
3576{
3577 uint8_t *pPB;
3578 int rc = VERR_PATCHING_REFUSED;
3579#ifdef LOG_ENABLED
3580 DISCPUSTATE cpu;
3581 uint32_t opsize;
3582 bool disret;
3583 char szOutput[256];
3584#endif
3585
3586 Assert(pVM->patm.s.mmio.pCachedData);
3587 if (!pVM->patm.s.mmio.pCachedData)
3588 goto failure;
3589
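    /* Only instructions whose memory operand is a plain 32-bit displacement (the MMIO address) can be patched this way. */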
3590 if (pCpu->param2.flags != USE_DISPLACEMENT32)
3591 goto failure;
3592
3593 pPB = pPatch->pPrivInstrHC;
3594
3595 /* Add relocation record for cached data access. */
3596 if (patmPatchAddReloc32(pVM, pPatch, &pPB[pCpu->opsize - sizeof(RTRCPTR)], FIXUP_ABSOLUTE, pPatch->pPrivInstrGC, pVM->patm.s.mmio.pCachedData) != VINF_SUCCESS)
3597 {
3598 Log(("Relocation failed for cached mmio address!!\n"));
3599 return VERR_PATCHING_REFUSED;
3600 }
3601#ifdef LOG_ENABLED
3602 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3603 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput);
3604 Log(("MMIO patch old instruction: %s", szOutput));
3605#endif
3606
3607 /* Save original instruction. */
3608 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3609 AssertRC(rc);
3610
3611 pPatch->cbPatchJump = pPatch->cbPrivInstr; /* bit of a misnomer in this case; size of replacement instruction. */
3612
3613 /* Replace address with that of the cached item. */
3614 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pInstrGC + pCpu->opsize - sizeof(RTRCPTR), &pVM->patm.s.mmio.pCachedData, sizeof(RTRCPTR));
3615 AssertRC(rc);
3616 if (RT_FAILURE(rc))
3617 {
3618 goto failure;
3619 }
3620
3621#ifdef LOG_ENABLED
3622 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3623 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput);
3624 Log(("MMIO patch: %s", szOutput));
3625#endif
3626 pVM->patm.s.mmio.pCachedData = 0;
3627 pVM->patm.s.mmio.GCPhys = 0;
3628 pPatch->uState = PATCH_ENABLED;
3629 return VINF_SUCCESS;
3630
3631failure:
3632 /* Turn this patch into a dummy. */
3633 pPatch->uState = PATCH_REFUSED;
3634
3635 return rc;
3636}
3637
3638
3639/**
3640 * Replace the address in an MMIO instruction with the cached version. (instruction is part of an existing patch)
3641 *
3642 * @returns VBox status code.
3643 * @param pVM The VM to operate on.
3644 * @param pInstrGC Guest context pointer to the privileged instruction
3645 * @param pPatch Patch record
3646 *
3647 * @note returns failure if patching is not allowed or possible
3648 *
3649 */
3650static int patmPatchPATMMMIOInstr(PVM pVM, RTRCPTR pInstrGC, PPATCHINFO pPatch)
3651{
3652 DISCPUSTATE cpu;
3653 uint32_t opsize;
3654 bool disret;
3655 uint8_t *pInstrHC;
3656#ifdef LOG_ENABLED
3657 char szOutput[256];
3658#endif
3659
3660 AssertReturn(pVM->patm.s.mmio.pCachedData, VERR_INVALID_PARAMETER);
3661
3662 /* Convert GC to HC address. */
3663 pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pInstrGC);
3664 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
3665
3666 /* Disassemble mmio instruction. */
3667 cpu.mode = pPatch->uOpMode;
3668 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, NULL);
3669 if (disret == false)
3670 {
3671 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
3672 return VERR_PATCHING_REFUSED;
3673 }
3674
3675 AssertMsg(opsize <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", opsize));
3676 if (opsize > MAX_INSTR_SIZE)
3677 return VERR_PATCHING_REFUSED;
3678 if (cpu.param2.flags != USE_DISPLACEMENT32)
3679 return VERR_PATCHING_REFUSED;
3680
3681 /* Add relocation record for cached data access. */
3682 if (patmPatchAddReloc32(pVM, pPatch, &pInstrHC[cpu.opsize - sizeof(RTRCPTR)], FIXUP_ABSOLUTE) != VINF_SUCCESS)
3683 {
3684 Log(("Relocation failed for cached mmio address!!\n"));
3685 return VERR_PATCHING_REFUSED;
3686 }
3687 /* Replace address with that of the cached item. */
3688 *(RTRCPTR *)&pInstrHC[cpu.opsize - sizeof(RTRCPTR)] = pVM->patm.s.mmio.pCachedData;
3689
3690 /* Lowest and highest address for write monitoring. */
3691 pPatch->pInstrGCLowest = pInstrGC;
3692 pPatch->pInstrGCHighest = pInstrGC + cpu.opsize;
3693
3694#ifdef LOG_ENABLED
3695 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3696 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, szOutput);
3697 Log(("MMIO patch: %s", szOutput));
3698#endif
3699
3700 pVM->patm.s.mmio.pCachedData = 0;
3701 pVM->patm.s.mmio.GCPhys = 0;
3702 return VINF_SUCCESS;
3703}
3704
3705/**
3706 * Activates an int3 patch
3707 *
3708 * @returns VBox status code.
3709 * @param pVM The VM to operate on.
3710 * @param pPatch Patch record
3711 */
3712static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3713{
3714 uint8_t ASMInt3 = 0xCC;
3715 int rc;
3716
3717 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3718 Assert(pPatch->uState != PATCH_ENABLED);
3719
3720 /* Replace first opcode byte with 'int 3'. */
3721 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, &ASMInt3, sizeof(ASMInt3));
3722 AssertRC(rc);
3723
3724 pPatch->cbPatchJump = sizeof(ASMInt3);
3725
3726 return rc;
3727}
3728
3729/**
3730 * Deactivates an int3 patch
3731 *
3732 * @returns VBox status code.
3733 * @param pVM The VM to operate on.
3734 * @param pPatch Patch record
3735 */
3736static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3737{
3738 uint8_t ASMInt3 = 0xCC;
3739 int rc;
3740
3741 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3742 Assert(pPatch->uState == PATCH_ENABLED || pPatch->uState == PATCH_DIRTY);
3743
3744 /* Restore first opcode byte. */
3745 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, sizeof(ASMInt3));
3746 AssertRC(rc);
3747 return rc;
3748}
3749
3750/**
3751 * Replace an instruction with a breakpoint (0xCC) that is handled dynamically in the guest context.
3752 *
3753 * @returns VBox status code.
3754 * @param pVM The VM to operate on.
3755 * @param pInstrGC Guest context pointer to the privileged instruction
3756 * @param pInstrHC Host context pointer to the privileged instruction
3757 * @param pCpu Disassembly CPU structure ptr
3758 * @param pPatch Patch record
3759 *
3760 * @note returns failure if patching is not allowed or possible
3761 *
3762 */
3763VMMR3DECL(int) PATMR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATCHINFO pPatch)
3764{
3765 uint8_t ASMInt3 = 0xCC;
3766 int rc;
3767
3768 /** @note Do not use patch memory here! It might be called during patch installation too. */
3769
3770#ifdef LOG_ENABLED
3771 DISCPUSTATE cpu;
3772 char szOutput[256];
3773 uint32_t opsize;
3774
3775 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3776 PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, szOutput);
3777 Log(("PATMR3PatchInstrInt3: %s", szOutput));
3778#endif
3779
3780 /* Save the original instruction. */
3781 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3782 AssertRC(rc);
3783 pPatch->cbPatchJump = sizeof(ASMInt3); /* bit of a misnomer in this case; size of replacement instruction. */
3784
3785 pPatch->flags |= PATMFL_INT3_REPLACEMENT;
3786
3787 /* Replace first opcode byte with 'int 3'. */
3788 rc = patmActivateInt3Patch(pVM, pPatch);
3789 if (RT_FAILURE(rc))
3790 goto failure;
3791
3792 /* Lowest and highest address for write monitoring. */
3793 pPatch->pInstrGCLowest = pInstrGC;
3794 pPatch->pInstrGCHighest = pInstrGC + pCpu->opsize;
3795
3796 pPatch->uState = PATCH_ENABLED;
3797 return VINF_SUCCESS;
3798
3799failure:
3800 /* Turn this patch into a dummy. */
3801 return VERR_PATCHING_REFUSED;
3802}
3803
3804#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
3805/**
3806 * Patch a jump instruction at specified location
3807 *
3808 * @returns VBox status code.
3809 * @param pVM The VM to operate on.
3810 * @param pInstrGC Guest context pointer to the privileged instruction
3811 * @param pInstrHC Host context pointer to the privileged instruction
3812 * @param pCpu Disassembly CPU structure ptr
3813 * @param pPatchRec Patch record
3814 *
3815 * @note returns failure if patching is not allowed or possible
3816 *
3817 */
3818int patmPatchJump(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATMPATCHREC pPatchRec)
3819{
3820 PPATCHINFO pPatch = &pPatchRec->patch;
3821 int rc = VERR_PATCHING_REFUSED;
3822#ifdef LOG_ENABLED
3823 bool disret;
3824 DISCPUSTATE cpu;
3825 uint32_t opsize;
3826 char szOutput[256];
3827#endif
3828
3829 pPatch->pPatchBlockOffset = 0; /* doesn't use patch memory */
3830 pPatch->uCurPatchOffset = 0;
3831 pPatch->cbPatchBlockSize = 0;
3832 pPatch->flags |= PATMFL_SINGLE_INSTRUCTION;
3833
3834 /*
3835 * Instruction replacements such as these should never be interrupted. I've added code to EM.cpp to
3836 * make sure this never happens. (unless a trap is triggered (intentionally or not))
3837 */
3838 switch (pCpu->pCurInstr->opcode)
3839 {
3840 case OP_JO:
3841 case OP_JNO:
3842 case OP_JC:
3843 case OP_JNC:
3844 case OP_JE:
3845 case OP_JNE:
3846 case OP_JBE:
3847 case OP_JNBE:
3848 case OP_JS:
3849 case OP_JNS:
3850 case OP_JP:
3851 case OP_JNP:
3852 case OP_JL:
3853 case OP_JNL:
3854 case OP_JLE:
3855 case OP_JNLE:
3856 case OP_JMP:
3857 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
3858 Assert(pCpu->param1.flags & USE_IMMEDIATE32_REL);
3859 if (!(pCpu->param1.flags & USE_IMMEDIATE32_REL))
3860 goto failure;
3861
3862 Assert(pCpu->opsize == SIZEOF_NEARJUMP32 || pCpu->opsize == SIZEOF_NEAR_COND_JUMP32);
3863 if (pCpu->opsize != SIZEOF_NEARJUMP32 && pCpu->opsize != SIZEOF_NEAR_COND_JUMP32)
3864 goto failure;
3865
3866 if (PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + pCpu->opsize))
3867 {
3868 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
3869 AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
3870 rc = VERR_PATCHING_REFUSED;
3871 goto failure;
3872 }
3873
3874 break;
3875
3876 default:
3877 goto failure;
3878 }
3879
3880 // make a copy of the guest code bytes that will be overwritten
3881 Assert(pCpu->opsize <= sizeof(pPatch->aPrivInstr));
3882 Assert(pCpu->opsize >= SIZEOF_NEARJUMP32);
3883 pPatch->cbPatchJump = pCpu->opsize;
3884
3885 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3886 AssertRC(rc);
3887
3888 /* Now insert a jump in the guest code. */
3889 /*
3890 * A conflict jump patch needs to be treated differently; we'll just replace the relative jump address with one that
3891 * references the target instruction in the conflict patch.
3892 */
3893 RTRCPTR pJmpDest = PATMR3GuestGCPtrToPatchGCPtr(pVM, pInstrGC + pCpu->opsize + (int32_t)pCpu->param1.parval);
3894
3895 AssertMsg(pJmpDest, ("PATMR3GuestGCPtrToPatchGCPtr failed for %RRv\n", pInstrGC + pCpu->opsize + (int32_t)pCpu->param1.parval));
3896 pPatch->pPatchJumpDestGC = pJmpDest;
3897
3898 rc = patmGenJumpToPatch(pVM, pPatch, true);
3899 AssertRC(rc);
3900 if (RT_FAILURE(rc))
3901 goto failure;
3902
3903 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
3904
3905#ifdef LOG_ENABLED
3906 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3907 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput);
3908 Log(("%s patch: %s", patmGetInstructionString(pPatch->opcode, pPatch->flags), szOutput));
3909#endif
3910
3911 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
3912
3913 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledJump);
3914
3915 /* Lowest and highest address for write monitoring. */
3916 pPatch->pInstrGCLowest = pInstrGC;
3917 pPatch->pInstrGCHighest = pInstrGC + pPatch->cbPatchJump;
3918
3919 pPatch->uState = PATCH_ENABLED;
3920 return VINF_SUCCESS;
3921
3922failure:
3923 /* Turn this patch into a dummy. */
3924 pPatch->uState = PATCH_REFUSED;
3925
3926 return rc;
3927}
3928#endif /* PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES */
3929
3930
3931/**
3932 * Gives hint to PATM about supervisor guest instructions
3933 *
3934 * @returns VBox status code.
3935 * @param pVM The VM to operate on.
3936 * @param pInstrGC Guest context pointer to the privileged instruction
3937 * @param flags Patch flags
3938 */
3939VMMR3DECL(int) PATMR3AddHint(PVM pVM, RTRCPTR pInstrGC, uint32_t flags)
3940{
3941 Assert(pInstrGC);
3942 Assert(flags == PATMFL_CODE32);
3943
3944 Log(("PATMR3AddHint %RRv\n", pInstrGC));
3945 return PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_INSTR_HINT);
3946}
3947
3948/**
3949 * Patch privileged instruction at specified location
3950 *
3951 * @returns VBox status code.
3952 * @param pVM The VM to operate on.
3953 * @param pInstrGC Guest context pointer to the privileged instruction (0:32 flat address)
3954 * @param flags Patch flags
3955 *
3956 * @note returns failure if patching is not allowed or possible
3957 */
3958VMMR3DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
3959{
3960 DISCPUSTATE cpu;
3961 R3PTRTYPE(uint8_t *) pInstrHC;
3962 uint32_t opsize;
3963 PPATMPATCHREC pPatchRec;
3964 PCPUMCTX pCtx = 0;
3965 bool disret;
3966 int rc;
3967 PVMCPU pVCpu = VMMGetCpu0(pVM);
3968
3969 if (!pVM || pInstrGC == 0 || (flags & ~(PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_SYSENTER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_REPLACE_FUNCTION_CALL|PATMFL_GUEST_SPECIFIC|PATMFL_INT3_REPLACEMENT|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_MMIO_ACCESS|PATMFL_TRAMPOLINE|PATMFL_INSTR_HINT|PATMFL_JUMP_CONFLICT)))
3970 {
3971 AssertFailed();
3972 return VERR_INVALID_PARAMETER;
3973 }
3974
3975 if (PATMIsEnabled(pVM) == false)
3976 return VERR_PATCHING_REFUSED;
3977
3978 /* Test for patch conflict only with patches that actually change guest code. */
3979 if (!(flags & (PATMFL_GUEST_SPECIFIC|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAMPOLINE)))
3980 {
3981 PPATCHINFO pConflictPatch = PATMFindActivePatchByEntrypoint(pVM, pInstrGC);
3982 AssertReleaseMsg(pConflictPatch == 0, ("Unable to patch overwritten instruction at %RRv (%RRv)\n", pInstrGC, pConflictPatch->pPrivInstrGC));
3983 if (pConflictPatch != 0)
3984 return VERR_PATCHING_REFUSED;
3985 }
3986
3987 if (!(flags & PATMFL_CODE32))
3988 {
3989 /** @todo Only 32 bits code right now */
3990 AssertMsgFailed(("PATMR3InstallPatch: We don't support 16 bits code at this moment!!\n"));
3991 return VERR_NOT_IMPLEMENTED;
3992 }
3993
3994 /* We ran out of patch memory; don't bother anymore. */
3995 if (pVM->patm.s.fOutOfMemory == true)
3996 return VERR_PATCHING_REFUSED;
3997
3998 /* Make sure the code selector is wide open; otherwise refuse. */
3999 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
4000 if (CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)) == 0)
4001 {
4002 RTRCPTR pInstrGCFlat = SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pInstrGC);
4003 if (pInstrGCFlat != pInstrGC)
4004 {
4005 Log(("PATMR3InstallPatch: code selector not wide open: %04x:%RRv != %RRv eflags=%08x\n", pCtx->cs, pInstrGCFlat, pInstrGC, pCtx->eflags.u32));
4006 return VERR_PATCHING_REFUSED;
4007 }
4008 }
4009
4010 /** @note the OpenBSD specific check will break if we allow additional patches to be installed (int 3). */
4011 if (!(flags & PATMFL_GUEST_SPECIFIC))
4012 {
4013 /* New code. Make sure CSAM has a go at it first. */
4014 CSAMR3CheckCode(pVM, pInstrGC);
4015 }
4016
4017 /** @note obsolete */
4018 if ( PATMIsPatchGCAddr(pVM, pInstrGC)
4019 && (flags & PATMFL_MMIO_ACCESS))
4020 {
4021 RTRCUINTPTR offset;
4022 void *pvPatchCoreOffset;
4023
4024 /* Find the patch record. */
4025 offset = pInstrGC - pVM->patm.s.pPatchMemGC;
4026 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
4027 if (pvPatchCoreOffset == NULL)
4028 {
4029 AssertMsgFailed(("PATMR3InstallPatch: patch not found at address %RRv!!\n", pInstrGC));
4030 return VERR_PATCH_NOT_FOUND; //fatal error
4031 }
4032 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
4033
4034 return patmPatchPATMMMIOInstr(pVM, pInstrGC, &pPatchRec->patch);
4035 }
4036
4037 AssertReturn(!PATMIsPatchGCAddr(pVM, pInstrGC), VERR_PATCHING_REFUSED);
4038
4039 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4040 if (pPatchRec)
4041 {
4042 Assert(!(flags & PATMFL_TRAMPOLINE));
4043
4044 /* Hints about existing patches are ignored. */
4045 if (flags & PATMFL_INSTR_HINT)
4046 return VERR_PATCHING_REFUSED;
4047
4048 if (pPatchRec->patch.uState == PATCH_DISABLE_PENDING)
4049 {
4050 Log(("PATMR3InstallPatch: disable operation is pending for patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
4051 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
4052 Assert(pPatchRec->patch.uState == PATCH_DISABLED);
4053 }
4054
4055 if (pPatchRec->patch.uState == PATCH_DISABLED)
4056 {
4057 /* A patch, for which we previously received a hint, will be enabled and turned into a normal patch. */
4058 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4059 {
4060 Log(("Enabling HINTED patch %RRv\n", pInstrGC));
4061 pPatchRec->patch.flags &= ~PATMFL_INSTR_HINT;
4062 }
4063 else
4064 Log(("Enabling patch %RRv again\n", pInstrGC));
4065
4066 /** @todo we shouldn't disable and enable patches too often (it's relatively cheap, but pointless if it always happens) */
4067 rc = PATMR3EnablePatch(pVM, pInstrGC);
4068 if (RT_SUCCESS(rc))
4069 return VWRN_PATCH_ENABLED;
4070
4071 return rc;
4072 }
4073 if ( pPatchRec->patch.uState == PATCH_ENABLED
4074 || pPatchRec->patch.uState == PATCH_DIRTY)
4075 {
4076 /*
4077 * The patch might have been overwritten.
4078 */
4079 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
4080 if (pPatchRec->patch.uState != PATCH_REFUSED && pPatchRec->patch.uState != PATCH_UNUSABLE)
4081 {
4082 /* Patch must have been overwritten; remove it and pretend nothing happened. */
4083 Log(("Patch an existing patched instruction?!? (%RRv)\n", pInstrGC));
4084 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_MMIO_ACCESS|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
4085 {
4086 if (flags & PATMFL_IDTHANDLER)
4087 pPatchRec->patch.flags |= (flags & (PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER|PATMFL_INTHANDLER)); /* update the type */
4088
4089 return VERR_PATM_ALREADY_PATCHED; /* already done once */
4090 }
4091 }
4092 rc = PATMR3RemovePatch(pVM, pInstrGC);
4093 if (RT_FAILURE(rc))
4094 return VERR_PATCHING_REFUSED;
4095 }
4096 else
4097 {
4098 AssertMsg(pPatchRec->patch.uState == PATCH_REFUSED || pPatchRec->patch.uState == PATCH_UNUSABLE, ("Patch an existing patched instruction?!? (%RRv, state=%d)\n", pInstrGC, pPatchRec->patch.uState));
4099 /* already tried it once! */
4100 return VERR_PATCHING_REFUSED;
4101 }
4102 }
4103
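    /* Allocate a fresh patch record and insert it into the lookup tree keyed by guest instruction address. */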
4104 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
4105 if (RT_FAILURE(rc))
4106 {
4107 Log(("Out of memory!!!!\n"));
4108 return VERR_NO_MEMORY;
4109 }
4110 pPatchRec->Core.Key = pInstrGC;
4111 pPatchRec->patch.uState = PATCH_REFUSED; //default
4112 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
4113 Assert(rc);
4114
4115 RTGCPHYS GCPhys;
4116 rc = PGMGstGetPage(pVCpu, pInstrGC, NULL, &GCPhys);
4117 if (rc != VINF_SUCCESS)
4118 {
4119 Log(("PGMGstGetPage failed with %Rrc\n", rc));
4120 return rc;
4121 }
4122 /* Disallow patching instructions inside ROM code; complete function duplication is allowed though. */
4123 if ( !(flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAMPOLINE))
4124 && !PGMPhysIsGCPhysNormal(pVM, GCPhys))
4125 {
4126 Log(("Code at %RGv (phys %RGp) is in a ROM, MMIO or invalid page - refused\n", pInstrGC, GCPhys));
4127 return VERR_PATCHING_REFUSED;
4128 }
4129 GCPhys = GCPhys + (pInstrGC & PAGE_OFFSET_MASK);
4130 rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys, MAX_INSTR_SIZE, (void **)&pInstrHC);
4131 AssertRCReturn(rc, rc);
4132
4133 pPatchRec->patch.pPrivInstrHC = pInstrHC;
4134 pPatchRec->patch.pPrivInstrGC = pInstrGC;
4135 pPatchRec->patch.flags = flags;
4136 pPatchRec->patch.uOpMode = (flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
4137
4138 pPatchRec->patch.pInstrGCLowest = pInstrGC;
4139 pPatchRec->patch.pInstrGCHighest = pInstrGC;
4140
4141 if (!(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION | PATMFL_IDTHANDLER | PATMFL_SYSENTER | PATMFL_TRAMPOLINE)))
4142 {
4143 /*
4144 * Close proximity to an unusable patch is a possible hint that this patch would turn out to be dangerous too!
4145 */
4146 PPATMPATCHREC pPatchNear = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, (pInstrGC + SIZEOF_NEARJUMP32 - 1), false);
4147 if (pPatchNear)
4148 {
4149 if (pPatchNear->patch.uState == PATCH_UNUSABLE && pInstrGC < pPatchNear->patch.pPrivInstrGC && pInstrGC + SIZEOF_NEARJUMP32 > pPatchNear->patch.pPrivInstrGC)
4150 {
4151 Log(("Dangerous patch; would overwrite the unusable patch at %RRv\n", pPatchNear->patch.pPrivInstrGC));
4152
4153 pPatchRec->patch.uState = PATCH_UNUSABLE;
4154 /*
4155 * Leave the new patch record in place (marked unusable) to prevent us from checking it over and over again.
4156 */
4157 return VERR_PATCHING_REFUSED;
4158 }
4159 }
4160 }
4161
4162 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
4163 if (pPatchRec->patch.pTempInfo == 0)
4164 {
4165 Log(("Out of memory!!!!\n"));
4166 return VERR_NO_MEMORY;
4167 }
4168
4169 cpu.mode = pPatchRec->patch.uOpMode;
4170 disret = PATMR3DISInstr(pVM, &pPatchRec->patch, &cpu, pInstrGC, pInstrHC, &opsize, NULL);
4171 if (disret == false)
4172 {
4173 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
4174 return VERR_PATCHING_REFUSED;
4175 }
4176
4177 AssertMsg(opsize <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", opsize));
4178 if (opsize > MAX_INSTR_SIZE)
4179 {
4180 return VERR_PATCHING_REFUSED;
4181 }
4182
4183 pPatchRec->patch.cbPrivInstr = opsize;
4184 pPatchRec->patch.opcode = cpu.pCurInstr->opcode;
4185
4186 /* Restricted hinting for now. */
4187 Assert(!(flags & PATMFL_INSTR_HINT) || cpu.pCurInstr->opcode == OP_CLI);
4188
4189 /* Allocate statistics slot */
4190 if (pVM->patm.s.uCurrentPatchIdx < PATM_STAT_MAX_COUNTERS)
4191 {
4192 pPatchRec->patch.uPatchIdx = pVM->patm.s.uCurrentPatchIdx++;
4193 }
4194 else
4195 {
4196 Log(("WARNING: Patch index wrap around!!\n"));
4197 pPatchRec->patch.uPatchIdx = PATM_STAT_INDEX_DUMMY;
4198 }
4199
4200 if (pPatchRec->patch.flags & PATMFL_TRAPHANDLER)
4201 {
4202 rc = patmInstallTrapTrampoline(pVM, pInstrGC, pPatchRec);
4203 }
4204 else
4205 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION ))
4206 {
4207 rc = patmDuplicateFunction(pVM, pInstrGC, pPatchRec);
4208 }
4209 else
4210 if (pPatchRec->patch.flags & PATMFL_TRAMPOLINE)
4211 {
4212 rc = patmCreateTrampoline(pVM, pInstrGC, pPatchRec);
4213 }
4214 else
4215 if (pPatchRec->patch.flags & PATMFL_REPLACE_FUNCTION_CALL)
4216 {
4217 rc = patmReplaceFunctionCall(pVM, &cpu, pInstrGC, &pPatchRec->patch);
4218 }
4219 else
4220 if (pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT)
4221 {
4222 rc = PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4223 }
4224 else
4225 if (pPatchRec->patch.flags & PATMFL_MMIO_ACCESS)
4226 {
4227 rc = patmPatchMMIOInstr(pVM, pInstrGC, &cpu, &pPatchRec->patch);
4228 }
4229 else
4230 if (pPatchRec->patch.flags & (PATMFL_IDTHANDLER|PATMFL_SYSENTER))
4231 {
4232 if (pPatchRec->patch.flags & PATMFL_SYSENTER)
4233 pPatchRec->patch.flags |= PATMFL_IDTHANDLER; /* we treat a sysenter handler as an IDT handler */
4234
4235 rc = patmIdtHandler(pVM, pInstrGC, pInstrHC, opsize, pPatchRec);
4236#ifdef VBOX_WITH_STATISTICS
4237 if ( rc == VINF_SUCCESS
4238 && (pPatchRec->patch.flags & PATMFL_SYSENTER))
4239 {
4240 pVM->patm.s.uSysEnterPatchIdx = pPatchRec->patch.uPatchIdx;
4241 }
4242#endif
4243 }
4244 else
4245 if (pPatchRec->patch.flags & PATMFL_GUEST_SPECIFIC)
4246 {
4247 switch (cpu.pCurInstr->opcode)
4248 {
4249 case OP_SYSENTER:
4250 case OP_PUSH:
4251 rc = PATMInstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4252 if (rc == VINF_SUCCESS)
4253 {
4254 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4256 return rc;
4257 }
4258 break;
4259
4260 default:
4261 rc = VERR_NOT_IMPLEMENTED;
4262 break;
4263 }
4264 }
4265 else
4266 {
4267 switch (cpu.pCurInstr->opcode)
4268 {
4269 case OP_SYSENTER:
4270 rc = PATMInstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4271 if (rc == VINF_SUCCESS)
4272 {
4273 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4274 return VINF_SUCCESS;
4275 }
4276 break;
4277
4278#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
4279 case OP_JO:
4280 case OP_JNO:
4281 case OP_JC:
4282 case OP_JNC:
4283 case OP_JE:
4284 case OP_JNE:
4285 case OP_JBE:
4286 case OP_JNBE:
4287 case OP_JS:
4288 case OP_JNS:
4289 case OP_JP:
4290 case OP_JNP:
4291 case OP_JL:
4292 case OP_JNL:
4293 case OP_JLE:
4294 case OP_JNLE:
4295 case OP_JECXZ:
4296 case OP_LOOP:
4297 case OP_LOOPNE:
4298 case OP_LOOPE:
4299 case OP_JMP:
4300 if (pPatchRec->patch.flags & PATMFL_JUMP_CONFLICT)
4301 {
4302 rc = patmPatchJump(pVM, pInstrGC, pInstrHC, &cpu, pPatchRec);
4303 break;
4304 }
4305 return VERR_NOT_IMPLEMENTED;
4306#endif
4307
4308 case OP_PUSHF:
4309 case OP_CLI:
4310 Log(("PATMR3InstallPatch %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4311 rc = PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, cpu.pCurInstr->opcode, opsize, pPatchRec);
4312 break;
4313
4314 case OP_STR:
4315 case OP_SGDT:
4316 case OP_SLDT:
4317 case OP_SIDT:
4318 case OP_CPUID:
4319 case OP_LSL:
4320 case OP_LAR:
4321 case OP_SMSW:
4322 case OP_VERW:
4323 case OP_VERR:
4324 case OP_IRET:
4325 rc = PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4326 break;
4327
4328 default:
4329 return VERR_NOT_IMPLEMENTED;
4330 }
4331 }
4332
4333 if (rc != VINF_SUCCESS)
4334 {
4335 if (pPatchRec && pPatchRec->patch.nrPatch2GuestRecs)
4336 {
4337 patmEmptyTreeU32(pVM, &pPatchRec->patch.Patch2GuestAddrTree);
4338 pPatchRec->patch.nrPatch2GuestRecs = 0;
4339 }
4340 pVM->patm.s.uCurrentPatchIdx--;
4341 }
4342 else
4343 {
4344 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
4345 AssertRCReturn(rc, rc);
4346
4347 /* Keep track upper and lower boundaries of patched instructions */
4348 if (pPatchRec->patch.pInstrGCLowest < pVM->patm.s.pPatchedInstrGCLowest)
4349 pVM->patm.s.pPatchedInstrGCLowest = pPatchRec->patch.pInstrGCLowest;
4350 if (pPatchRec->patch.pInstrGCHighest > pVM->patm.s.pPatchedInstrGCHighest)
4351 pVM->patm.s.pPatchedInstrGCHighest = pPatchRec->patch.pInstrGCHighest;
4352
4353 Log(("Patch lowest %RRv highest %RRv\n", pPatchRec->patch.pInstrGCLowest, pPatchRec->patch.pInstrGCHighest));
4354 Log(("Global lowest %RRv highest %RRv\n", pVM->patm.s.pPatchedInstrGCLowest, pVM->patm.s.pPatchedInstrGCHighest));
4355
4356 STAM_COUNTER_ADD(&pVM->patm.s.StatInstalled, 1);
4357 STAM_COUNTER_ADD(&pVM->patm.s.StatPATMMemoryUsed, pPatchRec->patch.cbPatchBlockSize);
4358
4359 rc = VINF_SUCCESS;
4360
4361 /* Patch hints are not enabled by default. Only when they are actually encountered. */
4362 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4363 {
4364 rc = PATMR3DisablePatch(pVM, pInstrGC);
4365 AssertRCReturn(rc, rc);
4366 }
4367
4368#ifdef VBOX_WITH_STATISTICS
4369 /* Register statistics counter */
4370 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
4371 {
4372 STAMR3RegisterCallback(pVM, &pPatchRec->patch, STAMVISIBILITY_NOT_GUI, STAMUNIT_GOOD_BAD, patmResetStat, patmPrintStat, "Patch statistics",
4373 "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4374#ifndef DEBUG_sandervl
4375 /* Full breakdown for the GUI. */
4376 STAMR3RegisterF(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx], STAMTYPE_RATIO_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_GOOD_BAD, PATMPatchType(pVM, &pPatchRec->patch),
4377 "/PATM/Stats/PatchBD/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4378 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchBlockSize,STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchBlockSize", pPatchRec->patch.pPrivInstrGC);
4379 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchJump, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchJump", pPatchRec->patch.pPrivInstrGC);
4380 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPrivInstr, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPrivInstr", pPatchRec->patch.pPrivInstrGC);
4381 STAMR3RegisterF(pVM, &pPatchRec->patch.cCodeWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cCodeWrites", pPatchRec->patch.pPrivInstrGC);
4382 STAMR3RegisterF(pVM, &pPatchRec->patch.cInvalidWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cInvalidWrites", pPatchRec->patch.pPrivInstrGC);
4383 STAMR3RegisterF(pVM, &pPatchRec->patch.cTraps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cTraps", pPatchRec->patch.pPrivInstrGC);
4384 STAMR3RegisterF(pVM, &pPatchRec->patch.flags, STAMTYPE_X32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/flags", pPatchRec->patch.pPrivInstrGC);
4385 STAMR3RegisterF(pVM, &pPatchRec->patch.nrJumpRecs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrJumpRecs", pPatchRec->patch.pPrivInstrGC);
4386 STAMR3RegisterF(pVM, &pPatchRec->patch.nrFixups, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrFixups", pPatchRec->patch.pPrivInstrGC);
4387 STAMR3RegisterF(pVM, &pPatchRec->patch.opcode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/opcode", pPatchRec->patch.pPrivInstrGC);
4388 STAMR3RegisterF(pVM, &pPatchRec->patch.uOldState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOldState", pPatchRec->patch.pPrivInstrGC);
4389 STAMR3RegisterF(pVM, &pPatchRec->patch.uOpMode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOpMode", pPatchRec->patch.pPrivInstrGC);
4390 /// @todo change the state to be a callback so we can get a state mnemonic instead.
4391 STAMR3RegisterF(pVM, &pPatchRec->patch.uState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uState", pPatchRec->patch.pPrivInstrGC);
4392#endif
4393 }
4394#endif
4395 }
4396 return rc;
4397}
4398
4399/**
4400 * Query instruction size
4401 *
4402 * @returns VBox status code.
4403 * @param pVM The VM to operate on.
4404 * @param pPatch Patch record
4405 * @param pInstrGC Instruction address
4406 */
4407static uint32_t patmGetInstrSize(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
4408{
4409 uint8_t *pInstrHC;
4410
4411 int rc = PGMPhysGCPtr2R3Ptr(VMMGetCpu0(pVM), pInstrGC, (PRTR3PTR)&pInstrHC);
4412 if (rc == VINF_SUCCESS)
4413 {
4414 DISCPUSTATE cpu;
4415 bool disret;
4416 uint32_t opsize;
4417
4418 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
4419 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, NULL, PATMREAD_ORGCODE | PATMREAD_NOCHECK);
4420 if (disret)
4421 return opsize;
4422 }
4423 return 0;
4424}
4425
4426/**
4427 * Add patch to page record
4428 *
4429 * @returns VBox status code.
4430 * @param pVM The VM to operate on.
4431 * @param pPage Page address
4432 * @param pPatch Patch record
4433 */
4434int patmAddPatchToPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4435{
4436 PPATMPATCHPAGE pPatchPage;
4437 int rc;
4438
4439 Log(("patmAddPatchToPage: insert patch %RHv to page %RRv\n", pPatch, pPage));
4440
4441 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4442 if (pPatchPage)
4443 {
4444 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4445 if (pPatchPage->cCount == pPatchPage->cMaxPatches)
4446 {
4447 uint32_t cMaxPatchesOld = pPatchPage->cMaxPatches;
4448 PPATCHINFO *paPatchOld = pPatchPage->aPatch;
4449
4450 pPatchPage->cMaxPatches += PATMPATCHPAGE_PREALLOC_INCREMENT;
4451 rc = MMHyperAlloc(pVM, sizeof(PPATCHINFO)*pPatchPage->cMaxPatches, 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage->aPatch);
4452 if (RT_FAILURE(rc))
4453 {
4454 Log(("Out of memory!!!!\n"));
4455 return VERR_NO_MEMORY;
4456 }
4457 memcpy(pPatchPage->aPatch, paPatchOld, cMaxPatchesOld*sizeof(PPATCHINFO));
4458 MMHyperFree(pVM, paPatchOld);
4459 }
4460 pPatchPage->aPatch[pPatchPage->cCount] = pPatch;
4461 pPatchPage->cCount++;
4462 }
4463 else
4464 {
4465 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHPAGE), 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage);
4466 if (RT_FAILURE(rc))
4467 {
4468 Log(("Out of memory!!!!\n"));
4469 return VERR_NO_MEMORY;
4470 }
4471 pPatchPage->Core.Key = pPage;
4472 pPatchPage->cCount = 1;
4473 pPatchPage->cMaxPatches = PATMPATCHPAGE_PREALLOC_INCREMENT;
4474
4475 rc = MMHyperAlloc(pVM, sizeof(PPATCHINFO)*PATMPATCHPAGE_PREALLOC_INCREMENT, 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage->aPatch);
4476 if (RT_FAILURE(rc))
4477 {
4478 Log(("Out of memory!!!!\n"));
4479 MMHyperFree(pVM, pPatchPage);
4480 return VERR_NO_MEMORY;
4481 }
4482 pPatchPage->aPatch[0] = pPatch;
4483
4484 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, &pPatchPage->Core);
4485 Assert(rc);
4486 pVM->patm.s.cPageRecords++;
4487
4488 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageInserted);
4489 }
4490 CSAMR3MonitorPage(pVM, pPage, CSAM_TAG_PATM);
4491
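    /* Track the lowest and highest patched guest addresses within this page; PATMR3PatchWrite uses them for a quick range check. */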
4492 /* Get the closest guest instruction (from below) */
4493 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4494 Assert(pGuestToPatchRec);
4495 if (pGuestToPatchRec)
4496 {
4497 LogFlow(("patmAddPatchToPage: lowest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pLowestAddrGC));
4498 if ( pPatchPage->pLowestAddrGC == 0
4499 || pPatchPage->pLowestAddrGC > (RTRCPTR)pGuestToPatchRec->Core.Key)
4500 {
4501 RTRCUINTPTR offset;
4502
4503 pPatchPage->pLowestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4504
4505 offset = pPatchPage->pLowestAddrGC & PAGE_OFFSET_MASK;
4506 /* If we're too close to the page boundary, then make sure an instruction from the previous page doesn't cross the boundary itself. */
4507 if (offset && offset < MAX_INSTR_SIZE)
4508 {
4509 /* Get the closest guest instruction (from above) */
4510 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage-1, false);
4511
4512 if (pGuestToPatchRec)
4513 {
4514 uint32_t size = patmGetInstrSize(pVM, pPatch, (RTRCPTR)pGuestToPatchRec->Core.Key);
4515 if ((RTRCUINTPTR)pGuestToPatchRec->Core.Key + size > pPage)
4516 {
4517 pPatchPage->pLowestAddrGC = pPage;
4518 LogFlow(("patmAddPatchToPage: new lowest %RRv\n", pPatchPage->pLowestAddrGC));
4519 }
4520 }
4521 }
4522 }
4523 }
4524
4525 /* Get the closest guest instruction (from above) */
4526 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage+PAGE_SIZE-1, false);
4527 Assert(pGuestToPatchRec);
4528 if (pGuestToPatchRec)
4529 {
4530 LogFlow(("patmAddPatchToPage: highest patch page address %RRv current highest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pHighestAddrGC));
4531 if ( pPatchPage->pHighestAddrGC == 0
4532 || pPatchPage->pHighestAddrGC <= (RTRCPTR)pGuestToPatchRec->Core.Key)
4533 {
4534 pPatchPage->pHighestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4535 /* Increase by instruction size. */
4536 uint32_t size = patmGetInstrSize(pVM, pPatch, pPatchPage->pHighestAddrGC);
4537//// Assert(size);
4538 pPatchPage->pHighestAddrGC += size;
4539 LogFlow(("patmAddPatchToPage: new highest %RRv\n", pPatchPage->pHighestAddrGC));
4540 }
4541 }
4542
4543 return VINF_SUCCESS;
4544}
4545
4546/**
4547 * Remove patch from page record
4548 *
4549 * @returns VBox status code.
4550 * @param pVM The VM to operate on.
4551 * @param pPage Page address
4552 * @param pPatch Patch record
4553 */
4554int patmRemovePatchFromPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4555{
4556 PPATMPATCHPAGE pPatchPage;
4557 int rc;
4558
4559 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4560 Assert(pPatchPage);
4561
4562 if (!pPatchPage)
4563 return VERR_INVALID_PARAMETER;
4564
4565 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4566
4567 Log(("patmRemovePatchFromPage: remove patch %RHv from page %RRv\n", pPatch, pPage));
4568 if (pPatchPage->cCount > 1)
4569 {
4570 uint32_t i;
4571
4572 /* Used by multiple patches */
4573 for (i=0;i<pPatchPage->cCount;i++)
4574 {
4575 if (pPatchPage->aPatch[i] == pPatch)
4576 {
4577 pPatchPage->aPatch[i] = 0;
4578 break;
4579 }
4580 }
4581 /* close the gap between the remaining pointers. */
4582 if (i < pPatchPage->cCount - 1)
4583 {
4584 memcpy(&pPatchPage->aPatch[i], &pPatchPage->aPatch[i+1], sizeof(PPATCHINFO)*(pPatchPage->cCount - (i+1)));
4585 }
4586 AssertMsg(i < pPatchPage->cCount, ("Unable to find patch %RHv in page %RRv\n", pPatch, pPage));
4587
4588 pPatchPage->cCount--;
4589 }
4590 else
4591 {
4592 PPATMPATCHPAGE pPatchNode;
4593
4594 Log(("patmRemovePatchFromPage %RRv\n", pPage));
4595
4596 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageRemoved);
4597 pPatchNode = (PPATMPATCHPAGE)RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4598 Assert(pPatchNode && pPatchNode == pPatchPage);
4599
4600 Assert(pPatchPage->aPatch);
4601 rc = MMHyperFree(pVM, pPatchPage->aPatch);
4602 AssertRC(rc);
4603 rc = MMHyperFree(pVM, pPatchPage);
4604 AssertRC(rc);
4605 pVM->patm.s.cPageRecords--;
4606 }
4607 return VINF_SUCCESS;
4608}
4609
4610/**
4611 * Insert page records for all guest pages that contain instructions that were recompiled for this patch
4612 *
4613 * @returns VBox status code.
4614 * @param pVM The VM to operate on.
4615 * @param pPatch Patch record
4616 */
4617int patmInsertPatchPages(PVM pVM, PPATCHINFO pPatch)
4618{
4619 int rc;
4620 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4621
4622 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4623 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4624 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4625
4626 /** @todo optimize better (large gaps between current and next used page) */
4627 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4628 {
4629 /* Get the closest guest instruction (from above) */
4630 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4631 if ( pGuestToPatchRec
4632 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage)
4633 )
4634 {
4635 /* Code in page really patched -> add record */
4636 rc = patmAddPatchToPage(pVM, pPage, pPatch);
4637 AssertRC(rc);
4638 }
4639 }
4640 pPatch->flags |= PATMFL_CODE_MONITORED;
4641 return VINF_SUCCESS;
4642}
4643
4644/**
4645 * Remove page records for all guest pages that contain instructions that were recompiled for this patch
4646 *
4647 * @returns VBox status code.
4648 * @param pVM The VM to operate on.
4649 * @param pPatch Patch record
4650 */
4651int patmRemovePatchPages(PVM pVM, PPATCHINFO pPatch)
4652{
4653 int rc;
4654 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4655
4656 /* Walk the pages that contain patched instructions; their records were inserted into the lookup tree for detecting self-modifying code. */
4657 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4658 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4659
4660 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4661 {
4662 /* Get the closest guest instruction (from above) */
4663 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4664 if ( pGuestToPatchRec
4665 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage) /** @todo bird: PAGE_ADDRESS is for the current context really. check out these. */
4666 )
4667 {
4668 /* Code in page really patched -> remove record */
4669 rc = patmRemovePatchFromPage(pVM, pPage, pPatch);
4670 AssertRC(rc);
4671 }
4672 }
4673 pPatch->flags &= ~PATMFL_CODE_MONITORED;
4674 return VINF_SUCCESS;
4675}
4676
4677/**
4678 * Notifies PATM about a (potential) write to code that has been patched.
4679 *
4680 * @returns VBox status code.
4681 * @param pVM The VM to operate on.
4682 * @param GCPtr GC pointer to write address
4683 * @param cbWrite Number of bytes to write
4684 *
4685 */
4686VMMR3DECL(int) PATMR3PatchWrite(PVM pVM, RTRCPTR GCPtr, uint32_t cbWrite)
4687{
4688 RTRCUINTPTR pWritePageStart, pWritePageEnd, pPage;
4689
4690 Log(("PATMR3PatchWrite %RRv %x\n", GCPtr, cbWrite));
4691
4692 Assert(VM_IS_EMT(pVM));
4693
4694 /* Quick boundary check */
4695 if ( GCPtr < pVM->patm.s.pPatchedInstrGCLowest
4696 || GCPtr > pVM->patm.s.pPatchedInstrGCHighest
4697 )
4698 return VINF_SUCCESS;
4699
4700 STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWrite, a);
4701
4702 pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
4703 pWritePageEnd = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;
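    /* Note: the end page is computed from the last byte written (GCPtr + cbWrite - 1), so the loop below visits every page the write touches, inclusive. */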
4704
4705 for (pPage = pWritePageStart; pPage <= pWritePageEnd; pPage += PAGE_SIZE)
4706 {
4707loop_start:
4708 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4709 if (pPatchPage)
4710 {
4711 uint32_t i;
4712 bool fValidPatchWrite = false;
4713
4714 /* Quick check to see if the write is in the patched part of the page */
4715 if ( pPatchPage->pLowestAddrGC > (RTRCPTR)((RTRCUINTPTR)GCPtr + cbWrite - 1)
4716 || pPatchPage->pHighestAddrGC < GCPtr)
4717 {
4718 break;
4719 }
4720
4721 for (i=0;i<pPatchPage->cCount;i++)
4722 {
4723 if (pPatchPage->aPatch[i])
4724 {
4725 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4726 RTRCPTR pPatchInstrGC;
4727 //unused: bool fForceBreak = false;
4728
4729 Assert(pPatchPage->aPatch[i]->flags & PATMFL_CODE_MONITORED);
4730 /** @todo inefficient and includes redundant checks for multiple pages. */
4731 for (uint32_t j=0; j<cbWrite; j++)
4732 {
4733 RTRCPTR pGuestPtrGC = (RTRCPTR)((RTRCUINTPTR)GCPtr + j);
4734
4735 if ( pPatch->cbPatchJump
4736 && pGuestPtrGC >= pPatch->pPrivInstrGC
4737 && pGuestPtrGC < pPatch->pPrivInstrGC + pPatch->cbPatchJump)
4738 {
4739 /* The guest is about to overwrite the 5 byte jump to patch code. Remove the patch. */
4740 Log(("PATMR3PatchWrite: overwriting jump to patch code -> remove patch.\n"));
4741 int rc = PATMR3RemovePatch(pVM, pPatch->pPrivInstrGC);
4742 if (rc == VINF_SUCCESS)
4743 /** @note jump back to the start as the pPatchPage has been deleted or changed */
4744 goto loop_start;
4745
4746 continue;
4747 }
4748
4749 /* Find the closest instruction from below; the above quick check ensured that we are indeed in patched code */
4750 pPatchInstrGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4751 if (!pPatchInstrGC)
4752 {
4753 RTRCPTR pClosestInstrGC;
4754 uint32_t size;
4755
4756 pPatchInstrGC = patmGuestGCPtrToClosestPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4757 if (pPatchInstrGC)
4758 {
4759 pClosestInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchInstrGC);
4760 Assert(pClosestInstrGC <= pGuestPtrGC);
4761 size = patmGetInstrSize(pVM, pPatch, pClosestInstrGC);
4762 /* Check if this is not a write into a gap between two patches */
4763 if (pClosestInstrGC + size - 1 < pGuestPtrGC)
4764 pPatchInstrGC = 0;
4765 }
4766 }
4767 if (pPatchInstrGC)
4768 {
4769 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
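                        /* This offset (relative to the start of patch memory) is the key used by the patch-to-guest lookup tree queried below. */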
4770
4771 fValidPatchWrite = true;
4772
4773 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
4774 Assert(pPatchToGuestRec);
4775 if (pPatchToGuestRec && !pPatchToGuestRec->fDirty)
4776 {
4777 Log(("PATMR3PatchWrite: Found patched instruction %RRv -> %RRv\n", pGuestPtrGC, pPatchInstrGC));
4778
4779 if (++pPatch->cCodeWrites > PATM_MAX_CODE_WRITES)
4780 {
4781 LogRel(("PATM: Disable block at %RRv - write %RRv-%RRv\n", pPatch->pPrivInstrGC, pGuestPtrGC, pGuestPtrGC+cbWrite));
4782
4783 PATMR3MarkDirtyPatch(pVM, pPatch);
4784
4785 /** @note jump back to the start as the pPatchPage has been deleted or changed */
4786 goto loop_start;
4787 }
4788 else
4789 {
4790 /* Replace the patch instruction with a breakpoint; when it's hit, then we'll attempt to recompile the instruction again. */
4791 uint8_t *pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pPatchInstrGC);
4792
4793 pPatchToGuestRec->u8DirtyOpcode = *pInstrHC;
4794 pPatchToGuestRec->fDirty = true;
4795
4796 *pInstrHC = 0xCC;
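                                /* The int3 faults into PATMR3HandleTrap, which uses patmR3HandleDirtyInstr to re-examine the modified guest code and copy it into the patch block again if it is still harmless. */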
4797
4798 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirty);
4799 }
4800 }
4801 /* else already marked dirty */
4802 }
4803 }
4804 }
4805 } /* for each patch */
4806
4807 if (fValidPatchWrite == false)
4808 {
4809 /* Write to a part of the page that either:
4810              * - doesn't contain any code (shared code/data); rather unlikely, or
4811              * - belongs to an old code page that's no longer in active use.
4812 */
4813invalid_write_loop_start:
4814 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4815
4816 if (pPatchPage)
4817 {
4818 for (i=0;i<pPatchPage->cCount;i++)
4819 {
4820 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4821
4822 if (pPatch->cInvalidWrites > PATM_MAX_INVALID_WRITES)
4823 {
4824 /** @note possibly dangerous assumption that all future writes will be harmless. */
4825 if (pPatch->flags & PATMFL_IDTHANDLER)
4826 {
4827 LogRel(("PATM: Stop monitoring IDT handler pages at %RRv - invalid write %RRv-%RRv (this is not a fatal error)\n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
4828
4829 Assert(pPatch->flags & PATMFL_CODE_MONITORED);
4830 int rc = patmRemovePatchPages(pVM, pPatch);
4831 AssertRC(rc);
4832 }
4833 else
4834 {
4835 LogRel(("PATM: Disable block at %RRv - invalid write %RRv-%RRv \n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
4836 PATMR3MarkDirtyPatch(pVM, pPatch);
4837 }
4838 /** @note jump back to the start as the pPatchPage has been deleted or changed */
4839 goto invalid_write_loop_start;
4840 }
4841 } /* for */
4842 }
4843 }
4844 }
4845 }
4846 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWrite, a);
4847 return VINF_SUCCESS;
4848
4849}
4850
4851/**
4852 * Disable all patches in a flushed page
4853 *
4854 * @returns VBox status code
4855 * @param pVM The VM to operate on.
4856 * @param addr GC address of the page to flush
4857 */
4858/** @note Currently only called by CSAMR3FlushPage; optimization to avoid having to double check if the physical address has changed
4859 */
4860VMMR3DECL(int) PATMR3FlushPage(PVM pVM, RTRCPTR addr)
4861{
4862 addr &= PAGE_BASE_GC_MASK;
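    /* The page lookup tree is keyed by page-aligned guest addresses, so mask off the offset bits first. */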
4863
4864 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, addr);
4865 if (pPatchPage)
4866 {
4867 int i;
4868
4869 /* From top to bottom as the array is modified by PATMR3MarkDirtyPatch. */
4870 for (i=(int)pPatchPage->cCount-1;i>=0;i--)
4871 {
4872 if (pPatchPage->aPatch[i])
4873 {
4874 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4875
4876 Log(("PATMR3FlushPage %RRv remove patch at %RRv\n", addr, pPatch->pPrivInstrGC));
4877 PATMR3MarkDirtyPatch(pVM, pPatch);
4878 }
4879 }
4880 STAM_COUNTER_INC(&pVM->patm.s.StatFlushed);
4881 }
4882 return VINF_SUCCESS;
4883}
4884
4885/**
4886 * Checks if the instruction at the specified address has already been patched.
4887 *
4888 * @returns boolean, patched or not
4889 * @param pVM The VM to operate on.
4890 * @param pInstrGC Guest context pointer to instruction
4891 */
4892VMMR3DECL(bool) PATMR3HasBeenPatched(PVM pVM, RTRCPTR pInstrGC)
4893{
4894 PPATMPATCHREC pPatchRec;
4895 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4896 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
4897 return true;
4898 return false;
4899}
4900
4901/**
4902 * Query the opcode of the original code that was overwritten by the 5-byte patch jump
4903 *
4904 * @returns VBox status code.
4905 * @param pVM The VM to operate on.
4906 * @param pInstrGC GC address of instr
4907 * @param pByte opcode byte pointer (OUT)
4908 *
4909 */
4910VMMR3DECL(int) PATMR3QueryOpcode(PVM pVM, RTRCPTR pInstrGC, uint8_t *pByte)
4911{
4912 PPATMPATCHREC pPatchRec;
4913
4914 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
4915
4916 /* Shortcut. */
4917 if ( !PATMIsEnabled(pVM)
4918 || pInstrGC < pVM->patm.s.pPatchedInstrGCLowest
4919 || pInstrGC > pVM->patm.s.pPatchedInstrGCHighest)
4920 {
4921 return VERR_PATCH_NOT_FOUND;
4922 }
4923
4924 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
4925 // if the patch is enabled and the pointer lies within 5 bytes of this priv instr ptr, then we've got a hit!
4926 if ( pPatchRec
4927 && pPatchRec->patch.uState == PATCH_ENABLED
4928 && pInstrGC >= pPatchRec->patch.pPrivInstrGC
4929 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
4930 {
4931 RTRCPTR offset = pInstrGC - pPatchRec->patch.pPrivInstrGC;
4932 *pByte = pPatchRec->patch.aPrivInstr[offset];
4933
4934 if (pPatchRec->patch.cbPatchJump == 1)
4935 {
4936 Log(("PATMR3QueryOpcode: returning opcode %2X for instruction at %RRv\n", *pByte, pInstrGC));
4937 }
4938 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
4939 return VINF_SUCCESS;
4940 }
4941 return VERR_PATCH_NOT_FOUND;
4942}
4943
4944/**
4945 * Disable patch for privileged instruction at specified location
4946 *
4947 * @returns VBox status code.
4948 * @param pVM The VM to operate on.
4949 * @param pInstrGC Guest context pointer to the privileged instruction
4950 *
4951 * @note returns failure if patching is not allowed or possible
4952 *
4953 */
4954VMMR3DECL(int) PATMR3DisablePatch(PVM pVM, RTRCPTR pInstrGC)
4955{
4956 PPATMPATCHREC pPatchRec;
4957 PPATCHINFO pPatch;
4958
4959 Log(("PATMR3DisablePatch: %RRv\n", pInstrGC));
4960 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4961 if (pPatchRec)
4962 {
4963 int rc = VINF_SUCCESS;
4964
4965 pPatch = &pPatchRec->patch;
4966
4967 /* Already disabled? */
4968 if (pPatch->uState == PATCH_DISABLED)
4969 return VINF_SUCCESS;
4970
4971 /* Clear the IDT entries for the patch we're disabling. */
4972 /** @note very important as we clear IF in the patch itself */
4973 /** @todo this needs to be changed */
4974 if (pPatch->flags & PATMFL_IDTHANDLER)
4975 {
4976 uint32_t iGate;
4977
4978 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
4979 if (iGate != (uint32_t)~0)
4980 {
4981 TRPMR3SetGuestTrapHandler(pVM, iGate, TRPM_INVALID_HANDLER);
4982 if (++cIDTHandlersDisabled < 256)
4983 LogRel(("PATM: Disabling IDT %x patch handler %RRv\n", iGate, pInstrGC));
4984 }
4985 }
4986
4987 /* Mark the entry with a breakpoint in case somebody else calls it later on (cli patch used as a function, or function/trampoline/IDT patches). */
4988 if ( pPatch->pPatchBlockOffset
4989 && pPatch->uState == PATCH_ENABLED)
4990 {
4991 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
4992 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
4993 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
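            /* The original entry byte is saved in bDirtyOpcode so that PATMR3EnablePatch can restore it when the patch is re-enabled. */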
4994 }
4995
4996 /* IDT or function patches haven't changed any guest code. */
4997 if (pPatch->flags & PATMFL_PATCHED_GUEST_CODE)
4998 {
4999 Assert(pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP);
5000 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK)));
5001
5002 if (pPatch->uState != PATCH_REFUSED)
5003 {
5004 AssertMsg(pPatch->pPrivInstrHC, ("Invalid HC pointer?!? (%RRv)\n", pInstrGC));
5005 Assert(pPatch->cbPatchJump);
5006
5007 /** pPrivInstrHC is probably not valid anymore */
5008 rc = PGMPhysGCPtr2R3Ptr(VMMGetCpu0(pVM), pPatchRec->patch.pPrivInstrGC, (PRTR3PTR)&pPatchRec->patch.pPrivInstrHC);
5009 if (rc == VINF_SUCCESS)
5010 {
5011 uint8_t temp[16];
5012
5013 Assert(pPatch->cbPatchJump < sizeof(temp));
5014
5015 /* Let's first check if the guest code is still the same. */
5016 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5017 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5018 if (rc == VINF_SUCCESS)
5019 {
5020 RTRCINTPTR displ = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);
5021
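                    /* Verify that the guest still contains our 5-byte jump (0xE9 + rel32 displacement into the patch block); if not, the guest has overwritten it and the patch must be removed rather than merely disabled. */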
5022 if ( temp[0] != 0xE9 /* jmp opcode */
5023 || *(RTRCINTPTR *)(&temp[1]) != displ
5024 )
5025 {
5026 Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5027 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5028 /* Remove it completely */
5029 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5030 rc = PATMR3RemovePatch(pVM, pInstrGC);
5031 AssertRC(rc);
5032 return VWRN_PATCH_REMOVED;
5033 }
5034 }
5035 patmRemoveJumpToPatch(pVM, pPatch);
5036
5037 }
5038 else
5039 {
5040 Log(("PATMR3DisablePatch: unable to disable patch -> mark PATCH_DISABLE_PENDING\n"));
5041 pPatch->uState = PATCH_DISABLE_PENDING;
5042 }
5043 }
5044 else
5045 {
5046 AssertMsgFailed(("Patch was refused!\n"));
5047 return VERR_PATCH_ALREADY_DISABLED;
5048 }
5049 }
5050 else
5051 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5052 {
5053 uint8_t temp[16];
5054
5055 Assert(pPatch->cbPatchJump < sizeof(temp));
5056
5057 /* Let's first check if the guest code is still the same. */
5058 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5059 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5060 if (rc == VINF_SUCCESS)
5061 {
5062 if (temp[0] != 0xCC)
5063 {
5064 Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5065 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5066 /* Remove it completely */
5067 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5068 rc = PATMR3RemovePatch(pVM, pInstrGC);
5069 AssertRC(rc);
5070 return VWRN_PATCH_REMOVED;
5071 }
5072 patmDeactivateInt3Patch(pVM, pPatch);
5073 }
5074 }
5075
5076 if (rc == VINF_SUCCESS)
5077 {
5078 /* Save old state and mark this one as disabled (so it can be enabled later on). */
5079 if (pPatch->uState == PATCH_DISABLE_PENDING)
5080 {
5081 /* Just to be safe, let's make sure this one can never be reused; the patch might be marked dirty already (int3 at start) */
5082 pPatch->uState = PATCH_UNUSABLE;
5083 }
5084 else
5085 if (pPatch->uState != PATCH_DIRTY)
5086 {
5087 pPatch->uOldState = pPatch->uState;
5088 pPatch->uState = PATCH_DISABLED;
5089 }
5090 STAM_COUNTER_ADD(&pVM->patm.s.StatDisabled, 1);
5091 }
5092
5093 Log(("PATMR3DisablePatch: disabled patch at %RRv\n", pInstrGC));
5094 return VINF_SUCCESS;
5095 }
5096 Log(("Patch not found!\n"));
5097 return VERR_PATCH_NOT_FOUND;
5098}
5099
5100/**
5101 * Permanently disable patch for privileged instruction at specified location
5102 *
5103 * @returns VBox status code.
5104 * @param pVM The VM to operate on.
5105 * @param pInstrGC Guest context instruction pointer
5106 * @param pConflictAddr Guest context pointer which conflicts with specified patch
5107 * @param pConflictPatch Conflicting patch
5108 *
5109 */
5110static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pConflictPatch)
5111{
5112#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
5113 PATCHINFO patch = {0};
5114 DISCPUSTATE cpu;
5115 R3PTRTYPE(uint8_t *) pInstrHC;
5116 uint32_t opsize;
5117 bool disret;
5118 int rc;
5119
5120 pInstrHC = PATMGCVirtToHCVirt(pVM, &patch, pInstrGC);
5121 cpu.mode = (pConflictPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
5122 disret = PATMR3DISInstr(pVM, &patch, &cpu, pInstrGC, pInstrHC, &opsize, NULL);
5123 /*
5124 * If it's a 5-byte relative jump, then we can work around the problem by replacing the 32-bit relative offset
5125 * with one that jumps right into the conflicting patch.
5126 * Otherwise we must disable the conflicting patch to avoid serious problems.
5127 */
5128 if ( disret == true
5129 && (pConflictPatch->flags & PATMFL_CODE32)
5130 && (cpu.pCurInstr->opcode == OP_JMP || (cpu.pCurInstr->optype & OPTYPE_COND_CONTROLFLOW))
5131 && (cpu.param1.flags & USE_IMMEDIATE32_REL))
5132 {
5133 /* Hint patches must be enabled first. */
5134 if (pConflictPatch->flags & PATMFL_INSTR_HINT)
5135 {
5136 Log(("Enabling HINTED patch %RRv\n", pConflictPatch->pPrivInstrGC));
5137 pConflictPatch->flags &= ~PATMFL_INSTR_HINT;
5138 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5139 Assert(rc == VINF_SUCCESS || rc == VERR_PATCH_NOT_FOUND);
5140 /* Enabling might fail if the patched code has changed in the meantime. */
5141 if (rc != VINF_SUCCESS)
5142 return rc;
5143 }
5144
5145 rc = PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_JUMP_CONFLICT);
5146 if (RT_SUCCESS(rc))
5147 {
5148 Log(("PATM -> CONFLICT: Installed JMP patch for patch conflict at %RRv\n", pInstrGC));
5149 STAM_COUNTER_INC(&pVM->patm.s.StatFixedConflicts);
5150 return VINF_SUCCESS;
5151 }
5152 }
5153#endif
5154
5155 if (pConflictPatch->opcode == OP_CLI)
5156 {
5157 /* Turn it into an int3 patch; our GC trap handler will call the generated code manually. */
5158 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> turn into int 3 patch!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5159 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5160 if (rc == VWRN_PATCH_REMOVED)
5161 return VINF_SUCCESS;
5162 if (RT_SUCCESS(rc))
5163 {
5164 pConflictPatch->flags &= ~(PATMFL_MUST_INSTALL_PATCHJMP|PATMFL_INSTR_HINT);
5165 pConflictPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
5166 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5167 if (rc == VERR_PATCH_NOT_FOUND)
5168 return VINF_SUCCESS; /* removed already */
5169
5170 AssertRC(rc);
5171 if (RT_SUCCESS(rc))
5172 {
5173 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
5174 return VINF_SUCCESS;
5175 }
5176 }
5177 /* else turned into unusable patch (see below) */
5178 }
5179 else
5180 {
5181 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> DISABLING it!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5182 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5183 if (rc == VWRN_PATCH_REMOVED)
5184 return VINF_SUCCESS;
5185 }
5186
5187 /* No need to monitor the code anymore. */
5188 if (pConflictPatch->flags & PATMFL_CODE_MONITORED)
5189 {
5190 int rc = patmRemovePatchPages(pVM, pConflictPatch);
5191 AssertRC(rc);
5192 }
5193 pConflictPatch->uState = PATCH_UNUSABLE;
5194 STAM_COUNTER_INC(&pVM->patm.s.StatUnusable);
5195 return VERR_PATCH_DISABLED;
5196}
5197
5198/**
5199 * Enable patch for privileged instruction at specified location
5200 *
5201 * @returns VBox status code.
5202 * @param pVM The VM to operate on.
5203 * @param pInstrGC Guest context pointer to the privileged instruction
5204 *
5205 * @note returns failure if patching is not allowed or possible
5206 *
5207 */
5208VMMR3DECL(int) PATMR3EnablePatch(PVM pVM, RTRCPTR pInstrGC)
5209{
5210 PPATMPATCHREC pPatchRec;
5211 PPATCHINFO pPatch;
5212
5213 Log(("PATMR3EnablePatch %RRv\n", pInstrGC));
5214 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5215 if (pPatchRec)
5216 {
5217 int rc = VINF_SUCCESS;
5218
5219 pPatch = &pPatchRec->patch;
5220
5221 if (pPatch->uState == PATCH_DISABLED)
5222 {
5223 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
5224 {
5225 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
5226 /** @todo -> pPrivInstrHC is probably not valid anymore */
5227 rc = PGMPhysGCPtr2R3Ptr(VMMGetCpu0(pVM), pPatchRec->patch.pPrivInstrGC, (PRTR3PTR)&pPatchRec->patch.pPrivInstrHC);
5228 if (rc == VINF_SUCCESS)
5229 {
5230#ifdef DEBUG
5231 DISCPUSTATE cpu;
5232 char szOutput[256];
5233 uint32_t opsize, i = 0;
5234#endif
5235 uint8_t temp[16];
5236
5237 Assert(pPatch->cbPatchJump < sizeof(temp));
5238
5239 // let's first check if the guest code is still the same
5240 int rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5241 AssertRC(rc);
5242
5243 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5244 {
5245 Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5246 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5247 /* Remove it completely */
5248 rc = PATMR3RemovePatch(pVM, pInstrGC);
5249 AssertRC(rc);
5250 return VERR_PATCH_NOT_FOUND;
5251 }
5252
5253 rc = patmGenJumpToPatch(pVM, pPatch, false);
5254 AssertRC(rc);
5255 if (RT_FAILURE(rc))
5256 return rc;
5257
5258#ifdef DEBUG
5259 bool disret;
5260 i = 0;
5261 while(i < pPatch->cbPatchJump)
5262 {
5263 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
5264 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, &pPatch->pPrivInstrHC[i], &opsize, szOutput);
5265 Log(("Renewed patch instr: %s", szOutput));
5266 i += opsize;
5267 }
5268#endif
5269 }
5270 }
5271 else
5272 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5273 {
5274 uint8_t temp[16];
5275
5276 Assert(pPatch->cbPatchJump < sizeof(temp));
5277
5278 /* Let's first check if the guest code is still the same. */
5279 int rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5280 AssertRC(rc);
5281
5282 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5283 {
5284 Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5285 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5286 rc = PATMR3RemovePatch(pVM, pInstrGC);
5287 AssertRC(rc);
5288 return VERR_PATCH_NOT_FOUND;
5289 }
5290
5291 rc = patmActivateInt3Patch(pVM, pPatch);
5292 if (RT_FAILURE(rc))
5293 return rc;
5294 }
5295
5296 pPatch->uState = pPatch->uOldState; //restore state
5297
5298 /* Restore the entry breakpoint with the original opcode (see PATMR3DisablePatch). */
5299 if (pPatch->pPatchBlockOffset)
5300 {
5301 *PATCHCODE_PTR_HC(pPatch) = pPatch->bDirtyOpcode;
5302 }
5303
5304 STAM_COUNTER_ADD(&pVM->patm.s.StatEnabled, 1);
5305 }
5306 else
5307 Log(("PATMR3EnablePatch: Unable to enable patch %RRv with state %d\n", pInstrGC, pPatch->uState));
5308
5309 return rc;
5310 }
5311 return VERR_PATCH_NOT_FOUND;
5312}
5313
5314/**
5315 * Remove patch for privileged instruction at specified location
5316 *
5317 * @returns VBox status code.
5318 * @param pVM The VM to operate on.
5319 * @param pPatchRec Patch record
5320 * @param fForceRemove Force removal even of referenced or duplicated-function patches
5321 */
5322int PATMRemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove)
5323{
5324 PPATCHINFO pPatch;
5325
5326 pPatch = &pPatchRec->patch;
5327
5328 /* Strictly forbidden to remove such patches. There can be dependencies!! */
5329 if (!fForceRemove && (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED)))
5330 {
5331 Log(("PATMRemovePatch %RRv REFUSED!\n", pPatch->pPrivInstrGC));
5332 return VERR_ACCESS_DENIED;
5333 }
5334 Log(("PATMRemovePatch %RRv\n", pPatch->pPrivInstrGC));
5335
5336 /** @note NEVER EVER REUSE PATCH MEMORY */
5337 /** @note PATMR3DisablePatch put a breakpoint (0xCC) at the entry of this patch */
5338
5339 if (pPatchRec->patch.pPatchBlockOffset)
5340 {
5341 PAVLOU32NODECORE pNode;
5342
5343 pNode = RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->patch.pPatchBlockOffset);
5344 Assert(pNode);
5345 }
5346
5347 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
5348 {
5349 int rc = patmRemovePatchPages(pVM, &pPatchRec->patch);
5350 AssertRC(rc);
5351 }
5352
5353#ifdef VBOX_WITH_STATISTICS
5354 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5355 {
5356 STAMR3Deregister(pVM, &pPatchRec->patch);
5357#ifndef DEBUG_sandervl
5358 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5359 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5360 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5361 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5362 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5363 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5364 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5365 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5366 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5367 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5368 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5369 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5370 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5371 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5372#endif
5373 }
5374#endif
5375
5376 /** @note no need to free Guest2PatchAddrTree as those records share memory with Patch2GuestAddrTree records. */
5377 patmEmptyTreeU32(pVM, &pPatch->Patch2GuestAddrTree);
5378 pPatch->nrPatch2GuestRecs = 0;
5379 Assert(pPatch->Patch2GuestAddrTree == 0);
5380
5381 patmEmptyTree(pVM, &pPatch->FixupTree);
5382 pPatch->nrFixups = 0;
5383 Assert(pPatch->FixupTree == 0);
5384
5385 if (pPatchRec->patch.pTempInfo)
5386 MMR3HeapFree(pPatchRec->patch.pTempInfo);
5387
5388 /** @note might fail, because it has already been removed (e.g. during reset). */
5389 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5390
5391 /* Free the patch record */
5392 MMHyperFree(pVM, pPatchRec);
5393 return VINF_SUCCESS;
5394}
5395
5396/**
5397 * Attempt to refresh the patch by recompiling its entire code block
5398 *
5399 * @returns VBox status code.
5400 * @param pVM The VM to operate on.
5401 * @param pPatchRec Patch record
5402 */
5403int patmR3RefreshPatch(PVM pVM, PPATMPATCHREC pPatchRec)
5404{
5405 PPATCHINFO pPatch;
5406 int rc;
5407 RTRCPTR pInstrGC = pPatchRec->patch.pPrivInstrGC;
5408
5409 Log(("patmR3RefreshPatch: attempt to refresh patch at %RRv\n", pInstrGC));
5410
5411 pPatch = &pPatchRec->patch;
5412 AssertReturn(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER), VERR_PATCHING_REFUSED);
5413 if (pPatch->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
5414 {
5415 Log(("patmR3RefreshPatch: refused because external jumps to this patch exist\n"));
5416 return VERR_PATCHING_REFUSED;
5417 }
5418
5419 /** Note: quite ugly to enable/disable/remove/insert old and new patches, but there's no easy way around it. */
5420
5421 rc = PATMR3DisablePatch(pVM, pInstrGC);
5422 AssertRC(rc);
5423
5424 /** Kick it out of the lookup tree to make sure PATMR3InstallPatch doesn't fail (hack alert) */
5425 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5426#ifdef VBOX_WITH_STATISTICS
5427 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5428 {
5429 STAMR3Deregister(pVM, &pPatchRec->patch);
5430#ifndef DEBUG_sandervl
5431 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5432 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5433 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5434 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5435 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5436 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5437 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5438 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5439 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5440 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5441 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5442 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5443 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5444 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5445#endif
5446 }
5447#endif
5448
5449 /** Note: We don't attempt to reuse patch memory here as it's quite common that the new code block requires more memory. */
5450
5451 /* Attempt to install a new patch. */
5452 rc = PATMR3InstallPatch(pVM, pInstrGC, pPatch->flags & (PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT));
5453 if (RT_SUCCESS(rc))
5454 {
5455 RTRCPTR pPatchTargetGC;
5456 PPATMPATCHREC pNewPatchRec;
5457
5458 /* Determine target address in new patch */
5459 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pInstrGC);
5460 Assert(pPatchTargetGC);
5461 if (!pPatchTargetGC)
5462 {
5463 rc = VERR_PATCHING_REFUSED;
5464 goto failure;
5465 }
5466
5467 /* Reset offset into patch memory to put the next code blocks right at the beginning. */
5468 pPatch->uCurPatchOffset = 0;
5469
5470 /* insert jump to new patch in old patch block */
5471 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC, false /* no lookup record */);
5472 if (RT_FAILURE(rc))
5473 goto failure;
5474
5475 pNewPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5476 Assert(pNewPatchRec); /* can't fail */
5477
5478 /* Remove old patch (only do that when everything is finished) */
5479 int rc2 = PATMRemovePatch(pVM, pPatchRec, true /* force removal */);
5480 AssertRC(rc2);
5481
5482 /* Put the new patch back into the tree, because removing the old one kicked this one out. (hack alert) */
5483 RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pNewPatchRec->Core);
5484
5485 LogRel(("PATM: patmR3RefreshPatch: successfully refreshed patch at %RRv\n", pInstrGC));
5486 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshSuccess);
5487
5488 /* Used by another patch, so don't remove it! */
5489 pNewPatchRec->patch.flags |= PATMFL_CODE_REFERENCED;
5490 }
5491
5492failure:
5493 if (RT_FAILURE(rc))
5494 {
5495 LogRel(("PATM: patmR3RefreshPatch: failed to refresh patch at %RRv. Reactivating the old one.\n", pInstrGC));
5496
5497 /* Remove the new inactive patch */
5498 rc = PATMR3RemovePatch(pVM, pInstrGC);
5499 AssertRC(rc);
5500
5501 /* Put the old patch back into the tree (or else it won't be saved) (hack alert) */
5502 RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
5503
5504 /* Enable again in case the dirty instruction is near the end and there are safe code paths. */
5505 int rc2 = PATMR3EnablePatch(pVM, pInstrGC);
5506 AssertRC(rc2);
5507
5508 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshFailed);
5509 }
5510 return rc;
5511}
5512
5513/**
5514 * Find patch for privileged instruction at specified location
5515 *
5516 * @returns Patch structure pointer if found; else NULL
5517 * @param pVM The VM to operate on.
5518 * @param pInstrGC Guest context pointer to an instruction that might lie within 5 bytes of an existing patch jump
5519 * @param fIncludeHints Include hinted patches or not
5520 *
5521 */
5522PPATCHINFO PATMFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncludeHints)
5523{
5524 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5525 /* if the patch is enabled, the pointer is not identical to the privileged patch ptr and it lies within 5 bytes of this priv instr ptr, then we've got a hit! */
5526 if (pPatchRec)
5527 {
5528 if ( pPatchRec->patch.uState == PATCH_ENABLED
5529 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE)
5530 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5531 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5532 {
5533 Log(("Found active patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5534 return &pPatchRec->patch;
5535 }
5536 else
5537 if ( fIncludeHints
5538 && pPatchRec->patch.uState == PATCH_DISABLED
5539 && (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
5540 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5541 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5542 {
5543 Log(("Found HINT patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5544 return &pPatchRec->patch;
5545 }
5546 }
5547 return NULL;
5548}
5549
5550/**
5551 * Checks whether the GC address is inside a generated patch jump
5552 *
5553 * @returns true -> yes, false -> no
5554 * @param pVM The VM to operate on.
5555 * @param pAddr Guest context address
5556 * @param pPatchAddr Guest context patch address (if true)
5557 */
5558VMMR3DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatchAddr)
5559{
5560 RTRCPTR addr;
5561 PPATCHINFO pPatch;
5562
5563 if (PATMIsEnabled(pVM) == false)
5564 return false;
5565
5566 if (pPatchAddr == NULL)
5567 pPatchAddr = &addr;
5568
5569 *pPatchAddr = 0;
5570
5571 pPatch = PATMFindActivePatchByEntrypoint(pVM, pAddr);
5572 if (pPatch)
5573 {
5574 *pPatchAddr = pPatch->pPrivInstrGC;
5575 }
5576 return *pPatchAddr == 0 ? false : true;
5577}
5578
5579/**
5580 * Remove patch for privileged instruction at specified location
5581 *
5582 * @returns VBox status code.
5583 * @param pVM The VM to operate on.
5584 * @param pInstrGC Guest context pointer to the privileged instruction
5585 *
5586 * @note returns failure if patching is not allowed or possible
5587 *
5588 */
5589VMMR3DECL(int) PATMR3RemovePatch(PVM pVM, RTRCPTR pInstrGC)
5590{
5591 PPATMPATCHREC pPatchRec;
5592
5593 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5594 if (pPatchRec)
5595 {
5596 int rc = PATMR3DisablePatch(pVM, pInstrGC);
5597 if (rc == VWRN_PATCH_REMOVED)
5598 return VINF_SUCCESS;
5599 return PATMRemovePatch(pVM, pPatchRec, false);
5600 }
5601 AssertFailed();
5602 return VERR_PATCH_NOT_FOUND;
5603}
5604
5605/**
5606 * Mark patch as dirty
5607 *
5608 * @returns VBox status code.
5609 * @param pVM The VM to operate on.
5610 * @param pPatch Patch record
5611 *
5612 * @note returns failure if patching is not allowed or possible
5613 *
5614 */
5615VMMR3DECL(int) PATMR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch)
5616{
5617 if (pPatch->pPatchBlockOffset)
5618 {
5619 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5620 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5621 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5622 }
5623
5624 STAM_COUNTER_INC(&pVM->patm.s.StatDirty);
5625 /* Put back the replaced instruction. */
5626 int rc = PATMR3DisablePatch(pVM, pPatch->pPrivInstrGC);
5627 if (rc == VWRN_PATCH_REMOVED)
5628 return VINF_SUCCESS;
5629
5630 /** @note we don't restore patch pages for patches that are not enabled! */
5631 /** @note be careful when changing this behaviour!! */
5632
5633 /* The patch pages are no longer marked for self-modifying code detection */
5634 if (pPatch->flags & PATMFL_CODE_MONITORED)
5635 {
5636 int rc = patmRemovePatchPages(pVM, pPatch);
5637 AssertRCReturn(rc, rc);
5638 }
5639 pPatch->uState = PATCH_DIRTY;
5640
5641 /* Paranoia; make sure this patch is not somewhere in the callchain, so prevent ret instructions from succeeding. */
5642 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
5643
5644 return VINF_SUCCESS;
5645}
5646
5647/**
5648 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5649 *
5650 * @returns Corresponding guest GC instruction pointer, or 0 if not found.
5651 * @param pVM The VM to operate on.
5652 * @param pPatch Patch block structure pointer
5653 * @param pPatchGC GC address in patch block
5654 */
5655RTRCPTR patmPatchGCPtr2GuestGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pPatchGC)
5656{
5657 Assert(pPatch->Patch2GuestAddrTree);
5658 /* Get the closest record from below. */
5659 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->Patch2GuestAddrTree, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5660 if (pPatchToGuestRec)
5661 return pPatchToGuestRec->pOrgInstrGC;
5662
5663 return 0;
5664}
5665
5666/** Converts a guest code GC pointer to the corresponding patch code GC pointer (if found).
5667 *
5668 * @returns Corresponding GC pointer in patch block, or 0 if not found.
5669 * @param pVM The VM to operate on.
5670 * @param pPatch Current patch block pointer
5671 * @param pInstrGC Guest context pointer to privileged instruction
5672 *
5673 */
5674RTRCPTR patmGuestGCPtrToPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5675{
5676 if (pPatch->Guest2PatchAddrTree)
5677 {
5678 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
5679 if (pGuestToPatchRec)
5680 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
5681 }
5682
5683 return 0;
5684}
5685
5686/** Converts a guest code GC pointer to the corresponding patch code GC pointer (or nearest from below if there is no identical match).
5687 *
5688 * @returns Corresponding GC pointer in patch block, or 0 if not found.
5689 * @param pVM The VM to operate on.
5690 * @param pPatch Current patch block pointer
5691 * @param pInstrGC Guest context pointer to privileged instruction
5692 *
5693 */
5694RTRCPTR patmGuestGCPtrToClosestPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5695{
5696 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pInstrGC, false);
5697 if (pGuestToPatchRec)
5698 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
5699
5700 return 0;
5701}
5702
5703/** Converts a guest code GC pointer to the corresponding patch code GC pointer (if found).
5704 *
5705 * @returns Corresponding GC pointer in patch block, or 0 if not found.
5706 * @param pVM The VM to operate on.
5707 * @param pInstrGC Guest context pointer to privileged instruction
5708 *
5709 */
5710VMMR3DECL(RTRCPTR) PATMR3GuestGCPtrToPatchGCPtr(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC)
5711{
5712 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5713 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && pInstrGC >= pPatchRec->patch.pPrivInstrGC)
5714 {
5715 return patmGuestGCPtrToPatchGCPtr(pVM, &pPatchRec->patch, pInstrGC);
5716 }
5717 return 0;
5718}
5719
5720/**
5721 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5722 *
5723 * @returns original GC instruction pointer or 0 if not found
5724 * @param pVM The VM to operate on.
5725 * @param pPatchGC GC address in patch block
5726 * @param pEnmState State of the translated address (out)
5727 *
5728 */
5729VMMR3DECL(RTRCPTR) PATMR3PatchToGCPtr(PVM pVM, RTRCPTR pPatchGC, PATMTRANSSTATE *pEnmState)
5730{
5731 PPATMPATCHREC pPatchRec;
5732 void *pvPatchCoreOffset;
5733 RTRCPTR pPrivInstrGC;
5734
5735 Assert(PATMIsPatchGCAddr(pVM, pPatchGC));
5736 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5737 if (pvPatchCoreOffset == 0)
5738 {
5739 Log(("PATMR3PatchToGCPtr failed for %RRv offset %x\n", pPatchGC, pPatchGC - pVM->patm.s.pPatchMemGC));
5740 return 0;
5741 }
5742 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
5743 pPrivInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, &pPatchRec->patch, pPatchGC);
5744 if (pEnmState)
5745 {
5746 AssertMsg(pPrivInstrGC && ( pPatchRec->patch.uState == PATCH_ENABLED
5747 || pPatchRec->patch.uState == PATCH_DIRTY
5748 || pPatchRec->patch.uState == PATCH_DISABLE_PENDING
5749 || pPatchRec->patch.uState == PATCH_UNUSABLE),
5750 ("pPrivInstrGC=%RRv uState=%d\n", pPrivInstrGC, pPatchRec->patch.uState));
5751
5752 if ( !pPrivInstrGC
5753 || pPatchRec->patch.uState == PATCH_UNUSABLE
5754 || pPatchRec->patch.uState == PATCH_REFUSED)
5755 {
5756 pPrivInstrGC = 0;
5757 *pEnmState = PATMTRANS_FAILED;
5758 }
5759 else
5760 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pPrivInstrGC)
5761 {
5762 *pEnmState = PATMTRANS_INHIBITIRQ;
5763 }
5764 else
5765 if ( pPatchRec->patch.uState == PATCH_ENABLED
5766 && !(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE))
5767 && pPrivInstrGC > pPatchRec->patch.pPrivInstrGC
5768 && pPrivInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5769 {
5770 *pEnmState = PATMTRANS_OVERWRITTEN;
5771 }
5772 else
5773 if (PATMFindActivePatchByEntrypoint(pVM, pPrivInstrGC))
5774 {
5775 *pEnmState = PATMTRANS_OVERWRITTEN;
5776 }
5777 else
5778 if (pPrivInstrGC == pPatchRec->patch.pPrivInstrGC)
5779 {
5780 *pEnmState = PATMTRANS_PATCHSTART;
5781 }
5782 else
5783 *pEnmState = PATMTRANS_SAFE;
5784 }
5785 return pPrivInstrGC;
5786}
5787
5788/**
5789 * Returns the GC pointer of the patch for the specified GC address
5790 *
5791 * @returns Patch code GC pointer, or 0 if no enabled or dirty patch covers the address.
5792 * @param pVM The VM to operate on.
5793 * @param pAddrGC Guest context address
5794 */
5795VMMR3DECL(RTRCPTR) PATMR3QueryPatchGCPtr(PVM pVM, RTRCPTR pAddrGC)
5796{
5797 PPATMPATCHREC pPatchRec;
5798
5799 // Find the patch record
5800 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pAddrGC);
5801 /** @todo we should only use patches that are enabled! always did this, but it's incorrect! */
5802 if (pPatchRec && (pPatchRec->patch.uState == PATCH_ENABLED || pPatchRec->patch.uState == PATCH_DIRTY))
5803 return PATCHCODE_PTR_GC(&pPatchRec->patch);
5804
5805 return 0;
5806}
5807
5808/**
5809 * Attempt to recover dirty instructions
5810 *
5811 * @returns VBox status code.
5812 * @param pVM The VM to operate on.
5813 * @param pCtx CPU context
5814 * @param pPatch Patch record
5815 * @param pPatchToGuestRec Patch to guest address record
5816 * @param pEip GC pointer of trapping instruction
5817 */
5818static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch, PRECPATCHTOGUEST pPatchToGuestRec, RTRCPTR pEip)
5819{
5820 DISCPUSTATE CpuOld, CpuNew;
5821 uint8_t *pPatchInstrHC, *pCurPatchInstrHC;
5822 int rc;
5823 RTRCPTR pCurInstrGC, pCurPatchInstrGC;
5824 uint32_t cbDirty;
5825 PRECPATCHTOGUEST pRec;
5826 PVMCPU pVCpu = VMMGetCpu0(pVM);
5827
5828 Log(("patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv)\n", pEip, pPatchToGuestRec->pOrgInstrGC));
5829
5830 pRec = pPatchToGuestRec;
5831 pCurInstrGC = pPatchToGuestRec->pOrgInstrGC;
5832 pCurPatchInstrGC = pEip;
5833 cbDirty = 0;
5834 pPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
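    /* Remember the HC address of the first dirty patch instruction; both the copy loop below and the 0xCC fill in the failure path start from here. */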
5835
5836 /* Find all adjacent dirty instructions */
5837 while (true)
5838 {
5839 if (pRec->fJumpTarget)
5840 {
5841 LogRel(("PATM: patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv) ignored because the instruction was reused as a jump target inside the function\n", pEip, pPatchToGuestRec->pOrgInstrGC));
5842 pRec->fDirty = false;
5843 return VERR_PATCHING_REFUSED;
5844 }
5845
5846 /* Restore original instruction opcode byte so we can check if the write was indeed safe. */
5847 pCurPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
5848 *pCurPatchInstrHC = pRec->u8DirtyOpcode;
5849
5850 /* Only harmless instructions are acceptable. */
5851 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurPatchInstrGC, &CpuOld, 0);
5852 if ( RT_FAILURE(rc)
5853 || !(CpuOld.pCurInstr->optype & OPTYPE_HARMLESS))
5854 {
5855 if (RT_SUCCESS(rc))
5856 cbDirty += CpuOld.opsize;
5857 else
5858 if (!cbDirty)
5859 cbDirty = 1;
5860 break;
5861 }
5862
5863#ifdef DEBUG
5864 char szBuf[256];
5865 szBuf[0] = '\0';
5866 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC, 0, szBuf, sizeof(szBuf), NULL);
5867 Log(("DIRTY: %s\n", szBuf));
5868#endif
5869 /* Mark as clean; if we fail we'll let it always fault. */
5870 pRec->fDirty = false;
5871
5872 /** Remove old lookup record. */
5873 patmr3RemoveP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrGC);
5874
5875 pCurPatchInstrGC += CpuOld.opsize;
5876 cbDirty += CpuOld.opsize;
5877
5878 /* Let's see if there's another dirty instruction right after. */
5879 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
5880 if (!pRec || !pRec->fDirty)
5881 break; /* no more dirty instructions */
5882
5883 /* In case of complex instructions the next guest instruction could be quite far off. */
5884 pCurPatchInstrGC = pRec->Core.Key + pVM->patm.s.pPatchMemGC;
5885 }
5886
5887 if ( RT_SUCCESS(rc)
5888 && (CpuOld.pCurInstr->optype & OPTYPE_HARMLESS)
5889 )
5890 {
5891 uint32_t cbLeft;
5892
5893 pCurPatchInstrHC = pPatchInstrHC;
5894 pCurPatchInstrGC = pEip;
5895 cbLeft = cbDirty;
5896
5897 while (cbLeft && RT_SUCCESS(rc))
5898 {
5899 bool fValidInstr;
5900
5901 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurInstrGC, &CpuNew, 0);
5902
5903 fValidInstr = !!(CpuNew.pCurInstr->optype & OPTYPE_HARMLESS);
5904 if ( !fValidInstr
5905 && (CpuNew.pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW)
5906 )
5907 {
5908 RTRCPTR pTargetGC = PATMResolveBranch(&CpuNew, pCurInstrGC);
5909
5910 if ( pTargetGC >= pPatchToGuestRec->pOrgInstrGC
5911 && pTargetGC <= pPatchToGuestRec->pOrgInstrGC + cbDirty
5912 )
5913 {
5914 /* A relative jump to an instruction inside or to the end of the dirty block is acceptable. */
5915 fValidInstr = true;
5916 }
5917 }
5918
5919 /* Copy the instruction if it is completely harmless (which implies a 1:1 patch copy). */
5920 if ( rc == VINF_SUCCESS
5921 && CpuNew.opsize <= cbLeft /* must still fit */
5922 && fValidInstr
5923 )
5924 {
5925#ifdef DEBUG
5926 char szBuf[256];
5927 szBuf[0] = '\0';
5928 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurInstrGC, 0, szBuf, sizeof(szBuf), NULL);
5929 Log(("NEW: %s\n", szBuf));
5930#endif
5931
5932 /* Copy the new instruction. */
5933 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pCurPatchInstrHC, pCurInstrGC, CpuNew.opsize);
5934 AssertRC(rc);
5935
5936 /* Add a new lookup record for the duplicated instruction. */
5937 patmr3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
5938 }
5939 else
5940 {
5941#ifdef DEBUG
5942 char szBuf[256];
5943 szBuf[0] = '\0';
5944 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurInstrGC, 0, szBuf, sizeof(szBuf), NULL);
5945 Log(("NEW: %s (FAILED)\n", szBuf));
5946#endif
5947 /* Restore the old lookup record for the duplicated instruction. */
5948 patmr3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
5949
5950 /** @todo in theory we need to restore the lookup records for the remaining dirty instructions too! */
5951 rc = VERR_PATCHING_REFUSED;
5952 break;
5953 }
5954 pCurInstrGC += CpuNew.opsize;
5955 pCurPatchInstrHC += CpuNew.opsize;
5956 pCurPatchInstrGC += CpuNew.opsize;
5957 cbLeft -= CpuNew.opsize;
5958 }
5959 }
5960 else
5961 rc = VERR_PATCHING_REFUSED;
5962
5963 if (RT_SUCCESS(rc))
5964 {
5965 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyGood);
5966 }
5967 else
5968 {
5969 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyBad);
5970 Assert(cbDirty);
5971
5972 /* Mark the whole instruction stream with breakpoints. */
5973 if (cbDirty)
5974 memset(pPatchInstrHC, 0xCC, cbDirty);
5975
5976 if ( pVM->patm.s.fOutOfMemory == false
5977 && (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER)))
5978 {
5979 rc = patmR3RefreshPatch(pVM, pPatch);
5980 if (RT_FAILURE(rc))
5981 {
5982 LogRel(("PATM: Failed to refresh dirty patch at %RRv. Disabling it.\n", pPatch->patch.pPrivInstrGC));
5983 }
5984 /* Even if we succeed, we must go back to the original instruction as the patched one could be invalid. */
5985 rc = VERR_PATCHING_REFUSED;
5986 }
5987 }
5988 return rc;
5989}
5990
5991/**
5992 * Handle trap inside patch code
5993 *
5994 * @returns VBox status code.
5995 * @param pVM The VM to operate on.
5996 * @param pCtx CPU context
5997 * @param pEip GC pointer of trapping instruction
5998 * @param ppNewEip GC pointer to new instruction
5999 */
6000VMMR3DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *ppNewEip)
6001{
6002 PPATMPATCHREC pPatch = 0;
6003 void *pvPatchCoreOffset;
6004 RTRCUINTPTR offset;
6005 RTRCPTR pNewEip;
6006 int rc;
6007 PRECPATCHTOGUEST pPatchToGuestRec = 0;
6008 PVMCPU pVCpu = VMMGetCpu0(pVM);
6009
6010 Assert(pVM->cCPUs == 1);
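    /* PATM is only used for single-VCPU raw-mode guests, hence VCPU 0 is used throughout this function. */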
6011
6012 pNewEip = 0;
6013 *ppNewEip = 0;
6014
6015 STAM_PROFILE_ADV_START(&pVM->patm.s.StatHandleTrap, a);
6016
6017 /* Find the patch record. */
6018 /** @note there might not be a patch to guest translation record (global function) */
6019 offset = pEip - pVM->patm.s.pPatchMemGC;
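    /* The patch lookup trees are keyed by offsets relative to the start of patch memory, so translate the faulting EIP first. */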
6020 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
6021 if (pvPatchCoreOffset)
6022 {
6023 pPatch = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6024
6025 Assert(offset >= pPatch->patch.pPatchBlockOffset && offset < pPatch->patch.pPatchBlockOffset + pPatch->patch.cbPatchBlockSize);
6026
6027 if (pPatch->patch.uState == PATCH_DIRTY)
6028 {
6029 Log(("PATMR3HandleTrap: trap in dirty patch at %RRv\n", pEip));
6030 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6031 {
6032 /* Function duplication patches set fPIF to 1 on entry */
6033 pVM->patm.s.pGCStateHC->fPIF = 1;
6034 }
6035 }
6036 else
6037 if (pPatch->patch.uState == PATCH_DISABLED)
6038 {
6039 Log(("PATMR3HandleTrap: trap in disabled patch at %RRv\n", pEip));
6040 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6041 {
6042 /* Function duplication patches set fPIF to 1 on entry */
6043 pVM->patm.s.pGCStateHC->fPIF = 1;
6044 }
6045 }
6046 else
6047 if (pPatch->patch.uState == PATCH_DISABLE_PENDING)
6048 {
6049 RTRCPTR pPrivInstrGC = pPatch->patch.pPrivInstrGC;
6050
6051 Log(("PATMR3HandleTrap: disable operation is pending for patch at %RRv\n", pPatch->patch.pPrivInstrGC));
6052 rc = PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6053 AssertReleaseMsg(rc != VWRN_PATCH_REMOVED, ("PATMR3DisablePatch removed patch at %RRv\n", pPrivInstrGC));
6054 AssertMsg(pPatch->patch.uState == PATCH_DISABLED || pPatch->patch.uState == PATCH_UNUSABLE, ("Unexpected failure to disable patch state=%d rc=%Rrc\n", pPatch->patch.uState, rc));
6055 }
6056
6057 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, offset, false);
6058 AssertReleaseMsg(pPatchToGuestRec, ("PATMR3HandleTrap: Unable to find corresponding guest address for %RRv (offset %x)\n", pEip, offset));
6059
6060 pNewEip = pPatchToGuestRec->pOrgInstrGC;
6061 pPatch->patch.cTraps++;
6062 PATM_STAT_FAULT_INC(&pPatch->patch);
6063 }
6064 else
6065 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 0, ("PATMR3HandleTrap: Unable to find translation record for %RRv (PIF=0)\n", pEip));
6066
6067 /* Check if we were interrupted in PATM generated instruction code. */
6068 if (pVM->patm.s.pGCStateHC->fPIF == 0)
6069 {
6070 DISCPUSTATE Cpu;
6071 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pEip, &Cpu, "PIF Trap: ");
6072 AssertRC(rc);
6073
6074 if ( rc == VINF_SUCCESS
6075 && ( Cpu.pCurInstr->opcode == OP_PUSHF
6076 || Cpu.pCurInstr->opcode == OP_PUSH
6077 || Cpu.pCurInstr->opcode == OP_CALL)
6078 )
6079 {
6080 uint64_t fFlags;
6081
6082 STAM_COUNTER_INC(&pVM->patm.s.StatPushTrap);
6083
6084 if (Cpu.pCurInstr->opcode == OP_PUSH)
6085 {
6086 rc = PGMShwGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6087 if ( rc == VINF_SUCCESS
6088 && ((fFlags & (X86_PTE_P|X86_PTE_RW)) == (X86_PTE_P|X86_PTE_RW)) )
6089 {
6090 /* The stack address is fine, so the push argument is a pointer -> emulate this instruction */
6091
6092 /* Reset the PATM stack. */
6093 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6094
6095 pVM->patm.s.pGCStateHC->fPIF = 1;
6096
6097 Log(("Faulting push -> go back to the original instruction\n"));
6098
6099 /* continue at the original instruction */
6100 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6101 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6102 return VINF_SUCCESS;
6103 }
6104 }
6105
6106 /* Typical pushf (most patches)/push (call patch) trap because of a monitored page. */
6107 rc = PGMShwModifyPage(pVCpu, pCtx->esp, 1, X86_PTE_RW, ~(uint64_t)X86_PTE_RW);
6108 AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
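            /* This presumably makes the monitored stack page writable in the shadow page tables, so the faulting pushf/push can simply be restarted in patch code (VINF_PATCH_CONTINUE below). */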
6109 if (rc == VINF_SUCCESS)
6110 {
6111
6112 /* The guest page *must* be present. */
6113 rc = PGMGstGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6114 if (rc == VINF_SUCCESS && (fFlags & X86_PTE_P))
6115 {
6116 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6117 return VINF_PATCH_CONTINUE;
6118 }
6119 }
6120 }
6121 else
6122 if (pPatch->patch.pPrivInstrGC == pNewEip)
6123 {
6124 /* Invalidated patch or first instruction overwritten.
6125 * We can ignore the fPIF state in this case.
6126 */
6127 /* Reset the PATM stack. */
6128 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6129
6130 Log(("Call to invalidated patch -> go back to the original instruction\n"));
6131
6132 pVM->patm.s.pGCStateHC->fPIF = 1;
6133
6134 /* continue at the original instruction */
6135 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6136 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6137 return VINF_SUCCESS;
6138 }
6139
6140 char szBuf[256];
6141 szBuf[0] = '\0';
6142 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pEip, 0, szBuf, sizeof(szBuf), NULL);
6143
6144 /* Very bad. We crashed in emitted code. Probably stack? */
6145 if (pPatch)
6146 {
6147 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6148 ("Crash in patch code %RRv (%RRv) esp=%RX32\nPatch state=%x flags=%RX64 fDirty=%d\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), pPatch->patch.uState, pPatch->patch.flags, pPatchToGuestRec->fDirty, szBuf));
6149 }
6150 else
6151 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6152 ("Crash in patch code %RRv (%RRv) esp=%RX32\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), szBuf));
6153 EMR3FatalError(pVCpu, VERR_INTERNAL_ERROR);
6154 }
6155
6156 /* From here on, we must have a valid patch to guest translation. */
6157 if (pvPatchCoreOffset == 0)
6158 {
6159 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6160 AssertMsgFailed(("PATMR3HandleTrap: patch not found at address %RRv!!\n", pEip));
6161 return VERR_PATCH_NOT_FOUND; //fatal error
6162 }
6163
6164 /* Take care of dirty/changed instructions. */
6165 if (pPatchToGuestRec->fDirty)
6166 {
6167 Assert(pPatchToGuestRec->Core.Key == offset);
6168 Assert(pVM->patm.s.pGCStateHC->fPIF == 1);
6169
6170 rc = patmR3HandleDirtyInstr(pVM, pCtx, pPatch, pPatchToGuestRec, pEip);
6171 if (RT_SUCCESS(rc))
6172 {
6173 /* Retry the current instruction. */
6174 pNewEip = pEip;
6175 rc = VINF_PATCH_CONTINUE; /* Continue at current patch instruction. */
6176 }
6177 else
6178 {
6179 /* Reset the PATM stack. */
6180 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6181
6182 rc = VINF_SUCCESS; /* Continue at original instruction. */
6183 }
6184
6185 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6186 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6187 return rc;
6188 }
6189
6190#ifdef VBOX_STRICT
6191 if (pPatch->patch.flags & PATMFL_DUPLICATE_FUNCTION)
6192 {
6193 DISCPUSTATE cpu;
6194 bool disret;
6195 uint32_t opsize;
6196
6197 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6198 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &pPatch->patch, pNewEip), &opsize, NULL, PATMREAD_RAWCODE);
6199 if (disret && cpu.pCurInstr->opcode == OP_RETN)
6200 {
6201 RTRCPTR retaddr;
6202 PCPUMCTX pCtx;
6203
6204 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
6205
6206 rc = PGMPhysSimpleReadGCPtr(pVCpu, &retaddr, pCtx->esp, sizeof(retaddr));
6207 AssertRC(rc);
6208
6209 Log(("Return failed at %RRv (%RRv)\n", pEip, pNewEip));
6210 Log(("Expected return address %RRv found address %RRv Psp=%x\n", pVM->patm.s.pGCStackHC[(pVM->patm.s.pGCStateHC->Psp+PATM_STACK_SIZE)/sizeof(RTRCPTR)], retaddr, pVM->patm.s.pGCStateHC->Psp));
6211 }
6212 }
6213#endif
6214
6215 /* Return the original address, corrected by subtracting the CS base address. */
6216 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6217
6218 /* Reset the PATM stack. */
6219 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6220
6221 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pNewEip)
6222 {
6223 /* Must be a faulting instruction after sti; currently only sysexit, hlt or iret */
6224 Log(("PATMR3HandleTrap %RRv -> inhibit irqs set!\n", pEip));
6225#ifdef VBOX_STRICT
6226 DISCPUSTATE cpu;
6227 bool disret;
6228 uint32_t opsize;
6229
6230 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6231 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &pPatch->patch, pNewEip), &opsize, NULL, PATMREAD_ORGCODE);
6232
6233 if (disret && (cpu.pCurInstr->opcode == OP_SYSEXIT || cpu.pCurInstr->opcode == OP_HLT || cpu.pCurInstr->opcode == OP_INT3))
6234 {
6235 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6236 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &pPatch->patch, pNewEip), &opsize, NULL, PATMREAD_RAWCODE);
6237
6238 Assert(cpu.pCurInstr->opcode == OP_SYSEXIT || cpu.pCurInstr->opcode == OP_HLT || cpu.pCurInstr->opcode == OP_IRET);
6239 }
6240#endif
6241 EMSetInhibitInterruptsPC(pVCpu, pNewEip);
6242 pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts = 0;
6243 }
6244
6245 Log2(("pPatchBlockGC %RRv - pEip %RRv corresponding GC address %RRv\n", PATCHCODE_PTR_GC(&pPatch->patch), pEip, pNewEip));
6246#ifdef LOG_ENABLED
6247 CPUMR3DisasmInstr(pVM, pVCpu, pCtx, pNewEip, "PATCHRET: ");
6248#endif
6249 if (pNewEip >= pPatch->patch.pPrivInstrGC && pNewEip < pPatch->patch.pPrivInstrGC + pPatch->patch.cbPatchJump)
6250 {
6251 /* We can't jump back to code that we've overwritten with a 5 byte jump! */
6252        Log(("Disabling patch at location %RRv due to a trap too close to the privileged instruction\n", pPatch->patch.pPrivInstrGC));
6253 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6254 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6255 return VERR_PATCH_DISABLED;
6256 }
6257
6258#ifdef PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
6259    /** @todo Compare to the number of successful runs; add some aging algorithm and determine the best time to disable the patch. */
6260 if (pPatch->patch.cTraps > MAX_PATCH_TRAPS)
6261 {
6262 Log(("Disabling patch at location %RRv due to too many traps inside patch code\n", pPatch->patch.pPrivInstrGC));
6263        // We are only wasting time; back out the patch.
6264 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6265 pTrapRec->pNextPatchInstr = 0;
6266 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6267 return VERR_PATCH_DISABLED;
6268 }
6269#endif
6270
6271 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6272 return VINF_SUCCESS;
6273}
6274
6275
6276/**
6277 * Handle a page fault in a monitored page.
6278 *
6279 * @returns VBox status code.
6280 * @param pVM The VM to operate on.
6281 */
6282VMMR3DECL(int) PATMR3HandleMonitoredPage(PVM pVM)
6283{
6284 RTRCPTR addr = pVM->patm.s.pvFaultMonitor;
6285
6286 addr &= PAGE_BASE_GC_MASK;
6287
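    /* Drop the virtual access handler on the faulting page; enabled patches in this
       page are renewed below. */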
6288 int rc = PGMHandlerVirtualDeregister(pVM, addr);
6289 AssertRC(rc); NOREF(rc);
6290
6291 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, false);
6292 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) == PAGE_ADDRESS(addr))
6293 {
6294 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6295 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6296 rc = PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6297 if (rc == VWRN_PATCH_REMOVED)
6298 return VINF_SUCCESS;
6299
6300 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6301
6302 if (addr == pPatchRec->patch.pPrivInstrGC)
6303 addr++;
6304 }
6305
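    /* Walk all remaining patches whose privileged instruction lies within the
       faulting page and renew them one by one. */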
6306    for (;;)
6307 {
6308 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, true);
6309
6310 if (!pPatchRec || PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) != PAGE_ADDRESS(addr))
6311 break;
6312
6313 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
6314 {
6315 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6316 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6317 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6318 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6319 }
6320 addr = pPatchRec->patch.pPrivInstrGC + 1;
6321 }
6322
6323 pVM->patm.s.pvFaultMonitor = 0;
6324 return VINF_SUCCESS;
6325}
6326
6327
6328#ifdef VBOX_WITH_STATISTICS
6329
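/**
 * Returns a short type string for the given patch; used when printing the
 * per-patch statistics samples.
 *
 * @returns Read-only type string (static buffer for trap/interrupt handler patches).
 * @param pVM The VM to operate on.
 * @param pPatch Patch structure.
 */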
6330static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch)
6331{
6332 if (pPatch->flags & PATMFL_SYSENTER)
6333 {
6334 return "SYSENT";
6335 }
6336 else
6337 if (pPatch->flags & (PATMFL_TRAPHANDLER|PATMFL_INTHANDLER))
6338 {
6339 static char szTrap[16];
6340 uint32_t iGate;
6341
6342 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
6343 if (iGate < 256)
6344 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-%2X" : "TRAP-%2X", iGate);
6345 else
6346 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-??" : "TRAP-??");
6347 return szTrap;
6348 }
6349 else
6350 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
6351 return "DUPFUNC";
6352 else
6353 if (pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL)
6354 return "FUNCCALL";
6355 else
6356 if (pPatch->flags & PATMFL_TRAMPOLINE)
6357 return "TRAMP";
6358 else
6359 return patmGetInstructionString(pPatch->opcode, pPatch->flags);
6360}
6361
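/**
 * Returns a three letter state abbreviation for the given patch.
 *
 * @returns Read-only state string.
 * @param pVM The VM to operate on.
 * @param pPatch Patch structure.
 */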
6362static const char *PATMPatchState(PVM pVM, PPATCHINFO pPatch)
6363{
6364    switch (pPatch->uState)
6365 {
6366 case PATCH_ENABLED:
6367 return "ENA";
6368 case PATCH_DISABLED:
6369 return "DIS";
6370 case PATCH_DIRTY:
6371 return "DIR";
6372 case PATCH_UNUSABLE:
6373 return "UNU";
6374 case PATCH_REFUSED:
6375 return "REF";
6376 case PATCH_DISABLE_PENDING:
6377 return "DIP";
6378 default:
6379 AssertFailed();
6380 return " ";
6381 }
6382}
6383
6384/**
6385 * Resets the sample.
6386 * @param pVM The VM handle.
6387 * @param pvSample The sample registered using STAMR3RegisterCallback.
6388 */
6389static void patmResetStat(PVM pVM, void *pvSample)
6390{
6391 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6392 Assert(pPatch);
6393
6394 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A = 0;
6395 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B = 0;
6396}
6397
6398/**
6399 * Prints the sample into the buffer.
6400 *
6401 * @param pVM The VM handle.
6402 * @param pvSample The sample registered using STAMR3RegisterCallback.
6403 * @param pszBuf The buffer to print into.
6404 * @param cchBuf The size of the buffer.
6405 */
6406static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf)
6407{
6408 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6409 Assert(pPatch);
6410
6411 Assert(pPatch->uState != PATCH_REFUSED);
6412 Assert(!(pPatch->flags & (PATMFL_REPLACE_FUNCTION_CALL|PATMFL_MMIO_ACCESS)));
6413
6414 RTStrPrintf(pszBuf, cchBuf, "size %04x ->%3s %8s - %08d - %08d",
6415 pPatch->cbPatchBlockSize, PATMPatchState(pVM, pPatch), PATMPatchType(pVM, pPatch),
6416 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A, pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B);
6417}
6418
6419/**
6420 * Returns the GC address of the corresponding patch statistics counter
6421 *
6422 * @returns Stat address
6423 * @param pVM The VM to operate on.
6424 * @param pPatch Patch structure
6425 */
6426RTRCPTR patmPatchQueryStatAddress(PVM pVM, PPATCHINFO pPatch)
6427{
6428 Assert(pPatch->uPatchIdx != PATM_STAT_INDEX_NONE);
6429 return pVM->patm.s.pStatsGC + sizeof(STAMRATIOU32) * pPatch->uPatchIdx + RT_OFFSETOF(STAMRATIOU32, u32A);
6430}
6431
6432#endif /* VBOX_WITH_STATISTICS */
6433
6434#ifdef VBOX_WITH_DEBUGGER
6435/**
6436 * The '.patmoff' command.
6437 *
6438 * @returns VBox status.
6439 * @param pCmd Pointer to the command descriptor (as registered).
6440 * @param pCmdHlp Pointer to command helper functions.
6441 * @param pVM Pointer to the current VM (if any).
6442 * @param paArgs Pointer to (readonly) array of arguments.
6443 * @param cArgs Number of arguments in the array.
 * @param pResult Where to store the result; not used by this command.
6444 */
6445static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
6446{
6447 /*
6448 * Validate input.
6449 */
6450 if (!pVM)
6451 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires VM to be selected.\n");
6452
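    /* Disable every existing patch first, then forbid the creation of new ones. */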
6453 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, DisableAllPatches, pVM);
6454 PATMR3AllowPatching(pVM, false);
6455 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching disabled\n");
6456}
6457
6458/**
6459 * The '.patmon' command.
6460 *
6461 * @returns VBox status.
6462 * @param pCmd Pointer to the command descriptor (as registered).
6463 * @param pCmdHlp Pointer to command helper functions.
6464 * @param pVM Pointer to the current VM (if any).
6465 * @param paArgs Pointer to (readonly) array of arguments.
6466 * @param cArgs Number of arguments in the array.
 * @param pResult Where to store the result; not used by this command.
6467 */
6468static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
6469{
6470 /*
6471 * Validate input.
6472 */
6473 if (!pVM)
6474 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires VM to be selected.\n");
6475
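    /* Allow patching again, then re-enable all existing patches. */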
6476 PATMR3AllowPatching(pVM, true);
6477 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, EnableAllPatches, pVM);
6478 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching enabled\n");
6479}
6480#endif