VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PATM.cpp@ 46136

Last change on this file since 46136 was 46136, checked in by vboxsync, 12 years ago

temporary build fix

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 261.8 KB
Line 
1/* $Id: PATM.cpp 46136 2013-05-17 06:53:00Z vboxsync $ */
2/** @file
3 * PATM - Dynamic Guest OS Patching Manager
4 *
5 * @note Never ever reuse patch memory!!
6 */
7
8/*
9 * Copyright (C) 2006-2013 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_PATM
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/stam.h>
26#include <VBox/vmm/pgm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/cpumdis.h>
29#include <VBox/vmm/iom.h>
30#include <VBox/vmm/mm.h>
31#include <VBox/vmm/em.h>
32#include <VBox/vmm/hm.h>
33#include <VBox/vmm/ssm.h>
34#include <VBox/vmm/trpm.h>
35#include <VBox/vmm/cfgm.h>
36#include <VBox/param.h>
37#include <VBox/vmm/selm.h>
38#include <VBox/vmm/csam.h>
39#include <iprt/avl.h>
40#include "PATMInternal.h"
41#include "PATMPatch.h"
42#include <VBox/vmm/vm.h>
43#include <VBox/vmm/uvm.h>
44#include <VBox/dbg.h>
45#include <VBox/err.h>
46#include <VBox/log.h>
47#include <iprt/assert.h>
48#include <iprt/asm.h>
49#include <VBox/dis.h>
50#include <VBox/disopcode.h>
51#include "internal/pgm.h"
52
53#include <iprt/string.h>
54#include "PATMA.h"
55
56//#define PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
57//#define PATM_DISABLE_ALL
58
/**
 * Refresh trampoline patch state.
 *
 * Bundles the parameters needed when repointing a trampoline patch at a
 * freshly generated replacement patch.
 */
typedef struct PATMREFRESHPATCH
{
    /** Pointer to the VM structure. */
    PVM                 pVM;
    /** The trampoline patch record. */
    PPATCHINFO          pPatchTrampoline;
    /** The new patch we want to jump to. */
    PPATCHINFO          pPatchRec;
} PATMREFRESHPATCH, *PPATMREFRESHPATCH;
71
72
/** @name Flags for the PATM byte-reader (see patmReadBytes / PATMDISASM::fReadFlags).
 * @{ */
#define PATMREAD_RAWCODE        1  /**< read code as-is */
#define PATMREAD_ORGCODE        2  /**< read original guest opcode bytes; not the patched bytes */
#define PATMREAD_NOCHECK        4  /**< don't check for patch conflicts */
/** @} */
76
/*
 * Private structure used during disassembly
 */
typedef struct
{
    /** Pointer to the VM structure. */
    PVM                     pVM;
    /** The patch the instruction being disassembled belongs to. */
    PPATCHINFO              pPatchInfo;
    /** Host-context base address of the instruction bytes (may be NULL,
     *  in which case the reader falls back to guest-memory reads). */
    R3PTRTYPE(uint8_t *)    pbInstrHC;
    /** Guest-context address corresponding to pbInstrHC. */
    RTRCPTR                 pInstrGC;
    /** PATMREAD_XXX flags controlling how bytes are fetched. */
    uint32_t                fReadFlags;
} PATMDISASM, *PPATMDISASM;
88
89
90/*******************************************************************************
91* Internal Functions *
92*******************************************************************************/
93
94static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pPatch);
95static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
96static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
97
98#ifdef LOG_ENABLED // keep gcc quiet
99static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC);
100#endif
101#ifdef VBOX_WITH_STATISTICS
102static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch);
103static void patmResetStat(PVM pVM, void *pvSample);
104static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf);
105#endif
106
/** Translates a host-context pointer into patch memory to the corresponding
 *  guest-context (RC) pointer.
 *  All macro parameters are parenthesized to avoid operator-precedence
 *  surprises when callers pass expressions. */
#define patmPatchHCPtr2PatchGCPtr(pVM, pHC) ((pVM)->patm.s.pPatchMemGC + ((pHC) - (pVM)->patm.s.pPatchMemHC))
/** Translates a guest-context (RC) pointer into patch memory to the
 *  corresponding host-context pointer. */
#define patmPatchGCPtr2PatchHCPtr(pVM, pGC) ((pVM)->patm.s.pPatchMemHC + ((pGC) - (pVM)->patm.s.pPatchMemGC))
109
110static int patmReinit(PVM pVM);
111static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam);
112static RTRCPTR patmR3GuestGCPtrToPatchGCPtrSimple(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC);
113static int patmR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch);
114
115#ifdef VBOX_WITH_DEBUGGER
116static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM);
117static FNDBGCCMD patmr3CmdOn;
118static FNDBGCCMD patmr3CmdOff;
119
/** Command descriptors. */
static const DBGCCMD    g_aCmds[] =
{
    /* pszCmd,   cArgsMin, cArgsMax, paArgDesc, cArgDescs, fFlags, pfnHandler    pszSyntax, ....pszDescription */
    { "patmon",  0,        0,        NULL,      0,         0,      patmr3CmdOn,  "",        "Enable patching."  },
    { "patmoff", 0,        0,        NULL,      0,         0,      patmr3CmdOff, "",        "Disable patching." },
};
#endif

/* Don't want to break saved states, so put it here as a global variable. */
static unsigned int cIDTHandlersDisabled = 0;
131
/**
 * Initializes the PATM.
 *
 * Allocates one contiguous hyper-heap chunk holding (in order): patch memory,
 * a guard page, the PATM stack, the GC state page, and the statistics area.
 * The carving of that chunk into the individual pointers below depends on this
 * exact layout — do not reorder.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 */
VMMR3_INT_DECL(int) PATMR3Init(PVM pVM)
{
    int rc;

    /*
     * We only need a saved state dummy loader if HM is enabled.
     */
    if (HMIsEnabled(pVM))
    {
        /* HM active: PATM stays off; register a dummy SSM loader so old
           saved states containing a PATM unit still restore cleanly. */
        pVM->fPATMEnabled = false;
        return SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SSM_VERSION, 0,
                                     NULL, NULL, NULL,
                                     NULL, NULL, NULL,
                                     NULL, patmR3LoadDummy, NULL);
    }

    /*
     * Raw-mode.
     */
    Log(("PATMR3Init: Patch record size %d\n", sizeof(PATCHINFO)));

    /* These values can't change as they are hardcoded in patch code (old saved states!) */
    AssertCompile(VMCPU_FF_TIMER == RT_BIT_32(2));
    AssertCompile(VM_FF_REQUEST == VMCPU_FF_REQUEST);
    AssertCompile(VMCPU_FF_INTERRUPT_APIC == RT_BIT_32(0));
    AssertCompile(VMCPU_FF_INTERRUPT_PIC == RT_BIT_32(1));

    AssertReleaseMsg(PATMInterruptFlag == (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST),
                     ("Interrupt flags out of sync!! PATMInterruptFlag=%#x expected %#x. broken assembler?\n", PATMInterruptFlag, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST));

    /* Allocate patch memory and GC patch state memory. */
    pVM->patm.s.cbPatchMem = PATCH_MEMORY_SIZE;
    /* Add another page in case the generated code is much larger than expected. */
    /** @todo bad safety precaution */
    rc = MMR3HyperAllocOnceNoRel(pVM, PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE + PATM_STAT_MEMSIZE, PAGE_SIZE, MM_TAG_PATM, (void **)&pVM->patm.s.pPatchMemHC);
    if (RT_FAILURE(rc))
    {
        Log(("MMHyperAlloc failed with %Rrc\n", rc));
        return rc;
    }
    pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);

    /* PATM stack page for call instruction execution. (2 parts: one for our private stack and one to store the original return address */
    pVM->patm.s.pGCStackHC  = (RTRCPTR *)(pVM->patm.s.pPatchMemHC + PATCH_MEMORY_SIZE + PAGE_SIZE);
    pVM->patm.s.pGCStackGC  = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);

    /*
     * Hypervisor memory for GC status data (read/write)
     *
     * Note1: This is non-critical data; if trashed by the guest, then it will only cause problems for itself
     * Note2: This doesn't really belong here, but we need access to it for relocation purposes
     *
     */
    Assert(sizeof(PATMGCSTATE) < PAGE_SIZE);  /* Note: hardcoded dependencies on this exist. */
    pVM->patm.s.pGCStateHC  = (PPATMGCSTATE)((uint8_t *)pVM->patm.s.pGCStackHC + PATM_STACK_TOTAL_SIZE);
    pVM->patm.s.pGCStateGC  = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);

    /* Hypervisor memory for patch statistics */
    pVM->patm.s.pStatsHC    = (PSTAMRATIOU32)((uint8_t *)pVM->patm.s.pGCStateHC + PAGE_SIZE);
    pVM->patm.s.pStatsGC    = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);

    /* Memory for patch lookup trees. */
    rc = MMHyperAlloc(pVM, sizeof(*pVM->patm.s.PatchLookupTreeHC), 0, MM_TAG_PATM, (void **)&pVM->patm.s.PatchLookupTreeHC);
    AssertRCReturn(rc, rc);
    pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);

#ifdef RT_ARCH_AMD64 /* see patmReinit(). */
    /* Check CFGM option. */
    rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "PATMEnabled", &pVM->fPATMEnabled);
    if (RT_FAILURE(rc))
# ifdef PATM_DISABLE_ALL
        pVM->fPATMEnabled = false;
# else
        pVM->fPATMEnabled = true;
# endif
#endif

    rc = patmReinit(pVM);
    AssertRC(rc);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Register save and load state notifiers.
     */
    rc = SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SSM_VERSION, sizeof(pVM->patm.s) + PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE,
                               NULL, NULL, NULL,
                               NULL, patmR3Save, NULL,
                               NULL, patmR3Load, NULL);
    AssertRCReturn(rc, rc);

#ifdef VBOX_WITH_DEBUGGER
    /*
     * Debugger commands.
     */
    static bool s_fRegisteredCmds = false; /* register only once per process, not per VM */
    if (!s_fRegisteredCmds)
    {
        int rc2 = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc2))
            s_fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    STAM_REG(pVM, &pVM->patm.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/PATM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
    STAM_REG(pVM, &pVM->patm.s.StatPATMMemoryUsed,STAMTYPE_COUNTER, "/PATM/MemoryUsed", STAMUNIT_OCCURENCES, "The amount of hypervisor heap used for patches.");
    STAM_REG(pVM, &pVM->patm.s.StatDisabled, STAMTYPE_COUNTER, "/PATM/Patch/Disabled", STAMUNIT_OCCURENCES, "Number of times patches were disabled.");
    STAM_REG(pVM, &pVM->patm.s.StatEnabled, STAMTYPE_COUNTER, "/PATM/Patch/Enabled", STAMUNIT_OCCURENCES, "Number of times patches were enabled.");
    STAM_REG(pVM, &pVM->patm.s.StatDirty, STAMTYPE_COUNTER, "/PATM/Patch/Dirty", STAMUNIT_OCCURENCES, "Number of times patches were marked dirty.");
    STAM_REG(pVM, &pVM->patm.s.StatUnusable, STAMTYPE_COUNTER, "/PATM/Patch/Unusable", STAMUNIT_OCCURENCES, "Number of unusable patches (conflicts).");
    STAM_REG(pVM, &pVM->patm.s.StatInstalled, STAMTYPE_COUNTER, "/PATM/Patch/Installed", STAMUNIT_OCCURENCES, "Number of installed patches.");
    STAM_REG(pVM, &pVM->patm.s.StatInt3Callable, STAMTYPE_COUNTER, "/PATM/Patch/Int3Callable", STAMUNIT_OCCURENCES, "Number of cli patches turned into int3 patches.");

    STAM_REG(pVM, &pVM->patm.s.StatInt3BlockRun, STAMTYPE_COUNTER, "/PATM/Patch/Run/Int3", STAMUNIT_OCCURENCES, "Number of times an int3 block patch was executed.");
    STAMR3RegisterF(pVM, &pVM->patm.s.pGCStateHC->uPatchCalls, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Patch/Run/Normal");

    STAM_REG(pVM, &pVM->patm.s.StatInstalledFunctionPatches, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Function", STAMUNIT_OCCURENCES, "Number of installed function duplication patches.");
    STAM_REG(pVM, &pVM->patm.s.StatInstalledTrampoline, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Trampoline", STAMUNIT_OCCURENCES, "Number of installed trampoline patches.");
    STAM_REG(pVM, &pVM->patm.s.StatInstalledJump, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Jump", STAMUNIT_OCCURENCES, "Number of installed jump patches.");

    STAM_REG(pVM, &pVM->patm.s.StatOverwritten, STAMTYPE_COUNTER, "/PATM/Patch/Overwritten", STAMUNIT_OCCURENCES, "Number of overwritten patches.");
    STAM_REG(pVM, &pVM->patm.s.StatFixedConflicts,STAMTYPE_COUNTER, "/PATM/Patch/ConflictFixed", STAMUNIT_OCCURENCES, "Number of fixed conflicts.");
    STAM_REG(pVM, &pVM->patm.s.StatFlushed, STAMTYPE_COUNTER, "/PATM/Patch/Flushed", STAMUNIT_OCCURENCES, "Number of flushes of pages with patch jumps.");
    STAM_REG(pVM, &pVM->patm.s.StatMonitored, STAMTYPE_COUNTER, "/PATM/Patch/Monitored", STAMUNIT_OCCURENCES, "Number of patches in monitored patch pages.");
    STAM_REG(pVM, &pVM->patm.s.StatPageBoundaryCrossed, STAMTYPE_COUNTER, "/PATM/Patch/BoundaryCross", STAMUNIT_OCCURENCES, "Number of refused patches due to patch jump crossing page boundary.");

    STAM_REG(pVM, &pVM->patm.s.StatHandleTrap, STAMTYPE_PROFILE, "/PATM/HandleTrap", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3HandleTrap");
    STAM_REG(pVM, &pVM->patm.s.StatPushTrap, STAMTYPE_COUNTER, "/PATM/HandleTrap/PushWP", STAMUNIT_OCCURENCES, "Number of traps due to monitored stack pages.");

    STAM_REG(pVM, &pVM->patm.s.StatSwitchBack, STAMTYPE_COUNTER, "/PATM/SwitchBack", STAMUNIT_OCCURENCES, "Switch back to original guest code when IF=1 & executing PATM instructions");
    STAM_REG(pVM, &pVM->patm.s.StatSwitchBackFail,STAMTYPE_COUNTER, "/PATM/SwitchBackFail", STAMUNIT_OCCURENCES, "Failed switch back to original guest code when IF=1 & executing PATM instructions");

    STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQFailed, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Failed", STAMUNIT_OCCURENCES, "Nr of failed PATMR3DuplicateFunctionRequest calls");
    STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQSuccess, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Success", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls");
    STAM_REG(pVM, &pVM->patm.s.StatDuplicateUseExisting,STAMTYPE_COUNTER, "/PATM/Function/DupREQ/UseExist", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls when using an existing patch");

    STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupInsert, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Insert", STAMUNIT_OCCURENCES, "Nr of successful function address insertions");
    STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupReplace, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Replace", STAMUNIT_OCCURENCES, "Nr of successful function address replacements");
    STAM_REG(pVM, &pVM->patm.s.StatU32FunctionMaxSlotsUsed, STAMTYPE_U32_RESET,"/PATM/Function/Lookup/MaxSlots", STAMUNIT_OCCURENCES, "Maximum nr of lookup slots used in all call patches");

    STAM_REG(pVM, &pVM->patm.s.StatFunctionFound, STAMTYPE_COUNTER, "/PATM/Function/Found", STAMUNIT_OCCURENCES, "Nr of successful function patch lookups in GC");
    STAM_REG(pVM, &pVM->patm.s.StatFunctionNotFound, STAMTYPE_COUNTER, "/PATM/Function/NotFound", STAMUNIT_OCCURENCES, "Nr of failed function patch lookups in GC");

    STAM_REG(pVM, &pVM->patm.s.StatPatchWrite, STAMTYPE_PROFILE, "/PATM/Write/Handle", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3PatchWrite");
    STAM_REG(pVM, &pVM->patm.s.StatPatchWriteDetect, STAMTYPE_PROFILE, "/PATM/Write/Detect", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMIsWriteToPatchPage");
    STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpreted, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Success", STAMUNIT_OCCURENCES, "Nr of interpreted patch writes.");
    STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpretedFailed, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Failed", STAMUNIT_OCCURENCES, "Nr of failed interpreted patch writes.");

    STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshSuccess, STAMTYPE_COUNTER, "/PATM/Refresh/Success", STAMUNIT_OCCURENCES, "Successful patch refreshes");
    STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshFailed, STAMTYPE_COUNTER, "/PATM/Refresh/Failure", STAMUNIT_OCCURENCES, "Failed patch refreshes");

    STAM_REG(pVM, &pVM->patm.s.StatPatchPageInserted, STAMTYPE_COUNTER, "/PATM/Page/Inserted", STAMUNIT_OCCURENCES, "Nr of inserted guest pages that were patched");
    STAM_REG(pVM, &pVM->patm.s.StatPatchPageRemoved, STAMTYPE_COUNTER, "/PATM/Page/Removed", STAMUNIT_OCCURENCES, "Nr of removed guest pages that were patched");

    STAM_REG(pVM, &pVM->patm.s.StatInstrDirty, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Detected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty.");
    STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyGood, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Corrected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and corrected later on.");
    STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyBad, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Failed", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and we were not able to correct them.");

    STAM_REG(pVM, &pVM->patm.s.StatSysEnter, STAMTYPE_COUNTER, "/PATM/Emul/SysEnter", STAMUNIT_OCCURENCES, "Number of times sysenter was emulated.");
    STAM_REG(pVM, &pVM->patm.s.StatSysExit, STAMTYPE_COUNTER, "/PATM/Emul/SysExit" , STAMUNIT_OCCURENCES, "Number of times sysexit was emulated.");
    STAM_REG(pVM, &pVM->patm.s.StatEmulIret, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Success", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
    /* NOTE(review): description below looks copy-pasted from the success counter; probably meant "failed". */
    STAM_REG(pVM, &pVM->patm.s.StatEmulIretFailed, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Failed", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");

    STAM_REG(pVM, &pVM->patm.s.StatGenRet, STAMTYPE_COUNTER, "/PATM/Gen/Ret" , STAMUNIT_OCCURENCES, "Number of generated ret instructions.");
    STAM_REG(pVM, &pVM->patm.s.StatGenRetReused, STAMTYPE_COUNTER, "/PATM/Gen/RetReused" , STAMUNIT_OCCURENCES, "Number of reused ret instructions.");
    STAM_REG(pVM, &pVM->patm.s.StatGenCall, STAMTYPE_COUNTER, "/PATM/Gen/Call", STAMUNIT_OCCURENCES, "Number of generated call instructions.");
    STAM_REG(pVM, &pVM->patm.s.StatGenJump, STAMTYPE_COUNTER, "/PATM/Gen/Jmp" , STAMUNIT_OCCURENCES, "Number of generated indirect jump instructions.");
    STAM_REG(pVM, &pVM->patm.s.StatGenPopf, STAMTYPE_COUNTER, "/PATM/Gen/Popf" , STAMUNIT_OCCURENCES, "Number of generated popf instructions.");

    STAM_REG(pVM, &pVM->patm.s.StatCheckPendingIRQ, STAMTYPE_COUNTER, "/PATM/GC/CheckIRQ" , STAMUNIT_OCCURENCES, "Number of traps that ask to check for pending irqs.");
#endif /* VBOX_WITH_STATISTICS */

    Log(("PATMCallRecord.size %d\n", PATMCallRecord.size));
    Log(("PATMCallIndirectRecord.size %d\n", PATMCallIndirectRecord.size));
    Log(("PATMRetRecord.size %d\n", PATMRetRecord.size));
    Log(("PATMJumpIndirectRecord.size %d\n", PATMJumpIndirectRecord.size));
    Log(("PATMPopf32Record.size %d\n", PATMPopf32Record.size));
    Log(("PATMIretRecord.size %d\n", PATMIretRecord.size));
    Log(("PATMStiRecord.size %d\n", PATMStiRecord.size));
    Log(("PATMCheckIFRecord.size %d\n", PATMCheckIFRecord.size));

    return rc;
}
322
323/**
324 * Finalizes HMA page attributes.
325 *
326 * @returns VBox status code.
327 * @param pVM Pointer to the VM.
328 */
329VMMR3_INT_DECL(int) PATMR3InitFinalize(PVM pVM)
330{
331 if (HMIsEnabled(pVM))
332 return VINF_SUCCESS;
333
334 /* The GC state, stack and statistics must be read/write for the guest (supervisor only of course). */
335 int rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStateGC, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
336 if (RT_FAILURE(rc))
337 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
338
339 rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStackGC, PATM_STACK_TOTAL_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
340 if (RT_FAILURE(rc))
341 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
342
343 rc = PGMMapSetPage(pVM, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
344 if (RT_FAILURE(rc))
345 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
346
347 return rc;
348}
349
/**
 * (Re)initializes PATM
 *
 * Assumes the hyper-heap blocks allocated by PATMR3Init() are already in
 * place; zeroes them, resets all bookkeeping, and regenerates the global
 * helper functions as a fake patch at the start of patch memory.
 *
 * @returns VBox status code.
 * @param   pVM     The VM.
 */
static int patmReinit(PVM pVM)
{
    int rc;

    /*
     * Assert alignment and sizes.
     */
    AssertRelease(!(RT_OFFSETOF(VM, patm.s) & 31));
    AssertRelease(sizeof(pVM->patm.s) <= sizeof(pVM->patm.padding));

    /*
     * Setup any fixed pointers and offsets.
     */
    pVM->patm.s.offVM = RT_OFFSETOF(VM, patm);

#ifndef RT_ARCH_AMD64 /* would be nice if this was changed everywhere. was driving me crazy on AMD64. */
#ifndef PATM_DISABLE_ALL
    pVM->fPATMEnabled = true;
#endif
#endif

    Assert(pVM->patm.s.pGCStateHC);
    memset(pVM->patm.s.pGCStateHC, 0, PAGE_SIZE);
    AssertReleaseMsg(pVM->patm.s.pGCStateGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStateGC));

    Log(("Patch memory allocated at %p - %RRv\n", pVM->patm.s.pPatchMemHC, pVM->patm.s.pPatchMemGC));
    pVM->patm.s.pGCStateHC->uVMFlags = X86_EFL_IF;

    Assert(pVM->patm.s.pGCStackHC);
    /* NOTE(review): only PAGE_SIZE is cleared here although the stack area is
       PATM_STACK_TOTAL_SIZE — looks intentional or benign, but confirm. */
    memset(pVM->patm.s.pGCStackHC, 0, PAGE_SIZE);
    AssertReleaseMsg(pVM->patm.s.pGCStackGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStackGC));
    pVM->patm.s.pGCStateHC->Psp = PATM_STACK_SIZE;
    pVM->patm.s.pGCStateHC->fPIF = 1;   /* PATM Interrupt Flag */

    Assert(pVM->patm.s.pStatsHC);
    memset(pVM->patm.s.pStatsHC, 0, PATM_STAT_MEMSIZE);
    AssertReleaseMsg(pVM->patm.s.pStatsGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pStatsGC));

    Assert(pVM->patm.s.pPatchMemHC);
    Assert(pVM->patm.s.pPatchMemGC == MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC));
    memset(pVM->patm.s.pPatchMemHC, 0, PATCH_MEMORY_SIZE);
    AssertReleaseMsg(pVM->patm.s.pPatchMemGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pPatchMemHC));

    /* Needed for future patching of sldt/sgdt/sidt/str etc. */
    pVM->patm.s.pCPUMCtxGC = VM_RC_ADDR(pVM, CPUMQueryGuestCtxPtr(VMMGetCpu(pVM)));

    Assert(pVM->patm.s.PatchLookupTreeHC);
    Assert(pVM->patm.s.PatchLookupTreeGC == MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC));

    /*
     * (Re)Initialize PATM structure
     */
    Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
    Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr);
    Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
    pVM->patm.s.offPatchMem      = 16;  /* don't start with zero here */
    pVM->patm.s.uCurrentPatchIdx = 1;   /* Index zero is a dummy */
    pVM->patm.s.pvFaultMonitor   = 0;
    pVM->patm.s.deltaReloc       = 0;

    /* Lowest and highest patched instruction */
    pVM->patm.s.pPatchedInstrGCLowest  = ~0;
    pVM->patm.s.pPatchedInstrGCHighest = 0;

    pVM->patm.s.PatchLookupTreeHC->PatchTree            = 0;
    pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
    pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage      = 0;

    pVM->patm.s.pfnSysEnterPatchGC = 0;
    pVM->patm.s.pfnSysEnterGC      = 0;

    pVM->patm.s.fOutOfMemory = false;

    pVM->patm.s.pfnHelperCallGC = 0;
//    patmR3DbgReset(pVM);

    /* Generate all global functions to be used by future patches. */
    /* We generate a fake patch in order to use the existing code for relocation. */
    rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pVM->patm.s.pGlobalPatchRec);
    if (RT_FAILURE(rc))
    {
        Log(("Out of memory!!!!\n"));
        return VERR_NO_MEMORY;
    }
    pVM->patm.s.pGlobalPatchRec->patch.flags             = PATMFL_GLOBAL_FUNCTIONS;
    pVM->patm.s.pGlobalPatchRec->patch.uState            = PATCH_ENABLED;
    pVM->patm.s.pGlobalPatchRec->patch.pPatchBlockOffset = pVM->patm.s.offPatchMem;

    rc = patmPatchGenGlobalFunctions(pVM, &pVM->patm.s.pGlobalPatchRec->patch);
    AssertRC(rc);

    /* Update free pointer in patch memory. */
    pVM->patm.s.offPatchMem += pVM->patm.s.pGlobalPatchRec->patch.uCurPatchOffset;
    /* Round to next 8 byte boundary. */
    pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);


    return rc;
}
454
455
/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM need to relocate it self inside the GC.
 *
 * The PATM will update the addresses used by the switcher.
 *
 * @param   pVM     The VM.
 */
VMMR3_INT_DECL(void) PATMR3Relocate(PVM pVM)
{
    if (HMIsEnabled(pVM))
        return;

    /* Compute how far the hypervisor area moved by re-translating the GC
       state pointer and comparing against the cached value. */
    RTRCPTR     GCPtrNew = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
    RTRCINTPTR  delta = GCPtrNew - pVM->patm.s.pGCStateGC;

    Log(("PATMR3Relocate from %RRv to %RRv - delta %08X\n", pVM->patm.s.pGCStateGC, GCPtrNew, delta));
    if (delta)
    {
        PCPUMCTX pCtx;

        /* Update CPUMCTX guest context pointer. */
        pVM->patm.s.pCPUMCtxGC   += delta;

        /* Stash the delta before walking the tree; RelocatePatches reads it. */
        pVM->patm.s.deltaReloc = delta;

        RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, RelocatePatches, (void *)pVM);

        pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));

        /* If we are running patch code right now, then also adjust EIP. */
        if (PATMIsPatchGCAddr(pVM, pCtx->eip))
            pCtx->eip += delta;

        /* Refresh all cached RC pointers from their HC counterparts. */
        pVM->patm.s.pGCStateGC   = GCPtrNew;
        pVM->patm.s.pPatchMemGC  = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);

        pVM->patm.s.pGCStackGC   = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);

        pVM->patm.s.pStatsGC     = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);

        pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);

        if (pVM->patm.s.pfnSysEnterPatchGC)
            pVM->patm.s.pfnSysEnterPatchGC += delta;

        /* Deal with the global patch functions. */
        /* NOTE(review): unlike pfnSysEnterPatchGC these are adjusted without a
           zero check — presumably always set by patmReinit(); confirm. */
        pVM->patm.s.pfnHelperCallGC += delta;
        pVM->patm.s.pfnHelperRetGC  += delta;
        pVM->patm.s.pfnHelperIretGC += delta;
        pVM->patm.s.pfnHelperJumpGC += delta;

        RelocatePatches(&pVM->patm.s.pGlobalPatchRec->Core, (void *)pVM);
    }
}
512
513
514/**
515 * Terminates the PATM.
516 *
517 * Termination means cleaning up and freeing all resources,
518 * the VM it self is at this point powered off or suspended.
519 *
520 * @returns VBox status code.
521 * @param pVM Pointer to the VM.
522 */
523VMMR3_INT_DECL(int) PATMR3Term(PVM pVM)
524{
525 if (HMIsEnabled(pVM))
526 return VINF_SUCCESS;
527
528// patmR3DbgTerm(pVM);
529
530 /* Memory was all allocated from the two MM heaps and requires no freeing. */
531 return VINF_SUCCESS;
532}
533
534
535/**
536 * PATM reset callback.
537 *
538 * @returns VBox status code.
539 * @param pVM The VM which is reset.
540 */
541VMMR3_INT_DECL(int) PATMR3Reset(PVM pVM)
542{
543 Log(("PATMR3Reset\n"));
544 if (HMIsEnabled(pVM))
545 return VINF_SUCCESS;
546
547 /* Free all patches. */
548 for (;;)
549 {
550 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32RemoveBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, 0, true);
551 if (pPatchRec)
552 patmR3RemovePatch(pVM, pPatchRec, true);
553 else
554 break;
555 }
556 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
557 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
558 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
559 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
560
561 int rc = patmReinit(pVM);
562 if (RT_SUCCESS(rc))
563 rc = PATMR3InitFinalize(pVM); /* paranoia */
564
565 return rc;
566}
567
/**
 * @callback_method_impl{FNDISREADBYTES}
 *
 * Byte reader for the disassembler.  Depending on PATMDISASM::fReadFlags it
 * serves original (pre-patch) guest bytes, and otherwise reads either straight
 * from guest memory or from the cached HC mapping in PATMDISASM::pbInstrHC.
 */
static DECLCALLBACK(int) patmReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
{
    PATMDISASM   *pDisInfo = (PATMDISASM *)pDis->pvUser;

/** @todo change this to read more! */
    /*
     * Trap/interrupt handler typically call common code on entry. Which might already have patches inserted.
     * As we currently don't support calling patch code from patch code, we'll let it read the original opcode bytes instead.
     */
    /** @todo could change in the future! */
    if (pDisInfo->fReadFlags & PATMREAD_ORGCODE)
    {
        size_t      cbRead   = cbMaxRead;
        RTUINTPTR   uSrcAddr = pDis->uInstrAddr + offInstr;
        int rc = PATMR3ReadOrgInstr(pDisInfo->pVM, pDis->uInstrAddr + offInstr, &pDis->abInstr[offInstr], cbRead, &cbRead);
        if (RT_SUCCESS(rc))
        {
            if (cbRead >= cbMinRead)
            {
                /* Got everything the disassembler needs from the org bytes. */
                pDis->cbCachedInstr = offInstr + (uint8_t)cbRead;
                return VINF_SUCCESS;
            }

            /* Partial read: fall through to the normal reader for the rest. */
            cbMinRead -= (uint8_t)cbRead;
            cbMaxRead -= (uint8_t)cbRead;
            offInstr  += (uint8_t)cbRead;
            uSrcAddr  += cbRead; /* NOTE(review): dead store — this block-local goes out of scope below. */
        }

#ifdef VBOX_STRICT
        if (   !(pDisInfo->pPatchInfo->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER))
            && !(pDisInfo->fReadFlags & PATMREAD_NOCHECK))
        {
            Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pDis->uInstrAddr + offInstr, NULL) == false);
            Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pDis->uInstrAddr + offInstr + cbMinRead-1, NULL) == false);
        }
#endif
    }

    int       rc       = VINF_SUCCESS;
    RTGCPTR32 uSrcAddr = (RTGCPTR32)pDis->uInstrAddr + offInstr;
    /* Use the slow guest-memory path when we have no HC mapping, or when the
       read would leave the mapped page (and isn't patch memory). */
    if (   !pDisInfo->pbInstrHC
        || (   PAGE_ADDRESS(pDisInfo->pInstrGC) != PAGE_ADDRESS(uSrcAddr + cbMinRead - 1)
            && !PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr)))
    {
        Assert(!PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr));
        rc = PGMPhysSimpleReadGCPtr(&pDisInfo->pVM->aCpus[0], &pDis->abInstr[offInstr], uSrcAddr, cbMinRead);
        offInstr += cbMinRead;
    }
    else
    {
        /*
         * pbInstrHC is the base address; adjust according to the GC pointer.
         *
         * Try read the max number of bytes here. Since the disassembler only
         * ever uses these bytes for the current instruction, it doesn't matter
         * much if we accidentally read the start of the next instruction even
         * if it happens to be a patch jump or int3.
         */
        uint8_t const *pbInstrHC = pDisInfo->pbInstrHC; AssertPtr(pbInstrHC);
        pbInstrHC += uSrcAddr - pDisInfo->pInstrGC;

        /* Clamp the copy to both the guest page and the host page, but never
           below cbMinRead. */
        size_t cbMaxRead1 = PAGE_SIZE - (uSrcAddr & PAGE_OFFSET_MASK);
        size_t cbMaxRead2 = PAGE_SIZE - ((uintptr_t)pbInstrHC & PAGE_OFFSET_MASK);
        size_t cbToRead   = RT_MIN(cbMaxRead1, RT_MAX(cbMaxRead2, cbMinRead));
        if (cbToRead > cbMaxRead)
            cbToRead = cbMaxRead;

        memcpy(&pDis->abInstr[offInstr], pbInstrHC, cbToRead);
        offInstr += (uint8_t)cbToRead;
    }

    pDis->cbCachedInstr = offInstr;
    return rc;
}
646
647
648DECLINLINE(bool) patmR3DisInstrToStr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
649 PDISCPUSTATE pCpu, uint32_t *pcbInstr, char *pszOutput, size_t cbOutput)
650{
651 PATMDISASM disinfo;
652 disinfo.pVM = pVM;
653 disinfo.pPatchInfo = pPatch;
654 disinfo.pbInstrHC = pbInstrHC;
655 disinfo.pInstrGC = InstrGCPtr32;
656 disinfo.fReadFlags = fReadFlags;
657 return RT_SUCCESS(DISInstrToStrWithReader(InstrGCPtr32,
658 (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
659 patmReadBytes, &disinfo,
660 pCpu, pcbInstr, pszOutput, cbOutput));
661}
662
663
664DECLINLINE(bool) patmR3DisInstr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
665 PDISCPUSTATE pCpu, uint32_t *pcbInstr)
666{
667 PATMDISASM disinfo;
668 disinfo.pVM = pVM;
669 disinfo.pPatchInfo = pPatch;
670 disinfo.pbInstrHC = pbInstrHC;
671 disinfo.pInstrGC = InstrGCPtr32;
672 disinfo.fReadFlags = fReadFlags;
673 return RT_SUCCESS(DISInstrWithReader(InstrGCPtr32,
674 (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
675 patmReadBytes, &disinfo,
676 pCpu, pcbInstr));
677}
678
679
680DECLINLINE(bool) patmR3DisInstrNoStrOpMode(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC,
681 uint32_t fReadFlags,
682 PDISCPUSTATE pCpu, uint32_t *pcbInstr)
683{
684 PATMDISASM disinfo;
685 disinfo.pVM = pVM;
686 disinfo.pPatchInfo = pPatch;
687 disinfo.pbInstrHC = pbInstrHC;
688 disinfo.pInstrGC = InstrGCPtr32;
689 disinfo.fReadFlags = fReadFlags;
690 return RT_SUCCESS(DISInstrWithReader(InstrGCPtr32, pPatch->uOpMode, patmReadBytes, &disinfo,
691 pCpu, pcbInstr));
692}
693
#ifdef LOG_ENABLED
/* Log the original (pre-patch) instruction bytes of a patch. */
# define PATM_LOG_ORG_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) \
        PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, PATMREAD_ORGCODE, a_szComment, " patch:")
/* Log the instruction bytes as-is (raw, possibly patched). */
# define PATM_LOG_RAW_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) \
        PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, PATMREAD_RAWCODE, a_szComment, " patch:")

/* Common worker: only pays the disassembly cost when logging is enabled. */
# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) \
    do { \
        if (LogIsEnabled()) \
            patmLogRawPatchInstr(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2); \
    } while (0)

/**
 * Disassembles the instruction at pPatch->pPrivInstrGC (using the given
 * PATMREAD_XXX flags) and writes it to the log, prefixed by the two comment
 * strings.
 */
static void patmLogRawPatchInstr(PVM pVM, PPATCHINFO pPatch, uint32_t fFlags,
                                 const char *pszComment1, const char *pszComment2)
{
    DISCPUSTATE DisState;
    char szOutput[128];
    szOutput[0] = '\0';
    /* Return code deliberately ignored; szOutput stays empty on failure. */
    patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC, NULL, fFlags,
                        &DisState, NULL, szOutput, sizeof(szOutput));
    Log(("%s%s %s", pszComment1, pszComment2, szOutput));
}

#else
/* Logging disabled: all three log macros compile to nothing. */
# define PATM_LOG_ORG_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) do { } while (0)
# define PATM_LOG_RAW_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) do { } while (0)
# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) do { } while (0)
#endif
722
723
724/**
725 * Callback function for RTAvloU32DoWithAll
726 *
727 * Updates all fixups in the patches
728 *
729 * @returns VBox status code.
730 * @param pNode Current node
731 * @param pParam Pointer to the VM.
732 */
733static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam)
734{
735 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
736 PVM pVM = (PVM)pParam;
737 RTRCINTPTR delta;
738 int rc;
739
740 /* Nothing to do if the patch is not active. */
741 if (pPatch->patch.uState == PATCH_REFUSED)
742 return 0;
743
744 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
745 PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Org patch jump:", "");
746
747 Log(("Nr of fixups %d\n", pPatch->patch.nrFixups));
748 delta = (RTRCINTPTR)pVM->patm.s.deltaReloc;
749
750 /*
751 * Apply fixups
752 */
753 PRELOCREC pRec = 0;
754 AVLPVKEY key = 0;
755
756 while (true)
757 {
758 /* Get the record that's closest from above */
759 pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true);
760 if (pRec == 0)
761 break;
762
763 key = (AVLPVKEY)(pRec->pRelocPos + 1); /* search for the next record during the next round. */
764
765 switch (pRec->uType)
766 {
767 case FIXUP_ABSOLUTE:
768 Log(("Absolute fixup at %RRv %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
769 if ( !pRec->pSource
770 || PATMIsPatchGCAddr(pVM, pRec->pSource))
771 {
772 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
773 }
774 else
775 {
776 uint8_t curInstr[15];
777 uint8_t oldInstr[15];
778 Assert(pRec->pSource && pPatch->patch.cbPrivInstr <= 15);
779
780 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
781
782 memcpy(oldInstr, pPatch->patch.aPrivInstr, pPatch->patch.cbPrivInstr);
783 *(RTRCPTR *)&oldInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
784
785 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), curInstr, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPrivInstr);
786 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
787
788 pRec->pDest = (RTRCPTR)((RTRCUINTPTR)pRec->pDest + delta);
789
790 if ( rc == VERR_PAGE_NOT_PRESENT
791 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
792 {
793 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
794
795 Log(("PATM: Patch page not present -> check later!\n"));
796 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
797 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
798 }
799 else
800 if (memcmp(curInstr, oldInstr, pPatch->patch.cbPrivInstr))
801 {
802 Log(("PATM: Patch was overwritten -> disabling patch!!\n"));
803 /*
804 * Disable patch; this is not a good solution
805 */
806 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
807 pPatch->patch.uState = PATCH_DISABLED;
808 }
809 else
810 if (RT_SUCCESS(rc))
811 {
812 *(RTRCPTR *)&curInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
813 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pRec->pSource, curInstr, pPatch->patch.cbPrivInstr);
814 AssertRC(rc);
815 }
816 }
817 break;
818
819 case FIXUP_REL_JMPTOPATCH:
820 {
821 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
822
823 if ( pPatch->patch.uState == PATCH_ENABLED
824 && (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE))
825 {
826 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
827 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
828 RTRCPTR pJumpOffGC;
829 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
830 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
831
832#if 0 /** @todo '*(int32_t*)pRec->pRelocPos' crashes on restore of an XP VM here. pRelocPos=0x8000dbe2180a (bird) */
833 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
834#else
835 Log(("Relative fixup (g2p) ???????? -> %08X at %08X (source=%08x, target=%08x)\n", displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
836#endif
837
838 Assert(pRec->pSource - pPatch->patch.cbPatchJump == pPatch->patch.pPrivInstrGC);
839#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
840 if (pPatch->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
841 {
842 Assert(pPatch->patch.flags & PATMFL_JUMP_CONFLICT);
843
844 pJumpOffGC = pPatch->patch.pPrivInstrGC + 2; //two byte opcode
845 oldJump[0] = pPatch->patch.aPrivInstr[0];
846 oldJump[1] = pPatch->patch.aPrivInstr[1];
847 *(RTRCUINTPTR *)&oldJump[2] = displOld;
848 }
849 else
850#endif
851 if (pPatch->patch.cbPatchJump == SIZEOF_NEARJUMP32)
852 {
853 pJumpOffGC = pPatch->patch.pPrivInstrGC + 1; //one byte opcode
854 oldJump[0] = 0xE9;
855 *(RTRCUINTPTR *)&oldJump[1] = displOld;
856 }
857 else
858 {
859 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->patch.cbPatchJump));
860 continue; //this should never happen!!
861 }
862 Assert(pPatch->patch.cbPatchJump <= sizeof(temp));
863
864 /*
865 * Read old patch jump and compare it to the one we previously installed
866 */
867 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPatchJump);
868 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
869
870 if ( rc == VERR_PAGE_NOT_PRESENT
871 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
872 {
873 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
874
875 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
876 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
877 }
878 else
879 if (memcmp(temp, oldJump, pPatch->patch.cbPatchJump))
880 {
881 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
882 /*
883 * Disable patch; this is not a good solution
884 */
885 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
886 pPatch->patch.uState = PATCH_DISABLED;
887 }
888 else
889 if (RT_SUCCESS(rc))
890 {
891 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
892 AssertRC(rc);
893 }
894 else
895 AssertMsgFailed(("Unexpected error %d from MMR3PhysReadGCVirt\n", rc));
896 }
897 else
898 Log(("Skip the guest jump to patch code for this disabled patch %RGv - %08X\n", pPatch->patch.pPrivInstrGC, pRec->pRelocPos));
899
900 pRec->pDest = pTarget;
901 break;
902 }
903
904 case FIXUP_REL_JMPTOGUEST:
905 {
906 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
907 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
908
909 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
910 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
911 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
912 pRec->pSource = pSource;
913 break;
914 }
915
916 default:
917 AssertMsg(0, ("Invalid fixup type!!\n"));
918 return VERR_INVALID_PARAMETER;
919 }
920 }
921
922 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
923 PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Rel patch jump:", "");
924 return 0;
925}
926
927/**
928 * \#PF Handler callback for virtual access handler ranges.
929 *
930 * Important to realize that a physical page in a range can have aliases, and
931 * for ALL and WRITE handlers these will also trigger.
932 *
933 * @returns VINF_SUCCESS if the handler have carried out the operation.
934 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
935 * @param pVM Pointer to the VM.
936 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
937 * @param pvPtr The HC mapping of that address.
938 * @param pvBuf What the guest is reading/writing.
939 * @param cbBuf How much it's reading/writing.
940 * @param enmAccessType The access type.
941 * @param pvUser User argument.
942 */
943DECLCALLBACK(int) patmVirtPageHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
944 PGMACCESSTYPE enmAccessType, void *pvUser)
945{
946 Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
947 NOREF(pvPtr); NOREF(pvBuf); NOREF(cbBuf); NOREF(pvUser);
948
949 /** @todo could be the wrong virtual address (alias) */
950 pVM->patm.s.pvFaultMonitor = GCPtr;
951 PATMR3HandleMonitoredPage(pVM);
952 return VINF_PGM_HANDLER_DO_DEFAULT;
953}
954
955#ifdef VBOX_WITH_DEBUGGER
956
957/**
958 * Callback function for RTAvloU32DoWithAll
959 *
960 * Enables the patch that's being enumerated
961 *
962 * @returns 0 (continue enumeration).
963 * @param pNode Current node
964 * @param pVM Pointer to the VM.
965 */
966static DECLCALLBACK(int) EnableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
967{
968 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
969
970 PATMR3EnablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
971 return 0;
972}
973
974
975/**
976 * Callback function for RTAvloU32DoWithAll
977 *
978 * Disables the patch that's being enumerated
979 *
980 * @returns 0 (continue enumeration).
981 * @param pNode Current node
982 * @param pVM Pointer to the VM.
983 */
984static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
985{
986 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
987
988 PATMR3DisablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
989 return 0;
990}
991
992#endif /* VBOX_WITH_DEBUGGER */
993#ifdef UNUSED_FUNCTIONS
994
995/**
996 * Returns the host context pointer and size of the patch memory block
997 *
998 * @returns Host context pointer.
999 * @param pVM Pointer to the VM.
1000 * @param pcb Size of the patch memory block
1001 * @internal
1002 */
1003VMMR3_INT_DECL(void *) PATMR3QueryPatchMemHC(PVM pVM, uint32_t *pcb)
1004{
1005 AssertReturn(!HMIsEnabled(pVM), NULL);
1006 if (pcb)
1007 *pcb = pVM->patm.s.cbPatchMem;
1008 return pVM->patm.s.pPatchMemHC;
1009}
1010
1011
1012/**
1013 * Returns the guest context pointer and size of the patch memory block
1014 *
1015 * @returns Guest context pointer.
1016 * @param pVM Pointer to the VM.
1017 * @param pcb Size of the patch memory block
1018 */
1019VMMR3_INT_DECL(RTRCPTR) PATMR3QueryPatchMemGC(PVM pVM, uint32_t *pcb)
1020{
1021 AssertReturn(!HMIsEnabled(pVM), NIL_RTRCPTR);
1022 if (pcb)
1023 *pcb = pVM->patm.s.cbPatchMem;
1024 return pVM->patm.s.pPatchMemGC;
1025}
1026
1027#endif /* UNUSED_FUNCTIONS */
1028
1029/**
1030 * Returns the host context pointer of the GC context structure
1031 *
1032 * @returns VBox status code.
1033 * @param pVM Pointer to the VM.
1034 */
1035VMMR3_INT_DECL(PPATMGCSTATE) PATMR3QueryGCStateHC(PVM pVM)
1036{
1037 AssertReturn(!HMIsEnabled(pVM), NULL);
1038 return pVM->patm.s.pGCStateHC;
1039}
1040
1041
1042#ifdef UNUSED_FUNCTION
1043/**
1044 * Checks whether the HC address is part of our patch region
1045 *
1046 * @returns true/false.
1047 * @param pVM Pointer to the VM.
1048 * @param pAddrHC Host context ring-3 address to check.
1049 */
1050VMMR3_INT_DECL(bool) PATMR3IsPatchHCAddr(PVM pVM, void *pAddrHC)
1051{
1052 return (uintptr_t)pAddrHC >= (uintptr_t)pVM->patm.s.pPatchMemHC
1053 && (uintptr_t)pAddrHC < (uintptr_t)pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem;
1054}
1055#endif
1056
1057
1058/**
1059 * Allows or disallow patching of privileged instructions executed by the guest OS
1060 *
1061 * @returns VBox status code.
1062 * @param pUVM The user mode VM handle.
1063 * @param fAllowPatching Allow/disallow patching
1064 */
1065VMMR3DECL(int) PATMR3AllowPatching(PUVM pUVM, bool fAllowPatching)
1066{
1067 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1068 PVM pVM = pUVM->pVM;
1069 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1070
1071 if (!HMIsEnabled(pVM))
1072 pVM->fPATMEnabled = fAllowPatching;
1073 else
1074 Assert(!pVM->fPATMEnabled);
1075 return VINF_SUCCESS;
1076}
1077
1078
1079/**
1080 * Checks if the patch manager is enabled or not.
1081 *
1082 * @returns true if enabled, false if not (or if invalid handle).
1083 * @param pUVM The user mode VM handle.
1084 */
1085VMMR3DECL(bool) PATMR3IsEnabled(PUVM pUVM)
1086{
1087 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
1088 PVM pVM = pUVM->pVM;
1089 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
1090 return PATMIsEnabled(pVM);
1091}
1092
1093
1094/**
1095 * Convert a GC patch block pointer to a HC patch pointer
1096 *
1097 * @returns HC pointer or NULL if it's not a GC patch pointer
1098 * @param pVM Pointer to the VM.
1099 * @param pAddrGC GC pointer
1100 */
1101VMMR3_INT_DECL(void *) PATMR3GCPtrToHCPtr(PVM pVM, RTRCPTR pAddrGC)
1102{
1103 AssertReturn(!HMIsEnabled(pVM), NULL);
1104 if (pVM->patm.s.pPatchMemGC <= pAddrGC && pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem > pAddrGC)
1105 return pVM->patm.s.pPatchMemHC + (pAddrGC - pVM->patm.s.pPatchMemGC);
1106 return NULL;
1107}
1108
1109
1110/**
1111 * Convert guest context address to host context pointer
1112 *
1113 * @returns VBox status code.
1114 * @param pVM Pointer to the VM.
1115 * @param pCacheRec Address conversion cache record
1116 * @param pGCPtr Guest context pointer
1117 *
1118 * @returns Host context pointer or NULL in case of an error
1119 *
1120 */
1121R3PTRTYPE(uint8_t *) patmR3GCVirtToHCVirt(PVM pVM, PPATMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr)
1122{
1123 int rc;
1124 R3PTRTYPE(uint8_t *) pHCPtr;
1125 uint32_t offset;
1126
1127 if (PATMIsPatchGCAddr(pVM, pGCPtr))
1128 {
1129 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1130 Assert(pPatch);
1131 return PATCHCODE_PTR_HC(pPatch) + (pGCPtr - PATCHCODE_PTR_GC(pPatch));
1132 }
1133
1134 offset = pGCPtr & PAGE_OFFSET_MASK;
1135 if (pCacheRec->pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
1136 return pCacheRec->pPageLocStartHC + offset;
1137
1138 /* Release previous lock if any. */
1139 if (pCacheRec->Lock.pvMap)
1140 {
1141 PGMPhysReleasePageMappingLock(pVM, &pCacheRec->Lock);
1142 pCacheRec->Lock.pvMap = NULL;
1143 }
1144
1145 rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pGCPtr, (const void **)&pHCPtr, &pCacheRec->Lock);
1146 if (rc != VINF_SUCCESS)
1147 {
1148 AssertMsg(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("MMR3PhysGCVirt2HCVirtEx failed for %08X\n", pGCPtr));
1149 return NULL;
1150 }
1151 pCacheRec->pPageLocStartHC = (R3PTRTYPE(uint8_t*))((RTHCUINTPTR)pHCPtr & PAGE_BASE_HC_MASK);
1152 pCacheRec->pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
1153 return pHCPtr;
1154}
1155
1156
1157/**
1158 * Calculates and fills in all branch targets
1159 *
1160 * @returns VBox status code.
1161 * @param pVM Pointer to the VM.
1162 * @param pPatch Current patch block pointer
1163 *
1164 */
1165static int patmr3SetBranchTargets(PVM pVM, PPATCHINFO pPatch)
1166{
1167 int32_t displ;
1168
1169 PJUMPREC pRec = 0;
1170 unsigned nrJumpRecs = 0;
1171
1172 /*
1173 * Set all branch targets inside the patch block.
1174 * We remove all jump records as they are no longer needed afterwards.
1175 */
1176 while (true)
1177 {
1178 RCPTRTYPE(uint8_t *) pInstrGC;
1179 RCPTRTYPE(uint8_t *) pBranchTargetGC = 0;
1180
1181 pRec = (PJUMPREC)RTAvlPVRemoveBestFit(&pPatch->JumpTree, 0, true);
1182 if (pRec == 0)
1183 break;
1184
1185 nrJumpRecs++;
1186
1187 /* HC in patch block to GC in patch block. */
1188 pInstrGC = patmPatchHCPtr2PatchGCPtr(pVM, pRec->pJumpHC);
1189
1190 if (pRec->opcode == OP_CALL)
1191 {
1192 /* Special case: call function replacement patch from this patch block.
1193 */
1194 PPATMPATCHREC pFunctionRec = patmQueryFunctionPatch(pVM, pRec->pTargetGC);
1195 if (!pFunctionRec)
1196 {
1197 int rc;
1198
1199 if (PATMR3HasBeenPatched(pVM, pRec->pTargetGC) == false)
1200 rc = PATMR3InstallPatch(pVM, pRec->pTargetGC, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
1201 else
1202 rc = VERR_PATCHING_REFUSED; /* exists as a normal patch; can't use it */
1203
1204 if (RT_FAILURE(rc))
1205 {
1206 uint8_t *pPatchHC;
1207 RTRCPTR pPatchGC;
1208 RTRCPTR pOrgInstrGC;
1209
1210 pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pInstrGC, 0);
1211 Assert(pOrgInstrGC);
1212
1213 /* Failure for some reason -> mark exit point with int 3. */
1214 Log(("Failed to install function replacement patch (at %x) for reason %Rrc\n", pOrgInstrGC, rc));
1215
1216 pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pOrgInstrGC);
1217 Assert(pPatchGC);
1218
1219 pPatchHC = pVM->patm.s.pPatchMemHC + (pPatchGC - pVM->patm.s.pPatchMemGC);
1220
1221 /* Set a breakpoint at the very beginning of the recompiled instruction */
1222 *pPatchHC = 0xCC;
1223
1224 continue;
1225 }
1226 }
1227 else
1228 {
1229 Log(("Patch block %RRv called as function\n", pFunctionRec->patch.pPrivInstrGC));
1230 pFunctionRec->patch.flags |= PATMFL_CODE_REFERENCED;
1231 }
1232
1233 pBranchTargetGC = PATMR3QueryPatchGCPtr(pVM, pRec->pTargetGC);
1234 }
1235 else
1236 pBranchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pRec->pTargetGC);
1237
1238 if (pBranchTargetGC == 0)
1239 {
1240 AssertMsgFailed(("patmr3SetBranchTargets: patmGuestGCPtrToPatchGCPtr failed for %08X\n", pRec->pTargetGC));
1241 return VERR_PATCHING_REFUSED;
1242 }
1243 /* Our jumps *always* have a dword displacement (to make things easier). */
1244 Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
1245 displ = pBranchTargetGC - (pInstrGC + pRec->offDispl + sizeof(RTRCPTR));
1246 *(RTRCPTR *)(pRec->pJumpHC + pRec->offDispl) = displ;
1247 Log(("Set branch target %d to %08X : %08x - (%08x + %d + %d)\n", nrJumpRecs, displ, pBranchTargetGC, pInstrGC, pRec->offDispl, sizeof(RTRCPTR)));
1248 }
1249 Assert(nrJumpRecs == pPatch->nrJumpRecs);
1250 Assert(pPatch->JumpTree == 0);
1251 return VINF_SUCCESS;
1252}
1253
1254/**
1255 * Add an illegal instruction record
1256 *
1257 * @param pVM Pointer to the VM.
1258 * @param pPatch Patch structure ptr
1259 * @param pInstrGC Guest context pointer to privileged instruction
1260 *
1261 */
1262static void patmAddIllegalInstrRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1263{
1264 PAVLPVNODECORE pRec;
1265
1266 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1267 Assert(pRec);
1268 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
1269
1270 bool ret = RTAvlPVInsert(&pPatch->pTempInfo->IllegalInstrTree, pRec);
1271 Assert(ret); NOREF(ret);
1272 pPatch->pTempInfo->nrIllegalInstr++;
1273}
1274
1275static bool patmIsIllegalInstr(PPATCHINFO pPatch, RTRCPTR pInstrGC)
1276{
1277 PAVLPVNODECORE pRec;
1278
1279 pRec = RTAvlPVGet(&pPatch->pTempInfo->IllegalInstrTree, (AVLPVKEY)(uintptr_t)pInstrGC);
1280 if (pRec)
1281 return true;
1282 else
1283 return false;
1284}
1285
1286/**
1287 * Add a patch to guest lookup record
1288 *
1289 * @param pVM Pointer to the VM.
1290 * @param pPatch Patch structure ptr
1291 * @param pPatchInstrHC Guest context pointer to patch block
1292 * @param pInstrGC Guest context pointer to privileged instruction
1293 * @param enmType Lookup type
1294 * @param fDirty Dirty flag
1295 *
1296 * @note Be extremely careful with this function. Make absolutely sure the guest
1297 * address is correct! (to avoid executing instructions twice!)
1298 */
1299void patmR3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC, PATM_LOOKUP_TYPE enmType, bool fDirty)
1300{
1301 bool ret;
1302 PRECPATCHTOGUEST pPatchToGuestRec;
1303 PRECGUESTTOPATCH pGuestToPatchRec;
1304 uint32_t PatchOffset = pPatchInstrHC - pVM->patm.s.pPatchMemHC; /* Offset in memory reserved for PATM. */
1305
1306 LogFlowFunc(("pVM=%#p pPatch=%#p pPatchInstrHC=%#p pInstrGC=%#x enmType=%d fDirty=%RTbool\n",
1307 pVM, pPatch, pPatchInstrHC, pInstrGC, enmType, fDirty));
1308
1309 if (enmType == PATM_LOOKUP_PATCH2GUEST)
1310 {
1311 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1312 if (pPatchToGuestRec && pPatchToGuestRec->Core.Key == PatchOffset)
1313 return; /* already there */
1314
1315 Assert(!pPatchToGuestRec);
1316 }
1317#ifdef VBOX_STRICT
1318 else
1319 {
1320 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1321 Assert(!pPatchToGuestRec);
1322 }
1323#endif
1324
1325 pPatchToGuestRec = (PRECPATCHTOGUEST)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(RECPATCHTOGUEST) + sizeof(RECGUESTTOPATCH));
1326 Assert(pPatchToGuestRec);
1327 pPatchToGuestRec->Core.Key = PatchOffset;
1328 pPatchToGuestRec->pOrgInstrGC = pInstrGC;
1329 pPatchToGuestRec->enmType = enmType;
1330 pPatchToGuestRec->fDirty = fDirty;
1331
1332 ret = RTAvlU32Insert(&pPatch->Patch2GuestAddrTree, &pPatchToGuestRec->Core);
1333 Assert(ret);
1334
1335 /* GC to patch address */
1336 if (enmType == PATM_LOOKUP_BOTHDIR)
1337 {
1338 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
1339 if (!pGuestToPatchRec)
1340 {
1341 pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1342 pGuestToPatchRec->Core.Key = pInstrGC;
1343 pGuestToPatchRec->PatchOffset = PatchOffset;
1344
1345 ret = RTAvlU32Insert(&pPatch->Guest2PatchAddrTree, &pGuestToPatchRec->Core);
1346 Assert(ret);
1347 }
1348 }
1349
1350 pPatch->nrPatch2GuestRecs++;
1351}
1352
1353
1354/**
1355 * Removes a patch to guest lookup record
1356 *
1357 * @param pVM Pointer to the VM.
1358 * @param pPatch Patch structure ptr
1359 * @param pPatchInstrGC Guest context pointer to patch block
1360 */
1361void patmr3RemoveP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pPatchInstrGC)
1362{
1363 PAVLU32NODECORE pNode;
1364 PAVLU32NODECORE pNode2;
1365 PRECPATCHTOGUEST pPatchToGuestRec;
1366 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
1367
1368 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1369 Assert(pPatchToGuestRec);
1370 if (pPatchToGuestRec)
1371 {
1372 if (pPatchToGuestRec->enmType == PATM_LOOKUP_BOTHDIR)
1373 {
1374 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1375
1376 Assert(pGuestToPatchRec->Core.Key);
1377 pNode2 = RTAvlU32Remove(&pPatch->Guest2PatchAddrTree, pGuestToPatchRec->Core.Key);
1378 Assert(pNode2);
1379 }
1380 pNode = RTAvlU32Remove(&pPatch->Patch2GuestAddrTree, pPatchToGuestRec->Core.Key);
1381 Assert(pNode);
1382
1383 MMR3HeapFree(pPatchToGuestRec);
1384 pPatch->nrPatch2GuestRecs--;
1385 }
1386}
1387
1388
1389/**
1390 * RTAvlPVDestroy callback.
1391 */
1392static DECLCALLBACK(int) patmEmptyTreePVCallback(PAVLPVNODECORE pNode, void *)
1393{
1394 MMR3HeapFree(pNode);
1395 return 0;
1396}
1397
1398/**
1399 * Empty the specified tree (PV tree, MMR3 heap)
1400 *
1401 * @param pVM Pointer to the VM.
1402 * @param ppTree Tree to empty
1403 */
1404static void patmEmptyTree(PVM pVM, PAVLPVNODECORE *ppTree)
1405{
1406 NOREF(pVM);
1407 RTAvlPVDestroy(ppTree, patmEmptyTreePVCallback, NULL);
1408}
1409
1410
1411/**
1412 * RTAvlU32Destroy callback.
1413 */
1414static DECLCALLBACK(int) patmEmptyTreeU32Callback(PAVLU32NODECORE pNode, void *)
1415{
1416 MMR3HeapFree(pNode);
1417 return 0;
1418}
1419
1420/**
1421 * Empty the specified tree (U32 tree, MMR3 heap)
1422 *
1423 * @param pVM Pointer to the VM.
1424 * @param ppTree Tree to empty
1425 */
1426static void patmEmptyTreeU32(PVM pVM, PPAVLU32NODECORE ppTree)
1427{
1428 NOREF(pVM);
1429 RTAvlU32Destroy(ppTree, patmEmptyTreeU32Callback, NULL);
1430}
1431
1432
1433/**
1434 * Analyses the instructions following the cli for compliance with our heuristics for cli & pushf
1435 *
1436 * @returns VBox status code.
1437 * @param pVM Pointer to the VM.
1438 * @param pCpu CPU disassembly state
1439 * @param pInstrGC Guest context pointer to privileged instruction
1440 * @param pCurInstrGC Guest context pointer to the current instruction
1441 * @param pCacheRec Cache record ptr
1442 *
1443 */
1444static int patmAnalyseBlockCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1445{
1446 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1447 bool fIllegalInstr = false;
1448
1449 /*
1450 * Preliminary heuristics:
1451 *- no call instructions without a fixed displacement between cli and sti/popf
1452 *- no jumps in the instructions following cli (4+ bytes; enough for the replacement jump (5 bytes))
1453 *- no nested pushf/cli
1454 *- sti/popf should be the (eventual) target of all branches
1455 *- no near or far returns; no int xx, no into
1456 *
1457 * Note: Later on we can impose less stricter guidelines if the need arises
1458 */
1459
1460 /* Bail out if the patch gets too big. */
1461 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1462 {
1463 Log(("Code block too big (%x) for patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1464 fIllegalInstr = true;
1465 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1466 }
1467 else
1468 {
1469 /* No unconditional jumps or calls without fixed displacements. */
1470 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1471 && (pCpu->pCurInstr->uOpcode == OP_JMP || pCpu->pCurInstr->uOpcode == OP_CALL)
1472 )
1473 {
1474 Assert(pCpu->Param1.cb <= 4 || pCpu->Param1.cb == 6);
1475 if ( pCpu->Param1.cb == 6 /* far call/jmp */
1476 || (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1477 || (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1478 )
1479 {
1480 fIllegalInstr = true;
1481 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1482 }
1483 }
1484
1485 /* An unconditional (short) jump right after a cli is a potential problem; we will overwrite whichever function comes afterwards */
1486 if (pPatch->opcode == OP_CLI && pCpu->pCurInstr->uOpcode == OP_JMP)
1487 {
1488 if ( pCurInstrGC > pPatch->pPrivInstrGC
1489 && pCurInstrGC + pCpu->cbInstr < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1490 {
1491 Log(("Dangerous unconditional jump ends in our generated patch jump!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1492 /* We turn this one into a int 3 callable patch. */
1493 pPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
1494 }
1495 }
1496 else
1497 /* no nested pushfs just yet; nested cli is allowed for cli patches though. */
1498 if (pPatch->opcode == OP_PUSHF)
1499 {
1500 if (pCurInstrGC != pInstrGC && pCpu->pCurInstr->uOpcode == OP_PUSHF)
1501 {
1502 fIllegalInstr = true;
1503 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1504 }
1505 }
1506
1507 /* no far returns */
1508 if (pCpu->pCurInstr->uOpcode == OP_RETF)
1509 {
1510 pPatch->pTempInfo->nrRetInstr++;
1511 fIllegalInstr = true;
1512 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1513 }
1514 else if ( pCpu->pCurInstr->uOpcode == OP_INT3
1515 || pCpu->pCurInstr->uOpcode == OP_INT
1516 || pCpu->pCurInstr->uOpcode == OP_INTO)
1517 {
1518 /* No int xx or into either. */
1519 fIllegalInstr = true;
1520 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1521 }
1522 }
1523
1524 pPatch->cbPatchBlockSize += pCpu->cbInstr;
1525
1526 /* Illegal instruction -> end of analysis phase for this code block */
1527 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1528 return VINF_SUCCESS;
1529
1530 /* Check for exit points. */
1531 switch (pCpu->pCurInstr->uOpcode)
1532 {
1533 case OP_SYSEXIT:
1534 return VINF_SUCCESS; /* duplicate it; will fault or emulated in GC. */
1535
1536 case OP_SYSENTER:
1537 case OP_ILLUD2:
1538 /* This appears to be some kind of kernel panic in Linux 2.4; no point to analyse more. */
1539 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1540 return VINF_SUCCESS;
1541
1542 case OP_STI:
1543 case OP_POPF:
1544 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION)));
1545 /* If out exit point lies within the generated patch jump, then we have to refuse!! */
1546 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1547 {
1548 Log(("Exit point within patch jump itself!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1549 return VERR_PATCHING_REFUSED;
1550 }
1551 if (pPatch->opcode == OP_PUSHF)
1552 {
1553 if (pCpu->pCurInstr->uOpcode == OP_POPF)
1554 {
1555 if (pPatch->cbPatchBlockSize >= SIZEOF_NEARJUMP32)
1556 return VINF_SUCCESS;
1557
1558 /* Or else we need to duplicate more instructions, because we can't jump back yet! */
1559 Log(("WARNING: End of block reached, but we need to duplicate some extra instruction to avoid a conflict with the patch jump\n"));
1560 pPatch->flags |= PATMFL_CHECK_SIZE;
1561 }
1562 break; /* sti doesn't mark the end of a pushf block; only popf does. */
1563 }
1564 /* else: fall through. */
1565 case OP_RETN: /* exit point for function replacement */
1566 return VINF_SUCCESS;
1567
1568 case OP_IRET:
1569 return VINF_SUCCESS; /* exitpoint */
1570
1571 case OP_CPUID:
1572 case OP_CALL:
1573 case OP_JMP:
1574 break;
1575
1576#ifdef VBOX_WITH_SAFE_STR /** @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table */
1577 case OP_STR:
1578 break;
1579#endif
1580
1581 default:
1582 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
1583 {
1584 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1585 return VINF_SUCCESS; /* exit point */
1586 }
1587 break;
1588 }
1589
1590 /* If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump. */
1591 if ((pPatch->flags & PATMFL_CHECK_SIZE) && pPatch->cbPatchBlockSize > SIZEOF_NEARJUMP32 && !(pCpu->pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW))
1592 {
1593 /* The end marker for this kind of patch is any instruction at a location outside our patch jump. */
1594 Log(("End of block at %RRv size %d\n", pCurInstrGC, pCpu->cbInstr));
1595 return VINF_SUCCESS;
1596 }
1597
1598 return VWRN_CONTINUE_ANALYSIS;
1599}
1600
1601/**
1602 * Analyses the instructions inside a function for compliance
1603 *
1604 * @returns VBox status code.
1605 * @param pVM Pointer to the VM.
1606 * @param pCpu CPU disassembly state
1607 * @param pInstrGC Guest context pointer to privileged instruction
1608 * @param pCurInstrGC Guest context pointer to the current instruction
1609 * @param pCacheRec Cache record ptr
1610 *
1611 */
1612static int patmAnalyseFunctionCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1613{
1614 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1615 bool fIllegalInstr = false;
1616 NOREF(pInstrGC);
1617
1618 //Preliminary heuristics:
1619 //- no call instructions
1620 //- ret ends a block
1621
1622 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
1623
1624 // bail out if the patch gets too big
1625 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1626 {
1627 Log(("Code block too big (%x) for function patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1628 fIllegalInstr = true;
1629 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1630 }
1631 else
1632 {
1633 // no unconditional jumps or calls without fixed displacements
1634 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1635 && (pCpu->pCurInstr->uOpcode == OP_JMP || pCpu->pCurInstr->uOpcode == OP_CALL)
1636 )
1637 {
1638 Assert(pCpu->Param1.cb <= 4 || pCpu->Param1.cb == 6);
1639 if ( pCpu->Param1.cb == 6 /* far call/jmp */
1640 || (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1641 || (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1642 )
1643 {
1644 fIllegalInstr = true;
1645 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1646 }
1647 }
1648 else /* no far returns */
1649 if (pCpu->pCurInstr->uOpcode == OP_RETF)
1650 {
1651 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1652 fIllegalInstr = true;
1653 }
1654 else /* no int xx or into either */
1655 if (pCpu->pCurInstr->uOpcode == OP_INT3 || pCpu->pCurInstr->uOpcode == OP_INT || pCpu->pCurInstr->uOpcode == OP_INTO)
1656 {
1657 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1658 fIllegalInstr = true;
1659 }
1660
1661 #if 0
1662 ///@todo we can handle certain in/out and privileged instructions in the guest context
1663 if (pCpu->pCurInstr->fOpType & DISOPTYPE_PRIVILEGED && pCpu->pCurInstr->uOpcode != OP_STI)
1664 {
1665 Log(("Illegal instructions for function patch!!\n"));
1666 return VERR_PATCHING_REFUSED;
1667 }
1668 #endif
1669 }
1670
1671 pPatch->cbPatchBlockSize += pCpu->cbInstr;
1672
1673 /* Illegal instruction -> end of analysis phase for this code block */
1674 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1675 {
1676 return VINF_SUCCESS;
1677 }
1678
1679 // Check for exit points
1680 switch (pCpu->pCurInstr->uOpcode)
1681 {
1682 case OP_ILLUD2:
1683 //This appears to be some kind of kernel panic in Linux 2.4; no point to analyse more
1684 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1685 return VINF_SUCCESS;
1686
1687 case OP_IRET:
1688 case OP_SYSEXIT: /* will fault or emulated in GC */
1689 case OP_RETN:
1690 return VINF_SUCCESS;
1691
1692#ifdef VBOX_WITH_SAFE_STR /** @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table */
1693 case OP_STR:
1694 break;
1695#endif
1696
1697 case OP_POPF:
1698 case OP_STI:
1699 return VWRN_CONTINUE_ANALYSIS;
1700 default:
1701 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
1702 {
1703 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1704 return VINF_SUCCESS; /* exit point */
1705 }
1706 return VWRN_CONTINUE_ANALYSIS;
1707 }
1708
1709 return VWRN_CONTINUE_ANALYSIS;
1710}
1711
/**
 * Recompiles the instructions in a code block
 *
 * Callback for patmRecompileCodeStream: first runs the analysis callback for
 * the current instruction, then emits the corresponding patch code for it
 * (duplicating, replacing with a generated sequence, or marking an exit
 * point).  Cross-case gotos (gen_illegal_instr / duplicate_instr / end) are
 * deliberate and shared by several switch cases below.
 *
 * @returns VBox status code.
 *          VWRN_CONTINUE_RECOMPILE to keep going, VINF_SUCCESS when an exit
 *          point was generated, or a failure status.
 * @param   pVM         Pointer to the VM.
 * @param   pCpu        CPU disassembly state
 * @param   pInstrGC    Guest context pointer to privileged instruction
 * @param   pCurInstrGC Guest context pointer to the current instruction
 * @param   pCacheRec   Cache record ptr
 *
 */
static int patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
{
    PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
    int rc = VINF_SUCCESS;
    bool fInhibitIRQInstr = false; /* did the instruction cause PATMFL_INHIBITIRQS to be set? */

    LogFlow(("patmRecompileCallback %RRv %RRv\n", pInstrGC, pCurInstrGC));

    if (    patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pCurInstrGC) != 0
        && !(pPatch->flags & PATMFL_RECOMPILE_NEXT)) /* do not do this when the next instruction *must* be executed! */
    {
        /*
         * Been there, done that; so insert a jump (we don't want to duplicate code)
         * no need to record this instruction as it's glue code that never crashes (it had better not!)
         */
        Log(("patmRecompileCallback: jump to code we've recompiled before %RRv!\n", pCurInstrGC));
        return patmPatchGenRelJump(pVM, pPatch, pCurInstrGC, OP_JMP, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
    }

    /* Validate the instruction first; failure here refuses the whole patch. */
    if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
    {
        rc = patmAnalyseFunctionCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
    }
    else
        rc = patmAnalyseBlockCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);

    if (RT_FAILURE(rc))
        return rc;

    /* Note: Never do a direct return unless a failure is encountered! */

    /* Clear recompilation of next instruction flag; we are doing that right here. */
    if (pPatch->flags & PATMFL_RECOMPILE_NEXT)
        pPatch->flags &= ~PATMFL_RECOMPILE_NEXT;

    /* Add lookup record for patch to guest address translation */
    patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);

    /* Update lowest and highest instruction address for this patch */
    if (pCurInstrGC < pPatch->pInstrGCLowest)
        pPatch->pInstrGCLowest = pCurInstrGC;
    else
    if (pCurInstrGC > pPatch->pInstrGCHighest)
        pPatch->pInstrGCHighest = pCurInstrGC + pCpu->cbInstr;

    /* Illegal instruction -> end of recompile phase for this code block. */
    if (patmIsIllegalInstr(pPatch, pCurInstrGC))
    {
        Log(("Illegal instruction at %RRv -> mark with int 3\n", pCurInstrGC));
        rc = patmPatchGenIllegalInstr(pVM, pPatch);
        goto end;
    }

    /* For our first attempt, we'll handle only simple relative jumps (immediate offset coded in instruction).
     * Indirect calls are handled below.
     */
    if (   (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
        && (pCpu->pCurInstr->uOpcode != OP_CALL || (pPatch->flags & PATMFL_SUPPORT_CALLS))
        && (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) == OP_PARM_J))
    {
        RCPTRTYPE(uint8_t *) pTargetGC = PATMResolveBranch(pCpu, pCurInstrGC);
        if (pTargetGC == 0)
        {
            Log(("We don't support far jumps here!! (%08X)\n", pCpu->Param1.fUse));
            return VERR_PATCHING_REFUSED;
        }

        if (pCpu->pCurInstr->uOpcode == OP_CALL)
        {
            Assert(!PATMIsPatchGCAddr(pVM, pTargetGC));
            rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, pTargetGC, false);
            if (RT_FAILURE(rc))
                goto end;
        }
        else
            rc = patmPatchGenRelJump(pVM, pPatch, pTargetGC, pCpu->pCurInstr->uOpcode, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));

        if (RT_SUCCESS(rc))
            rc = VWRN_CONTINUE_RECOMPILE;

        goto end;
    }

    switch (pCpu->pCurInstr->uOpcode)
    {
        case OP_CLI:
        {
            /* If a cli is found while duplicating instructions for another patch, then it's of vital importance to continue
             * until we've found the proper exit point(s).
             */
            if (    pCurInstrGC != pInstrGC
                && !(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
               )
            {
                Log(("cli instruction found in other instruction patch block; force it to continue & find an exit point\n"));
                pPatch->flags &= ~(PATMFL_CHECK_SIZE | PATMFL_SINGLE_INSTRUCTION);
            }
            /* Set by irq inhibition; no longer valid now. */
            pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;

            rc = patmPatchGenCli(pVM, pPatch);
            if (RT_SUCCESS(rc))
                rc = VWRN_CONTINUE_RECOMPILE;
            break;
        }

        case OP_MOV:
            if (pCpu->pCurInstr->fOpType & DISOPTYPE_POTENTIALLY_DANGEROUS)
            {
                /* mov ss, src? */
                if (    (pCpu->Param1.fUse & DISUSE_REG_SEG)
                    &&  (pCpu->Param1.Base.idxSegReg == DISSELREG_SS))
                {
                    Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
                    pPatch->flags |= PATMFL_RECOMPILE_NEXT;
                    /** @todo this could cause a fault (ring 0 selector being loaded in ring 1) */
                }
#if 0 /* necessary for Haiku */
                else
                if (    (pCpu->Param2.fUse & DISUSE_REG_SEG)
                    &&  (pCpu->Param2.Base.idxSegReg == USE_REG_SS)
                    &&  (pCpu->Param1.fUse & (DISUSE_REG_GEN32|DISUSE_REG_GEN16)))     /** @todo memory operand must in theory be handled too */
                {
                    /* mov GPR, ss */
                    rc = patmPatchGenMovFromSS(pVM, pPatch, pCpu, pCurInstrGC);
                    if (RT_SUCCESS(rc))
                        rc = VWRN_CONTINUE_RECOMPILE;
                    break;
                }
#endif
            }
            goto duplicate_instr;

        case OP_POP:
            /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS)) */
            if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_SS)
            {
                Assert(pCpu->pCurInstr->fOpType & DISOPTYPE_INHIBIT_IRQS);

                Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
                pPatch->flags |= PATMFL_RECOMPILE_NEXT;
            }
            goto duplicate_instr;

        case OP_STI:
        {
            RTRCPTR pNextInstrGC = 0;   /* by default no inhibit irq */

            /* In a sequence of instructions that inhibit irqs, only the first one actually inhibits irqs. */
            if (!(pPatch->flags & PATMFL_INHIBIT_IRQS))
            {
                pPatch->flags |= PATMFL_INHIBIT_IRQS | PATMFL_GENERATE_JUMPTOGUEST;
                fInhibitIRQInstr = true;
                pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
                Log(("Inhibit irqs for instruction OP_STI at %RRv\n", pCurInstrGC));
            }
            rc = patmPatchGenSti(pVM, pPatch, pCurInstrGC, pNextInstrGC);

            if (RT_SUCCESS(rc))
            {
                DISCPUSTATE cpu = *pCpu;
                unsigned    cbInstr;
                int         disret;
                RCPTRTYPE(uint8_t *) pReturnInstrGC;

                pPatch->flags |= PATMFL_FOUND_PATCHEND;

                pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
                {   /* Force pNextInstrHC out of scope after using it */
                    uint8_t *pNextInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
                    if (pNextInstrHC == NULL)
                    {
                        AssertFailed();
                        return VERR_PATCHING_REFUSED;
                    }

                    // Disassemble the next instruction
                    disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
                }
                if (disret == false)
                {
                    AssertMsgFailed(("STI: Disassembly failed (probably page not present) -> return to caller\n"));
                    return VERR_PATCHING_REFUSED;
                }
                pReturnInstrGC = pNextInstrGC + cbInstr;

                /* The instruction after the sti must lie within the 5 byte patch jump for this to be an exit point. */
                if (    (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
                    ||  pReturnInstrGC <= pInstrGC
                    ||  pReturnInstrGC - pInstrGC >= SIZEOF_NEARJUMP32
                   )
                {
                    /* Not an exit point for function duplication patches */
                    if (    (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
                        &&  RT_SUCCESS(rc))
                    {
                        pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;  /* Don't generate a jump back */
                        rc = VWRN_CONTINUE_RECOMPILE;
                    }
                    else
                        rc = VINF_SUCCESS;  //exit point
                }
                else {
                    Log(("PATM: sti occurred too soon; refusing patch!\n"));
                    rc = VERR_PATCHING_REFUSED; //not allowed!!
                }
            }
            break;
        }

        case OP_POPF:
        {
            bool fGenerateJmpBack = (pCurInstrGC + pCpu->cbInstr - pInstrGC >= SIZEOF_NEARJUMP32);

            /* Not an exit point for IDT handler or function replacement patches */
            /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
            if (pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_DUPLICATE_FUNCTION))
                fGenerateJmpBack = false;

            rc = patmPatchGenPopf(pVM, pPatch, pCurInstrGC + pCpu->cbInstr, !!(pCpu->fPrefix & DISPREFIX_OPSIZE), fGenerateJmpBack);
            if (RT_SUCCESS(rc))
            {
                if (fGenerateJmpBack == false)
                {
                    /* Not an exit point for IDT handler or function replacement patches */
                    rc = VWRN_CONTINUE_RECOMPILE;
                }
                else
                {
                    pPatch->flags |= PATMFL_FOUND_PATCHEND;
                    rc = VINF_SUCCESS;  /* exit point! */
                }
            }
            break;
        }

        case OP_PUSHF:
            rc = patmPatchGenPushf(pVM, pPatch, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
            if (RT_SUCCESS(rc))
                rc = VWRN_CONTINUE_RECOMPILE;
            break;

        case OP_PUSH:
            /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS)) */
            if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_CS)
            {
                rc = patmPatchGenPushCS(pVM, pPatch);
                if (RT_SUCCESS(rc))
                    rc = VWRN_CONTINUE_RECOMPILE;
                break;
            }
            goto duplicate_instr;

        case OP_IRET:
            Log(("IRET at %RRv\n", pCurInstrGC));
            rc = patmPatchGenIret(pVM, pPatch, pCurInstrGC, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
            if (RT_SUCCESS(rc))
            {
                pPatch->flags |= PATMFL_FOUND_PATCHEND;
                rc = VINF_SUCCESS;  /* exit point by definition */
            }
            break;

        case OP_ILLUD2:
            /* This appears to be some kind of kernel panic in Linux 2.4; no point to continue */
            rc = patmPatchGenIllegalInstr(pVM, pPatch);
            if (RT_SUCCESS(rc))
                rc = VINF_SUCCESS;  /* exit point by definition */
            Log(("Illegal opcode (0xf 0xb)\n"));
            break;

        case OP_CPUID:
            rc = patmPatchGenCpuid(pVM, pPatch, pCurInstrGC);
            if (RT_SUCCESS(rc))
                rc = VWRN_CONTINUE_RECOMPILE;
            break;

        case OP_STR:
#ifdef VBOX_WITH_SAFE_STR   /* @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table and move OP_STR into #ifndef */
            /* Now safe because our shadow TR entry is identical to the guest's. */
            goto duplicate_instr;
#endif
        case OP_SLDT:
            rc = patmPatchGenSldtStr(pVM, pPatch, pCpu, pCurInstrGC);
            if (RT_SUCCESS(rc))
                rc = VWRN_CONTINUE_RECOMPILE;
            break;

        case OP_SGDT:
        case OP_SIDT:
            rc = patmPatchGenSxDT(pVM, pPatch, pCpu, pCurInstrGC);
            if (RT_SUCCESS(rc))
                rc = VWRN_CONTINUE_RECOMPILE;
            break;

        case OP_RETN:
            /* retn is an exit point for function patches */
            rc = patmPatchGenRet(pVM, pPatch, pCpu, pCurInstrGC);
            if (RT_SUCCESS(rc))
                rc = VINF_SUCCESS;  /* exit point by definition */
            break;

        case OP_SYSEXIT:
            /* Duplicate it, so it can be emulated in GC (or fault). */
            rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
            if (RT_SUCCESS(rc))
                rc = VINF_SUCCESS;  /* exit point by definition */
            break;

        case OP_CALL:
            Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
            /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
             * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
             */
            Assert(pCpu->Param1.cb == 4 || pCpu->Param1.cb == 6);
            if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->Param1.cb == 4 /* no far calls! */)
            {
                rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, (RTRCPTR)0xDEADBEEF, true);
                if (RT_SUCCESS(rc))
                {
                    rc = VWRN_CONTINUE_RECOMPILE;
                }
                break;
            }
            goto gen_illegal_instr;

        case OP_JMP:
            Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
            /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
             * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
             */
            Assert(pCpu->Param1.cb == 4 || pCpu->Param1.cb == 6);
            if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->Param1.cb == 4 /* no far jumps! */)
            {
                rc = patmPatchGenJump(pVM, pPatch, pCpu, pCurInstrGC);
                if (RT_SUCCESS(rc))
                    rc = VINF_SUCCESS;  /* end of branch */
                break;
            }
            goto gen_illegal_instr;

        case OP_INT3:
        case OP_INT:
        case OP_INTO:
            goto gen_illegal_instr;

        case OP_MOV_DR:
            /* Note: currently we let DRx writes cause a trap d; our trap handler will decide to interpret it or not. */
            if (pCpu->pCurInstr->fParam2 == OP_PARM_Dd)
            {
                rc = patmPatchGenMovDebug(pVM, pPatch, pCpu);
                if (RT_SUCCESS(rc))
                    rc = VWRN_CONTINUE_RECOMPILE;
                break;
            }
            goto duplicate_instr;

        case OP_MOV_CR:
            /* Note: currently we let CRx writes cause a trap d; our trap handler will decide to interpret it or not. */
            if (pCpu->pCurInstr->fParam2 == OP_PARM_Cd)
            {
                rc = patmPatchGenMovControl(pVM, pPatch, pCpu);
                if (RT_SUCCESS(rc))
                    rc = VWRN_CONTINUE_RECOMPILE;
                break;
            }
            goto duplicate_instr;

        default:
            if (pCpu->pCurInstr->fOpType & (DISOPTYPE_CONTROLFLOW | DISOPTYPE_PRIVILEGED_NOTRAP))
            {
gen_illegal_instr:
                rc = patmPatchGenIllegalInstr(pVM, pPatch);
                if (RT_SUCCESS(rc))
                    rc = VINF_SUCCESS;  /* exit point by definition */
            }
            else
            {
duplicate_instr:
                Log(("patmPatchGenDuplicate\n"));
                rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
                if (RT_SUCCESS(rc))
                    rc = VWRN_CONTINUE_RECOMPILE;
            }
            break;
    }

end:

    /* If the *previous* instruction set the inhibit-irq flag (e.g. sti), clear it now that
     * the instruction following it has been recompiled; optionally jump back to the guest. */
    if (    !fInhibitIRQInstr
        &&  (pPatch->flags & PATMFL_INHIBIT_IRQS))
    {
        int     rc2;
        RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->cbInstr;

        pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
        Log(("Clear inhibit IRQ flag at %RRv\n", pCurInstrGC));
        if (pPatch->flags & PATMFL_GENERATE_JUMPTOGUEST)
        {
            Log(("patmRecompileCallback: generate jump back to guest (%RRv) after fused instruction\n", pNextInstrGC));

            rc2 = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
            pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
            rc = VINF_SUCCESS; /* end of the line */
        }
        else
        {
            rc2 = patmPatchGenClearInhibitIRQ(pVM, pPatch, pNextInstrGC);
        }
        if (RT_FAILURE(rc2))
            rc = rc2;
    }

    if (RT_SUCCESS(rc))
    {
        // If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
        if (    (pPatch->flags & PATMFL_CHECK_SIZE)
             &&  pCurInstrGC + pCpu->cbInstr - pInstrGC >= SIZEOF_NEARJUMP32
             &&  !(pCpu->pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW)
             &&  !(pPatch->flags & PATMFL_RECOMPILE_NEXT) /* do not do this when the next instruction *must* be executed! */
           )
        {
            RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->cbInstr;

            // The end marker for this kind of patch is any instruction at a location outside our patch jump
            Log(("patmRecompileCallback: end found for single instruction patch at %RRv cbInstr %d\n", pNextInstrGC, pCpu->cbInstr));

            rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC);
            AssertRC(rc);
        }
    }
    return rc;
}
2155
2156
2157#ifdef LOG_ENABLED
2158
2159/**
2160 * Add a disasm jump record (temporary for prevent duplicate analysis)
2161 *
2162 * @param pVM Pointer to the VM.
2163 * @param pPatch Patch structure ptr
2164 * @param pInstrGC Guest context pointer to privileged instruction
2165 *
2166 */
2167static void patmPatchAddDisasmJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
2168{
2169 PAVLPVNODECORE pRec;
2170
2171 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
2172 Assert(pRec);
2173 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
2174
2175 int ret = RTAvlPVInsert(&pPatch->pTempInfo->DisasmJumpTree, pRec);
2176 Assert(ret);
2177}
2178
2179/**
2180 * Checks if jump target has been analysed before.
2181 *
2182 * @returns VBox status code.
2183 * @param pPatch Patch struct
2184 * @param pInstrGC Jump target
2185 *
2186 */
2187static bool patmIsKnownDisasmJump(PPATCHINFO pPatch, RTRCPTR pInstrGC)
2188{
2189 PAVLPVNODECORE pRec;
2190
2191 pRec = RTAvlPVGet(&pPatch->pTempInfo->DisasmJumpTree, (AVLPVKEY)(uintptr_t)pInstrGC);
2192 if (pRec)
2193 return true;
2194 return false;
2195}
2196
2197/**
2198 * For proper disassembly of the final patch block
2199 *
2200 * @returns VBox status code.
2201 * @param pVM Pointer to the VM.
2202 * @param pCpu CPU disassembly state
2203 * @param pInstrGC Guest context pointer to privileged instruction
2204 * @param pCurInstrGC Guest context pointer to the current instruction
2205 * @param pCacheRec Cache record ptr
2206 *
2207 */
2208int patmr3DisasmCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
2209{
2210 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2211 NOREF(pInstrGC);
2212
2213 if (pCpu->pCurInstr->uOpcode == OP_INT3)
2214 {
2215 /* Could be an int3 inserted in a call patch. Check to be sure */
2216 DISCPUSTATE cpu;
2217 RTRCPTR pOrgJumpGC;
2218
2219 pOrgJumpGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2220
2221 { /* Force pOrgJumpHC out of scope after using it */
2222 uint8_t *pOrgJumpHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pOrgJumpGC);
2223
2224 bool disret = patmR3DisInstr(pVM, pPatch, pOrgJumpGC, pOrgJumpHC, PATMREAD_ORGCODE, &cpu, NULL);
2225 if (!disret || cpu.pCurInstr->uOpcode != OP_CALL || cpu.Param1.cb != 4 /* only near calls */)
2226 return VINF_SUCCESS;
2227 }
2228 return VWRN_CONTINUE_ANALYSIS;
2229 }
2230
2231 if ( pCpu->pCurInstr->uOpcode == OP_ILLUD2
2232 && PATMIsPatchGCAddr(pVM, pCurInstrGC))
2233 {
2234 /* the indirect call patch contains an 0xF/0xB illegal instr to call for assistance; check for this and continue */
2235 return VWRN_CONTINUE_ANALYSIS;
2236 }
2237
2238 if ( (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
2239 || pCpu->pCurInstr->uOpcode == OP_INT
2240 || pCpu->pCurInstr->uOpcode == OP_IRET
2241 || pCpu->pCurInstr->uOpcode == OP_RETN
2242 || pCpu->pCurInstr->uOpcode == OP_RETF
2243 )
2244 {
2245 return VINF_SUCCESS;
2246 }
2247
2248 if (pCpu->pCurInstr->uOpcode == OP_ILLUD2)
2249 return VINF_SUCCESS;
2250
2251 return VWRN_CONTINUE_ANALYSIS;
2252}
2253
2254
2255/**
2256 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2257 *
2258 * @returns VBox status code.
2259 * @param pVM Pointer to the VM.
2260 * @param pInstrGC Guest context pointer to the initial privileged instruction
2261 * @param pCurInstrGC Guest context pointer to the current instruction
2262 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2263 * @param pCacheRec Cache record ptr
2264 *
2265 */
2266int patmr3DisasmCode(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2267{
2268 DISCPUSTATE cpu;
2269 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2270 int rc = VWRN_CONTINUE_ANALYSIS;
2271 uint32_t cbInstr, delta;
2272 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2273 bool disret;
2274 char szOutput[256];
2275
2276 Assert(pCurInstrHC != PATCHCODE_PTR_HC(pPatch) || pPatch->pTempInfo->DisasmJumpTree == 0);
2277
2278 /* We need this to determine branch targets (and for disassembling). */
2279 delta = pVM->patm.s.pPatchMemGC - (uintptr_t)pVM->patm.s.pPatchMemHC;
2280
2281 while (rc == VWRN_CONTINUE_ANALYSIS)
2282 {
2283 pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2284 if (pCurInstrHC == NULL)
2285 {
2286 rc = VERR_PATCHING_REFUSED;
2287 goto end;
2288 }
2289
2290 disret = patmR3DisInstrToStr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_RAWCODE,
2291 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2292 if (PATMIsPatchGCAddr(pVM, pCurInstrGC))
2293 {
2294 RTRCPTR pOrgInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2295
2296 if (pOrgInstrGC != pPatch->pTempInfo->pLastDisasmInstrGC)
2297 Log(("DIS %RRv<-%s", pOrgInstrGC, szOutput));
2298 else
2299 Log(("DIS %s", szOutput));
2300
2301 pPatch->pTempInfo->pLastDisasmInstrGC = pOrgInstrGC;
2302 if (patmIsIllegalInstr(pPatch, pOrgInstrGC))
2303 {
2304 rc = VINF_SUCCESS;
2305 goto end;
2306 }
2307 }
2308 else
2309 Log(("DIS: %s", szOutput));
2310
2311 if (disret == false)
2312 {
2313 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2314 rc = VINF_SUCCESS;
2315 goto end;
2316 }
2317
2318 rc = pfnPATMR3Disasm(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2319 if (rc != VWRN_CONTINUE_ANALYSIS) {
2320 break; //done!
2321 }
2322
2323 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction) */
2324 if ( (cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2325 && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J)
2326 && cpu.pCurInstr->uOpcode != OP_CALL /* complete functions are replaced; don't bother here. */
2327 )
2328 {
2329 RTRCPTR pTargetGC = PATMResolveBranch(&cpu, pCurInstrGC);
2330 RTRCPTR pOrgTargetGC;
2331
2332 if (pTargetGC == 0)
2333 {
2334 Log(("We don't support far jumps here!! (%08X)\n", cpu.Param1.fUse));
2335 rc = VERR_PATCHING_REFUSED;
2336 break;
2337 }
2338
2339 if (!PATMIsPatchGCAddr(pVM, pTargetGC))
2340 {
2341 //jump back to guest code
2342 rc = VINF_SUCCESS;
2343 goto end;
2344 }
2345 pOrgTargetGC = PATMR3PatchToGCPtr(pVM, pTargetGC, 0);
2346
2347 if (patmIsCommonIDTHandlerPatch(pVM, pOrgTargetGC))
2348 {
2349 rc = VINF_SUCCESS;
2350 goto end;
2351 }
2352
2353 if (patmIsKnownDisasmJump(pPatch, pTargetGC) == false)
2354 {
2355 /* New jump, let's check it. */
2356 patmPatchAddDisasmJump(pVM, pPatch, pTargetGC);
2357
2358 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2359 rc = patmr3DisasmCode(pVM, pInstrGC, pTargetGC, pfnPATMR3Disasm, pCacheRec);
2360 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2361
2362 if (rc != VINF_SUCCESS) {
2363 break; //done!
2364 }
2365 }
2366 if (cpu.pCurInstr->uOpcode == OP_JMP)
2367 {
2368 /* Unconditional jump; return to caller. */
2369 rc = VINF_SUCCESS;
2370 goto end;
2371 }
2372
2373 rc = VWRN_CONTINUE_ANALYSIS;
2374 }
2375 pCurInstrGC += cbInstr;
2376 }
2377end:
2378 return rc;
2379}
2380
2381/**
2382 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2383 *
2384 * @returns VBox status code.
2385 * @param pVM Pointer to the VM.
2386 * @param pInstrGC Guest context pointer to the initial privileged instruction
2387 * @param pCurInstrGC Guest context pointer to the current instruction
2388 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2389 * @param pCacheRec Cache record ptr
2390 *
2391 */
2392int patmr3DisasmCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2393{
2394 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2395
2396 int rc = patmr3DisasmCode(pVM, pInstrGC, pCurInstrGC, pfnPATMR3Disasm, pCacheRec);
2397 /* Free all disasm jump records. */
2398 patmEmptyTree(pVM, &pPatch->pTempInfo->DisasmJumpTree);
2399 return rc;
2400}
2401
2402#endif /* LOG_ENABLED */
2403
2404/**
2405 * Detects it the specified address falls within a 5 byte jump generated for an active patch.
2406 * If so, this patch is permanently disabled.
2407 *
2408 * @param pVM Pointer to the VM.
2409 * @param pInstrGC Guest context pointer to instruction
2410 * @param pConflictGC Guest context pointer to check
2411 *
2412 * @note also checks for patch hints to make sure they can never be enabled if a conflict is present.
2413 *
2414 */
2415VMMR3_INT_DECL(int) PATMR3DetectConflict(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictGC)
2416{
2417 AssertReturn(!HMIsEnabled(pVM), VERR_PATCH_NO_CONFLICT);
2418 PPATCHINFO pTargetPatch = patmFindActivePatchByEntrypoint(pVM, pConflictGC, true /* include patch hints */);
2419 if (pTargetPatch)
2420 return patmDisableUnusablePatch(pVM, pInstrGC, pConflictGC, pTargetPatch);
2421 return VERR_PATCH_NO_CONFLICT;
2422}
2423
/**
 * Recompile the code stream until the callback function detects a failure or decides everything is acceptable
 *
 * Walks the guest code linearly, invoking pfnPATMR3Recompile per instruction,
 * and recurses into relative branch targets.  Handles the irq-inhibit fusing
 * case (e.g. sti) by forcing the following instruction to be recompiled too.
 *
 * @returns VBox status code.
 * @param   pVM                     Pointer to the VM.
 * @param   pInstrGC                Guest context pointer to privileged instruction
 * @param   pCurInstrGC             Guest context pointer to the current instruction
 * @param   pfnPATMR3Recompile      Callback for testing the disassembled instruction
 * @param   pCacheRec               Cache record ptr
 *
 */
static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Recompile, PPATMP2GLOOKUPREC pCacheRec)
{
    DISCPUSTATE cpu;
    PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
    /* NOTE(review): rc starts as VWRN_CONTINUE_ANALYSIS while the loop below tests for
       VWRN_CONTINUE_RECOMPILE -- these must be interchangeable for the loop to be entered;
       confirm their definitions in PATMInternal.h. */
    int rc = VWRN_CONTINUE_ANALYSIS;
    uint32_t cbInstr;
    R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
    bool disret;
#ifdef LOG_ENABLED
    char szOutput[256];
#endif

    while (rc == VWRN_CONTINUE_RECOMPILE)
    {
        pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
        if (pCurInstrHC == NULL)
        {
            rc = VERR_PATCHING_REFUSED;   /* fatal in this case */
            goto end;
        }
#ifdef LOG_ENABLED
        disret = patmR3DisInstrToStr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE,
                                     &cpu, &cbInstr, szOutput, sizeof(szOutput));
        Log(("Recompile: %s", szOutput));
#else
        disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
#endif
        if (disret == false)
        {
            Log(("Disassembly failed (probably page not present) -> return to caller\n"));

            /* Add lookup record for patch to guest address translation */
            patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
            patmPatchGenIllegalInstr(pVM, pPatch);
            rc = VINF_SUCCESS;   /* Note: don't fail here; we might refuse an important patch!! */
            goto end;
        }

        rc = pfnPATMR3Recompile(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
        if (rc != VWRN_CONTINUE_RECOMPILE)
        {
            /* If irqs are inhibited because of the current instruction, then we must make sure the next one is executed! */
            if (    rc == VINF_SUCCESS
                && (pPatch->flags & PATMFL_INHIBIT_IRQS))
            {
                DISCPUSTATE cpunext;
                uint32_t    opsizenext;
                uint8_t *pNextInstrHC;
                RTRCPTR  pNextInstrGC = pCurInstrGC + cbInstr;

                Log(("patmRecompileCodeStream: irqs inhibited by instruction %RRv\n", pNextInstrGC));

                /* Certain instructions (e.g. sti) force the next instruction to be executed before any interrupts can occur.
                 * Recompile the next instruction as well
                 */
                pNextInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
                if (pNextInstrHC == NULL)
                {
                    rc = VERR_PATCHING_REFUSED;     /* fatal in this case */
                    goto end;
                }
                disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpunext, &opsizenext);
                if (disret == false)
                {
                    rc = VERR_PATCHING_REFUSED;     /* fatal in this case */
                    goto end;
                }
                switch(cpunext.pCurInstr->uOpcode)
                {
                    case OP_IRET:       /* inhibit cleared in generated code */
                    case OP_SYSEXIT:    /* faults; inhibit should be cleared in HC handling */
                    case OP_HLT:
                        break; /* recompile these */

                    default:
                        if (cpunext.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
                        {
                            Log(("Unexpected control flow instruction after inhibit irq instruction\n"));

                            rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
                            AssertRC(rc);
                            pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
                            goto end;       /** @todo should be ok to ignore instruction fusing in this case */
                        }
                        break;
                }

                /* Note: after a cli we must continue to a proper exit point */
                if (cpunext.pCurInstr->uOpcode != OP_CLI)
                {
                    rc = pfnPATMR3Recompile(pVM, &cpunext, pInstrGC, pNextInstrGC, pCacheRec);
                    if (RT_SUCCESS(rc))
                    {
                        rc = VINF_SUCCESS;
                        goto end;
                    }
                    break;
                }
                else
                    rc = VWRN_CONTINUE_RECOMPILE;
            }
            else
                break; /* done! */
        }

        /** @todo continue with the instructions following the jump and then recompile the jump target code */


        /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction). */
        if (   (cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
            && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J)
            &&  cpu.pCurInstr->uOpcode != OP_CALL /* complete functions are replaced; don't bother here. */
           )
        {
            RCPTRTYPE(uint8_t *) addr = PATMResolveBranch(&cpu, pCurInstrGC);
            if (addr == 0)
            {
                Log(("We don't support far jumps here!! (%08X)\n", cpu.Param1.fUse));
                rc = VERR_PATCHING_REFUSED;
                break;
            }

            Log(("Jump encountered target %RRv\n", addr));

            /* We don't check if the branch target lies in a valid page as we've already done that in the analysis phase. */
            if (!(cpu.pCurInstr->fOpType & DISOPTYPE_UNCOND_CONTROLFLOW))
            {
                Log(("patmRecompileCodeStream continue passed conditional jump\n"));
                /* First we need to finish this linear code stream until the next exit point. */
                rc = patmRecompileCodeStream(pVM, pInstrGC, pCurInstrGC+cbInstr, pfnPATMR3Recompile, pCacheRec);
                if (RT_FAILURE(rc))
                {
                    Log(("patmRecompileCodeStream fatal error %d\n", rc));
                    break; //fatal error
                }
            }

            if (patmGuestGCPtrToPatchGCPtr(pVM, pPatch, addr) == 0)
            {
                /* New code; let's recompile it. */
                Log(("patmRecompileCodeStream continue with jump\n"));

                /*
                 * If we are jumping to an existing patch (or within 5 bytes of the entrypoint), then we must temporarily disable
                 * this patch so we can continue our analysis
                 *
                 * We rely on CSAM to detect and resolve conflicts
                 */
                PPATCHINFO pTargetPatch = patmFindActivePatchByEntrypoint(pVM, addr);
                if(pTargetPatch)
                {
                    Log(("Found active patch at target %RRv (%RRv) -> temporarily disabling it!!\n", addr, pTargetPatch->pPrivInstrGC));
                    PATMR3DisablePatch(pVM, pTargetPatch->pPrivInstrGC);
                }

                if (cpu.pCurInstr->uOpcode == OP_CALL)  pPatch->pTempInfo->nrCalls++;
                rc = patmRecompileCodeStream(pVM, pInstrGC, addr, pfnPATMR3Recompile, pCacheRec);
                if (cpu.pCurInstr->uOpcode == OP_CALL)  pPatch->pTempInfo->nrCalls--;

                if(pTargetPatch)
                    PATMR3EnablePatch(pVM, pTargetPatch->pPrivInstrGC);

                if (RT_FAILURE(rc))
                {
                    Log(("patmRecompileCodeStream fatal error %d\n", rc));
                    break; //done!
                }
            }
            /* Always return to caller here; we're done! */
            rc = VINF_SUCCESS;
            goto end;
        }
        else
        if (cpu.pCurInstr->fOpType & DISOPTYPE_UNCOND_CONTROLFLOW)
        {
            rc = VINF_SUCCESS;
            goto end;
        }
        pCurInstrGC += cbInstr;
    }
end:
    Assert(!(pPatch->flags & PATMFL_RECOMPILE_NEXT));
    return rc;
}
2619
2620
2621/**
2622 * Generate the jump from guest to patch code
2623 *
2624 * @returns VBox status code.
2625 * @param pVM Pointer to the VM.
2626 * @param pPatch Patch record
2627 * @param pCacheRec Guest translation lookup cache record
2628 */
2629static int patmGenJumpToPatch(PVM pVM, PPATCHINFO pPatch, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2630{
2631 uint8_t temp[8];
2632 uint8_t *pPB;
2633 int rc;
2634
2635 Assert(pPatch->cbPatchJump <= sizeof(temp));
2636 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
2637
2638 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2639 Assert(pPB);
2640
2641#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
2642 if (pPatch->flags & PATMFL_JUMP_CONFLICT)
2643 {
2644 Assert(pPatch->pPatchJumpDestGC);
2645
2646 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
2647 {
2648 // jmp [PatchCode]
2649 if (fAddFixup)
2650 {
2651 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2652 {
2653 Log(("Relocation failed for the jump in the guest code!!\n"));
2654 return VERR_PATCHING_REFUSED;
2655 }
2656 }
2657
2658 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2659 *(uint32_t *)&temp[1] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
2660 }
2661 else
2662 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
2663 {
2664 // jmp [PatchCode]
2665 if (fAddFixup)
2666 {
2667 if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2668 {
2669 Log(("Relocation failed for the jump in the guest code!!\n"));
2670 return VERR_PATCHING_REFUSED;
2671 }
2672 }
2673
2674 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2675 temp[1] = pPatch->aPrivInstr[1]; //jump opcode copied from original instruction
2676 *(uint32_t *)&temp[2] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
2677 }
2678 else
2679 {
2680 Assert(0);
2681 return VERR_PATCHING_REFUSED;
2682 }
2683 }
2684 else
2685#endif
2686 {
2687 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2688
2689 // jmp [PatchCode]
2690 if (fAddFixup)
2691 {
2692 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
2693 {
2694 Log(("Relocation failed for the jump in the guest code!!\n"));
2695 return VERR_PATCHING_REFUSED;
2696 }
2697 }
2698 temp[0] = 0xE9; //jmp
2699 *(uint32_t *)&temp[1] = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
2700 }
2701 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2702 AssertRC(rc);
2703
2704 if (rc == VINF_SUCCESS)
2705 pPatch->flags |= PATMFL_PATCHED_GUEST_CODE;
2706
2707 return rc;
2708}
2709
2710/**
2711 * Remove the jump from guest to patch code
2712 *
2713 * @returns VBox status code.
2714 * @param pVM Pointer to the VM.
2715 * @param pPatch Patch record
2716 */
2717static int patmRemoveJumpToPatch(PVM pVM, PPATCHINFO pPatch)
2718{
2719#ifdef DEBUG
2720 DISCPUSTATE cpu;
2721 char szOutput[256];
2722 uint32_t cbInstr, i = 0;
2723 bool disret;
2724
2725 while (i < pPatch->cbPrivInstr)
2726 {
2727 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
2728 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2729 if (disret == false)
2730 break;
2731
2732 Log(("Org patch jump: %s", szOutput));
2733 Assert(cbInstr);
2734 i += cbInstr;
2735 }
2736#endif
2737
2738 /* Restore original code (privileged instruction + following instructions that were overwritten because of the 5/6 byte jmp). */
2739 int rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, pPatch->cbPatchJump);
2740#ifdef DEBUG
2741 if (rc == VINF_SUCCESS)
2742 {
2743 i = 0;
2744 while (i < pPatch->cbPrivInstr)
2745 {
2746 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
2747 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2748 if (disret == false)
2749 break;
2750
2751 Log(("Org instr: %s", szOutput));
2752 Assert(cbInstr);
2753 i += cbInstr;
2754 }
2755 }
2756#endif
2757 pPatch->flags &= ~PATMFL_PATCHED_GUEST_CODE;
2758 return rc;
2759}
2760
2761/**
2762 * Generate the call from guest to patch code
2763 *
2764 * @returns VBox status code.
2765 * @param pVM Pointer to the VM.
2766 * @param pPatch Patch record
2767 * @param pInstrHC HC address where to insert the jump
2768 * @param pCacheRec Guest translation cache record
2769 */
2770static int patmGenCallToPatch(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTargetGC, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2771{
2772 uint8_t temp[8];
2773 uint8_t *pPB;
2774 int rc;
2775
2776 Assert(pPatch->cbPatchJump <= sizeof(temp));
2777
2778 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2779 Assert(pPB);
2780
2781 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2782
2783 // jmp [PatchCode]
2784 if (fAddFixup)
2785 {
2786 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
2787 {
2788 Log(("Relocation failed for the jump in the guest code!!\n"));
2789 return VERR_PATCHING_REFUSED;
2790 }
2791 }
2792
2793 Assert(pPatch->aPrivInstr[0] == 0xE8 || pPatch->aPrivInstr[0] == 0xE9); /* call or jmp */
2794 temp[0] = pPatch->aPrivInstr[0];
2795 *(uint32_t *)&temp[1] = (uint32_t)pTargetGC - ((uint32_t)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
2796
2797 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2798 AssertRC(rc);
2799
2800 return rc;
2801}
2802
2803
2804/**
2805 * Patch cli/sti pushf/popf instruction block at specified location
2806 *
2807 * @returns VBox status code.
2808 * @param pVM Pointer to the VM.
2809 * @param pInstrGC Guest context point to privileged instruction
2810 * @param pInstrHC Host context point to privileged instruction
2811 * @param uOpcode Instruction opcode
2812 * @param uOpSize Size of starting instruction
2813 * @param pPatchRec Patch record
2814 *
2815 * @note returns failure if patching is not allowed or possible
2816 *
2817 */
2818static int patmR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2819 uint32_t uOpcode, uint32_t uOpSize, PPATMPATCHREC pPatchRec)
2820{
2821 PPATCHINFO pPatch = &pPatchRec->patch;
2822 int rc = VERR_PATCHING_REFUSED;
2823 uint32_t orgOffsetPatchMem = ~0;
2824 RTRCPTR pInstrStart;
2825 bool fInserted;
2826 NOREF(pInstrHC); NOREF(uOpSize);
2827
2828 /* Save original offset (in case of failures later on) */
2829 /** @todo use the hypervisor heap (that has quite a few consequences for save/restore though) */
2830 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2831
2832 Assert(!(pPatch->flags & (PATMFL_GUEST_SPECIFIC|PATMFL_USER_MODE|PATMFL_TRAPHANDLER)));
2833 switch (uOpcode)
2834 {
2835 case OP_MOV:
2836 break;
2837
2838 case OP_CLI:
2839 case OP_PUSHF:
2840 /* We can 'call' a cli or pushf patch. It will either return to the original guest code when IF is set again, or fault. */
2841 /* Note: special precautions are taken when disabling and enabling such patches. */
2842 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
2843 break;
2844
2845 default:
2846 if (!(pPatch->flags & PATMFL_IDTHANDLER))
2847 {
2848 AssertMsg(0, ("patmR3PatchBlock: Invalid opcode %x\n", uOpcode));
2849 return VERR_INVALID_PARAMETER;
2850 }
2851 }
2852
2853 if (!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)))
2854 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
2855
2856 /* If we're going to insert a patch jump, then the jump itself is not allowed to cross a page boundary. */
2857 if ( (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2858 && PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + SIZEOF_NEARJUMP32)
2859 )
2860 {
2861 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
2862 Log(("Patch jump would cross page boundary -> refuse!!\n"));
2863 rc = VERR_PATCHING_REFUSED;
2864 goto failure;
2865 }
2866
2867 pPatch->nrPatch2GuestRecs = 0;
2868 pInstrStart = pInstrGC;
2869
2870#ifdef PATM_ENABLE_CALL
2871 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
2872#endif
2873
2874 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2875 pPatch->uCurPatchOffset = 0;
2876
2877 if ((pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER)) == PATMFL_IDTHANDLER)
2878 {
2879 Assert(pPatch->flags & PATMFL_INTHANDLER);
2880
2881 /* Install fake cli patch (to clear the virtual IF and check int xx parameters) */
2882 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2883 if (RT_FAILURE(rc))
2884 goto failure;
2885 }
2886
2887 /***************************************************************************************************************************/
2888 /* Note: We can't insert *any* code before a sysenter handler; some linux guests have an invalid stack at this point!!!!! */
2889 /***************************************************************************************************************************/
2890#ifdef VBOX_WITH_STATISTICS
2891 if (!(pPatch->flags & PATMFL_SYSENTER))
2892 {
2893 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2894 if (RT_FAILURE(rc))
2895 goto failure;
2896 }
2897#endif
2898
2899 PATMP2GLOOKUPREC cacheRec;
2900 RT_ZERO(cacheRec);
2901 cacheRec.pPatch = pPatch;
2902
2903 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, &cacheRec);
2904 /* Free leftover lock if any. */
2905 if (cacheRec.Lock.pvMap)
2906 {
2907 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2908 cacheRec.Lock.pvMap = NULL;
2909 }
2910 if (rc != VINF_SUCCESS)
2911 {
2912 Log(("PATMR3PatchCli: patmRecompileCodeStream failed with %d\n", rc));
2913 goto failure;
2914 }
2915
2916 /* Calculated during analysis. */
2917 if (pPatch->cbPatchBlockSize < SIZEOF_NEARJUMP32)
2918 {
2919 /* Most likely cause: we encountered an illegal instruction very early on. */
2920 /** @todo could turn it into an int3 callable patch. */
2921 Log(("patmR3PatchBlock: patch block too small -> refuse\n"));
2922 rc = VERR_PATCHING_REFUSED;
2923 goto failure;
2924 }
2925
2926 /* size of patch block */
2927 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2928
2929
2930 /* Update free pointer in patch memory. */
2931 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2932 /* Round to next 8 byte boundary. */
2933 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2934
2935 /*
2936 * Insert into patch to guest lookup tree
2937 */
2938 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2939 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2940 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2941 AssertMsg(fInserted, ("RTAvlULInsert failed for %x\n", pPatchRec->CoreOffset.Key));
2942 if (!fInserted)
2943 {
2944 rc = VERR_PATCHING_REFUSED;
2945 goto failure;
2946 }
2947
2948 /* Note that patmr3SetBranchTargets can install additional patches!! */
2949 rc = patmr3SetBranchTargets(pVM, pPatch);
2950 if (rc != VINF_SUCCESS)
2951 {
2952 Log(("PATMR3PatchCli: patmr3SetBranchTargets failed with %d\n", rc));
2953 goto failure;
2954 }
2955
2956#ifdef LOG_ENABLED
2957 Log(("Patch code ----------------------------------------------------------\n"));
2958 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, &cacheRec);
2959 /* Free leftover lock if any. */
2960 if (cacheRec.Lock.pvMap)
2961 {
2962 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2963 cacheRec.Lock.pvMap = NULL;
2964 }
2965 Log(("Patch code ends -----------------------------------------------------\n"));
2966#endif
2967
2968 /* make a copy of the guest code bytes that will be overwritten */
2969 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
2970
2971 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
2972 AssertRC(rc);
2973
2974 if (pPatch->flags & PATMFL_INT3_REPLACEMENT_BLOCK)
2975 {
2976 /*uint8_t bASMInt3 = 0xCC; - unused */
2977
2978 Log(("patmR3PatchBlock %RRv -> int 3 callable patch.\n", pPatch->pPrivInstrGC));
2979 /* Replace first opcode byte with 'int 3'. */
2980 rc = patmActivateInt3Patch(pVM, pPatch);
2981 if (RT_FAILURE(rc))
2982 goto failure;
2983
2984 /* normal patch can be turned into an int3 patch -> clear patch jump installation flag. */
2985 pPatch->flags &= ~PATMFL_MUST_INSTALL_PATCHJMP;
2986
2987 pPatch->flags &= ~PATMFL_INSTR_HINT;
2988 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
2989 }
2990 else
2991 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2992 {
2993 Assert(!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)));
2994 /* now insert a jump in the guest code */
2995 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
2996 AssertRC(rc);
2997 if (RT_FAILURE(rc))
2998 goto failure;
2999
3000 }
3001
3002 PATM_LOG_RAW_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
3003
3004 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3005 pPatch->pTempInfo->nrIllegalInstr = 0;
3006
3007 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
3008
3009 pPatch->uState = PATCH_ENABLED;
3010 return VINF_SUCCESS;
3011
3012failure:
3013 if (pPatchRec->CoreOffset.Key)
3014 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3015
3016 patmEmptyTree(pVM, &pPatch->FixupTree);
3017 pPatch->nrFixups = 0;
3018
3019 patmEmptyTree(pVM, &pPatch->JumpTree);
3020 pPatch->nrJumpRecs = 0;
3021
3022 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3023 pPatch->pTempInfo->nrIllegalInstr = 0;
3024
3025 /* Turn this cli patch into a dummy. */
3026 pPatch->uState = PATCH_REFUSED;
3027 pPatch->pPatchBlockOffset = 0;
3028
3029 // Give back the patch memory we no longer need
3030 Assert(orgOffsetPatchMem != (uint32_t)~0);
3031 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3032
3033 return rc;
3034}
3035
3036/**
3037 * Patch IDT handler
3038 *
3039 * @returns VBox status code.
3040 * @param pVM Pointer to the VM.
3041 * @param pInstrGC Guest context point to privileged instruction
3042 * @param uOpSize Size of starting instruction
3043 * @param pPatchRec Patch record
3044 * @param pCacheRec Cache record ptr
3045 *
3046 * @note returns failure if patching is not allowed or possible
3047 *
3048 */
3049static int patmIdtHandler(PVM pVM, RTRCPTR pInstrGC, uint32_t uOpSize, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3050{
3051 PPATCHINFO pPatch = &pPatchRec->patch;
3052 bool disret;
3053 DISCPUSTATE cpuPush, cpuJmp;
3054 uint32_t cbInstr;
3055 RTRCPTR pCurInstrGC = pInstrGC;
3056 uint8_t *pCurInstrHC, *pInstrHC;
3057 uint32_t orgOffsetPatchMem = ~0;
3058
3059 pInstrHC = pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
3060 AssertReturn(pCurInstrHC, VERR_PAGE_NOT_PRESENT);
3061
3062 /*
3063 * In Linux it's often the case that many interrupt handlers push a predefined value onto the stack
3064 * and then jump to a common entrypoint. In order not to waste a lot of memory, we will check for this
3065 * condition here and only patch the common entypoint once.
3066 */
3067 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuPush, &cbInstr);
3068 Assert(disret);
3069 if (disret && cpuPush.pCurInstr->uOpcode == OP_PUSH)
3070 {
3071 RTRCPTR pJmpInstrGC;
3072 int rc;
3073 pCurInstrGC += cbInstr;
3074
3075 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuJmp, &cbInstr);
3076 if ( disret
3077 && cpuJmp.pCurInstr->uOpcode == OP_JMP
3078 && (pJmpInstrGC = PATMResolveBranch(&cpuJmp, pCurInstrGC))
3079 )
3080 {
3081 bool fInserted;
3082 PPATMPATCHREC pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
3083 if (pJmpPatch == 0)
3084 {
3085 /* Patch it first! */
3086 rc = PATMR3InstallPatch(pVM, pJmpInstrGC, pPatch->flags | PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT);
3087 if (rc != VINF_SUCCESS)
3088 goto failure;
3089 pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
3090 Assert(pJmpPatch);
3091 }
3092 if (pJmpPatch->patch.uState != PATCH_ENABLED)
3093 goto failure;
3094
3095 /* save original offset (in case of failures later on) */
3096 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3097
3098 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3099 pPatch->uCurPatchOffset = 0;
3100 pPatch->nrPatch2GuestRecs = 0;
3101
3102#ifdef VBOX_WITH_STATISTICS
3103 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3104 if (RT_FAILURE(rc))
3105 goto failure;
3106#endif
3107
3108 /* Install fake cli patch (to clear the virtual IF) */
3109 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
3110 if (RT_FAILURE(rc))
3111 goto failure;
3112
3113 /* Add lookup record for patch to guest address translation (for the push) */
3114 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pInstrGC, PATM_LOOKUP_BOTHDIR);
3115
3116 /* Duplicate push. */
3117 rc = patmPatchGenDuplicate(pVM, pPatch, &cpuPush, pInstrGC);
3118 if (RT_FAILURE(rc))
3119 goto failure;
3120
3121 /* Generate jump to common entrypoint. */
3122 rc = patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, PATCHCODE_PTR_GC(&pJmpPatch->patch));
3123 if (RT_FAILURE(rc))
3124 goto failure;
3125
3126 /* size of patch block */
3127 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3128
3129 /* Update free pointer in patch memory. */
3130 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3131 /* Round to next 8 byte boundary */
3132 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3133
3134 /* There's no jump from guest to patch code. */
3135 pPatch->cbPatchJump = 0;
3136
3137
3138#ifdef LOG_ENABLED
3139 Log(("Patch code ----------------------------------------------------------\n"));
3140 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3141 Log(("Patch code ends -----------------------------------------------------\n"));
3142#endif
3143 Log(("Successfully installed IDT handler patch at %RRv\n", pInstrGC));
3144
3145 /*
3146 * Insert into patch to guest lookup tree
3147 */
3148 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3149 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3150 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3151 AssertMsg(fInserted, ("RTAvlULInsert failed for %x\n", pPatchRec->CoreOffset.Key));
3152
3153 pPatch->uState = PATCH_ENABLED;
3154
3155 return VINF_SUCCESS;
3156 }
3157 }
3158failure:
3159 /* Give back the patch memory we no longer need */
3160 if (orgOffsetPatchMem != (uint32_t)~0)
3161 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3162
3163 return patmR3PatchBlock(pVM, pInstrGC, pInstrHC, OP_CLI, uOpSize, pPatchRec);
3164}
3165
3166/**
3167 * Install a trampoline to call a guest trap handler directly
3168 *
3169 * @returns VBox status code.
3170 * @param pVM Pointer to the VM.
3171 * @param pInstrGC Guest context point to privileged instruction
3172 * @param pPatchRec Patch record
3173 * @param pCacheRec Cache record ptr
3174 *
3175 */
3176static int patmInstallTrapTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3177{
3178 PPATCHINFO pPatch = &pPatchRec->patch;
3179 int rc = VERR_PATCHING_REFUSED;
3180 uint32_t orgOffsetPatchMem = ~0;
3181 bool fInserted;
3182
3183 // save original offset (in case of failures later on)
3184 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3185
3186 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3187 pPatch->uCurPatchOffset = 0;
3188 pPatch->nrPatch2GuestRecs = 0;
3189
3190#ifdef VBOX_WITH_STATISTICS
3191 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3192 if (RT_FAILURE(rc))
3193 goto failure;
3194#endif
3195
3196 rc = patmPatchGenTrapEntry(pVM, pPatch, pInstrGC);
3197 if (RT_FAILURE(rc))
3198 goto failure;
3199
3200 /* size of patch block */
3201 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3202
3203 /* Update free pointer in patch memory. */
3204 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3205 /* Round to next 8 byte boundary */
3206 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3207
3208 /* There's no jump from guest to patch code. */
3209 pPatch->cbPatchJump = 0;
3210
3211#ifdef LOG_ENABLED
3212 Log(("Patch code ----------------------------------------------------------\n"));
3213 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3214 Log(("Patch code ends -----------------------------------------------------\n"));
3215#endif
3216 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "TRAP handler");
3217 Log(("Successfully installed Trap Trampoline patch at %RRv\n", pInstrGC));
3218
3219 /*
3220 * Insert into patch to guest lookup tree
3221 */
3222 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3223 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3224 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3225 AssertMsg(fInserted, ("RTAvlULInsert failed for %x\n", pPatchRec->CoreOffset.Key));
3226
3227 pPatch->uState = PATCH_ENABLED;
3228 return VINF_SUCCESS;
3229
3230failure:
3231 AssertMsgFailed(("Failed to install trap handler trampoline!!\n"));
3232
3233 /* Turn this cli patch into a dummy. */
3234 pPatch->uState = PATCH_REFUSED;
3235 pPatch->pPatchBlockOffset = 0;
3236
3237 /* Give back the patch memory we no longer need */
3238 Assert(orgOffsetPatchMem != (uint32_t)~0);
3239 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3240
3241 return rc;
3242}
3243
3244
3245#ifdef LOG_ENABLED
3246/**
3247 * Check if the instruction is patched as a common idt handler
3248 *
3249 * @returns true or false
3250 * @param pVM Pointer to the VM.
3251 * @param pInstrGC Guest context point to the instruction
3252 *
3253 */
3254static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC)
3255{
3256 PPATMPATCHREC pRec;
3257
3258 pRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
3259 if (pRec && pRec->patch.flags & PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT)
3260 return true;
3261 return false;
3262}
#endif /* LOG_ENABLED */
3264
3265
3266/**
3267 * Duplicates a complete function
3268 *
3269 * @returns VBox status code.
3270 * @param pVM Pointer to the VM.
3271 * @param pInstrGC Guest context point to privileged instruction
3272 * @param pPatchRec Patch record
3273 * @param pCacheRec Cache record ptr
3274 *
3275 */
3276static int patmDuplicateFunction(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3277{
3278 PPATCHINFO pPatch = &pPatchRec->patch;
3279 int rc = VERR_PATCHING_REFUSED;
3280 uint32_t orgOffsetPatchMem = ~0;
3281 bool fInserted;
3282
3283 Log(("patmDuplicateFunction %RRv\n", pInstrGC));
3284 /* Save original offset (in case of failures later on). */
3285 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3286
3287 /* We will not go on indefinitely with call instruction handling. */
3288 if (pVM->patm.s.ulCallDepth > PATM_MAX_CALL_DEPTH)
3289 {
3290 Log(("patmDuplicateFunction: maximum callback depth reached!!\n"));
3291 return VERR_PATCHING_REFUSED;
3292 }
3293
3294 pVM->patm.s.ulCallDepth++;
3295
3296#ifdef PATM_ENABLE_CALL
3297 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
3298#endif
3299
3300 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
3301
3302 pPatch->nrPatch2GuestRecs = 0;
3303 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3304 pPatch->uCurPatchOffset = 0;
3305
3306 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3307 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3308 if (RT_FAILURE(rc))
3309 goto failure;
3310
3311#ifdef VBOX_WITH_STATISTICS
3312 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3313 if (RT_FAILURE(rc))
3314 goto failure;
3315#endif
3316
3317 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pCacheRec);
3318 if (rc != VINF_SUCCESS)
3319 {
3320 Log(("PATMR3PatchCli: patmRecompileCodeStream failed with %d\n", rc));
3321 goto failure;
3322 }
3323
3324 //size of patch block
3325 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3326
3327 //update free pointer in patch memory
3328 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3329 /* Round to next 8 byte boundary. */
3330 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3331
3332 pPatch->uState = PATCH_ENABLED;
3333
3334 /*
3335 * Insert into patch to guest lookup tree
3336 */
3337 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3338 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3339 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3340 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3341 if (!fInserted)
3342 {
3343 rc = VERR_PATCHING_REFUSED;
3344 goto failure;
3345 }
3346
3347 /* Note that patmr3SetBranchTargets can install additional patches!! */
3348 rc = patmr3SetBranchTargets(pVM, pPatch);
3349 if (rc != VINF_SUCCESS)
3350 {
3351 Log(("PATMR3PatchCli: patmr3SetBranchTargets failed with %d\n", rc));
3352 goto failure;
3353 }
3354
3355#ifdef LOG_ENABLED
3356 Log(("Patch code ----------------------------------------------------------\n"));
3357 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3358 Log(("Patch code ends -----------------------------------------------------\n"));
3359#endif
3360
3361 Log(("Successfully installed function duplication patch at %RRv\n", pInstrGC));
3362
3363 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3364 pPatch->pTempInfo->nrIllegalInstr = 0;
3365
3366 pVM->patm.s.ulCallDepth--;
3367 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledFunctionPatches);
3368 return VINF_SUCCESS;
3369
3370failure:
3371 if (pPatchRec->CoreOffset.Key)
3372 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3373
3374 patmEmptyTree(pVM, &pPatch->FixupTree);
3375 pPatch->nrFixups = 0;
3376
3377 patmEmptyTree(pVM, &pPatch->JumpTree);
3378 pPatch->nrJumpRecs = 0;
3379
3380 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3381 pPatch->pTempInfo->nrIllegalInstr = 0;
3382
3383 /* Turn this cli patch into a dummy. */
3384 pPatch->uState = PATCH_REFUSED;
3385 pPatch->pPatchBlockOffset = 0;
3386
3387 // Give back the patch memory we no longer need
3388 Assert(orgOffsetPatchMem != (uint32_t)~0);
3389 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3390
3391 pVM->patm.s.ulCallDepth--;
3392 Log(("patmDupicateFunction %RRv failed!!\n", pInstrGC));
3393 return rc;
3394}
3395
3396/**
3397 * Creates trampoline code to jump inside an existing patch
3398 *
3399 * @returns VBox status code.
3400 * @param pVM Pointer to the VM.
3401 * @param pInstrGC Guest context point to privileged instruction
3402 * @param pPatchRec Patch record
3403 *
3404 */
3405static int patmCreateTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3406{
3407 PPATCHINFO pPatch = &pPatchRec->patch;
3408 RTRCPTR pPage, pPatchTargetGC = 0;
3409 uint32_t orgOffsetPatchMem = ~0;
3410 int rc = VERR_PATCHING_REFUSED;
3411 PPATCHINFO pPatchToJmp = NULL; /**< Patch the trampoline jumps to. */
3412 PTRAMPREC pTrampRec = NULL; /**< Trampoline record used to find the patch. */
3413 bool fInserted = false;
3414
3415 Log(("patmCreateTrampoline %RRv\n", pInstrGC));
3416 /* Save original offset (in case of failures later on). */
3417 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3418
3419 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3420 /** @todo we already checked this before */
3421 pPage = pInstrGC & PAGE_BASE_GC_MASK;
3422
3423 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3424 if (pPatchPage)
3425 {
3426 uint32_t i;
3427
3428 for (i=0;i<pPatchPage->cCount;i++)
3429 {
3430 if (pPatchPage->papPatch[i])
3431 {
3432 pPatchToJmp = pPatchPage->papPatch[i];
3433
3434 if ( (pPatchToJmp->flags & PATMFL_DUPLICATE_FUNCTION)
3435 && pPatchToJmp->uState == PATCH_ENABLED)
3436 {
3437 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatchToJmp, pInstrGC);
3438 if (pPatchTargetGC)
3439 {
3440 uint32_t offsetPatch = pPatchTargetGC - pVM->patm.s.pPatchMemGC;
3441 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatchToJmp->Patch2GuestAddrTree, offsetPatch, false);
3442 Assert(pPatchToGuestRec);
3443
3444 pPatchToGuestRec->fJumpTarget = true;
3445 Assert(pPatchTargetGC != pPatchToJmp->pPrivInstrGC);
3446 Log(("patmCreateTrampoline: generating jump to code inside patch at %RRv (patch target %RRv)\n", pPatchToJmp->pPrivInstrGC, pPatchTargetGC));
3447 break;
3448 }
3449 }
3450 }
3451 }
3452 }
3453 AssertReturn(pPatchPage && pPatchTargetGC && pPatchToJmp, VERR_PATCHING_REFUSED);
3454
3455 /*
3456 * Only record the trampoline patch if this is the first patch to the target
3457 * or we recorded other patches already.
3458 * The goal is to refuse refreshing function duplicates if the guest
3459 * modifies code after a saved state was loaded because it is not possible
3460 * to save the relation between trampoline and target without changing the
 * saved state version.
3462 */
3463 if ( !(pPatchToJmp->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
3464 || pPatchToJmp->pTrampolinePatchesHead)
3465 {
3466 pPatchToJmp->flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
3467 pTrampRec = (PTRAMPREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pTrampRec));
3468 if (!pTrampRec)
3469 return VERR_NO_MEMORY; /* or better return VERR_PATCHING_REFUSED to let the VM continue? */
3470
3471 pTrampRec->pPatchTrampoline = pPatchRec;
3472 }
3473
3474 pPatch->nrPatch2GuestRecs = 0;
3475 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3476 pPatch->uCurPatchOffset = 0;
3477
3478 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3479 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3480 if (RT_FAILURE(rc))
3481 goto failure;
3482
3483#ifdef VBOX_WITH_STATISTICS
3484 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3485 if (RT_FAILURE(rc))
3486 goto failure;
3487#endif
3488
3489 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC);
3490 if (RT_FAILURE(rc))
3491 goto failure;
3492
3493 /*
3494 * Insert into patch to guest lookup tree
3495 */
3496 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3497 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3498 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3499 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3500 if (!fInserted)
3501 {
3502 rc = VERR_PATCHING_REFUSED;
3503 goto failure;
3504 }
3505
3506 /* size of patch block */
3507 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3508
3509 /* Update free pointer in patch memory. */
3510 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3511 /* Round to next 8 byte boundary */
3512 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3513
3514 /* There's no jump from guest to patch code. */
3515 pPatch->cbPatchJump = 0;
3516
3517 /* Enable the patch. */
3518 pPatch->uState = PATCH_ENABLED;
3519 /* We allow this patch to be called as a function. */
3520 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
3521
3522 if (pTrampRec)
3523 {
3524 pTrampRec->pNext = pPatchToJmp->pTrampolinePatchesHead;
3525 pPatchToJmp->pTrampolinePatchesHead = pTrampRec;
3526 }
3527 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledTrampoline);
3528 return VINF_SUCCESS;
3529
3530failure:
3531 if (pPatchRec->CoreOffset.Key)
3532 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3533
3534 patmEmptyTree(pVM, &pPatch->FixupTree);
3535 pPatch->nrFixups = 0;
3536
3537 patmEmptyTree(pVM, &pPatch->JumpTree);
3538 pPatch->nrJumpRecs = 0;
3539
3540 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3541 pPatch->pTempInfo->nrIllegalInstr = 0;
3542
3543 /* Turn this cli patch into a dummy. */
3544 pPatch->uState = PATCH_REFUSED;
3545 pPatch->pPatchBlockOffset = 0;
3546
3547 // Give back the patch memory we no longer need
3548 Assert(orgOffsetPatchMem != (uint32_t)~0);
3549 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3550
3551 if (pTrampRec)
3552 MMR3HeapFree(pTrampRec);
3553
3554 return rc;
3555}
3556
3557
3558/**
3559 * Patch branch target function for call/jump at specified location.
3560 * (in responds to a VINF_PATM_DUPLICATE_FUNCTION GC exit reason)
3561 *
3562 * @returns VBox status code.
3563 * @param pVM Pointer to the VM.
3564 * @param pCtx Pointer to the guest CPU context.
3565 *
3566 */
3567VMMR3_INT_DECL(int) PATMR3DuplicateFunctionRequest(PVM pVM, PCPUMCTX pCtx)
3568{
3569 RTRCPTR pBranchTarget, pPage;
3570 int rc;
3571 RTRCPTR pPatchTargetGC = 0;
3572 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
3573
3574 pBranchTarget = pCtx->edx;
3575 pBranchTarget = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pBranchTarget);
3576
3577 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3578 pPage = pBranchTarget & PAGE_BASE_GC_MASK;
3579
3580 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3581 if (pPatchPage)
3582 {
3583 uint32_t i;
3584
3585 for (i=0;i<pPatchPage->cCount;i++)
3586 {
3587 if (pPatchPage->papPatch[i])
3588 {
3589 PPATCHINFO pPatch = pPatchPage->papPatch[i];
3590
3591 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
3592 && pPatch->uState == PATCH_ENABLED)
3593 {
3594 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pBranchTarget);
3595 if (pPatchTargetGC)
3596 {
3597 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateUseExisting);
3598 break;
3599 }
3600 }
3601 }
3602 }
3603 }
3604
3605 if (pPatchTargetGC)
3606 {
3607 /* Create a trampoline that also sets PATM_INTERRUPTFLAG. */
3608 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_TRAMPOLINE);
3609 }
3610 else
3611 {
3612 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
3613 }
3614
3615 if (rc == VINF_SUCCESS)
3616 {
3617 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pBranchTarget);
3618 Assert(pPatchTargetGC);
3619 }
3620
3621 if (pPatchTargetGC)
3622 {
3623 pCtx->eax = pPatchTargetGC;
3624 pCtx->eax = pCtx->eax - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC; /* make it relative */
3625 }
3626 else
3627 {
3628 /* We add a dummy entry into the lookup cache so we won't get bombarded with the same requests over and over again. */
3629 pCtx->eax = 0;
3630 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQFailed);
3631 }
3632 Assert(PATMIsPatchGCAddr(pVM, pCtx->edi));
3633 rc = patmAddBranchToLookupCache(pVM, pCtx->edi, pBranchTarget, pCtx->eax);
3634 AssertRC(rc);
3635
3636 pCtx->eip += PATM_ILLEGAL_INSTR_SIZE;
3637 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQSuccess);
3638 return VINF_SUCCESS;
3639}
3640
3641/**
3642 * Replaces a function call by a call to an existing function duplicate (or jmp -> jmp)
3643 *
3644 * @returns VBox status code.
3645 * @param pVM Pointer to the VM.
3646 * @param pCpu Disassembly CPU structure ptr
3647 * @param pInstrGC Guest context point to privileged instruction
3648 * @param pCacheRec Cache record ptr
3649 *
3650 */
3651static int patmReplaceFunctionCall(PVM pVM, DISCPUSTATE *pCpu, RTRCPTR pInstrGC, PPATMP2GLOOKUPREC pCacheRec)
3652{
3653 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3654 int rc = VERR_PATCHING_REFUSED;
3655 DISCPUSTATE cpu;
3656 RTRCPTR pTargetGC;
3657 PPATMPATCHREC pPatchFunction;
3658 uint32_t cbInstr;
3659 bool disret;
3660
3661 Assert(pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL);
3662 Assert((pCpu->pCurInstr->uOpcode == OP_CALL || pCpu->pCurInstr->uOpcode == OP_JMP) && pCpu->cbInstr == SIZEOF_NEARJUMP32);
3663
3664 if ((pCpu->pCurInstr->uOpcode != OP_CALL && pCpu->pCurInstr->uOpcode != OP_JMP) || pCpu->cbInstr != SIZEOF_NEARJUMP32)
3665 {
3666 rc = VERR_PATCHING_REFUSED;
3667 goto failure;
3668 }
3669
3670 pTargetGC = PATMResolveBranch(pCpu, pInstrGC);
3671 if (pTargetGC == 0)
3672 {
3673 Log(("We don't support far jumps here!! (%08X)\n", pCpu->Param1.fUse));
3674 rc = VERR_PATCHING_REFUSED;
3675 goto failure;
3676 }
3677
3678 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3679 if (pPatchFunction == NULL)
3680 {
3681 for(;;)
3682 {
3683 /* It could be an indirect call (call -> jmp dest).
3684 * Note that it's dangerous to assume the jump will never change...
3685 */
3686 uint8_t *pTmpInstrHC;
3687
3688 pTmpInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pTargetGC);
3689 Assert(pTmpInstrHC);
3690 if (pTmpInstrHC == 0)
3691 break;
3692
3693 disret = patmR3DisInstr(pVM, pPatch, pTargetGC, pTmpInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
3694 if (disret == false || cpu.pCurInstr->uOpcode != OP_JMP)
3695 break;
3696
3697 pTargetGC = PATMResolveBranch(&cpu, pTargetGC);
3698 if (pTargetGC == 0)
3699 {
3700 break;
3701 }
3702
3703 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3704 break;
3705 }
3706 if (pPatchFunction == 0)
3707 {
3708 AssertMsgFailed(("Unable to find duplicate function %RRv\n", pTargetGC));
3709 rc = VERR_PATCHING_REFUSED;
3710 goto failure;
3711 }
3712 }
3713
3714 // make a copy of the guest code bytes that will be overwritten
3715 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
3716
3717 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3718 AssertRC(rc);
3719
3720 /* Now replace the original call in the guest code */
3721 rc = patmGenCallToPatch(pVM, pPatch, PATCHCODE_PTR_GC(&pPatchFunction->patch), pCacheRec, true);
3722 AssertRC(rc);
3723 if (RT_FAILURE(rc))
3724 goto failure;
3725
3726 /* Lowest and highest address for write monitoring. */
3727 pPatch->pInstrGCLowest = pInstrGC;
3728 pPatch->pInstrGCHighest = pInstrGC + pCpu->cbInstr;
3729 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "Call");
3730
3731 Log(("Successfully installed function replacement patch at %RRv\n", pInstrGC));
3732
3733 pPatch->uState = PATCH_ENABLED;
3734 return VINF_SUCCESS;
3735
3736failure:
3737 /* Turn this patch into a dummy. */
3738 pPatch->uState = PATCH_REFUSED;
3739
3740 return rc;
3741}
3742
3743/**
3744 * Replace the address in an MMIO instruction with the cached version.
3745 *
3746 * @returns VBox status code.
3747 * @param pVM Pointer to the VM.
3748 * @param pInstrGC Guest context point to privileged instruction
3749 * @param pCpu Disassembly CPU structure ptr
3750 * @param pCacheRec Cache record ptr
3751 *
3752 * @note returns failure if patching is not allowed or possible
3753 *
3754 */
3755static int patmPatchMMIOInstr(PVM pVM, RTRCPTR pInstrGC, DISCPUSTATE *pCpu, PPATMP2GLOOKUPREC pCacheRec)
3756{
3757 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3758 uint8_t *pPB;
3759 int rc = VERR_PATCHING_REFUSED;
3760
3761 Assert(pVM->patm.s.mmio.pCachedData);
3762 if (!pVM->patm.s.mmio.pCachedData)
3763 goto failure;
3764
3765 if (pCpu->Param2.fUse != DISUSE_DISPLACEMENT32)
3766 goto failure;
3767
3768 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
3769 if (pPB == 0)
3770 goto failure;
3771
3772 /* Add relocation record for cached data access. */
3773 if (patmPatchAddReloc32(pVM, pPatch, &pPB[pCpu->cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE, pPatch->pPrivInstrGC, pVM->patm.s.mmio.pCachedData) != VINF_SUCCESS)
3774 {
3775 Log(("Relocation failed for cached mmio address!!\n"));
3776 return VERR_PATCHING_REFUSED;
3777 }
3778 PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "MMIO patch old instruction:", "");
3779
3780 /* Save original instruction. */
3781 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3782 AssertRC(rc);
3783
3784 pPatch->cbPatchJump = pPatch->cbPrivInstr; /* bit of a misnomer in this case; size of replacement instruction. */
3785
3786 /* Replace address with that of the cached item. */
3787 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pInstrGC + pCpu->cbInstr - sizeof(RTRCPTR), &pVM->patm.s.mmio.pCachedData, sizeof(RTRCPTR));
3788 AssertRC(rc);
3789 if (RT_FAILURE(rc))
3790 {
3791 goto failure;
3792 }
3793
3794 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "MMIO");
3795 pVM->patm.s.mmio.pCachedData = 0;
3796 pVM->patm.s.mmio.GCPhys = 0;
3797 pPatch->uState = PATCH_ENABLED;
3798 return VINF_SUCCESS;
3799
3800failure:
3801 /* Turn this patch into a dummy. */
3802 pPatch->uState = PATCH_REFUSED;
3803
3804 return rc;
3805}
3806
3807
3808/**
3809 * Replace the address in an MMIO instruction with the cached version. (instruction is part of an existing patch)
3810 *
3811 * @returns VBox status code.
3812 * @param pVM Pointer to the VM.
3813 * @param pInstrGC Guest context point to privileged instruction
3814 * @param pPatch Patch record
3815 *
3816 * @note returns failure if patching is not allowed or possible
3817 *
3818 */
3819static int patmPatchPATMMMIOInstr(PVM pVM, RTRCPTR pInstrGC, PPATCHINFO pPatch)
3820{
3821 DISCPUSTATE cpu;
3822 uint32_t cbInstr;
3823 bool disret;
3824 uint8_t *pInstrHC;
3825
3826 AssertReturn(pVM->patm.s.mmio.pCachedData, VERR_INVALID_PARAMETER);
3827
3828 /* Convert GC to HC address. */
3829 pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pInstrGC);
3830 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
3831
3832 /* Disassemble mmio instruction. */
3833 disret = patmR3DisInstrNoStrOpMode(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE,
3834 &cpu, &cbInstr);
3835 if (disret == false)
3836 {
3837 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
3838 return VERR_PATCHING_REFUSED;
3839 }
3840
3841 AssertMsg(cbInstr <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", cbInstr));
3842 if (cbInstr > MAX_INSTR_SIZE)
3843 return VERR_PATCHING_REFUSED;
3844 if (cpu.Param2.fUse != DISUSE_DISPLACEMENT32)
3845 return VERR_PATCHING_REFUSED;
3846
3847 /* Add relocation record for cached data access. */
3848 if (patmPatchAddReloc32(pVM, pPatch, &pInstrHC[cpu.cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE) != VINF_SUCCESS)
3849 {
3850 Log(("Relocation failed for cached mmio address!!\n"));
3851 return VERR_PATCHING_REFUSED;
3852 }
3853 /* Replace address with that of the cached item. */
3854 *(RTRCPTR *)&pInstrHC[cpu.cbInstr - sizeof(RTRCPTR)] = pVM->patm.s.mmio.pCachedData;
3855
3856 /* Lowest and highest address for write monitoring. */
3857 pPatch->pInstrGCLowest = pInstrGC;
3858 pPatch->pInstrGCHighest = pInstrGC + cpu.cbInstr;
3859
3860 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "MMIO");
3861 pVM->patm.s.mmio.pCachedData = 0;
3862 pVM->patm.s.mmio.GCPhys = 0;
3863 return VINF_SUCCESS;
3864}
3865
3866/**
3867 * Activates an int3 patch
3868 *
3869 * @returns VBox status code.
3870 * @param pVM Pointer to the VM.
3871 * @param pPatch Patch record
3872 */
3873static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3874{
3875 uint8_t bASMInt3 = 0xCC;
3876 int rc;
3877
3878 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3879 Assert(pPatch->uState != PATCH_ENABLED);
3880
3881 /* Replace first opcode byte with 'int 3'. */
3882 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, &bASMInt3, sizeof(bASMInt3));
3883 AssertRC(rc);
3884
3885 pPatch->cbPatchJump = sizeof(bASMInt3);
3886
3887 return rc;
3888}
3889
3890/**
3891 * Deactivates an int3 patch
3892 *
3893 * @returns VBox status code.
3894 * @param pVM Pointer to the VM.
3895 * @param pPatch Patch record
3896 */
3897static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3898{
3899 uint8_t ASMInt3 = 0xCC;
3900 int rc;
3901
3902 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3903 Assert(pPatch->uState == PATCH_ENABLED || pPatch->uState == PATCH_DIRTY);
3904
3905 /* Restore first opcode byte. */
3906 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, sizeof(ASMInt3));
3907 AssertRC(rc);
3908 return rc;
3909}
3910
3911/**
3912 * Replace an instruction with a breakpoint (0xCC), that is handled dynamically
3913 * in the raw-mode context.
3914 *
3915 * @returns VBox status code.
3916 * @param pVM Pointer to the VM.
3917 * @param pInstrGC Guest context point to privileged instruction
3918 * @param pInstrHC Host context point to privileged instruction
3919 * @param pCpu Disassembly CPU structure ptr
3920 * @param pPatch Patch record
3921 *
3922 * @note returns failure if patching is not allowed or possible
3923 *
3924 */
3925int patmR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATCHINFO pPatch)
3926{
3927 uint8_t bASMInt3 = 0xCC;
3928 int rc;
3929
3930 /* Note: Do not use patch memory here! It might called during patch installation too. */
3931 PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "patmR3PatchInstrInt3:", "");
3932
3933 /* Save the original instruction. */
3934 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3935 AssertRC(rc);
3936 pPatch->cbPatchJump = sizeof(bASMInt3); /* bit of a misnomer in this case; size of replacement instruction. */
3937
3938 pPatch->flags |= PATMFL_INT3_REPLACEMENT;
3939
3940 /* Replace first opcode byte with 'int 3'. */
3941 rc = patmActivateInt3Patch(pVM, pPatch);
3942 if (RT_FAILURE(rc))
3943 goto failure;
3944
3945 /* Lowest and highest address for write monitoring. */
3946 pPatch->pInstrGCLowest = pInstrGC;
3947 pPatch->pInstrGCHighest = pInstrGC + pCpu->cbInstr;
3948
3949 pPatch->uState = PATCH_ENABLED;
3950 return VINF_SUCCESS;
3951
3952failure:
3953 /* Turn this patch into a dummy. */
3954 return VERR_PATCHING_REFUSED;
3955}
3956
3957#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
3958/**
3959 * Patch a jump instruction at specified location
3960 *
3961 * @returns VBox status code.
3962 * @param pVM Pointer to the VM.
3963 * @param pInstrGC Guest context point to privileged instruction
3964 * @param pInstrHC Host context point to privileged instruction
3965 * @param pCpu Disassembly CPU structure ptr
3966 * @param pPatchRec Patch record
3967 *
3968 * @note returns failure if patching is not allowed or possible
3969 *
3970 */
3971int patmPatchJump(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATMPATCHREC pPatchRec)
3972{
3973 PPATCHINFO pPatch = &pPatchRec->patch;
3974 int rc = VERR_PATCHING_REFUSED;
3975
3976 pPatch->pPatchBlockOffset = 0; /* doesn't use patch memory */
3977 pPatch->uCurPatchOffset = 0;
3978 pPatch->cbPatchBlockSize = 0;
3979 pPatch->flags |= PATMFL_SINGLE_INSTRUCTION;
3980
3981 /*
3982 * Instruction replacements such as these should never be interrupted. I've added code to EM.cpp to
3983 * make sure this never happens. (unless a trap is triggered (intentionally or not))
3984 */
3985 switch (pCpu->pCurInstr->uOpcode)
3986 {
3987 case OP_JO:
3988 case OP_JNO:
3989 case OP_JC:
3990 case OP_JNC:
3991 case OP_JE:
3992 case OP_JNE:
3993 case OP_JBE:
3994 case OP_JNBE:
3995 case OP_JS:
3996 case OP_JNS:
3997 case OP_JP:
3998 case OP_JNP:
3999 case OP_JL:
4000 case OP_JNL:
4001 case OP_JLE:
4002 case OP_JNLE:
4003 case OP_JMP:
4004 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
4005 Assert(pCpu->Param1.fUse & DISUSE_IMMEDIATE32_REL);
4006 if (!(pCpu->Param1.fUse & DISUSE_IMMEDIATE32_REL))
4007 goto failure;
4008
4009 Assert(pCpu->cbInstr == SIZEOF_NEARJUMP32 || pCpu->cbInstr == SIZEOF_NEAR_COND_JUMP32);
4010 if (pCpu->cbInstr != SIZEOF_NEARJUMP32 && pCpu->cbInstr != SIZEOF_NEAR_COND_JUMP32)
4011 goto failure;
4012
4013 if (PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + pCpu->cbInstr))
4014 {
4015 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
4016 AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
4017 rc = VERR_PATCHING_REFUSED;
4018 goto failure;
4019 }
4020
4021 break;
4022
4023 default:
4024 goto failure;
4025 }
4026
4027 // make a copy of the guest code bytes that will be overwritten
4028 Assert(pCpu->cbInstr <= sizeof(pPatch->aPrivInstr));
4029 Assert(pCpu->cbInstr >= SIZEOF_NEARJUMP32);
4030 pPatch->cbPatchJump = pCpu->cbInstr;
4031
4032 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
4033 AssertRC(rc);
4034
4035 /* Now insert a jump in the guest code. */
4036 /*
4037 * A conflict jump patch needs to be treated differently; we'll just replace the relative jump address with one that
4038 * references the target instruction in the conflict patch.
4039 */
4040 RTRCPTR pJmpDest = patmR3GuestGCPtrToPatchGCPtrSimple(pVM, pInstrGC + pCpu->cbInstr + (int32_t)pCpu->Param1.uValue);
4041
4042 AssertMsg(pJmpDest, ("patmR3GuestGCPtrToPatchGCPtrSimple failed for %RRv\n", pInstrGC + pCpu->cbInstr + (int32_t)pCpu->Param1.uValue));
4043 pPatch->pPatchJumpDestGC = pJmpDest;
4044
4045 PATMP2GLOOKUPREC cacheRec;
4046 RT_ZERO(cacheRec);
4047 cacheRec.pPatch = pPatch;
4048
4049 rc = patmGenJumpToPatch(pVM, pPatch, &cacherec, true);
4050 /* Free leftover lock if any. */
4051 if (cacheRec.Lock.pvMap)
4052 {
4053 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4054 cacheRec.Lock.pvMap = NULL;
4055 }
4056 AssertRC(rc);
4057 if (RT_FAILURE(rc))
4058 goto failure;
4059
4060 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
4061
4062 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
4063 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
4064
4065 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledJump);
4066
4067 /* Lowest and highest address for write monitoring. */
4068 pPatch->pInstrGCLowest = pInstrGC;
4069 pPatch->pInstrGCHighest = pInstrGC + pPatch->cbPatchJump;
4070
4071 pPatch->uState = PATCH_ENABLED;
4072 return VINF_SUCCESS;
4073
4074failure:
4075 /* Turn this cli patch into a dummy. */
4076 pPatch->uState = PATCH_REFUSED;
4077
4078 return rc;
4079}
4080#endif /* PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES */
4081
4082
4083/**
4084 * Gives hint to PATM about supervisor guest instructions
4085 *
4086 * @returns VBox status code.
4087 * @param pVM Pointer to the VM.
4088 * @param pInstr Guest context point to privileged instruction
4089 * @param flags Patch flags
4090 */
4091VMMR3_INT_DECL(int) PATMR3AddHint(PVM pVM, RTRCPTR pInstrGC, uint32_t flags)
4092{
4093 Assert(pInstrGC);
4094 Assert(flags == PATMFL_CODE32);
4095
4096 Log(("PATMR3AddHint %RRv\n", pInstrGC));
4097 return PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_INSTR_HINT);
4098}
4099
4100/**
4101 * Patch privileged instruction at specified location
4102 *
4103 * @returns VBox status code.
4104 * @param pVM Pointer to the VM.
4105 * @param pInstr Guest context point to privileged instruction (0:32 flat address)
4106 * @param flags Patch flags
4107 *
4108 * @note returns failure if patching is not allowed or possible
4109 */
4110VMMR3_INT_DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
4111{
4112 DISCPUSTATE cpu;
4113 R3PTRTYPE(uint8_t *) pInstrHC;
4114 uint32_t cbInstr;
4115 PPATMPATCHREC pPatchRec;
4116 PCPUMCTX pCtx = 0;
4117 bool disret;
4118 int rc;
4119 PVMCPU pVCpu = VMMGetCpu0(pVM);
4120 LogFlow(("PATMR3InstallPatch: %08x (%#llx)\n", pInstrGC, flags));
4121
4122 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
4123
4124 if ( !pVM
4125 || pInstrGC == 0
4126 || (flags & ~(PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_SYSENTER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_REPLACE_FUNCTION_CALL|PATMFL_GUEST_SPECIFIC|PATMFL_INT3_REPLACEMENT|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_MMIO_ACCESS|PATMFL_TRAMPOLINE|PATMFL_INSTR_HINT|PATMFL_JUMP_CONFLICT)))
4127 {
4128 AssertFailed();
4129 return VERR_INVALID_PARAMETER;
4130 }
4131
4132 if (PATMIsEnabled(pVM) == false)
4133 return VERR_PATCHING_REFUSED;
4134
4135 /* Test for patch conflict only with patches that actually change guest code. */
4136 if (!(flags & (PATMFL_GUEST_SPECIFIC|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAMPOLINE)))
4137 {
4138 PPATCHINFO pConflictPatch = patmFindActivePatchByEntrypoint(pVM, pInstrGC);
4139 AssertReleaseMsg(pConflictPatch == 0, ("Unable to patch overwritten instruction at %RRv (%RRv)\n", pInstrGC, pConflictPatch->pPrivInstrGC));
4140 if (pConflictPatch != 0)
4141 return VERR_PATCHING_REFUSED;
4142 }
4143
4144 if (!(flags & PATMFL_CODE32))
4145 {
4146 /** @todo Only 32 bits code right now */
4147 AssertMsgFailed(("PATMR3InstallPatch: We don't support 16 bits code at this moment!!\n"));
4148 return VERR_NOT_IMPLEMENTED;
4149 }
4150
4151 /* We ran out of patch memory; don't bother anymore. */
4152 if (pVM->patm.s.fOutOfMemory == true)
4153 return VERR_PATCHING_REFUSED;
4154
4155#if 1 /* DONT COMMIT ENABLED! */
4156 /* Blacklisted NT4SP1 areas - debugging why we sometimes crash early on, */
4157 if ( 0
4158 //|| (pInstrGC - 0x80010000U) < 0x10000U // NT4SP1 HAL
4159 //|| (pInstrGC - 0x80010000U) < 0x5000U // NT4SP1 HAL
4160 //|| (pInstrGC - 0x80013000U) < 0x2000U // NT4SP1 HAL
4161 //|| (pInstrGC - 0x80014000U) < 0x1000U // NT4SP1 HAL
4162 //|| (pInstrGC - 0x80014000U) < 0x800U // NT4SP1 HAL
4163 //|| (pInstrGC - 0x80014400U) < 0x400U // NT4SP1 HAL
4164 //|| (pInstrGC - 0x80014400U) < 0x200U // NT4SP1 HAL
4165 //|| (pInstrGC - 0x80014400U) < 0x100U // NT4SP1 HAL
4166 //|| (pInstrGC - 0x80014500U) < 0x100U // NT4SP1 HAL - negative
4167 //|| (pInstrGC - 0x80014400U) < 0x80U // NT4SP1 HAL
4168 //|| (pInstrGC - 0x80014400U) < 0x80U // NT4SP1 HAL
4169 //|| (pInstrGC - 0x80014440U) < 0x40U // NT4SP1 HAL
4170 //|| (pInstrGC - 0x80014440U) < 0x20U // NT4SP1 HAL
4171 || pInstrGC == 0x80014447 /* KfLowerIrql */
4172 || 0)
4173 {
4174 Log(("PATMR3InstallPatch: %08x is blacklisted\n", pInstrGC));
4175 return VERR_PATCHING_REFUSED;
4176 }
4177#endif
4178
4179 /* Make sure the code selector is wide open; otherwise refuse. */
4180 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
4181 if (CPUMGetGuestCPL(pVCpu) == 0)
4182 {
4183 RTRCPTR pInstrGCFlat = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pInstrGC);
4184 if (pInstrGCFlat != pInstrGC)
4185 {
4186 Log(("PATMR3InstallPatch: code selector not wide open: %04x:%RRv != %RRv eflags=%08x\n", pCtx->cs.Sel, pInstrGCFlat, pInstrGC, pCtx->eflags.u32));
4187 return VERR_PATCHING_REFUSED;
4188 }
4189 }
4190
4191 /* Note: the OpenBSD specific check will break if we allow additional patches to be installed (int 3)) */
4192 if (!(flags & PATMFL_GUEST_SPECIFIC))
4193 {
4194 /* New code. Make sure CSAM has a go at it first. */
4195 CSAMR3CheckCode(pVM, pInstrGC);
4196 }
4197
4198 /* Note: obsolete */
4199 if ( PATMIsPatchGCAddr(pVM, pInstrGC)
4200 && (flags & PATMFL_MMIO_ACCESS))
4201 {
4202 RTRCUINTPTR offset;
4203 void *pvPatchCoreOffset;
4204
4205 /* Find the patch record. */
4206 offset = pInstrGC - pVM->patm.s.pPatchMemGC;
4207 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
4208 if (pvPatchCoreOffset == NULL)
4209 {
4210 AssertMsgFailed(("PATMR3InstallPatch: patch not found at address %RRv!!\n", pInstrGC));
4211 return VERR_PATCH_NOT_FOUND; //fatal error
4212 }
4213 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
4214
4215 return patmPatchPATMMMIOInstr(pVM, pInstrGC, &pPatchRec->patch);
4216 }
4217
4218 AssertReturn(!PATMIsPatchGCAddr(pVM, pInstrGC), VERR_PATCHING_REFUSED);
4219
4220 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4221 if (pPatchRec)
4222 {
4223 Assert(!(flags & PATMFL_TRAMPOLINE));
4224
4225 /* Hints about existing patches are ignored. */
4226 if (flags & PATMFL_INSTR_HINT)
4227 return VERR_PATCHING_REFUSED;
4228
4229 if (pPatchRec->patch.uState == PATCH_DISABLE_PENDING)
4230 {
4231 Log(("PATMR3InstallPatch: disable operation is pending for patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
4232 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
4233 Assert(pPatchRec->patch.uState == PATCH_DISABLED);
4234 }
4235
4236 if (pPatchRec->patch.uState == PATCH_DISABLED)
4237 {
4238 /* A patch, for which we previously received a hint, will be enabled and turned into a normal patch. */
4239 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4240 {
4241 Log(("Enabling HINTED patch %RRv\n", pInstrGC));
4242 pPatchRec->patch.flags &= ~PATMFL_INSTR_HINT;
4243 }
4244 else
4245 Log(("Enabling patch %RRv again\n", pInstrGC));
4246
4247 /** @todo we shouldn't disable and enable patches too often (it's relatively cheap, but pointless if it always happens) */
4248 rc = PATMR3EnablePatch(pVM, pInstrGC);
4249 if (RT_SUCCESS(rc))
4250 return VWRN_PATCH_ENABLED;
4251
4252 return rc;
4253 }
4254 if ( pPatchRec->patch.uState == PATCH_ENABLED
4255 || pPatchRec->patch.uState == PATCH_DIRTY)
4256 {
4257 /*
4258 * The patch might have been overwritten.
4259 */
4260 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
4261 if (pPatchRec->patch.uState != PATCH_REFUSED && pPatchRec->patch.uState != PATCH_UNUSABLE)
4262 {
4263 /* Patch must have been overwritten; remove it and pretend nothing happened. */
4264 Log(("Patch an existing patched instruction?!? (%RRv)\n", pInstrGC));
4265 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_MMIO_ACCESS|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
4266 {
4267 if (flags & PATMFL_IDTHANDLER)
4268 pPatchRec->patch.flags |= (flags & (PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER|PATMFL_INTHANDLER)); /* update the type */
4269
4270 return VERR_PATM_ALREADY_PATCHED; /* already done once */
4271 }
4272 }
4273 rc = PATMR3RemovePatch(pVM, pInstrGC);
4274 if (RT_FAILURE(rc))
4275 return VERR_PATCHING_REFUSED;
4276 }
4277 else
4278 {
4279 AssertMsg(pPatchRec->patch.uState == PATCH_REFUSED || pPatchRec->patch.uState == PATCH_UNUSABLE, ("Patch an existing patched instruction?!? (%RRv, state=%d)\n", pInstrGC, pPatchRec->patch.uState));
4280 /* already tried it once! */
4281 return VERR_PATCHING_REFUSED;
4282 }
4283 }
4284
4285 RTGCPHYS GCPhys;
4286 rc = PGMGstGetPage(pVCpu, pInstrGC, NULL, &GCPhys);
4287 if (rc != VINF_SUCCESS)
4288 {
4289 Log(("PGMGstGetPage failed with %Rrc\n", rc));
4290 return rc;
4291 }
4292 /* Disallow patching instructions inside ROM code; complete function duplication is allowed though. */
4293 if ( !(flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAMPOLINE))
4294 && !PGMPhysIsGCPhysNormal(pVM, GCPhys))
4295 {
4296 Log(("Code at %RGv (phys %RGp) is in a ROM, MMIO or invalid page - refused\n", pInstrGC, GCPhys));
4297 return VERR_PATCHING_REFUSED;
4298 }
4299
4300 /* Initialize cache record for guest address translations. */
4301 bool fInserted;
4302 PATMP2GLOOKUPREC cacheRec;
4303 RT_ZERO(cacheRec);
4304
4305 pInstrHC = patmR3GCVirtToHCVirt(pVM, &cacheRec, pInstrGC);
4306 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
4307
4308 /* Allocate patch record. */
4309 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
4310 if (RT_FAILURE(rc))
4311 {
4312 Log(("Out of memory!!!!\n"));
4313 return VERR_NO_MEMORY;
4314 }
4315 pPatchRec->Core.Key = pInstrGC;
4316 pPatchRec->patch.uState = PATCH_REFUSED; /* default value */
4317 /* Insert patch record into the lookup tree. */
4318 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
4319 Assert(fInserted);
4320
4321 pPatchRec->patch.pPrivInstrGC = pInstrGC;
4322 pPatchRec->patch.flags = flags;
4323 pPatchRec->patch.uOpMode = (flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT;
4324 pPatchRec->patch.pTrampolinePatchesHead = NULL;
4325
4326 pPatchRec->patch.pInstrGCLowest = pInstrGC;
4327 pPatchRec->patch.pInstrGCHighest = pInstrGC;
4328
4329 if (!(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION | PATMFL_IDTHANDLER | PATMFL_SYSENTER | PATMFL_TRAMPOLINE)))
4330 {
4331 /*
4332 * Close proximity to an unusable patch is a possible hint that this patch would turn out to be dangerous too!
4333 */
4334 PPATMPATCHREC pPatchNear = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, (pInstrGC + SIZEOF_NEARJUMP32 - 1), false);
4335 if (pPatchNear)
4336 {
4337 if (pPatchNear->patch.uState == PATCH_UNUSABLE && pInstrGC < pPatchNear->patch.pPrivInstrGC && pInstrGC + SIZEOF_NEARJUMP32 > pPatchNear->patch.pPrivInstrGC)
4338 {
4339 Log(("Dangerous patch; would overwrite the unusable patch at %RRv\n", pPatchNear->patch.pPrivInstrGC));
4340
4341 pPatchRec->patch.uState = PATCH_UNUSABLE;
4342 /*
4343 * Leave the new patch active as it's marked unusable; to prevent us from checking it over and over again
4344 */
4345 return VERR_PATCHING_REFUSED;
4346 }
4347 }
4348 }
4349
4350 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
4351 if (pPatchRec->patch.pTempInfo == 0)
4352 {
4353 Log(("Out of memory!!!!\n"));
4354 return VERR_NO_MEMORY;
4355 }
4356
4357 disret = patmR3DisInstrNoStrOpMode(pVM, &pPatchRec->patch, pInstrGC, NULL, PATMREAD_ORGCODE, &cpu, &cbInstr);
4358 if (disret == false)
4359 {
4360 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
4361 return VERR_PATCHING_REFUSED;
4362 }
4363
4364 AssertMsg(cbInstr <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", cbInstr));
4365 if (cbInstr > MAX_INSTR_SIZE)
4366 return VERR_PATCHING_REFUSED;
4367
4368 pPatchRec->patch.cbPrivInstr = cbInstr;
4369 pPatchRec->patch.opcode = cpu.pCurInstr->uOpcode;
4370
4371 /* Restricted hinting for now. */
4372 Assert(!(flags & PATMFL_INSTR_HINT) || cpu.pCurInstr->uOpcode == OP_CLI);
4373
4374 /* Initialize cache record patch pointer. */
4375 cacheRec.pPatch = &pPatchRec->patch;
4376
4377 /* Allocate statistics slot */
4378 if (pVM->patm.s.uCurrentPatchIdx < PATM_STAT_MAX_COUNTERS)
4379 {
4380 pPatchRec->patch.uPatchIdx = pVM->patm.s.uCurrentPatchIdx++;
4381 }
4382 else
4383 {
4384 Log(("WARNING: Patch index wrap around!!\n"));
4385 pPatchRec->patch.uPatchIdx = PATM_STAT_INDEX_DUMMY;
4386 }
4387
4388 if (pPatchRec->patch.flags & PATMFL_TRAPHANDLER)
4389 {
4390 rc = patmInstallTrapTrampoline(pVM, pInstrGC, pPatchRec, &cacheRec);
4391 }
4392 else
4393 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION ))
4394 {
4395 rc = patmDuplicateFunction(pVM, pInstrGC, pPatchRec, &cacheRec);
4396 }
4397 else
4398 if (pPatchRec->patch.flags & PATMFL_TRAMPOLINE)
4399 {
4400 rc = patmCreateTrampoline(pVM, pInstrGC, pPatchRec);
4401 }
4402 else
4403 if (pPatchRec->patch.flags & PATMFL_REPLACE_FUNCTION_CALL)
4404 {
4405 rc = patmReplaceFunctionCall(pVM, &cpu, pInstrGC, &cacheRec);
4406 }
4407 else
4408 if (pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT)
4409 {
4410 rc = patmR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4411 }
4412 else
4413 if (pPatchRec->patch.flags & PATMFL_MMIO_ACCESS)
4414 {
4415 rc = patmPatchMMIOInstr(pVM, pInstrGC, &cpu, &cacheRec);
4416 }
4417 else
4418 if (pPatchRec->patch.flags & (PATMFL_IDTHANDLER|PATMFL_SYSENTER))
4419 {
4420 if (pPatchRec->patch.flags & PATMFL_SYSENTER)
4421 pPatchRec->patch.flags |= PATMFL_IDTHANDLER; /* we treat a sysenter handler as an IDT handler */
4422
4423 rc = patmIdtHandler(pVM, pInstrGC, cbInstr, pPatchRec, &cacheRec);
4424#ifdef VBOX_WITH_STATISTICS
4425 if ( rc == VINF_SUCCESS
4426 && (pPatchRec->patch.flags & PATMFL_SYSENTER))
4427 {
4428 pVM->patm.s.uSysEnterPatchIdx = pPatchRec->patch.uPatchIdx;
4429 }
4430#endif
4431 }
4432 else
4433 if (pPatchRec->patch.flags & PATMFL_GUEST_SPECIFIC)
4434 {
4435 switch (cpu.pCurInstr->uOpcode)
4436 {
4437 case OP_SYSENTER:
4438 case OP_PUSH:
4439 rc = patmR3InstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4440 if (rc == VINF_SUCCESS)
4441 {
4442 if (rc == VINF_SUCCESS)
4443 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4444 return rc;
4445 }
4446 break;
4447
4448 default:
4449 rc = VERR_NOT_IMPLEMENTED;
4450 break;
4451 }
4452 }
4453 else
4454 {
4455 switch (cpu.pCurInstr->uOpcode)
4456 {
4457 case OP_SYSENTER:
4458 rc = patmR3InstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4459 if (rc == VINF_SUCCESS)
4460 {
4461 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4462 return VINF_SUCCESS;
4463 }
4464 break;
4465
4466#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
4467 case OP_JO:
4468 case OP_JNO:
4469 case OP_JC:
4470 case OP_JNC:
4471 case OP_JE:
4472 case OP_JNE:
4473 case OP_JBE:
4474 case OP_JNBE:
4475 case OP_JS:
4476 case OP_JNS:
4477 case OP_JP:
4478 case OP_JNP:
4479 case OP_JL:
4480 case OP_JNL:
4481 case OP_JLE:
4482 case OP_JNLE:
4483 case OP_JECXZ:
4484 case OP_LOOP:
4485 case OP_LOOPNE:
4486 case OP_LOOPE:
4487 case OP_JMP:
4488 if (pPatchRec->patch.flags & PATMFL_JUMP_CONFLICT)
4489 {
4490 rc = patmPatchJump(pVM, pInstrGC, pInstrHC, &cpu, pPatchRec);
4491 break;
4492 }
4493 return VERR_NOT_IMPLEMENTED;
4494#endif
4495
4496 case OP_PUSHF:
4497 case OP_CLI:
4498 Log(("PATMR3InstallPatch %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4499 rc = patmR3PatchBlock(pVM, pInstrGC, pInstrHC, cpu.pCurInstr->uOpcode, cbInstr, pPatchRec);
4500 break;
4501
4502#ifndef VBOX_WITH_SAFE_STR
4503 case OP_STR:
4504#endif
4505 case OP_SGDT:
4506 case OP_SLDT:
4507 case OP_SIDT:
4508 case OP_CPUID:
4509 case OP_LSL:
4510 case OP_LAR:
4511 case OP_SMSW:
4512 case OP_VERW:
4513 case OP_VERR:
4514 case OP_IRET:
4515#ifdef VBOX_WITH_RAW_RING1
4516 case OP_MOV:
4517#endif
4518 rc = patmR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4519 break;
4520
4521 default:
4522 return VERR_NOT_IMPLEMENTED;
4523 }
4524 }
4525
4526 if (rc != VINF_SUCCESS)
4527 {
4528 if (pPatchRec && pPatchRec->patch.nrPatch2GuestRecs)
4529 {
4530 patmEmptyTreeU32(pVM, &pPatchRec->patch.Patch2GuestAddrTree);
4531 pPatchRec->patch.nrPatch2GuestRecs = 0;
4532 }
4533 pVM->patm.s.uCurrentPatchIdx--;
4534 }
4535 else
4536 {
4537 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
4538 AssertRCReturn(rc, rc);
4539
4540 /* Keep track upper and lower boundaries of patched instructions */
4541 if (pPatchRec->patch.pInstrGCLowest < pVM->patm.s.pPatchedInstrGCLowest)
4542 pVM->patm.s.pPatchedInstrGCLowest = pPatchRec->patch.pInstrGCLowest;
4543 if (pPatchRec->patch.pInstrGCHighest > pVM->patm.s.pPatchedInstrGCHighest)
4544 pVM->patm.s.pPatchedInstrGCHighest = pPatchRec->patch.pInstrGCHighest;
4545
4546 Log(("Patch lowest %RRv highest %RRv\n", pPatchRec->patch.pInstrGCLowest, pPatchRec->patch.pInstrGCHighest));
4547 Log(("Global lowest %RRv highest %RRv\n", pVM->patm.s.pPatchedInstrGCLowest, pVM->patm.s.pPatchedInstrGCHighest));
4548
4549 STAM_COUNTER_ADD(&pVM->patm.s.StatInstalled, 1);
4550 STAM_COUNTER_ADD(&pVM->patm.s.StatPATMMemoryUsed, pPatchRec->patch.cbPatchBlockSize);
4551
4552 rc = VINF_SUCCESS;
4553
4554 /* Patch hints are not enabled by default. Only when the are actually encountered. */
4555 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4556 {
4557 rc = PATMR3DisablePatch(pVM, pInstrGC);
4558 AssertRCReturn(rc, rc);
4559 }
4560
4561#ifdef VBOX_WITH_STATISTICS
4562 /* Register statistics counter */
4563 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
4564 {
4565 STAMR3RegisterCallback(pVM, &pPatchRec->patch, STAMVISIBILITY_NOT_GUI, STAMUNIT_GOOD_BAD, patmResetStat, patmPrintStat, "Patch statistics",
4566 "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4567#ifndef DEBUG_sandervl
4568 /* Full breakdown for the GUI. */
4569 STAMR3RegisterF(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx], STAMTYPE_RATIO_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_GOOD_BAD, PATMPatchType(pVM, &pPatchRec->patch),
4570 "/PATM/Stats/PatchBD/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4571 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchBlockSize,STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchBlockSize", pPatchRec->patch.pPrivInstrGC);
4572 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchJump, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchJump", pPatchRec->patch.pPrivInstrGC);
4573 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPrivInstr, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPrivInstr", pPatchRec->patch.pPrivInstrGC);
4574 STAMR3RegisterF(pVM, &pPatchRec->patch.cCodeWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cCodeWrites", pPatchRec->patch.pPrivInstrGC);
4575 STAMR3RegisterF(pVM, &pPatchRec->patch.cInvalidWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cInvalidWrites", pPatchRec->patch.pPrivInstrGC);
4576 STAMR3RegisterF(pVM, &pPatchRec->patch.cTraps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cTraps", pPatchRec->patch.pPrivInstrGC);
4577 STAMR3RegisterF(pVM, &pPatchRec->patch.flags, STAMTYPE_X32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/flags", pPatchRec->patch.pPrivInstrGC);
4578 STAMR3RegisterF(pVM, &pPatchRec->patch.nrJumpRecs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrJumpRecs", pPatchRec->patch.pPrivInstrGC);
4579 STAMR3RegisterF(pVM, &pPatchRec->patch.nrFixups, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrFixups", pPatchRec->patch.pPrivInstrGC);
4580 STAMR3RegisterF(pVM, &pPatchRec->patch.opcode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/opcode", pPatchRec->patch.pPrivInstrGC);
4581 STAMR3RegisterF(pVM, &pPatchRec->patch.uOldState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOldState", pPatchRec->patch.pPrivInstrGC);
4582 STAMR3RegisterF(pVM, &pPatchRec->patch.uOpMode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOpMode", pPatchRec->patch.pPrivInstrGC);
4583 /// @todo change the state to be a callback so we can get a state mnemonic instead.
4584 STAMR3RegisterF(pVM, &pPatchRec->patch.uState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uState", pPatchRec->patch.pPrivInstrGC);
4585#endif
4586 }
4587#endif
4588 }
4589 /* Free leftover lock if any. */
4590 if (cacheRec.Lock.pvMap)
4591 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4592 return rc;
4593}
4594
4595/**
4596 * Query instruction size
4597 *
4598 * @returns VBox status code.
4599 * @param pVM Pointer to the VM.
4600 * @param pPatch Patch record
4601 * @param pInstrGC Instruction address
4602 */
4603static uint32_t patmGetInstrSize(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
4604{
4605 uint8_t *pInstrHC;
4606 PGMPAGEMAPLOCK Lock;
4607
4608 int rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pInstrGC, (const void **)&pInstrHC, &Lock);
4609 if (rc == VINF_SUCCESS)
4610 {
4611 DISCPUSTATE cpu;
4612 bool disret;
4613 uint32_t cbInstr;
4614
4615 disret = patmR3DisInstr(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE | PATMREAD_NOCHECK, &cpu, &cbInstr);
4616 PGMPhysReleasePageMappingLock(pVM, &Lock);
4617 if (disret)
4618 return cbInstr;
4619 }
4620 return 0;
4621}
4622
4623/**
4624 * Add patch to page record
4625 *
4626 * @returns VBox status code.
4627 * @param pVM Pointer to the VM.
4628 * @param pPage Page address
4629 * @param pPatch Patch record
4630 */
4631int patmAddPatchToPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4632{
4633 PPATMPATCHPAGE pPatchPage;
4634 int rc;
4635
4636 Log(("patmAddPatchToPage: insert patch %RHv to page %RRv\n", pPatch, pPage));
4637
4638 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4639 if (pPatchPage)
4640 {
4641 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4642 if (pPatchPage->cCount == pPatchPage->cMaxPatches)
4643 {
4644 uint32_t cMaxPatchesOld = pPatchPage->cMaxPatches;
4645 PPATCHINFO *papPatchOld = pPatchPage->papPatch;
4646
4647 pPatchPage->cMaxPatches += PATMPATCHPAGE_PREALLOC_INCREMENT;
4648 rc = MMHyperAlloc(pVM, sizeof(pPatchPage->papPatch[0]) * pPatchPage->cMaxPatches, 0, MM_TAG_PATM_PATCH,
4649 (void **)&pPatchPage->papPatch);
4650 if (RT_FAILURE(rc))
4651 {
4652 Log(("Out of memory!!!!\n"));
4653 return VERR_NO_MEMORY;
4654 }
4655 memcpy(pPatchPage->papPatch, papPatchOld, cMaxPatchesOld * sizeof(pPatchPage->papPatch[0]));
4656 MMHyperFree(pVM, papPatchOld);
4657 }
4658 pPatchPage->papPatch[pPatchPage->cCount] = pPatch;
4659 pPatchPage->cCount++;
4660 }
4661 else
4662 {
4663 bool fInserted;
4664
4665 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHPAGE), 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage);
4666 if (RT_FAILURE(rc))
4667 {
4668 Log(("Out of memory!!!!\n"));
4669 return VERR_NO_MEMORY;
4670 }
4671 pPatchPage->Core.Key = pPage;
4672 pPatchPage->cCount = 1;
4673 pPatchPage->cMaxPatches = PATMPATCHPAGE_PREALLOC_INCREMENT;
4674
4675 rc = MMHyperAlloc(pVM, sizeof(pPatchPage->papPatch[0]) * PATMPATCHPAGE_PREALLOC_INCREMENT, 0, MM_TAG_PATM_PATCH,
4676 (void **)&pPatchPage->papPatch);
4677 if (RT_FAILURE(rc))
4678 {
4679 Log(("Out of memory!!!!\n"));
4680 MMHyperFree(pVM, pPatchPage);
4681 return VERR_NO_MEMORY;
4682 }
4683 pPatchPage->papPatch[0] = pPatch;
4684
4685 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, &pPatchPage->Core);
4686 Assert(fInserted);
4687 pVM->patm.s.cPageRecords++;
4688
4689 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageInserted);
4690 }
4691 CSAMR3MonitorPage(pVM, pPage, CSAM_TAG_PATM);
4692
4693 /* Get the closest guest instruction (from below) */
4694 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4695 Assert(pGuestToPatchRec);
4696 if (pGuestToPatchRec)
4697 {
4698 LogFlow(("patmAddPatchToPage: lowest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pLowestAddrGC));
4699 if ( pPatchPage->pLowestAddrGC == 0
4700 || pPatchPage->pLowestAddrGC > (RTRCPTR)pGuestToPatchRec->Core.Key)
4701 {
4702 RTRCUINTPTR offset;
4703
4704 pPatchPage->pLowestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4705
4706 offset = pPatchPage->pLowestAddrGC & PAGE_OFFSET_MASK;
4707 /* If we're too close to the page boundary, then make sure an
4708 instruction from the previous page doesn't cross the
4709 boundary itself. */
4710 if (offset && offset < MAX_INSTR_SIZE)
4711 {
4712 /* Get the closest guest instruction (from above) */
4713 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage-1, false);
4714
4715 if (pGuestToPatchRec)
4716 {
4717 uint32_t size = patmGetInstrSize(pVM, pPatch, (RTRCPTR)pGuestToPatchRec->Core.Key);
4718 if ((RTRCUINTPTR)pGuestToPatchRec->Core.Key + size > pPage)
4719 {
4720 pPatchPage->pLowestAddrGC = pPage;
4721 LogFlow(("patmAddPatchToPage: new lowest %RRv\n", pPatchPage->pLowestAddrGC));
4722 }
4723 }
4724 }
4725 }
4726 }
4727
4728 /* Get the closest guest instruction (from above) */
4729 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage+PAGE_SIZE-1, false);
4730 Assert(pGuestToPatchRec);
4731 if (pGuestToPatchRec)
4732 {
4733 LogFlow(("patmAddPatchToPage: highest patch page address %RRv current highest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pHighestAddrGC));
4734 if ( pPatchPage->pHighestAddrGC == 0
4735 || pPatchPage->pHighestAddrGC <= (RTRCPTR)pGuestToPatchRec->Core.Key)
4736 {
4737 pPatchPage->pHighestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4738 /* Increase by instruction size. */
4739 uint32_t size = patmGetInstrSize(pVM, pPatch, pPatchPage->pHighestAddrGC);
4740//// Assert(size);
4741 pPatchPage->pHighestAddrGC += size;
4742 LogFlow(("patmAddPatchToPage: new highest %RRv\n", pPatchPage->pHighestAddrGC));
4743 }
4744 }
4745
4746 return VINF_SUCCESS;
4747}
4748
4749/**
4750 * Remove patch from page record
4751 *
4752 * @returns VBox status code.
4753 * @param pVM Pointer to the VM.
4754 * @param pPage Page address
4755 * @param pPatch Patch record
4756 */
4757int patmRemovePatchFromPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4758{
4759 PPATMPATCHPAGE pPatchPage;
4760 int rc;
4761
4762 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4763 Assert(pPatchPage);
4764
4765 if (!pPatchPage)
4766 return VERR_INVALID_PARAMETER;
4767
4768 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4769
4770 Log(("patmRemovePatchPage: remove patch %RHv from page %RRv\n", pPatch, pPage));
4771 if (pPatchPage->cCount > 1)
4772 {
4773 uint32_t i;
4774
4775 /* Used by multiple patches */
4776 for (i = 0; i < pPatchPage->cCount; i++)
4777 {
4778 if (pPatchPage->papPatch[i] == pPatch)
4779 {
4780 /* close the gap between the remaining pointers. */
4781 uint32_t cNew = --pPatchPage->cCount;
4782 if (i < cNew)
4783 pPatchPage->papPatch[i] = pPatchPage->papPatch[cNew];
4784 pPatchPage->papPatch[cNew] = NULL;
4785 return VINF_SUCCESS;
4786 }
4787 }
4788 AssertMsgFailed(("Unable to find patch %RHv in page %RRv\n", pPatch, pPage));
4789 }
4790 else
4791 {
4792 PPATMPATCHPAGE pPatchNode;
4793
4794 Log(("patmRemovePatchFromPage %RRv\n", pPage));
4795
4796 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageRemoved);
4797 pPatchNode = (PPATMPATCHPAGE)RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4798 Assert(pPatchNode && pPatchNode == pPatchPage);
4799
4800 Assert(pPatchPage->papPatch);
4801 rc = MMHyperFree(pVM, pPatchPage->papPatch);
4802 AssertRC(rc);
4803 rc = MMHyperFree(pVM, pPatchPage);
4804 AssertRC(rc);
4805 pVM->patm.s.cPageRecords--;
4806 }
4807 return VINF_SUCCESS;
4808}
4809
4810/**
4811 * Insert page records for all guest pages that contain instructions that were recompiled for this patch
4812 *
4813 * @returns VBox status code.
4814 * @param pVM Pointer to the VM.
4815 * @param pPatch Patch record
4816 */
4817int patmInsertPatchPages(PVM pVM, PPATCHINFO pPatch)
4818{
4819 int rc;
4820 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4821
4822 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4823 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4824 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4825
4826 /** @todo optimize better (large gaps between current and next used page) */
4827 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4828 {
4829 /* Get the closest guest instruction (from above) */
4830 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4831 if ( pGuestToPatchRec
4832 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage)
4833 )
4834 {
4835 /* Code in page really patched -> add record */
4836 rc = patmAddPatchToPage(pVM, pPage, pPatch);
4837 AssertRC(rc);
4838 }
4839 }
4840 pPatch->flags |= PATMFL_CODE_MONITORED;
4841 return VINF_SUCCESS;
4842}
4843
4844/**
4845 * Remove page records for all guest pages that contain instructions that were recompiled for this patch
4846 *
4847 * @returns VBox status code.
4848 * @param pVM Pointer to the VM.
4849 * @param pPatch Patch record
4850 */
4851static int patmRemovePatchPages(PVM pVM, PPATCHINFO pPatch)
4852{
4853 int rc;
4854 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4855
4856 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4857 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4858 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4859
4860 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4861 {
4862 /* Get the closest guest instruction (from above) */
4863 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4864 if ( pGuestToPatchRec
4865 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage) /** @todo bird: PAGE_ADDRESS is for the current context really. check out these. */
4866 )
4867 {
4868 /* Code in page really patched -> remove record */
4869 rc = patmRemovePatchFromPage(pVM, pPage, pPatch);
4870 AssertRC(rc);
4871 }
4872 }
4873 pPatch->flags &= ~PATMFL_CODE_MONITORED;
4874 return VINF_SUCCESS;
4875}
4876
4877/**
4878 * Notifies PATM about a (potential) write to code that has been patched.
4879 *
4880 * @returns VBox status code.
4881 * @param pVM Pointer to the VM.
4882 * @param GCPtr GC pointer to write address
4883 * @param cbWrite Nr of bytes to write
4884 *
4885 */
4886VMMR3_INT_DECL(int) PATMR3PatchWrite(PVM pVM, RTRCPTR GCPtr, uint32_t cbWrite)
4887{
4888 RTRCUINTPTR pWritePageStart, pWritePageEnd, pPage;
4889
4890 Log(("PATMR3PatchWrite %RRv %x\n", GCPtr, cbWrite));
4891
4892 Assert(VM_IS_EMT(pVM));
4893 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
4894
4895 /* Quick boundary check */
4896 if ( GCPtr < pVM->patm.s.pPatchedInstrGCLowest
4897 || GCPtr > pVM->patm.s.pPatchedInstrGCHighest
4898 )
4899 return VINF_SUCCESS;
4900
4901 STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWrite, a);
4902
4903 pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
4904 pWritePageEnd = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;
4905
4906 for (pPage = pWritePageStart; pPage <= pWritePageEnd; pPage += PAGE_SIZE)
4907 {
4908loop_start:
4909 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4910 if (pPatchPage)
4911 {
4912 uint32_t i;
4913 bool fValidPatchWrite = false;
4914
4915 /* Quick check to see if the write is in the patched part of the page */
4916 if ( pPatchPage->pLowestAddrGC > (RTRCPTR)((RTRCUINTPTR)GCPtr + cbWrite - 1)
4917 || pPatchPage->pHighestAddrGC < GCPtr)
4918 {
4919 break;
4920 }
4921
4922 for (i=0;i<pPatchPage->cCount;i++)
4923 {
4924 if (pPatchPage->papPatch[i])
4925 {
4926 PPATCHINFO pPatch = pPatchPage->papPatch[i];
4927 RTRCPTR pPatchInstrGC;
4928 //unused: bool fForceBreak = false;
4929
4930 Assert(pPatchPage->papPatch[i]->flags & PATMFL_CODE_MONITORED);
4931 /** @todo inefficient and includes redundant checks for multiple pages. */
4932 for (uint32_t j=0; j<cbWrite; j++)
4933 {
4934 RTRCPTR pGuestPtrGC = (RTRCPTR)((RTRCUINTPTR)GCPtr + j);
4935
4936 if ( pPatch->cbPatchJump
4937 && pGuestPtrGC >= pPatch->pPrivInstrGC
4938 && pGuestPtrGC < pPatch->pPrivInstrGC + pPatch->cbPatchJump)
4939 {
4940 /* The guest is about to overwrite the 5 byte jump to patch code. Remove the patch. */
4941 Log(("PATMR3PatchWrite: overwriting jump to patch code -> remove patch.\n"));
4942 int rc = PATMR3RemovePatch(pVM, pPatch->pPrivInstrGC);
4943 if (rc == VINF_SUCCESS)
4944 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4945 goto loop_start;
4946
4947 continue;
4948 }
4949
4950 /* Find the closest instruction from below; the above quick check ensured that we are indeed in patched code */
4951 pPatchInstrGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4952 if (!pPatchInstrGC)
4953 {
4954 RTRCPTR pClosestInstrGC;
4955 uint32_t size;
4956
4957 pPatchInstrGC = patmGuestGCPtrToClosestPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4958 if (pPatchInstrGC)
4959 {
4960 pClosestInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchInstrGC);
4961 Assert(pClosestInstrGC <= pGuestPtrGC);
4962 size = patmGetInstrSize(pVM, pPatch, pClosestInstrGC);
4963 /* Check if this is not a write into a gap between two patches */
4964 if (pClosestInstrGC + size - 1 < pGuestPtrGC)
4965 pPatchInstrGC = 0;
4966 }
4967 }
4968 if (pPatchInstrGC)
4969 {
4970 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
4971
4972 fValidPatchWrite = true;
4973
4974 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
4975 Assert(pPatchToGuestRec);
4976 if (pPatchToGuestRec && !pPatchToGuestRec->fDirty)
4977 {
4978 Log(("PATMR3PatchWrite: Found patched instruction %RRv -> %RRv\n", pGuestPtrGC, pPatchInstrGC));
4979
4980 if (++pPatch->cCodeWrites > PATM_MAX_CODE_WRITES)
4981 {
4982 LogRel(("PATM: Disable block at %RRv - write %RRv-%RRv\n", pPatch->pPrivInstrGC, pGuestPtrGC, pGuestPtrGC+cbWrite));
4983
4984 patmR3MarkDirtyPatch(pVM, pPatch);
4985
4986 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4987 goto loop_start;
4988 }
4989 else
4990 {
4991 /* Replace the patch instruction with a breakpoint; when it's hit, then we'll attempt to recompile the instruction again. */
4992 uint8_t *pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pPatchInstrGC);
4993
4994 pPatchToGuestRec->u8DirtyOpcode = *pInstrHC;
4995 pPatchToGuestRec->fDirty = true;
4996
4997 *pInstrHC = 0xCC;
4998
4999 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirty);
5000 }
5001 }
5002 /* else already marked dirty */
5003 }
5004 }
5005 }
5006 } /* for each patch */
5007
5008 if (fValidPatchWrite == false)
5009 {
5010 /* Write to a part of the page that either:
5011 * - doesn't contain any code (shared code/data); rather unlikely
5012 * - old code page that's no longer in active use.
5013 */
5014invalid_write_loop_start:
5015 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
5016
5017 if (pPatchPage)
5018 {
5019 for (i=0;i<pPatchPage->cCount;i++)
5020 {
5021 PPATCHINFO pPatch = pPatchPage->papPatch[i];
5022
5023 if (pPatch->cInvalidWrites > PATM_MAX_INVALID_WRITES)
5024 {
5025 /* Note: possibly dangerous assumption that all future writes will be harmless. */
5026 if (pPatch->flags & PATMFL_IDTHANDLER)
5027 {
5028 LogRel(("PATM: Stop monitoring IDT handler pages at %RRv - invalid write %RRv-%RRv (this is not a fatal error)\n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
5029
5030 Assert(pPatch->flags & PATMFL_CODE_MONITORED);
5031 int rc = patmRemovePatchPages(pVM, pPatch);
5032 AssertRC(rc);
5033 }
5034 else
5035 {
5036 LogRel(("PATM: Disable block at %RRv - invalid write %RRv-%RRv \n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
5037 patmR3MarkDirtyPatch(pVM, pPatch);
5038 }
5039 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
5040 goto invalid_write_loop_start;
5041 }
5042 } /* for */
5043 }
5044 }
5045 }
5046 }
5047 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWrite, a);
5048 return VINF_SUCCESS;
5049
5050}
5051
5052/**
5053 * Disable all patches in a flushed page
5054 *
5055 * @returns VBox status code
5056 * @param pVM Pointer to the VM.
5057 * @param addr GC address of the page to flush
5058 * @note Currently only called by CSAMR3FlushPage; optimization to avoid
5059 * having to double check if the physical address has changed
5060 */
5061VMMR3_INT_DECL(int) PATMR3FlushPage(PVM pVM, RTRCPTR addr)
5062{
5063 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5064
5065 addr &= PAGE_BASE_GC_MASK;
5066
5067 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, addr);
5068 if (pPatchPage)
5069 {
5070 int i;
5071
5072 /* From top to bottom as the array is modified by PATMR3MarkDirtyPatch. */
5073 for (i=(int)pPatchPage->cCount-1;i>=0;i--)
5074 {
5075 if (pPatchPage->papPatch[i])
5076 {
5077 PPATCHINFO pPatch = pPatchPage->papPatch[i];
5078
5079 Log(("PATMR3FlushPage %RRv remove patch at %RRv\n", addr, pPatch->pPrivInstrGC));
5080 patmR3MarkDirtyPatch(pVM, pPatch);
5081 }
5082 }
5083 STAM_COUNTER_INC(&pVM->patm.s.StatFlushed);
5084 }
5085 return VINF_SUCCESS;
5086}
5087
5088/**
5089 * Checks if the instructions at the specified address has been patched already.
5090 *
5091 * @returns boolean, patched or not
5092 * @param pVM Pointer to the VM.
5093 * @param pInstrGC Guest context pointer to instruction
5094 */
5095VMMR3_INT_DECL(bool) PATMR3HasBeenPatched(PVM pVM, RTRCPTR pInstrGC)
5096{
5097 Assert(!HMIsEnabled(pVM));
5098 PPATMPATCHREC pPatchRec;
5099 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5100 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
5101 return true;
5102 return false;
5103}
5104
5105/**
5106 * Query the opcode of the original code that was overwritten by the 5 bytes patch jump
5107 *
5108 * @returns VBox status code.
5109 * @param pVM Pointer to the VM.
5110 * @param pInstrGC GC address of instr
5111 * @param pByte opcode byte pointer (OUT)
5112 *
5113 */
5114VMMR3DECL(int) PATMR3QueryOpcode(PVM pVM, RTRCPTR pInstrGC, uint8_t *pByte)
5115{
5116 PPATMPATCHREC pPatchRec;
5117
5118 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
5119
5120 /* Shortcut. */
5121 if (!PATMIsEnabled(pVM))
5122 return VERR_PATCH_NOT_FOUND;
5123 Assert(!HMIsEnabled(pVM));
5124 if ( pInstrGC < pVM->patm.s.pPatchedInstrGCLowest
5125 || pInstrGC > pVM->patm.s.pPatchedInstrGCHighest)
5126 return VERR_PATCH_NOT_FOUND;
5127
5128 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5129 // if the patch is enabled and the pointer lies within 5 bytes of this priv instr ptr, then we've got a hit!
5130 if ( pPatchRec
5131 && pPatchRec->patch.uState == PATCH_ENABLED
5132 && pInstrGC >= pPatchRec->patch.pPrivInstrGC
5133 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5134 {
5135 RTRCPTR offset = pInstrGC - pPatchRec->patch.pPrivInstrGC;
5136 *pByte = pPatchRec->patch.aPrivInstr[offset];
5137
5138 if (pPatchRec->patch.cbPatchJump == 1)
5139 {
5140 Log(("PATMR3QueryOpcode: returning opcode %2X for instruction at %RRv\n", *pByte, pInstrGC));
5141 }
5142 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5143 return VINF_SUCCESS;
5144 }
5145 return VERR_PATCH_NOT_FOUND;
5146}
5147
5148/**
5149 * Read instruction bytes of the original code that was overwritten by the 5
5150 * bytes patch jump.
5151 *
5152 * @returns VINF_SUCCESS or VERR_PATCH_NOT_FOUND.
5153 * @param pVM Pointer to the VM.
5154 * @param GCPtrInstr GC address of instr
5155 * @param pbDst The output buffer.
5156 * @param cbToRead The maximum number bytes to read.
5157 * @param pcbRead Where to return the acutal number of bytes read.
5158 */
5159VMMR3_INT_DECL(int) PATMR3ReadOrgInstr(PVM pVM, RTGCPTR32 GCPtrInstr, uint8_t *pbDst, size_t cbToRead, size_t *pcbRead)
5160{
5161 /* Shortcut. */
5162 if (!PATMIsEnabled(pVM))
5163 return VERR_PATCH_NOT_FOUND;
5164 Assert(!HMIsEnabled(pVM));
5165 if ( GCPtrInstr < pVM->patm.s.pPatchedInstrGCLowest
5166 || GCPtrInstr > pVM->patm.s.pPatchedInstrGCHighest)
5167 return VERR_PATCH_NOT_FOUND;
5168
5169 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
5170
5171 /*
5172 * If the patch is enabled and the pointer lies within 5 bytes of this
5173 * priv instr ptr, then we've got a hit!
5174 */
5175 RTGCPTR32 off;
5176 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree,
5177 GCPtrInstr, false /*fAbove*/);
5178 if ( pPatchRec
5179 && pPatchRec->patch.uState == PATCH_ENABLED
5180 && (off = GCPtrInstr - pPatchRec->patch.pPrivInstrGC) < pPatchRec->patch.cbPatchJump)
5181 {
5182 uint8_t const *pbSrc = &pPatchRec->patch.aPrivInstr[off];
5183 uint32_t const cbMax = pPatchRec->patch.cbPatchJump - off;
5184 if (cbToRead > cbMax)
5185 cbToRead = cbMax;
5186 switch (cbToRead)
5187 {
5188 case 5: pbDst[4] = pbSrc[4];
5189 case 4: pbDst[3] = pbSrc[3];
5190 case 3: pbDst[2] = pbSrc[2];
5191 case 2: pbDst[1] = pbSrc[1];
5192 case 1: pbDst[0] = pbSrc[0];
5193 break;
5194 default:
5195 memcpy(pbDst, pbSrc, cbToRead);
5196 }
5197 *pcbRead = cbToRead;
5198
5199 if (pPatchRec->patch.cbPatchJump == 1)
5200 Log(("PATMR3ReadOrgInstr: returning opcode %.*Rhxs for instruction at %RX32\n", cbToRead, pbSrc, GCPtrInstr));
5201 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5202 return VINF_SUCCESS;
5203 }
5204
5205 return VERR_PATCH_NOT_FOUND;
5206}
5207
5208/**
5209 * Disable patch for privileged instruction at specified location
5210 *
5211 * @returns VBox status code.
5212 * @param pVM Pointer to the VM.
5213 * @param pInstr Guest context point to privileged instruction
5214 *
5215 * @note returns failure if patching is not allowed or possible
5216 *
5217 */
VMMR3_INT_DECL(int) PATMR3DisablePatch(PVM pVM, RTRCPTR pInstrGC)
{
    PPATMPATCHREC pPatchRec;
    PPATCHINFO pPatch;

    Log(("PATMR3DisablePatch: %RRv\n", pInstrGC));
    AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
    /* Look up the patch record keyed by the patched guest instruction address. */
    pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
    if (pPatchRec)
    {
        int rc = VINF_SUCCESS;

        pPatch = &pPatchRec->patch;

        /* Already disabled? */
        if (pPatch->uState == PATCH_DISABLED)
            return VINF_SUCCESS;

        /* Clear the IDT entries for the patch we're disabling. */
        /* Note: very important as we clear IF in the patch itself */
        /** @todo this needs to be changed */
        if (pPatch->flags & PATMFL_IDTHANDLER)
        {
            uint32_t iGate;

            /* Find which IDT gate currently points at this patch's code. */
            iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
            if (iGate != (uint32_t)~0)
            {
                TRPMR3SetGuestTrapHandler(pVM, iGate, TRPM_INVALID_HANDLER);
                /* NOTE(review): cIDTHandlersDisabled is not declared anywhere in this chunk;
                   presumably a file-scope static throttling the release-log output to the
                   first 255 occurrences — confirm its declaration/initialization. */
                if (++cIDTHandlersDisabled < 256)
                    LogRel(("PATM: Disabling IDT %x patch handler %RRv\n", iGate, pInstrGC));
            }
        }

        /* Mark the entry with a breakpoint in case somebody else calls it later on (cli patch used as a function, function, trampoline or idt patches) */
        if (    pPatch->pPatchBlockOffset
            &&  pPatch->uState == PATCH_ENABLED)
        {
            Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
            /* Remember the original first opcode byte so PATMR3EnablePatch can restore it. */
            pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
            *PATCHCODE_PTR_HC(pPatch) = 0xCC;
        }

        /* IDT or function patches haven't changed any guest code. */
        if (pPatch->flags & PATMFL_PATCHED_GUEST_CODE)
        {
            Assert(pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP);
            Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK)));

            if (pPatch->uState != PATCH_REFUSED)
            {
                uint8_t temp[16];

                Assert(pPatch->cbPatchJump < sizeof(temp));

                /* Let's first check if the guest code is still the same. */
                rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
                Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
                if (rc == VINF_SUCCESS)
                {
                    /* Expected displacement of the jump we installed: from the byte after
                       the 5-byte near jump to the start of the patch code. */
                    RTRCINTPTR displ = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);

                    if (    temp[0] != 0xE9 /* jmp opcode */
                        ||  *(RTRCINTPTR *)(&temp[1]) != displ
                       )
                    {
                        Log(("PATMR3DisablePatch: Can't disable a patch who's guest code has changed!!\n"));
                        STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
                        /* Remove it completely */
                        pPatch->uState = PATCH_DISABLED;    /* don't call PATMR3DisablePatch again */
                        rc = PATMR3RemovePatch(pVM, pInstrGC);
                        AssertRC(rc);
                        return VWRN_PATCH_REMOVED;
                    }
                    /* Guest code intact: restore the original instruction bytes. */
                    patmRemoveJumpToPatch(pVM, pPatch);
                }
                else
                {
                    /* Page not present: can't touch guest memory now; finish later. */
                    Log(("PATMR3DisablePatch: unable to disable patch -> mark PATCH_DISABLE_PENDING\n"));
                    pPatch->uState = PATCH_DISABLE_PENDING;
                }
            }
            else
            {
                AssertMsgFailed(("Patch was refused!\n"));
                return VERR_PATCH_ALREADY_DISABLED;
            }
        }
        else
        if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
        {
            uint8_t temp[16];

            Assert(pPatch->cbPatchJump < sizeof(temp));

            /* Let's first check if the guest code is still the same. */
            rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
            Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
            if (rc == VINF_SUCCESS)
            {
                /* An int3 replacement patch must still have 0xCC at the patched spot. */
                if (temp[0] != 0xCC)
                {
                    Log(("PATMR3DisablePatch: Can't disable a patch who's guest code has changed!!\n"));
                    STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
                    /* Remove it completely */
                    pPatch->uState = PATCH_DISABLED;    /* don't call PATMR3DisablePatch again */
                    rc = PATMR3RemovePatch(pVM, pInstrGC);
                    AssertRC(rc);
                    return VWRN_PATCH_REMOVED;
                }
                /* Restore the original byte the int3 replaced. */
                patmDeactivateInt3Patch(pVM, pPatch);
            }
        }

        if (rc == VINF_SUCCESS)
        {
            /* Save old state and mark this one as disabled (so it can be enabled later on). */
            if (pPatch->uState == PATCH_DISABLE_PENDING)
            {
                /* Just to be safe, let's make sure this one can never be reused; the patch might be marked dirty already (int3 at start) */
                pPatch->uState = PATCH_UNUSABLE;
            }
            else
            if (pPatch->uState != PATCH_DIRTY)
            {
                /* Remember the previous state so PATMR3EnablePatch can restore it. */
                pPatch->uOldState = pPatch->uState;
                pPatch->uState    = PATCH_DISABLED;
            }
            STAM_COUNTER_ADD(&pVM->patm.s.StatDisabled, 1);
        }

        Log(("PATMR3DisablePatch: disabled patch at %RRv\n", pInstrGC));
        return VINF_SUCCESS;
    }
    Log(("Patch not found!\n"));
    return VERR_PATCH_NOT_FOUND;
}
5355
5356/**
5357 * Permanently disable patch for privileged instruction at specified location
5358 *
5359 * @returns VBox status code.
5360 * @param pVM Pointer to the VM.
 * @param   pInstrGC        Guest context instruction pointer.
5362 * @param pConflictAddr Guest context pointer which conflicts with specified patch
5363 * @param pConflictPatch Conflicting patch
5364 *
5365 */
static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pConflictPatch)
{
    NOREF(pConflictAddr);
#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
    PATCHINFO patch;
    DISCPUSTATE cpu;
    R3PTRTYPE(uint8_t *) pInstrHC;
    uint32_t cbInstr;
    bool disret;
    int rc;

    /* Disassemble the original guest instruction at the conflicting location. */
    RT_ZERO(patch);
    pInstrHC = patmR3GCVirtToHCVirt(pVM, &patch, pInstrGC);
    disret = patmR3DisInstr(pVM, &patch, pInstrGC, pInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
    /*
     * If it's a 5 byte relative jump, then we can work around the problem by replacing the 32 bits relative offset
     * with one that jumps right into the conflict patch.
     * Otherwise we must disable the conflicting patch to avoid serious problems.
     */
    if (    disret == true
        && (pConflictPatch->flags & PATMFL_CODE32)
        && (cpu.pCurInstr->uOpcode == OP_JMP || (cpu.pCurInstr->fOpType & DISOPTYPE_COND_CONTROLFLOW))
        && (cpu.Param1.fUse & DISUSE_IMMEDIATE32_REL))
    {
        /* Hint patches must be enabled first. */
        if (pConflictPatch->flags & PATMFL_INSTR_HINT)
        {
            Log(("Enabling HINTED patch %RRv\n", pConflictPatch->pPrivInstrGC));
            pConflictPatch->flags &= ~PATMFL_INSTR_HINT;
            rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
            Assert(rc == VINF_SUCCESS || rc == VERR_PATCH_NOT_FOUND);
            /* Enabling might fail if the patched code has changed in the meantime. */
            if (rc != VINF_SUCCESS)
                return rc;
        }

        /* Install a jump patch redirecting the conflicting jump into the conflict patch. */
        rc = PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_JUMP_CONFLICT);
        if (RT_SUCCESS(rc))
        {
            Log(("PATM -> CONFLICT: Installed JMP patch for patch conflict at %RRv\n", pInstrGC));
            STAM_COUNTER_INC(&pVM->patm.s.StatFixedConflicts);
            return VINF_SUCCESS;
        }
    }
#endif

    if (pConflictPatch->opcode == OP_CLI)
    {
        /* Turn it into an int3 patch; our GC trap handler will call the generated code manually. */
        Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> turn into int 3 patch!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
        int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
        if (rc == VWRN_PATCH_REMOVED)
            return VINF_SUCCESS;
        if (RT_SUCCESS(rc))
        {
            /* Re-enable the patch as an int3 replacement block instead of a patch jump. */
            pConflictPatch->flags &= ~(PATMFL_MUST_INSTALL_PATCHJMP|PATMFL_INSTR_HINT);
            pConflictPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
            rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
            if (rc == VERR_PATCH_NOT_FOUND)
                return VINF_SUCCESS;    /* removed already */

            AssertRC(rc);
            if (RT_SUCCESS(rc))
            {
                STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
                return VINF_SUCCESS;
            }
        }
        /* else turned into unusable patch (see below) */
    }
    else
    {
        Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> DISABLING it!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
        int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
        if (rc == VWRN_PATCH_REMOVED)
            return VINF_SUCCESS;
    }

    /* No need to monitor the code anymore. */
    if (pConflictPatch->flags & PATMFL_CODE_MONITORED)
    {
        int rc = patmRemovePatchPages(pVM, pConflictPatch);
        AssertRC(rc);
    }
    /* Permanently mark the conflicting patch so it is never reused. */
    pConflictPatch->uState = PATCH_UNUSABLE;
    STAM_COUNTER_INC(&pVM->patm.s.StatUnusable);
    return VERR_PATCH_DISABLED;
}
5454
5455/**
5456 * Enable patch for privileged instruction at specified location
5457 *
5458 * @returns VBox status code.
5459 * @param pVM Pointer to the VM.
 * @param   pInstrGC    Guest context pointer to the privileged instruction.
5461 *
5462 * @note returns failure if patching is not allowed or possible
5463 *
5464 */
VMMR3_INT_DECL(int) PATMR3EnablePatch(PVM pVM, RTRCPTR pInstrGC)
{
    PPATMPATCHREC pPatchRec;
    PPATCHINFO pPatch;

    Log(("PATMR3EnablePatch %RRv\n", pInstrGC));
    AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
    /* Look up the patch record keyed by the patched guest instruction address. */
    pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
    if (pPatchRec)
    {
        int rc = VINF_SUCCESS;

        pPatch = &pPatchRec->patch;

        /* Only a previously disabled patch can be (re-)enabled. */
        if (pPatch->uState == PATCH_DISABLED)
        {
            if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
            {
                Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
                uint8_t temp[16];

                Assert(pPatch->cbPatchJump < sizeof(temp));

                /* Let's first check if the guest code is still the same. */
                int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
                AssertRC(rc2);
                if (rc2 == VINF_SUCCESS)
                {
                    /* Guest bytes must still match the instruction we saved at patch time. */
                    if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
                    {
                        Log(("PATMR3EnablePatch: Can't enable a patch who's guest code has changed!!\n"));
                        STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
                        /* Remove it completely */
                        rc = PATMR3RemovePatch(pVM, pInstrGC);
                        AssertRC(rc);
                        return VERR_PATCH_NOT_FOUND;
                    }

                    PATMP2GLOOKUPREC cacheRec;
                    RT_ZERO(cacheRec);
                    cacheRec.pPatch = pPatch;

                    /* Re-install the jump from guest code into the patch block. */
                    rc2 = patmGenJumpToPatch(pVM, pPatch, &cacheRec, false);
                    /* Free leftover lock if any. */
                    if (cacheRec.Lock.pvMap)
                    {
                        PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
                        cacheRec.Lock.pvMap = NULL;
                    }
                    AssertRC(rc2);
                    if (RT_FAILURE(rc2))
                        return rc2;

#ifdef DEBUG
                    {
                        /* Dump the freshly re-installed jump instruction(s) to the log. */
                        DISCPUSTATE cpu;
                        char szOutput[256];
                        uint32_t cbInstr;
                        uint32_t i = 0;
                        bool disret;
                        while(i < pPatch->cbPatchJump)
                        {
                            disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
                                                         &cpu, &cbInstr, szOutput, sizeof(szOutput));
                            Log(("Renewed patch instr: %s", szOutput));
                            i += cbInstr;
                        }
                    }
#endif
                }
            }
            else
            if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
            {
                uint8_t temp[16];

                Assert(pPatch->cbPatchJump < sizeof(temp));

                /* Let's first check if the guest code is still the same. */
                int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
                AssertRC(rc2);

                /* NOTE(review): unlike the branch above, rc2 is not checked before the
                   memcmp below; on a read failure temp is compared uninitialized and the
                   patch may be removed spuriously — confirm whether a rc2 guard belongs here. */
                if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
                {
                    Log(("PATMR3EnablePatch: Can't enable a patch who's guest code has changed!!\n"));
                    STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
                    rc = PATMR3RemovePatch(pVM, pInstrGC);
                    AssertRC(rc);
                    return VERR_PATCH_NOT_FOUND;
                }

                /* Re-write the int3 into guest code. */
                rc2 = patmActivateInt3Patch(pVM, pPatch);
                if (RT_FAILURE(rc2))
                    return rc2;
            }

            pPatch->uState = pPatch->uOldState; //restore state

            /* Restore the entry breakpoint with the original opcode (see PATMR3DisablePatch). */
            if (pPatch->pPatchBlockOffset)
                *PATCHCODE_PTR_HC(pPatch) = pPatch->bDirtyOpcode;

            STAM_COUNTER_ADD(&pVM->patm.s.StatEnabled, 1);
        }
        else
            Log(("PATMR3EnablePatch: Unable to enable patch %RRv with state %d\n", pInstrGC, pPatch->uState));

        return rc;
    }
    return VERR_PATCH_NOT_FOUND;
}
5576
5577/**
5578 * Remove patch for privileged instruction at specified location
5579 *
5580 * @returns VBox status code.
5581 * @param pVM Pointer to the VM.
5582 * @param pPatchRec Patch record
5583 * @param fForceRemove Remove *all* patches
5584 */
int patmR3RemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove)
{
    PPATCHINFO pPatch;

    pPatch = &pPatchRec->patch;

    /* Strictly forbidden to remove such patches. There can be dependencies!! */
    if (!fForceRemove && (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED)))
    {
        Log(("PATMRemovePatch %RRv REFUSED!\n", pPatch->pPrivInstrGC));
        return VERR_ACCESS_DENIED;
    }
    Log(("PATMRemovePatch %RRv\n", pPatch->pPrivInstrGC));

    /* Note: NEVER EVER REUSE PATCH MEMORY */
    /* Note: PATMR3DisablePatch puts a breakpoint (0xCC) at the entry of this patch */

    /* Drop the record keyed by patch-memory offset (only present when code was generated). */
    if (pPatchRec->patch.pPatchBlockOffset)
    {
        PAVLOU32NODECORE pNode;

        pNode = RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->patch.pPatchBlockOffset);
        Assert(pNode);
    }

    /* Stop monitoring the patched guest pages for self-modifying code. */
    if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
    {
        int rc = patmRemovePatchPages(pVM, &pPatchRec->patch);
        AssertRC(rc);
    }

#ifdef VBOX_WITH_STATISTICS
    /* Deregister all per-patch STAM samples before freeing the record they point into. */
    if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
    {
        STAMR3Deregister(pVM, &pPatchRec->patch);
#ifndef DEBUG_sandervl
        STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
        STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
        STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
        STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
        STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
        STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
        STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
        STAMR3Deregister(pVM, &pPatchRec->patch.flags);
        STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
        STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
        STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
        STAMR3Deregister(pVM, &pPatchRec->patch.uState);
        STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
        STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
#endif
    }
#endif

    /* Note: no need to free Guest2PatchAddrTree as those records share memory with Patch2GuestAddrTree records. */
    patmEmptyTreeU32(pVM, &pPatch->Patch2GuestAddrTree);
    pPatch->nrPatch2GuestRecs = 0;
    Assert(pPatch->Patch2GuestAddrTree == 0);

    patmEmptyTree(pVM, &pPatch->FixupTree);
    pPatch->nrFixups = 0;
    Assert(pPatch->FixupTree == 0);

    if (pPatchRec->patch.pTempInfo)
        MMR3HeapFree(pPatchRec->patch.pTempInfo);

    /* Note: might fail, because it has already been removed (e.g. during reset). */
    RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);

    /* Free the patch record */
    MMHyperFree(pVM, pPatchRec);
    return VINF_SUCCESS;
}
5658
5659/**
5660 * RTAvlU32DoWithAll() worker.
5661 * Checks whether the current trampoline instruction is the jump to the target patch
5662 * and updates the displacement to jump to the new target.
5663 *
5664 * @returns VBox status code.
5665 * @retval VERR_ALREADY_EXISTS if the jump was found.
5666 * @param pNode The current patch to guest record to check.
5667 * @param pvUser The refresh state.
5668 */
static int patmR3PatchRefreshFindTrampolinePatch(PAVLU32NODECORE pNode, void *pvUser)
{
    PRECPATCHTOGUEST pPatch2GuestRec = (PRECPATCHTOGUEST)pNode;
    PPATMREFRESHPATCH pRefreshPatchState = (PPATMREFRESHPATCH)pvUser;
    PVM pVM = pRefreshPatchState->pVM;

    /* HC address of the trampoline instruction this patch-to-guest record maps (key is the patch-memory offset). */
    uint8_t *pPatchInstr = (uint8_t *)(pVM->patm.s.pPatchMemHC + pPatch2GuestRec->Core.Key);

    /*
     * Check if the patch instruction starts with a jump.
     * ASSUMES that there is no other patch to guest record that starts
     * with a jump.
     */
    if (*pPatchInstr == 0xE9)
    {
        /* Jump found, update the displacement. */
        /* Resolve the guest entry point into its address inside the NEW (refreshed) patch. */
        RTRCPTR pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pRefreshPatchState->pPatchRec,
                                                            pRefreshPatchState->pPatchTrampoline->pPrivInstrGC);
        /* rel32 of a near jump is relative to the end of the 5-byte instruction. */
        int32_t displ =  pPatchTargetGC - (pVM->patm.s.pPatchMemGC + pPatch2GuestRec->Core.Key + SIZEOF_NEARJUMP32);

        LogFlow(("Updating trampoline patch new patch target %RRv, new displacment %d (old was %d)\n",
                 pPatchTargetGC, displ, *(uint32_t *)&pPatchInstr[1]));

        /* Patch the displacement in place in patch memory. */
        *(uint32_t *)&pPatchInstr[1] = displ;
        return VERR_ALREADY_EXISTS; /** @todo better return code */
    }

    return VINF_SUCCESS;
}
5698
5699/**
5700 * Attempt to refresh the patch by recompiling its entire code block
5701 *
5702 * @returns VBox status code.
5703 * @param pVM Pointer to the VM.
5704 * @param pPatchRec Patch record
5705 */
int patmR3RefreshPatch(PVM pVM, PPATMPATCHREC pPatchRec)
{
    PPATCHINFO pPatch;
    int rc;
    RTRCPTR pInstrGC = pPatchRec->patch.pPrivInstrGC;
    PTRAMPREC pTrampolinePatchesHead = NULL;

    Log(("patmR3RefreshPatch: attempt to refresh patch at %RRv\n", pInstrGC));

    pPatch = &pPatchRec->patch;
    /* Only these patch types may be refreshed. */
    AssertReturn(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER), VERR_PATCHING_REFUSED);
    if (pPatch->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
    {
        if (!pPatch->pTrampolinePatchesHead)
        {
            /*
             * It is sometimes possible that there are trampoline patches to this patch
             * but they are not recorded (after a saved state load for example).
             * Refuse to refresh those patches.
             * Can hurt performance in theory if the patched code is modified by the guest
             * and is executed often. However most of the time states are saved after the guest
             * code was modified and is not updated anymore afterwards so this shouldn't be a
             * big problem.
             */
            Log(("patmR3RefreshPatch: refused because external jumps to this patch exist but the jumps are not recorded\n"));
            return VERR_PATCHING_REFUSED;
        }
        Log(("patmR3RefreshPatch: external jumps to this patch exist, updating\n"));
        pTrampolinePatchesHead = pPatch->pTrampolinePatchesHead;
    }

    /** Note: quite ugly to enable/disable/remove/insert old and new patches, but there's no easy way around it. */

    rc = PATMR3DisablePatch(pVM, pInstrGC);
    AssertRC(rc);

    /** Kick it out of the lookup tree to make sure PATMR3InstallPatch doesn't fail (hack alert) */
    RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
#ifdef VBOX_WITH_STATISTICS
    /* Deregister the old patch's STAM samples; the new patch registers its own. */
    if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
    {
        STAMR3Deregister(pVM, &pPatchRec->patch);
#ifndef DEBUG_sandervl
        STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
        STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
        STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
        STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
        STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
        STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
        STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
        STAMR3Deregister(pVM, &pPatchRec->patch.flags);
        STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
        STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
        STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
        STAMR3Deregister(pVM, &pPatchRec->patch.uState);
        STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
        STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
#endif
    }
#endif

    /** Note: We don't attempt to reuse patch memory here as it's quite common that the new code block requires more memory. */

    /* Attempt to install a new patch. */
    rc = PATMR3InstallPatch(pVM, pInstrGC, pPatch->flags & (PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT));
    if (RT_SUCCESS(rc))
    {
        RTRCPTR pPatchTargetGC;
        PPATMPATCHREC pNewPatchRec;

        /* Determine target address in new patch */
        pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pInstrGC);
        Assert(pPatchTargetGC);
        if (!pPatchTargetGC)
        {
            rc = VERR_PATCHING_REFUSED;
            goto failure;
        }

        /* Reset offset into patch memory to put the next code blocks right at the beginning. */
        pPatch->uCurPatchOffset = 0;

        /* insert jump to new patch in old patch block */
        rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC, false /* no lookup record */);
        if (RT_FAILURE(rc))
            goto failure;

        pNewPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
        Assert(pNewPatchRec); /* can't fail */

        /* Remove old patch (only do that when everything is finished) */
        int rc2 = patmR3RemovePatch(pVM, pPatchRec, true /* force removal */);
        AssertRC(rc2);

        /* Put the new patch back into the tree, because removing the old one kicked this one out. (hack alert) */
        bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pNewPatchRec->Core);
        Assert(fInserted); NOREF(fInserted);

        Log(("PATM: patmR3RefreshPatch: succeeded to refresh patch at %RRv \n", pInstrGC));
        STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshSuccess);

        /* Used by another patch, so don't remove it! */
        pNewPatchRec->patch.flags |= PATMFL_CODE_REFERENCED;

        if (pTrampolinePatchesHead)
        {
            /* Update all trampoline patches to jump to the new patch. */
            PTRAMPREC pTrampRec = NULL;
            PATMREFRESHPATCH RefreshPatch;

            RefreshPatch.pVM = pVM;
            RefreshPatch.pPatchRec = &pNewPatchRec->patch;

            pTrampRec = pTrampolinePatchesHead;

            /* Walk the linked list of trampolines that jump into the old patch. */
            while (pTrampRec)
            {
                PPATCHINFO pPatchTrampoline = &pTrampRec->pPatchTrampoline->patch;

                RefreshPatch.pPatchTrampoline = pPatchTrampoline;
                /*
                 * We have to find the right patch2guest record because there might be others
                 * for statistics.
                 */
                rc = RTAvlU32DoWithAll(&pPatchTrampoline->Patch2GuestAddrTree, true,
                                       patmR3PatchRefreshFindTrampolinePatch, &RefreshPatch);
                /* VERR_ALREADY_EXISTS signals that the callback found and fixed the jump. */
                Assert(rc == VERR_ALREADY_EXISTS);
                rc = VINF_SUCCESS;
                pTrampRec = pTrampRec->pNext;
            }
            /* Transfer trampoline ownership to the new patch record. */
            pNewPatchRec->patch.pTrampolinePatchesHead = pTrampolinePatchesHead;
            pNewPatchRec->patch.flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
            /* Clear the list of trampoline patches for the old patch (safety precaution). */
            pPatchRec->patch.pTrampolinePatchesHead = NULL;
        }
    }

failure:
    if (RT_FAILURE(rc))
    {
        LogRel(("PATM: patmR3RefreshPatch: failed to refresh patch at %RRv. Reactiving old one. \n", pInstrGC));

        /* Remove the new inactive patch */
        rc = PATMR3RemovePatch(pVM, pInstrGC);
        AssertRC(rc);

        /* Put the old patch back into the tree (or else it won't be saved) (hack alert) */
        bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
        Assert(fInserted); NOREF(fInserted);

        /* Enable again in case the dirty instruction is near the end and there are safe code paths. */
        int rc2 = PATMR3EnablePatch(pVM, pInstrGC);
        AssertRC(rc2);

        STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshFailed);
    }
    return rc;
}
5864
5865/**
5866 * Find patch for privileged instruction at specified location
5867 *
5868 * @returns Patch structure pointer if found; else NULL
5869 * @param pVM Pointer to the VM.
 * @param   pInstrGC        Guest context pointer to an instruction that might lie within 5 bytes of an existing patch jump.
5871 * @param fIncludeHints Include hinted patches or not
5872 *
5873 */
5874PPATCHINFO patmFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncludeHints)
5875{
5876 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5877 /* if the patch is enabled, the pointer is not identical to the privileged patch ptr and it lies within 5 bytes of this priv instr ptr, then we've got a hit! */
5878 if (pPatchRec)
5879 {
5880 if ( pPatchRec->patch.uState == PATCH_ENABLED
5881 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE)
5882 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5883 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5884 {
5885 Log(("Found active patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5886 return &pPatchRec->patch;
5887 }
5888 else
5889 if ( fIncludeHints
5890 && pPatchRec->patch.uState == PATCH_DISABLED
5891 && (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
5892 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5893 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5894 {
5895 Log(("Found HINT patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5896 return &pPatchRec->patch;
5897 }
5898 }
5899 return NULL;
5900}
5901
5902/**
5903 * Checks whether the GC address is inside a generated patch jump
5904 *
5905 * @returns true -> yes, false -> no
5906 * @param pVM Pointer to the VM.
5907 * @param pAddr Guest context address.
5908 * @param pPatchAddr Guest context patch address (if true).
5909 */
5910VMMR3_INT_DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatchAddr)
5911{
5912 RTRCPTR addr;
5913 PPATCHINFO pPatch;
5914
5915 Assert(!HMIsEnabled(pVM));
5916 if (PATMIsEnabled(pVM) == false)
5917 return false;
5918
5919 if (pPatchAddr == NULL)
5920 pPatchAddr = &addr;
5921
5922 *pPatchAddr = 0;
5923
5924 pPatch = patmFindActivePatchByEntrypoint(pVM, pAddr);
5925 if (pPatch)
5926 *pPatchAddr = pPatch->pPrivInstrGC;
5927
5928 return *pPatchAddr == 0 ? false : true;
5929}
5930
5931/**
5932 * Remove patch for privileged instruction at specified location
5933 *
5934 * @returns VBox status code.
5935 * @param pVM Pointer to the VM.
 * @param   pInstrGC    Guest context pointer to the privileged instruction.
5937 *
5938 * @note returns failure if patching is not allowed or possible
5939 *
5940 */
5941VMMR3_INT_DECL(int) PATMR3RemovePatch(PVM pVM, RTRCPTR pInstrGC)
5942{
5943 PPATMPATCHREC pPatchRec;
5944
5945 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5946 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5947 if (pPatchRec)
5948 {
5949 int rc = PATMR3DisablePatch(pVM, pInstrGC);
5950 if (rc == VWRN_PATCH_REMOVED)
5951 return VINF_SUCCESS;
5952
5953 return patmR3RemovePatch(pVM, pPatchRec, false);
5954 }
5955 AssertFailed();
5956 return VERR_PATCH_NOT_FOUND;
5957}
5958
5959/**
5960 * Mark patch as dirty
5961 *
5962 * @returns VBox status code.
5963 * @param pVM Pointer to the VM.
5964 * @param pPatch Patch record
5965 *
5966 * @note returns failure if patching is not allowed or possible
5967 *
5968 */
static int patmR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch)
{
    /* Put an int3 at the patch entry so stray callers trap instead of executing stale code. */
    if (pPatch->pPatchBlockOffset)
    {
        Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
        pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
        *PATCHCODE_PTR_HC(pPatch) = 0xCC;
    }

    STAM_COUNTER_INC(&pVM->patm.s.StatDirty);
    /* Put back the replaced instruction. */
    int rc = PATMR3DisablePatch(pVM, pPatch->pPrivInstrGC);
    if (rc == VWRN_PATCH_REMOVED)
        return VINF_SUCCESS;

    /* Note: we don't restore patch pages for patches that are not enabled! */
    /* Note: be careful when changing this behaviour!! */

    /* The patch pages are no longer marked for self-modifying code detection */
    if (pPatch->flags & PATMFL_CODE_MONITORED)
    {
        rc = patmRemovePatchPages(pVM, pPatch);
        AssertRCReturn(rc, rc);
    }
    pPatch->uState = PATCH_DIRTY;

    /* Paranoia; make sure this patch is not somewhere in the callchain, so prevent ret instructions from succeeding. */
    CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;

    return VINF_SUCCESS;
}
6000
6001/**
6002 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
6003 *
6004 * @returns VBox status code.
6005 * @param pVM Pointer to the VM.
6006 * @param pPatch Patch block structure pointer
6007 * @param pPatchGC GC address in patch block
6008 */
6009RTRCPTR patmPatchGCPtr2GuestGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pPatchGC)
6010{
6011 Assert(pPatch->Patch2GuestAddrTree);
6012 /* Get the closest record from below. */
6013 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->Patch2GuestAddrTree, pPatchGC - pVM->patm.s.pPatchMemGC, false);
6014 if (pPatchToGuestRec)
6015 return pPatchToGuestRec->pOrgInstrGC;
6016
6017 return 0;
6018}
6019
6020/**
6021 * Converts Guest code GC ptr to Patch code GC ptr (if found)
6022 *
6023 * @returns corresponding GC pointer in patch block
6024 * @param pVM Pointer to the VM.
6025 * @param pPatch Current patch block pointer
6026 * @param pInstrGC Guest context pointer to privileged instruction
6027 *
6028 */
6029RTRCPTR patmGuestGCPtrToPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
6030{
6031 if (pPatch->Guest2PatchAddrTree)
6032 {
6033 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
6034 if (pGuestToPatchRec)
6035 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
6036 }
6037
6038 return 0;
6039}
6040
6041/**
6042 * Converts Guest code GC ptr to Patch code GC ptr (if found)
6043 *
6044 * @returns corresponding GC pointer in patch block
6045 * @param pVM Pointer to the VM.
6046 * @param pInstrGC Guest context pointer to privileged instruction
6047 */
6048static RTRCPTR patmR3GuestGCPtrToPatchGCPtrSimple(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC)
6049{
6050 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
6051 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && pInstrGC >= pPatchRec->patch.pPrivInstrGC)
6052 return patmGuestGCPtrToPatchGCPtr(pVM, &pPatchRec->patch, pInstrGC);
6053 return NIL_RTRCPTR;
6054}
6055
6056/**
6057 * Converts Guest code GC ptr to Patch code GC ptr (or nearest from below if no
6058 * identical match)
6059 *
6060 * @returns corresponding GC pointer in patch block
6061 * @param pVM Pointer to the VM.
6062 * @param pPatch Current patch block pointer
6063 * @param pInstrGC Guest context pointer to privileged instruction
6064 *
6065 */
6066RTRCPTR patmGuestGCPtrToClosestPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
6067{
6068 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pInstrGC, false);
6069 if (pGuestToPatchRec)
6070 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
6071 return NIL_RTRCPTR;
6072}
6073
6074/**
6075 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
6076 *
6077 * @returns original GC instruction pointer or 0 if not found
6078 * @param pVM Pointer to the VM.
6079 * @param pPatchGC GC address in patch block
6080 * @param pEnmState State of the translated address (out)
6081 *
6082 */
VMMR3_INT_DECL(RTRCPTR) PATMR3PatchToGCPtr(PVM pVM, RTRCPTR pPatchGC, PATMTRANSSTATE *pEnmState)
{
    PPATMPATCHREC pPatchRec;
    void *pvPatchCoreOffset;
    RTRCPTR pPrivInstrGC;

    Assert(PATMIsPatchGCAddr(pVM, pPatchGC));
    Assert(!HMIsEnabled(pVM));
    /* Find the patch record owning this patch-memory address (keyed by offset). */
    pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchGC - pVM->patm.s.pPatchMemGC, false);
    if (pvPatchCoreOffset == 0)
    {
        Log(("PATMR3PatchToGCPtr failed for %RRv offset %x\n", pPatchGC, pPatchGC - pVM->patm.s.pPatchMemGC));
        return 0;
    }
    pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
    /* Translate the patch address back to the original guest instruction address. */
    pPrivInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, &pPatchRec->patch, pPatchGC);
    if (pEnmState)
    {
        AssertMsg(pPrivInstrGC && (   pPatchRec->patch.uState == PATCH_ENABLED
                                   || pPatchRec->patch.uState == PATCH_DIRTY
                                   || pPatchRec->patch.uState == PATCH_DISABLE_PENDING
                                   || pPatchRec->patch.uState == PATCH_UNUSABLE),
                  ("pPrivInstrGC=%RRv uState=%d\n", pPrivInstrGC, pPatchRec->patch.uState));

        /* Classify the translated address for the caller. */
        if (    !pPrivInstrGC
            ||   pPatchRec->patch.uState == PATCH_UNUSABLE
            ||   pPatchRec->patch.uState == PATCH_REFUSED)
        {
            pPrivInstrGC = 0;
            *pEnmState = PATMTRANS_FAILED;
        }
        else
        if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pPrivInstrGC)
        {
            /* Guest instruction right after one that inhibits interrupts (sti/mov ss). */
            *pEnmState = PATMTRANS_INHIBITIRQ;
        }
        else
        if (    pPatchRec->patch.uState == PATCH_ENABLED
            && !(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE))
            &&  pPrivInstrGC > pPatchRec->patch.pPrivInstrGC
            &&  pPrivInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
        {
            /* Address falls inside the 5-byte jump this patch wrote over guest code. */
            *pEnmState = PATMTRANS_OVERWRITTEN;
        }
        else
        if (patmFindActivePatchByEntrypoint(pVM, pPrivInstrGC))
        {
            /* Address falls inside the jump of some other active patch. */
            *pEnmState = PATMTRANS_OVERWRITTEN;
        }
        else
        if (pPrivInstrGC == pPatchRec->patch.pPrivInstrGC)
        {
            *pEnmState = PATMTRANS_PATCHSTART;
        }
        else
            *pEnmState = PATMTRANS_SAFE;
    }
    return pPrivInstrGC;
}
6142
6143/**
6144 * Returns the GC pointer of the patch for the specified GC address
6145 *
6146 * @returns VBox status code.
6147 * @param pVM Pointer to the VM.
6148 * @param pAddrGC Guest context address
6149 */
6150VMMR3_INT_DECL(RTRCPTR) PATMR3QueryPatchGCPtr(PVM pVM, RTRCPTR pAddrGC)
6151{
6152 PPATMPATCHREC pPatchRec;
6153
6154 Assert(!HMIsEnabled(pVM));
6155
6156 /* Find the patch record. */
6157 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pAddrGC);
6158 /** @todo we should only use patches that are enabled! always did this, but it's incorrect! */
6159 if (pPatchRec && (pPatchRec->patch.uState == PATCH_ENABLED || pPatchRec->patch.uState == PATCH_DIRTY))
6160 return PATCHCODE_PTR_GC(&pPatchRec->patch);
6161 return NIL_RTRCPTR;
6162}
6163
/**
 * Attempt to recover dirty instructions
 *
 * Works in two passes: first it scans forward from the faulting patch
 * instruction, collecting all adjacent dirty instructions (restoring each
 * one's original first opcode byte along the way) and removing their lookup
 * records; then, if everything seen was harmless, it re-copies the current
 * guest instructions over that span of the patch block and re-creates the
 * lookup records. On failure the span is filled with int3 breakpoints.
 *
 * @returns VBox status code. VERR_PATCHING_REFUSED if recovery is not possible.
 * @param   pVM                 Pointer to the VM.
 * @param   pCtx                Pointer to the guest CPU context.
 * @param   pPatch              Patch record.
 * @param   pPatchToGuestRec    Patch to guest address record.
 * @param   pEip                GC pointer of trapping instruction.
 */
static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch, PRECPATCHTOGUEST pPatchToGuestRec, RTRCPTR pEip)
{
    DISCPUSTATE CpuOld, CpuNew;
    uint8_t *pPatchInstrHC, *pCurPatchInstrHC;
    int rc;
    RTRCPTR pCurInstrGC, pCurPatchInstrGC;
    uint32_t cbDirty;
    PRECPATCHTOGUEST pRec;
    RTRCPTR const pOrgInstrGC = pPatchToGuestRec->pOrgInstrGC;
    PVMCPU pVCpu = VMMGetCpu0(pVM);
    Log(("patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv)\n", pEip, pOrgInstrGC));

    pRec = pPatchToGuestRec;
    pCurInstrGC = pOrgInstrGC;
    pCurPatchInstrGC = pEip;
    cbDirty = 0;
    pPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);

    /* Find all adjacent dirty instructions */
    while (true)
    {
        if (pRec->fJumpTarget)
        {
            /* Can't safely rewrite an instruction that is also a jump target. */
            LogRel(("PATM: patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv) ignored, because instruction in function was reused as target of jump\n", pEip, pOrgInstrGC));
            pRec->fDirty = false;
            return VERR_PATCHING_REFUSED;
        }

        /* Restore original instruction opcode byte so we can check if the write was indeed safe. */
        pCurPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
        *pCurPatchInstrHC = pRec->u8DirtyOpcode;

        /* Only harmless instructions are acceptable. */
        rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurPatchInstrGC, &CpuOld, 0);
        if (    RT_FAILURE(rc)
            ||  !(CpuOld.pCurInstr->fOpType & DISOPTYPE_HARMLESS))
        {
            /* Still account for this instruction's size (at least one byte)
               so the whole dirty span can be breakpointed below on failure. */
            if (RT_SUCCESS(rc))
                cbDirty += CpuOld.cbInstr;
            else
            if (!cbDirty)
                cbDirty = 1;
            break;
        }

#ifdef DEBUG
        char szBuf[256];
        DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
                           szBuf, sizeof(szBuf), NULL);
        Log(("DIRTY: %s\n", szBuf));
#endif
        /* Mark as clean; if we fail we'll let it always fault. */
        pRec->fDirty = false;

        /* Remove old lookup record. */
        patmr3RemoveP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrGC);
        pPatchToGuestRec = NULL;

        pCurPatchInstrGC += CpuOld.cbInstr;
        cbDirty += CpuOld.cbInstr;

        /* Let's see if there's another dirty instruction right after. */
        pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
        if (!pRec || !pRec->fDirty)
            break;  /* no more dirty instructions */

        /* In case of complex instructions the next guest instruction could be quite far off. */
        pCurPatchInstrGC = pRec->Core.Key + pVM->patm.s.pPatchMemGC;
    }

    /* Second pass: re-copy the current guest code over the dirty span,
       provided the first pass ended on a harmless instruction. */
    if (    RT_SUCCESS(rc)
        &&  (CpuOld.pCurInstr->fOpType & DISOPTYPE_HARMLESS)
       )
    {
        uint32_t cbLeft;

        pCurPatchInstrHC = pPatchInstrHC;
        pCurPatchInstrGC = pEip;
        cbLeft = cbDirty;

        while (cbLeft && RT_SUCCESS(rc))
        {
            bool fValidInstr;

            rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurInstrGC, &CpuNew, 0);

            fValidInstr = !!(CpuNew.pCurInstr->fOpType & DISOPTYPE_HARMLESS);
            if (    !fValidInstr
                &&  (CpuNew.pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW)
               )
            {
                RTRCPTR pTargetGC = PATMResolveBranch(&CpuNew, pCurInstrGC);

                if (    pTargetGC >= pOrgInstrGC
                    &&  pTargetGC <= pOrgInstrGC + cbDirty
                   )
                {
                    /* A relative jump to an instruction inside or to the end of the dirty block is acceptable. */
                    fValidInstr = true;
                }
            }

            /* If the instruction is completely harmless (which implies a 1:1 patch copy). */
            if (    rc == VINF_SUCCESS
                &&  CpuNew.cbInstr <= cbLeft /* must still fit */
                &&  fValidInstr
               )
            {
#ifdef DEBUG
                char szBuf[256];
                DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
                                   szBuf, sizeof(szBuf), NULL);
                Log(("NEW: %s\n", szBuf));
#endif

                /* Copy the new instruction. */
                rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pCurPatchInstrHC, pCurInstrGC, CpuNew.cbInstr);
                AssertRC(rc);

                /* Add a new lookup record for the duplicated instruction. */
                patmR3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
            }
            else
            {
#ifdef DEBUG
                char szBuf[256];
                DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
                                   szBuf, sizeof(szBuf), NULL);
                Log(("NEW: %s (FAILED)\n", szBuf));
#endif
                /* Restore the old lookup record for the duplicated instruction. */
                patmR3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);

                /** @todo in theory we need to restore the lookup records for the remaining dirty instructions too! */
                rc = VERR_PATCHING_REFUSED;
                break;
            }
            pCurInstrGC += CpuNew.cbInstr;
            pCurPatchInstrHC += CpuNew.cbInstr;
            pCurPatchInstrGC += CpuNew.cbInstr;
            cbLeft -= CpuNew.cbInstr;

            /* Check if we expanded a complex guest instruction into a patch stream (e.g. call) */
            if (!cbLeft)
            {
                /* If the next patch instruction doesn't correspond to the next guest instruction, then we have some extra room to fill. */
                if (RTAvlU32Get(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC) == NULL)
                {
                    pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
                    if (pRec)
                    {
                        unsigned cbFiller = pRec->Core.Key + pVM->patm.s.pPatchMemGC - pCurPatchInstrGC;
                        uint8_t *pPatchFillHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);

                        Assert(!pRec->fDirty);

                        Log(("Room left in patched instruction stream (%d bytes)\n", cbFiller));
                        if (cbFiller >= SIZEOF_NEARJUMP32)
                        {
                            /* Enough room for a near jump (0xE9) over the gap. */
                            pPatchFillHC[0] = 0xE9;
                            *(uint32_t *)&pPatchFillHC[1] = cbFiller - SIZEOF_NEARJUMP32;
#ifdef DEBUG
                            char szBuf[256];
                            DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC,
                                               DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
                            Log(("FILL: %s\n", szBuf));
#endif
                        }
                        else
                        {
                            /* Gap too small for a jump; pad with NOPs instead. */
                            for (unsigned i = 0; i < cbFiller; i++)
                            {
                                pPatchFillHC[i] = 0x90; /* NOP */
#ifdef DEBUG
                                char szBuf[256];
                                DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC + i,
                                                   DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
                                Log(("FILL: %s\n", szBuf));
#endif
                            }
                        }
                    }
                }
            }
        }
    }
    else
        rc = VERR_PATCHING_REFUSED;

    if (RT_SUCCESS(rc))
    {
        STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyGood);
    }
    else
    {
        STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyBad);
        Assert(cbDirty);

        /* Mark the whole instruction stream with breakpoints. */
        if (cbDirty)
            memset(pPatchInstrHC, 0xCC, cbDirty);

        if (    pVM->patm.s.fOutOfMemory == false
            &&  (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER)))
        {
            rc = patmR3RefreshPatch(pVM, pPatch);
            if (RT_FAILURE(rc))
            {
                LogRel(("PATM: Failed to refresh dirty patch at %RRv. Disabling it.\n", pPatch->patch.pPrivInstrGC));
            }
            /* Even if we succeed, we must go back to the original instruction as the patched one could be invalid. */
            rc = VERR_PATCHING_REFUSED;
        }
    }
    return rc;
}
6390
6391/**
6392 * Handle trap inside patch code
6393 *
6394 * @returns VBox status code.
6395 * @param pVM Pointer to the VM.
6396 * @param pCtx Pointer to the guest CPU context.
6397 * @param pEip GC pointer of trapping instruction.
6398 * @param ppNewEip GC pointer to new instruction.
6399 */
6400VMMR3_INT_DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *ppNewEip)
6401{
6402 PPATMPATCHREC pPatch = 0;
6403 void *pvPatchCoreOffset;
6404 RTRCUINTPTR offset;
6405 RTRCPTR pNewEip;
6406 int rc ;
6407 PRECPATCHTOGUEST pPatchToGuestRec = 0;
6408 PVMCPU pVCpu = VMMGetCpu0(pVM);
6409
6410 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
6411 Assert(pVM->cCpus == 1);
6412
6413 pNewEip = 0;
6414 *ppNewEip = 0;
6415
6416 STAM_PROFILE_ADV_START(&pVM->patm.s.StatHandleTrap, a);
6417
6418 /* Find the patch record. */
6419 /* Note: there might not be a patch to guest translation record (global function) */
6420 offset = pEip - pVM->patm.s.pPatchMemGC;
6421 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
6422 if (pvPatchCoreOffset)
6423 {
6424 pPatch = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6425
6426 Assert(offset >= pPatch->patch.pPatchBlockOffset && offset < pPatch->patch.pPatchBlockOffset + pPatch->patch.cbPatchBlockSize);
6427
6428 if (pPatch->patch.uState == PATCH_DIRTY)
6429 {
6430 Log(("PATMR3HandleTrap: trap in dirty patch at %RRv\n", pEip));
6431 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6432 {
6433 /* Function duplication patches set fPIF to 1 on entry */
6434 pVM->patm.s.pGCStateHC->fPIF = 1;
6435 }
6436 }
6437 else
6438 if (pPatch->patch.uState == PATCH_DISABLED)
6439 {
6440 Log(("PATMR3HandleTrap: trap in disabled patch at %RRv\n", pEip));
6441 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6442 {
6443 /* Function duplication patches set fPIF to 1 on entry */
6444 pVM->patm.s.pGCStateHC->fPIF = 1;
6445 }
6446 }
6447 else
6448 if (pPatch->patch.uState == PATCH_DISABLE_PENDING)
6449 {
6450 RTRCPTR pPrivInstrGC = pPatch->patch.pPrivInstrGC;
6451
6452 Log(("PATMR3HandleTrap: disable operation is pending for patch at %RRv\n", pPatch->patch.pPrivInstrGC));
6453 rc = PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6454 AssertReleaseMsg(rc != VWRN_PATCH_REMOVED, ("PATMR3DisablePatch removed patch at %RRv\n", pPrivInstrGC));
6455 AssertMsg(pPatch->patch.uState == PATCH_DISABLED || pPatch->patch.uState == PATCH_UNUSABLE, ("Unexpected failure to disable patch state=%d rc=%Rrc\n", pPatch->patch.uState, rc));
6456 }
6457
6458 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, offset, false);
6459 AssertReleaseMsg(pPatchToGuestRec, ("PATMR3HandleTrap: Unable to find corresponding guest address for %RRv (offset %x)\n", pEip, offset));
6460
6461 pNewEip = pPatchToGuestRec->pOrgInstrGC;
6462 pPatch->patch.cTraps++;
6463 PATM_STAT_FAULT_INC(&pPatch->patch);
6464 }
6465 else
6466 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 0, ("PATMR3HandleTrap: Unable to find translation record for %RRv (PIF=0)\n", pEip));
6467
6468 /* Check if we were interrupted in PATM generated instruction code. */
6469 if (pVM->patm.s.pGCStateHC->fPIF == 0)
6470 {
6471 DISCPUSTATE Cpu;
6472 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pEip, &Cpu, "PIF Trap: ");
6473 AssertRC(rc);
6474
6475 if ( rc == VINF_SUCCESS
6476 && ( Cpu.pCurInstr->uOpcode == OP_PUSHF
6477 || Cpu.pCurInstr->uOpcode == OP_PUSH
6478 || Cpu.pCurInstr->uOpcode == OP_CALL)
6479 )
6480 {
6481 uint64_t fFlags;
6482
6483 STAM_COUNTER_INC(&pVM->patm.s.StatPushTrap);
6484
6485 if (Cpu.pCurInstr->uOpcode == OP_PUSH)
6486 {
6487 rc = PGMShwGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6488 if ( rc == VINF_SUCCESS
6489 && ((fFlags & (X86_PTE_P|X86_PTE_RW)) == (X86_PTE_P|X86_PTE_RW)) )
6490 {
6491 /* The stack address is fine, so the push argument is a pointer -> emulate this instruction */
6492
6493 /* Reset the PATM stack. */
6494 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6495
6496 pVM->patm.s.pGCStateHC->fPIF = 1;
6497
6498 Log(("Faulting push -> go back to the original instruction\n"));
6499
6500 /* continue at the original instruction */
6501 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6502 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6503 return VINF_SUCCESS;
6504 }
6505 }
6506
6507 /* Typical pushf (most patches)/push (call patch) trap because of a monitored page. */
6508 rc = PGMShwMakePageWritable(pVCpu, pCtx->esp, 0 /*fFlags*/);
6509 AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
6510 if (rc == VINF_SUCCESS)
6511 {
6512 /* The guest page *must* be present. */
6513 rc = PGMGstGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6514 if ( rc == VINF_SUCCESS
6515 && (fFlags & X86_PTE_P))
6516 {
6517 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6518 return VINF_PATCH_CONTINUE;
6519 }
6520 }
6521 }
6522 else
6523 if (pPatch->patch.pPrivInstrGC == pNewEip)
6524 {
6525 /* Invalidated patch or first instruction overwritten.
6526 * We can ignore the fPIF state in this case.
6527 */
6528 /* Reset the PATM stack. */
6529 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6530
6531 Log(("Call to invalidated patch -> go back to the original instruction\n"));
6532
6533 pVM->patm.s.pGCStateHC->fPIF = 1;
6534
6535 /* continue at the original instruction */
6536 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6537 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6538 return VINF_SUCCESS;
6539 }
6540
6541 char szBuf[256];
6542 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pEip, DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6543
6544 /* Very bad. We crashed in emitted code. Probably stack? */
6545 if (pPatch)
6546 {
6547 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6548 ("Crash in patch code %RRv (%RRv) esp=%RX32\nPatch state=%x flags=%RX64 fDirty=%d\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), pPatch->patch.uState, pPatch->patch.flags, pPatchToGuestRec->fDirty, szBuf));
6549 }
6550 else
6551 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6552 ("Crash in patch code %RRv (%RRv) esp=%RX32\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), szBuf));
6553 EMR3FatalError(pVCpu, VERR_INTERNAL_ERROR);
6554 }
6555
6556 /* From here on, we must have a valid patch to guest translation. */
6557 if (pvPatchCoreOffset == 0)
6558 {
6559 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6560 AssertMsgFailed(("PATMR3HandleTrap: patch not found at address %RRv!!\n", pEip));
6561 return VERR_PATCH_NOT_FOUND;
6562 }
6563
6564 /* Take care of dirty/changed instructions. */
6565 if (pPatchToGuestRec->fDirty)
6566 {
6567 Assert(pPatchToGuestRec->Core.Key == offset);
6568 Assert(pVM->patm.s.pGCStateHC->fPIF == 1);
6569
6570 rc = patmR3HandleDirtyInstr(pVM, pCtx, pPatch, pPatchToGuestRec, pEip);
6571 if (RT_SUCCESS(rc))
6572 {
6573 /* Retry the current instruction. */
6574 pNewEip = pEip;
6575 rc = VINF_PATCH_CONTINUE; /* Continue at current patch instruction. */
6576 }
6577 else
6578 {
6579 /* Reset the PATM stack. */
6580 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6581
6582 rc = VINF_SUCCESS; /* Continue at original instruction. */
6583 }
6584
6585 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6586 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6587 return rc;
6588 }
6589
6590#ifdef VBOX_STRICT
6591 if (pPatch->patch.flags & PATMFL_DUPLICATE_FUNCTION)
6592 {
6593 DISCPUSTATE cpu;
6594 bool disret;
6595 uint32_t cbInstr;
6596 PATMP2GLOOKUPREC cacheRec;
6597 RT_ZERO(cacheRec);
6598 cacheRec.pPatch = &pPatch->patch;
6599
6600 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
6601 &cpu, &cbInstr);
6602 if (cacheRec.Lock.pvMap)
6603 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6604
6605 if (disret && cpu.pCurInstr->uOpcode == OP_RETN)
6606 {
6607 RTRCPTR retaddr;
6608 PCPUMCTX pCtx2;
6609
6610 pCtx2 = CPUMQueryGuestCtxPtr(pVCpu);
6611
6612 rc = PGMPhysSimpleReadGCPtr(pVCpu, &retaddr, pCtx2->esp, sizeof(retaddr));
6613 AssertRC(rc);
6614
6615 Log(("Return failed at %RRv (%RRv)\n", pEip, pNewEip));
6616 Log(("Expected return address %RRv found address %RRv Psp=%x\n", pVM->patm.s.pGCStackHC[(pVM->patm.s.pGCStateHC->Psp+PATM_STACK_SIZE)/sizeof(RTRCPTR)], retaddr, pVM->patm.s.pGCStateHC->Psp));
6617 }
6618 }
6619#endif
6620
6621 /* Return original address, correct by subtracting the CS base address. */
6622 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6623
6624 /* Reset the PATM stack. */
6625 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6626
6627 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pNewEip)
6628 {
6629 /* Must be a faulting instruction after sti; currently only sysexit, hlt or iret */
6630 Log(("PATMR3HandleTrap %RRv -> inhibit irqs set!\n", pEip));
6631#ifdef VBOX_STRICT
6632 DISCPUSTATE cpu;
6633 bool disret;
6634 uint32_t cbInstr;
6635 PATMP2GLOOKUPREC cacheRec;
6636 RT_ZERO(cacheRec);
6637 cacheRec.pPatch = &pPatch->patch;
6638
6639 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_ORGCODE,
6640 &cpu, &cbInstr);
6641 if (cacheRec.Lock.pvMap)
6642 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6643
6644 if (disret && (cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_INT3))
6645 {
6646 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
6647 &cpu, &cbInstr);
6648 if (cacheRec.Lock.pvMap)
6649 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6650
6651 Assert(cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_IRET);
6652 }
6653#endif
6654 EMSetInhibitInterruptsPC(pVCpu, pNewEip);
6655 pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts = 0;
6656 }
6657
6658 Log2(("pPatchBlockGC %RRv - pEip %RRv corresponding GC address %RRv\n", PATCHCODE_PTR_GC(&pPatch->patch), pEip, pNewEip));
6659 DBGFR3_DISAS_INSTR_LOG(pVCpu, pCtx->cs.Sel, pNewEip, "PATCHRET: ");
6660 if (pNewEip >= pPatch->patch.pPrivInstrGC && pNewEip < pPatch->patch.pPrivInstrGC + pPatch->patch.cbPatchJump)
6661 {
6662 /* We can't jump back to code that we've overwritten with a 5 byte jump! */
6663 Log(("Disabling patch at location %RRv due to trap too close to the privileged instruction \n", pPatch->patch.pPrivInstrGC));
6664 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6665 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6666 return VERR_PATCH_DISABLED;
6667 }
6668
6669#ifdef PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
6670 /** @todo compare to nr of successful runs. add some aging algorithm and determine the best time to disable the patch */
6671 if (pPatch->patch.cTraps > MAX_PATCH_TRAPS)
6672 {
6673 Log(("Disabling patch at location %RRv due to too many traps inside patch code\n", pPatch->patch.pPrivInstrGC));
6674 //we are only wasting time, back out the patch
6675 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6676 pTrapRec->pNextPatchInstr = 0;
6677 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6678 return VERR_PATCH_DISABLED;
6679 }
6680#endif
6681
6682 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6683 return VINF_SUCCESS;
6684}
6685
6686
6687/**
6688 * Handle page-fault in monitored page
6689 *
6690 * @returns VBox status code.
6691 * @param pVM Pointer to the VM.
6692 */
6693VMMR3_INT_DECL(int) PATMR3HandleMonitoredPage(PVM pVM)
6694{
6695 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
6696
6697 RTRCPTR addr = pVM->patm.s.pvFaultMonitor;
6698 addr &= PAGE_BASE_GC_MASK;
6699
6700 int rc = PGMHandlerVirtualDeregister(pVM, addr);
6701 AssertRC(rc); NOREF(rc);
6702
6703 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, false);
6704 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) == PAGE_ADDRESS(addr))
6705 {
6706 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6707 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6708 rc = PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6709 if (rc == VWRN_PATCH_REMOVED)
6710 return VINF_SUCCESS;
6711
6712 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6713
6714 if (addr == pPatchRec->patch.pPrivInstrGC)
6715 addr++;
6716 }
6717
6718 for(;;)
6719 {
6720 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, true);
6721
6722 if (!pPatchRec || PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) != PAGE_ADDRESS(addr))
6723 break;
6724
6725 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
6726 {
6727 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6728 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6729 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6730 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6731 }
6732 addr = pPatchRec->patch.pPrivInstrGC + 1;
6733 }
6734
6735 pVM->patm.s.pvFaultMonitor = 0;
6736 return VINF_SUCCESS;
6737}
6738
6739
6740#ifdef VBOX_WITH_STATISTICS
6741
6742static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch)
6743{
6744 if (pPatch->flags & PATMFL_SYSENTER)
6745 {
6746 return "SYSENT";
6747 }
6748 else
6749 if (pPatch->flags & (PATMFL_TRAPHANDLER|PATMFL_INTHANDLER))
6750 {
6751 static char szTrap[16];
6752 uint32_t iGate;
6753
6754 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
6755 if (iGate < 256)
6756 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-%2X" : "TRAP-%2X", iGate);
6757 else
6758 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-??" : "TRAP-??");
6759 return szTrap;
6760 }
6761 else
6762 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
6763 return "DUPFUNC";
6764 else
6765 if (pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL)
6766 return "FUNCCALL";
6767 else
6768 if (pPatch->flags & PATMFL_TRAMPOLINE)
6769 return "TRAMP";
6770 else
6771 return patmGetInstructionString(pPatch->opcode, pPatch->flags);
6772}
6773
6774static const char *PATMPatchState(PVM pVM, PPATCHINFO pPatch)
6775{
6776 NOREF(pVM);
6777 switch(pPatch->uState)
6778 {
6779 case PATCH_ENABLED:
6780 return "ENA";
6781 case PATCH_DISABLED:
6782 return "DIS";
6783 case PATCH_DIRTY:
6784 return "DIR";
6785 case PATCH_UNUSABLE:
6786 return "UNU";
6787 case PATCH_REFUSED:
6788 return "REF";
6789 case PATCH_DISABLE_PENDING:
6790 return "DIP";
6791 default:
6792 AssertFailed();
6793 return " ";
6794 }
6795}
6796
6797/**
6798 * Resets the sample.
6799 * @param pVM Pointer to the VM.
6800 * @param pvSample The sample registered using STAMR3RegisterCallback.
6801 */
6802static void patmResetStat(PVM pVM, void *pvSample)
6803{
6804 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6805 Assert(pPatch);
6806
6807 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A = 0;
6808 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B = 0;
6809}
6810
6811/**
6812 * Prints the sample into the buffer.
6813 *
6814 * @param pVM Pointer to the VM.
6815 * @param pvSample The sample registered using STAMR3RegisterCallback.
6816 * @param pszBuf The buffer to print into.
6817 * @param cchBuf The size of the buffer.
6818 */
6819static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf)
6820{
6821 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6822 Assert(pPatch);
6823
6824 Assert(pPatch->uState != PATCH_REFUSED);
6825 Assert(!(pPatch->flags & (PATMFL_REPLACE_FUNCTION_CALL|PATMFL_MMIO_ACCESS)));
6826
6827 RTStrPrintf(pszBuf, cchBuf, "size %04x ->%3s %8s - %08d - %08d",
6828 pPatch->cbPatchBlockSize, PATMPatchState(pVM, pPatch), PATMPatchType(pVM, pPatch),
6829 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A, pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B);
6830}
6831
6832/**
6833 * Returns the GC address of the corresponding patch statistics counter
6834 *
6835 * @returns Stat address
6836 * @param pVM Pointer to the VM.
6837 * @param pPatch Patch structure
6838 */
6839RTRCPTR patmPatchQueryStatAddress(PVM pVM, PPATCHINFO pPatch)
6840{
6841 Assert(pPatch->uPatchIdx != PATM_STAT_INDEX_NONE);
6842 return pVM->patm.s.pStatsGC + sizeof(STAMRATIOU32) * pPatch->uPatchIdx + RT_OFFSETOF(STAMRATIOU32, u32A);
6843}
6844
6845#endif /* VBOX_WITH_STATISTICS */
6846#ifdef VBOX_WITH_DEBUGGER
6847
6848/**
6849 * The '.patmoff' command.
6850 *
6851 * @returns VBox status.
6852 * @param pCmd Pointer to the command descriptor (as registered).
6853 * @param pCmdHlp Pointer to command helper functions.
6854 * @param pVM Pointer to the current VM (if any).
6855 * @param paArgs Pointer to (readonly) array of arguments.
6856 * @param cArgs Number of arguments in the array.
6857 */
6858static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
6859{
6860 /*
6861 * Validate input.
6862 */
6863 NOREF(cArgs); NOREF(paArgs);
6864 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
6865 PVM pVM = pUVM->pVM;
6866 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
6867
6868 if (HMIsEnabled(pVM))
6869 return DBGCCmdHlpPrintf(pCmdHlp, "PATM is permanently disabled by HM.\n");
6870
6871 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, DisableAllPatches, pVM);
6872 PATMR3AllowPatching(pVM->pUVM, false);
6873 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching disabled\n");
6874}
6875
6876/**
6877 * The '.patmon' command.
6878 *
6879 * @returns VBox status.
6880 * @param pCmd Pointer to the command descriptor (as registered).
6881 * @param pCmdHlp Pointer to command helper functions.
6882 * @param pVM Pointer to the current VM (if any).
6883 * @param paArgs Pointer to (readonly) array of arguments.
6884 * @param cArgs Number of arguments in the array.
6885 */
6886static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
6887{
6888 /*
6889 * Validate input.
6890 */
6891 NOREF(cArgs); NOREF(paArgs);
6892 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
6893 PVM pVM = pUVM->pVM;
6894 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
6895
6896 if (HMIsEnabled(pVM))
6897 return DBGCCmdHlpPrintf(pCmdHlp, "PATM is permanently disabled by HM.\n");
6898
6899 PATMR3AllowPatching(pVM->pUVM, true);
6900 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, EnableAllPatches, pVM);
6901 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching enabled\n");
6902}
6903
6904#endif /* VBOX_WITH_DEBUGGER */
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette