VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PATM.cpp@ 46150

Last change on this file since 46150 was 46150, checked in by vboxsync, 12 years ago

PATM: Patch symbols.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 262.1 KB
1/* $Id: PATM.cpp 46150 2013-05-17 17:21:45Z vboxsync $ */
2/** @file
3 * PATM - Dynamic Guest OS Patching Manager
4 *
5 * @note Never ever reuse patch memory!!
6 */
7
8/*
9 * Copyright (C) 2006-2013 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_PATM
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/stam.h>
26#include <VBox/vmm/pgm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/cpumdis.h>
29#include <VBox/vmm/iom.h>
30#include <VBox/vmm/mm.h>
31#include <VBox/vmm/em.h>
32#include <VBox/vmm/hm.h>
33#include <VBox/vmm/ssm.h>
34#include <VBox/vmm/trpm.h>
35#include <VBox/vmm/cfgm.h>
36#include <VBox/param.h>
37#include <VBox/vmm/selm.h>
38#include <VBox/vmm/csam.h>
39#include <iprt/avl.h>
40#include "PATMInternal.h"
41#include "PATMPatch.h"
42#include <VBox/vmm/vm.h>
43#include <VBox/vmm/uvm.h>
44#include <VBox/dbg.h>
45#include <VBox/err.h>
46#include <VBox/log.h>
47#include <iprt/assert.h>
48#include <iprt/asm.h>
49#include <VBox/dis.h>
50#include <VBox/disopcode.h>
51#include "internal/pgm.h"
52
53#include <iprt/string.h>
54#include "PATMA.h"
55
56//#define PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
57//#define PATM_DISABLE_ALL
58
59/**
60 * Refresh trampoline patch state.
61 */
62typedef struct PATMREFRESHPATCH
63{
64 /** Pointer to the VM structure. */
65 PVM pVM;
66 /** The trampoline patch record. */
67 PPATCHINFO pPatchTrampoline;
68 /** The new patch we want to jump to. */
69 PPATCHINFO pPatchRec;
70} PATMREFRESHPATCH, *PPATMREFRESHPATCH;
71
72
73#define PATMREAD_RAWCODE 1 /* read code as-is */
74#define PATMREAD_ORGCODE 2 /* read original guest opcode bytes; not the patched bytes */
75#define PATMREAD_NOCHECK 4 /* don't check for patch conflicts */
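/* Note: the PATMREAD_XXX flags above are passed to the disassembler byte reader
   (patmReadBytes) through PATMDISASM::fReadFlags. */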
76
77/*
78 * Private structure used during disassembly
79 */
80typedef struct
81{
82 PVM pVM; /**< Pointer to the VM. */
83 PPATCHINFO pPatchInfo; /**< The patch record being disassembled. */
84 R3PTRTYPE(uint8_t *) pbInstrHC; /**< HC pointer corresponding to pInstrGC (byte read cache). */
85 RTRCPTR pInstrGC; /**< GC address of the instruction being disassembled. */
86 uint32_t fReadFlags; /**< PATMREAD_XXX flags controlling how bytes are read. */
87} PATMDISASM, *PPATMDISASM;
88
89
90/*******************************************************************************
91* Internal Functions *
92*******************************************************************************/
93
94static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pPatch);
95static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
96static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
97
98#ifdef LOG_ENABLED // keep gcc quiet
99static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC);
100#endif
101#ifdef VBOX_WITH_STATISTICS
102static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch);
103static void patmResetStat(PVM pVM, void *pvSample);
104static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf);
105#endif
106
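/* The two macros below convert between the ring-3 (HC) and raw-mode (GC)
   mappings of the patch memory block; both mappings cover the same contiguous
   block, so the conversion simply rebases the pointer on the other mapping.
   Illustrative usage (hypothetical local variables):
       RTRCPTR  pPatchGC = patmPatchHCPtr2PatchGCPtr(pVM, pPatchHC);
       uint8_t *pPatchHC = patmPatchGCPtr2PatchHCPtr(pVM, pPatchGC); */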
107#define patmPatchHCPtr2PatchGCPtr(pVM, pHC) (pVM->patm.s.pPatchMemGC + (pHC - pVM->patm.s.pPatchMemHC))
108#define patmPatchGCPtr2PatchHCPtr(pVM, pGC) (pVM->patm.s.pPatchMemHC + (pGC - pVM->patm.s.pPatchMemGC))
109
110static int patmReinit(PVM pVM);
111static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam);
112static RTRCPTR patmR3GuestGCPtrToPatchGCPtrSimple(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC);
113static int patmR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch);
114
115#ifdef VBOX_WITH_DEBUGGER
116static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM);
117static FNDBGCCMD patmr3CmdOn;
118static FNDBGCCMD patmr3CmdOff;
119
120/** Command descriptors. */
121static const DBGCCMD g_aCmds[] =
122{
123 /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, fFlags, pfnHandler, pszSyntax, pszDescription */
124 { "patmon", 0, 0, NULL, 0, 0, patmr3CmdOn, "", "Enable patching." },
125 { "patmoff", 0, 0, NULL, 0, 0, patmr3CmdOff, "", "Disable patching." },
126};
127#endif
128
129/* Don't want to break saved states, so put it here as a global variable. */
130static unsigned int cIDTHandlersDisabled = 0;
131
132/**
133 * Initializes the PATM.
134 *
135 * @returns VBox status code.
136 * @param pVM Pointer to the VM.
137 */
138VMMR3_INT_DECL(int) PATMR3Init(PVM pVM)
139{
140 int rc;
141
142 /*
143 * We only need a saved state dummy loader if HM is enabled.
144 */
145 if (HMIsEnabled(pVM))
146 {
147 pVM->fPATMEnabled = false;
148 return SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SSM_VERSION, 0,
149 NULL, NULL, NULL,
150 NULL, NULL, NULL,
151 NULL, patmR3LoadDummy, NULL);
152 }
153
154 /*
155 * Raw-mode.
156 */
157 Log(("PATMR3Init: Patch record size %d\n", sizeof(PATCHINFO)));
158
159 /* These values can't change as they are hardcoded in patch code (old saved states!) */
160 AssertCompile(VMCPU_FF_TIMER == RT_BIT_32(2));
161 AssertCompile(VM_FF_REQUEST == VMCPU_FF_REQUEST);
162 AssertCompile(VMCPU_FF_INTERRUPT_APIC == RT_BIT_32(0));
163 AssertCompile(VMCPU_FF_INTERRUPT_PIC == RT_BIT_32(1));
164
165 AssertReleaseMsg(PATMInterruptFlag == (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST),
166 ("Interrupt flags out of sync!! PATMInterruptFlag=%#x expected %#x. broken assembler?\n", PATMInterruptFlag, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST));
167
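 /*
  * The single hyper-heap allocation below is carved up by the assignments that
  * follow: patch code (PATCH_MEMORY_SIZE), a spare page for code overruns, the
  * PATM stack (PATM_STACK_TOTAL_SIZE), the GC state page, and finally the
  * statistics area (PATM_STAT_MEMSIZE).
  */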
168 /* Allocate patch memory and GC patch state memory. */
169 pVM->patm.s.cbPatchMem = PATCH_MEMORY_SIZE;
170 /* Add another page in case the generated code is much larger than expected. */
171 /** @todo bad safety precaution */
172 rc = MMR3HyperAllocOnceNoRel(pVM, PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE + PATM_STAT_MEMSIZE, PAGE_SIZE, MM_TAG_PATM, (void **)&pVM->patm.s.pPatchMemHC);
173 if (RT_FAILURE(rc))
174 {
175 Log(("MMHyperAlloc failed with %Rrc\n", rc));
176 return rc;
177 }
178 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
179
180 /* PATM stack page for call instruction execution. (Two parts: one for our private stack and one to store the original return address.) */
181 pVM->patm.s.pGCStackHC = (RTRCPTR *)(pVM->patm.s.pPatchMemHC + PATCH_MEMORY_SIZE + PAGE_SIZE);
182 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
183
184 patmR3DbgInit(pVM);
185
186 /*
187 * Hypervisor memory for GC status data (read/write)
188 *
189 * Note1: This is non-critical data; if trashed by the guest, then it will only cause problems for itself
190 * Note2: This doesn't really belong here, but we need access to it for relocation purposes
191 *
192 */
193 Assert(sizeof(PATMGCSTATE) < PAGE_SIZE); /* Note: hardcoded dependencies on this exist. */
194 pVM->patm.s.pGCStateHC = (PPATMGCSTATE)((uint8_t *)pVM->patm.s.pGCStackHC + PATM_STACK_TOTAL_SIZE);
195 pVM->patm.s.pGCStateGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
196
197 /* Hypervisor memory for patch statistics */
198 pVM->patm.s.pStatsHC = (PSTAMRATIOU32)((uint8_t *)pVM->patm.s.pGCStateHC + PAGE_SIZE);
199 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
200
201 /* Memory for patch lookup trees. */
202 rc = MMHyperAlloc(pVM, sizeof(*pVM->patm.s.PatchLookupTreeHC), 0, MM_TAG_PATM, (void **)&pVM->patm.s.PatchLookupTreeHC);
203 AssertRCReturn(rc, rc);
204 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
205
206#ifdef RT_ARCH_AMD64 /* see patmReinit(). */
207 /* Check CFGM option. */
208 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "PATMEnabled", &pVM->fPATMEnabled);
209 if (RT_FAILURE(rc))
210# ifdef PATM_DISABLE_ALL
211 pVM->fPATMEnabled = false;
212# else
213 pVM->fPATMEnabled = true;
214# endif
215#endif
216
217 rc = patmReinit(pVM);
218 AssertRC(rc);
219 if (RT_FAILURE(rc))
220 return rc;
221
222 /*
223 * Register save and load state notifiers.
224 */
225 rc = SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SSM_VERSION, sizeof(pVM->patm.s) + PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE,
226 NULL, NULL, NULL,
227 NULL, patmR3Save, NULL,
228 NULL, patmR3Load, NULL);
229 AssertRCReturn(rc, rc);
230
231#ifdef VBOX_WITH_DEBUGGER
232 /*
233 * Debugger commands.
234 */
235 static bool s_fRegisteredCmds = false;
236 if (!s_fRegisteredCmds)
237 {
238 int rc2 = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
239 if (RT_SUCCESS(rc2))
240 s_fRegisteredCmds = true;
241 }
242#endif
243
244#ifdef VBOX_WITH_STATISTICS
245 STAM_REG(pVM, &pVM->patm.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/PATM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
246 STAM_REG(pVM, &pVM->patm.s.StatPATMMemoryUsed,STAMTYPE_COUNTER, "/PATM/MemoryUsed", STAMUNIT_OCCURENCES, "The amount of hypervisor heap used for patches.");
247 STAM_REG(pVM, &pVM->patm.s.StatDisabled, STAMTYPE_COUNTER, "/PATM/Patch/Disabled", STAMUNIT_OCCURENCES, "Number of times patches were disabled.");
248 STAM_REG(pVM, &pVM->patm.s.StatEnabled, STAMTYPE_COUNTER, "/PATM/Patch/Enabled", STAMUNIT_OCCURENCES, "Number of times patches were enabled.");
249 STAM_REG(pVM, &pVM->patm.s.StatDirty, STAMTYPE_COUNTER, "/PATM/Patch/Dirty", STAMUNIT_OCCURENCES, "Number of times patches were marked dirty.");
250 STAM_REG(pVM, &pVM->patm.s.StatUnusable, STAMTYPE_COUNTER, "/PATM/Patch/Unusable", STAMUNIT_OCCURENCES, "Number of unusable patches (conflicts).");
251 STAM_REG(pVM, &pVM->patm.s.StatInstalled, STAMTYPE_COUNTER, "/PATM/Patch/Installed", STAMUNIT_OCCURENCES, "Number of installed patches.");
252 STAM_REG(pVM, &pVM->patm.s.StatInt3Callable, STAMTYPE_COUNTER, "/PATM/Patch/Int3Callable", STAMUNIT_OCCURENCES, "Number of cli patches turned into int3 patches.");
253
254 STAM_REG(pVM, &pVM->patm.s.StatInt3BlockRun, STAMTYPE_COUNTER, "/PATM/Patch/Run/Int3", STAMUNIT_OCCURENCES, "Number of times an int3 block patch was executed.");
255 STAMR3RegisterF(pVM, &pVM->patm.s.pGCStateHC->uPatchCalls, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Patch/Run/Normal");
256
257 STAM_REG(pVM, &pVM->patm.s.StatInstalledFunctionPatches, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Function", STAMUNIT_OCCURENCES, "Number of installed function duplication patches.");
258 STAM_REG(pVM, &pVM->patm.s.StatInstalledTrampoline, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Trampoline", STAMUNIT_OCCURENCES, "Number of installed trampoline patches.");
259 STAM_REG(pVM, &pVM->patm.s.StatInstalledJump, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Jump", STAMUNIT_OCCURENCES, "Number of installed jump patches.");
260
261 STAM_REG(pVM, &pVM->patm.s.StatOverwritten, STAMTYPE_COUNTER, "/PATM/Patch/Overwritten", STAMUNIT_OCCURENCES, "Number of overwritten patches.");
262 STAM_REG(pVM, &pVM->patm.s.StatFixedConflicts,STAMTYPE_COUNTER, "/PATM/Patch/ConflictFixed", STAMUNIT_OCCURENCES, "Number of fixed conflicts.");
263 STAM_REG(pVM, &pVM->patm.s.StatFlushed, STAMTYPE_COUNTER, "/PATM/Patch/Flushed", STAMUNIT_OCCURENCES, "Number of flushes of pages with patch jumps.");
264 STAM_REG(pVM, &pVM->patm.s.StatMonitored, STAMTYPE_COUNTER, "/PATM/Patch/Monitored", STAMUNIT_OCCURENCES, "Number of patches in monitored patch pages.");
265 STAM_REG(pVM, &pVM->patm.s.StatPageBoundaryCrossed, STAMTYPE_COUNTER, "/PATM/Patch/BoundaryCross", STAMUNIT_OCCURENCES, "Number of refused patches due to patch jump crossing page boundary.");
266
267 STAM_REG(pVM, &pVM->patm.s.StatHandleTrap, STAMTYPE_PROFILE, "/PATM/HandleTrap", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3HandleTrap");
268 STAM_REG(pVM, &pVM->patm.s.StatPushTrap, STAMTYPE_COUNTER, "/PATM/HandleTrap/PushWP", STAMUNIT_OCCURENCES, "Number of traps due to monitored stack pages.");
269
270 STAM_REG(pVM, &pVM->patm.s.StatSwitchBack, STAMTYPE_COUNTER, "/PATM/SwitchBack", STAMUNIT_OCCURENCES, "Switch back to original guest code when IF=1 & executing PATM instructions");
271 STAM_REG(pVM, &pVM->patm.s.StatSwitchBackFail,STAMTYPE_COUNTER, "/PATM/SwitchBackFail", STAMUNIT_OCCURENCES, "Failed switch back to original guest code when IF=1 & executing PATM instructions");
272
273 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQFailed, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Failed", STAMUNIT_OCCURENCES, "Nr of failed PATMR3DuplicateFunctionRequest calls");
274 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQSuccess, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Success", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls");
275 STAM_REG(pVM, &pVM->patm.s.StatDuplicateUseExisting,STAMTYPE_COUNTER, "/PATM/Function/DupREQ/UseExist", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls when using an existing patch");
276
277 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupInsert, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Insert", STAMUNIT_OCCURENCES, "Nr of successful function address insertions");
278 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupReplace, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Replace", STAMUNIT_OCCURENCES, "Nr of successful function address replacements");
279 STAM_REG(pVM, &pVM->patm.s.StatU32FunctionMaxSlotsUsed, STAMTYPE_U32_RESET,"/PATM/Function/Lookup/MaxSlots", STAMUNIT_OCCURENCES, "Maximum nr of lookup slots used in all call patches");
280
281 STAM_REG(pVM, &pVM->patm.s.StatFunctionFound, STAMTYPE_COUNTER, "/PATM/Function/Found", STAMUNIT_OCCURENCES, "Nr of successful function patch lookups in GC");
282 STAM_REG(pVM, &pVM->patm.s.StatFunctionNotFound, STAMTYPE_COUNTER, "/PATM/Function/NotFound", STAMUNIT_OCCURENCES, "Nr of failed function patch lookups in GC");
283
284 STAM_REG(pVM, &pVM->patm.s.StatPatchWrite, STAMTYPE_PROFILE, "/PATM/Write/Handle", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3PatchWrite");
285 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteDetect, STAMTYPE_PROFILE, "/PATM/Write/Detect", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMIsWriteToPatchPage");
286 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpreted, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Success", STAMUNIT_OCCURENCES, "Nr of interpreted patch writes.");
287 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpretedFailed, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Failed", STAMUNIT_OCCURENCES, "Nr of failed interpreted patch writes.");
288
289 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshSuccess, STAMTYPE_COUNTER, "/PATM/Refresh/Success", STAMUNIT_OCCURENCES, "Successful patch refreshes");
290 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshFailed, STAMTYPE_COUNTER, "/PATM/Refresh/Failure", STAMUNIT_OCCURENCES, "Failed patch refreshes");
291
292 STAM_REG(pVM, &pVM->patm.s.StatPatchPageInserted, STAMTYPE_COUNTER, "/PATM/Page/Inserted", STAMUNIT_OCCURENCES, "Nr of inserted guest pages that were patched");
293 STAM_REG(pVM, &pVM->patm.s.StatPatchPageRemoved, STAMTYPE_COUNTER, "/PATM/Page/Removed", STAMUNIT_OCCURENCES, "Nr of removed guest pages that were patched");
294
295 STAM_REG(pVM, &pVM->patm.s.StatInstrDirty, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Detected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty.");
296 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyGood, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Corrected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and corrected later on.");
297 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyBad, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Failed", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and we were not able to correct them.");
298
299 STAM_REG(pVM, &pVM->patm.s.StatSysEnter, STAMTYPE_COUNTER, "/PATM/Emul/SysEnter", STAMUNIT_OCCURENCES, "Number of times sysenter was emulated.");
300 STAM_REG(pVM, &pVM->patm.s.StatSysExit, STAMTYPE_COUNTER, "/PATM/Emul/SysExit" , STAMUNIT_OCCURENCES, "Number of times sysexit was emulated.");
301 STAM_REG(pVM, &pVM->patm.s.StatEmulIret, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Success", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
302 STAM_REG(pVM, &pVM->patm.s.StatEmulIretFailed, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Failed", STAMUNIT_OCCURENCES, "Number of times iret emulation failed.");
303
304 STAM_REG(pVM, &pVM->patm.s.StatGenRet, STAMTYPE_COUNTER, "/PATM/Gen/Ret" , STAMUNIT_OCCURENCES, "Number of generated ret instructions.");
305 STAM_REG(pVM, &pVM->patm.s.StatGenRetReused, STAMTYPE_COUNTER, "/PATM/Gen/RetReused" , STAMUNIT_OCCURENCES, "Number of reused ret instructions.");
306 STAM_REG(pVM, &pVM->patm.s.StatGenCall, STAMTYPE_COUNTER, "/PATM/Gen/Call", STAMUNIT_OCCURENCES, "Number of generated call instructions.");
307 STAM_REG(pVM, &pVM->patm.s.StatGenJump, STAMTYPE_COUNTER, "/PATM/Gen/Jmp" , STAMUNIT_OCCURENCES, "Number of generated indirect jump instructions.");
308 STAM_REG(pVM, &pVM->patm.s.StatGenPopf, STAMTYPE_COUNTER, "/PATM/Gen/Popf" , STAMUNIT_OCCURENCES, "Number of generated popf instructions.");
309
310 STAM_REG(pVM, &pVM->patm.s.StatCheckPendingIRQ, STAMTYPE_COUNTER, "/PATM/GC/CheckIRQ" , STAMUNIT_OCCURENCES, "Number of traps that ask to check for pending irqs.");
311#endif /* VBOX_WITH_STATISTICS */
312
313 Log(("PATMCallRecord.size %d\n", PATMCallRecord.size));
314 Log(("PATMCallIndirectRecord.size %d\n", PATMCallIndirectRecord.size));
315 Log(("PATMRetRecord.size %d\n", PATMRetRecord.size));
316 Log(("PATMJumpIndirectRecord.size %d\n", PATMJumpIndirectRecord.size));
317 Log(("PATMPopf32Record.size %d\n", PATMPopf32Record.size));
318 Log(("PATMIretRecord.size %d\n", PATMIretRecord.size));
319 Log(("PATMStiRecord.size %d\n", PATMStiRecord.size));
320 Log(("PATMCheckIFRecord.size %d\n", PATMCheckIFRecord.size));
321
322 return rc;
323}
324
325/**
326 * Finalizes HMA page attributes.
327 *
328 * @returns VBox status code.
329 * @param pVM Pointer to the VM.
330 */
331VMMR3_INT_DECL(int) PATMR3InitFinalize(PVM pVM)
332{
333 if (HMIsEnabled(pVM))
334 return VINF_SUCCESS;
335
336 /* The GC state, stack and statistics must be read/write for the guest (supervisor only of course). */
337 int rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStateGC, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
338 if (RT_FAILURE(rc))
339 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
340
341 rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStackGC, PATM_STACK_TOTAL_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
342 if (RT_FAILURE(rc))
343 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
344
345 rc = PGMMapSetPage(pVM, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
346 if (RT_FAILURE(rc))
347 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
348
349 return rc;
350}
351
352/**
353 * (Re)initializes PATM
354 *
355 * @param pVM The VM.
356 */
357static int patmReinit(PVM pVM)
358{
359 int rc;
360
361 /*
362 * Assert alignment and sizes.
363 */
364 AssertRelease(!(RT_OFFSETOF(VM, patm.s) & 31));
365 AssertRelease(sizeof(pVM->patm.s) <= sizeof(pVM->patm.padding));
366
367 /*
368 * Setup any fixed pointers and offsets.
369 */
370 pVM->patm.s.offVM = RT_OFFSETOF(VM, patm);
371
372#ifndef RT_ARCH_AMD64 /* would be nice if this was changed everywhere. was driving me crazy on AMD64. */
373#ifndef PATM_DISABLE_ALL
374 pVM->fPATMEnabled = true;
375#endif
376#endif
377
378 Assert(pVM->patm.s.pGCStateHC);
379 memset(pVM->patm.s.pGCStateHC, 0, PAGE_SIZE);
380 AssertReleaseMsg(pVM->patm.s.pGCStateGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStateGC));
381
382 Log(("Patch memory allocated at %p - %RRv\n", pVM->patm.s.pPatchMemHC, pVM->patm.s.pPatchMemGC));
383 pVM->patm.s.pGCStateHC->uVMFlags = X86_EFL_IF;
384
385 Assert(pVM->patm.s.pGCStackHC);
386 memset(pVM->patm.s.pGCStackHC, 0, PAGE_SIZE);
387 AssertReleaseMsg(pVM->patm.s.pGCStackGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStackGC));
388 pVM->patm.s.pGCStateHC->Psp = PATM_STACK_SIZE;
389 pVM->patm.s.pGCStateHC->fPIF = 1; /* PATM Interrupt Flag */
390
391 Assert(pVM->patm.s.pStatsHC);
392 memset(pVM->patm.s.pStatsHC, 0, PATM_STAT_MEMSIZE);
393 AssertReleaseMsg(pVM->patm.s.pStatsGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pStatsGC));
394
395 Assert(pVM->patm.s.pPatchMemHC);
396 Assert(pVM->patm.s.pPatchMemGC == MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC));
397 memset(pVM->patm.s.pPatchMemHC, 0, PATCH_MEMORY_SIZE);
398 AssertReleaseMsg(pVM->patm.s.pPatchMemGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pPatchMemHC));
399
400 /* Needed for future patching of sldt/sgdt/sidt/str etc. */
401 pVM->patm.s.pCPUMCtxGC = VM_RC_ADDR(pVM, CPUMQueryGuestCtxPtr(VMMGetCpu(pVM)));
402
403 Assert(pVM->patm.s.PatchLookupTreeHC);
404 Assert(pVM->patm.s.PatchLookupTreeGC == MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC));
405
406 /*
407 * (Re)Initialize PATM structure
408 */
409 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
410 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr);
411 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
412 pVM->patm.s.offPatchMem = 16; /* don't start with zero here */
413 pVM->patm.s.uCurrentPatchIdx = 1; /* Index zero is a dummy */
414 pVM->patm.s.pvFaultMonitor = 0;
415 pVM->patm.s.deltaReloc = 0;
416
417 /* Lowest and highest patched instruction */
418 pVM->patm.s.pPatchedInstrGCLowest = ~0;
419 pVM->patm.s.pPatchedInstrGCHighest = 0;
420
421 pVM->patm.s.PatchLookupTreeHC->PatchTree = 0;
422 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
423 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
424
425 pVM->patm.s.pfnSysEnterPatchGC = 0;
426 pVM->patm.s.pfnSysEnterGC = 0;
427
428 pVM->patm.s.fOutOfMemory = false;
429
430 pVM->patm.s.pfnHelperCallGC = 0;
431 patmR3DbgReset(pVM);
432
433 /* Generate all global functions to be used by future patches. */
434 /* We generate a fake patch in order to use the existing code for relocation. */
435 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pVM->patm.s.pGlobalPatchRec);
436 if (RT_FAILURE(rc))
437 {
438 Log(("Out of memory!!!!\n"));
439 return VERR_NO_MEMORY;
440 }
441 pVM->patm.s.pGlobalPatchRec->patch.flags = PATMFL_GLOBAL_FUNCTIONS;
442 pVM->patm.s.pGlobalPatchRec->patch.uState = PATCH_ENABLED;
443 pVM->patm.s.pGlobalPatchRec->patch.pPatchBlockOffset = pVM->patm.s.offPatchMem;
444
445 rc = patmPatchGenGlobalFunctions(pVM, &pVM->patm.s.pGlobalPatchRec->patch);
446 AssertRC(rc);
447
448 /* Update free pointer in patch memory. */
449 pVM->patm.s.offPatchMem += pVM->patm.s.pGlobalPatchRec->patch.uCurPatchOffset;
450 /* Round to next 8 byte boundary. */
451 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
452
453
454 return rc;
455}
456
457
458/**
459 * Applies relocations to data and code managed by this
460 * component. This function will be called at init and
461 * whenever the VMM needs to relocate itself inside the GC.
462 *
463 * The PATM will update the addresses used by the switcher.
464 *
465 * @param pVM The VM.
466 */
467VMMR3_INT_DECL(void) PATMR3Relocate(PVM pVM)
468{
469 if (HMIsEnabled(pVM))
470 return;
471
472 RTRCPTR GCPtrNew = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
473 RTRCINTPTR delta = GCPtrNew - pVM->patm.s.pGCStateGC;
474
475 Log(("PATMR3Relocate from %RRv to %RRv - delta %08X\n", pVM->patm.s.pGCStateGC, GCPtrNew, delta));
476 if (delta)
477 {
478 PCPUMCTX pCtx;
479
480 /* Update CPUMCTX guest context pointer. */
481 pVM->patm.s.pCPUMCtxGC += delta;
482
483 pVM->patm.s.deltaReloc = delta;
484
485 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, RelocatePatches, (void *)pVM);
486
487 pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));
488
489 /* If we are running patch code right now, then also adjust EIP. */
490 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
491 pCtx->eip += delta;
492
493 pVM->patm.s.pGCStateGC = GCPtrNew;
494 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
495
496 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
497
498 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
499
500 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
501
502 if (pVM->patm.s.pfnSysEnterPatchGC)
503 pVM->patm.s.pfnSysEnterPatchGC += delta;
504
505 /* Deal with the global patch functions. */
506 pVM->patm.s.pfnHelperCallGC += delta;
507 pVM->patm.s.pfnHelperRetGC += delta;
508 pVM->patm.s.pfnHelperIretGC += delta;
509 pVM->patm.s.pfnHelperJumpGC += delta;
510
511 RelocatePatches(&pVM->patm.s.pGlobalPatchRec->Core, (void *)pVM);
512 }
513}
514
515
516/**
517 * Terminates the PATM.
518 *
519 * Termination means cleaning up and freeing all resources;
520 * the VM itself is at this point powered off or suspended.
521 *
522 * @returns VBox status code.
523 * @param pVM Pointer to the VM.
524 */
525VMMR3_INT_DECL(int) PATMR3Term(PVM pVM)
526{
527 if (HMIsEnabled(pVM))
528 return VINF_SUCCESS;
529
530 patmR3DbgTerm(pVM);
531
532 /* Memory was all allocated from the two MM heaps and requires no freeing. */
533 return VINF_SUCCESS;
534}
535
536
537/**
538 * PATM reset callback.
539 *
540 * @returns VBox status code.
541 * @param pVM The VM which is reset.
542 */
543VMMR3_INT_DECL(int) PATMR3Reset(PVM pVM)
544{
545 Log(("PATMR3Reset\n"));
546 if (HMIsEnabled(pVM))
547 return VINF_SUCCESS;
548
549 /* Free all patches. */
550 for (;;)
551 {
552 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32RemoveBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, 0, true);
553 if (pPatchRec)
554 patmR3RemovePatch(pVM, pPatchRec, true);
555 else
556 break;
557 }
558 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
559 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
560 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
561 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
562
563 int rc = patmReinit(pVM);
564 if (RT_SUCCESS(rc))
565 rc = PATMR3InitFinalize(pVM); /* paranoia */
566
567 return rc;
568}
569
570/**
571 * @callback_method_impl{FNDISREADBYTES}
572 */
573static DECLCALLBACK(int) patmReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
574{
575 PATMDISASM *pDisInfo = (PATMDISASM *)pDis->pvUser;
576
577/** @todo change this to read more! */
578 /*
579 * Trap/interrupt handlers typically call common code on entry, which might already have patches inserted.
580 * As we currently don't support calling patch code from patch code, we'll let it read the original opcode bytes instead.
581 */
582 /** @todo could change in the future! */
583 if (pDisInfo->fReadFlags & PATMREAD_ORGCODE)
584 {
585 size_t cbRead = cbMaxRead;
586 RTUINTPTR uSrcAddr = pDis->uInstrAddr + offInstr;
587 int rc = PATMR3ReadOrgInstr(pDisInfo->pVM, pDis->uInstrAddr + offInstr, &pDis->abInstr[offInstr], cbRead, &cbRead);
588 if (RT_SUCCESS(rc))
589 {
590 if (cbRead >= cbMinRead)
591 {
592 pDis->cbCachedInstr = offInstr + (uint8_t)cbRead;
593 return VINF_SUCCESS;
594 }
595
596 cbMinRead -= (uint8_t)cbRead;
597 cbMaxRead -= (uint8_t)cbRead;
598 offInstr += (uint8_t)cbRead;
599 uSrcAddr += cbRead;
600 }
601
602#ifdef VBOX_STRICT
603 if ( !(pDisInfo->pPatchInfo->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER))
604 && !(pDisInfo->fReadFlags & PATMREAD_NOCHECK))
605 {
606 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pDis->uInstrAddr + offInstr, NULL) == false);
607 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pDis->uInstrAddr + offInstr + cbMinRead-1, NULL) == false);
608 }
609#endif
610 }
611
612 int rc = VINF_SUCCESS;
613 RTGCPTR32 uSrcAddr = (RTGCPTR32)pDis->uInstrAddr + offInstr;
614 if ( !pDisInfo->pbInstrHC
615 || ( PAGE_ADDRESS(pDisInfo->pInstrGC) != PAGE_ADDRESS(uSrcAddr + cbMinRead - 1)
616 && !PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr)))
617 {
618 Assert(!PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr));
619 rc = PGMPhysSimpleReadGCPtr(&pDisInfo->pVM->aCpus[0], &pDis->abInstr[offInstr], uSrcAddr, cbMinRead);
620 offInstr += cbMinRead;
621 }
622 else
623 {
624 /*
625 * pbInstrHC is the base address; adjust according to the GC pointer.
626 *
627 * Try to read the max number of bytes here. Since the disassembler only
628 * ever uses these bytes for the current instruction, it doesn't matter
629 * much if we accidentally read the start of the next instruction even
630 * if it happens to be a patch jump or int3.
631 */
632 uint8_t const *pbInstrHC = pDisInfo->pbInstrHC; AssertPtr(pbInstrHC);
633 pbInstrHC += uSrcAddr - pDisInfo->pInstrGC;
634
635 size_t cbMaxRead1 = PAGE_SIZE - (uSrcAddr & PAGE_OFFSET_MASK);
636 size_t cbMaxRead2 = PAGE_SIZE - ((uintptr_t)pbInstrHC & PAGE_OFFSET_MASK);
637 size_t cbToRead = RT_MIN(cbMaxRead1, RT_MAX(cbMaxRead2, cbMinRead));
638 if (cbToRead > cbMaxRead)
639 cbToRead = cbMaxRead;
640
641 memcpy(&pDis->abInstr[offInstr], pbInstrHC, cbToRead);
642 offInstr += (uint8_t)cbToRead;
643 }
644
645 pDis->cbCachedInstr = offInstr;
646 return rc;
647}
648
649
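/**
 * Disassembles one guest instruction and formats it as a string, reading the
 * instruction bytes through patmReadBytes so the PATMREAD_XXX flags are honoured.
 *
 * @returns true on success, false on failure.
 */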
650DECLINLINE(bool) patmR3DisInstrToStr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
651 PDISCPUSTATE pCpu, uint32_t *pcbInstr, char *pszOutput, size_t cbOutput)
652{
653 PATMDISASM disinfo;
654 disinfo.pVM = pVM;
655 disinfo.pPatchInfo = pPatch;
656 disinfo.pbInstrHC = pbInstrHC;
657 disinfo.pInstrGC = InstrGCPtr32;
658 disinfo.fReadFlags = fReadFlags;
659 return RT_SUCCESS(DISInstrToStrWithReader(InstrGCPtr32,
660 (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
661 patmReadBytes, &disinfo,
662 pCpu, pcbInstr, pszOutput, cbOutput));
663}
664
665
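/**
 * Disassembles one guest instruction (without string formatting), reading the
 * bytes through patmReadBytes. The CPU mode is derived from PATMFL_CODE32.
 *
 * @returns true on success, false on failure.
 */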
666DECLINLINE(bool) patmR3DisInstr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
667 PDISCPUSTATE pCpu, uint32_t *pcbInstr)
668{
669 PATMDISASM disinfo;
670 disinfo.pVM = pVM;
671 disinfo.pPatchInfo = pPatch;
672 disinfo.pbInstrHC = pbInstrHC;
673 disinfo.pInstrGC = InstrGCPtr32;
674 disinfo.fReadFlags = fReadFlags;
675 return RT_SUCCESS(DISInstrWithReader(InstrGCPtr32,
676 (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
677 patmReadBytes, &disinfo,
678 pCpu, pcbInstr));
679}
680
681
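/**
 * Same as patmR3DisInstr, except that the disassembly mode is taken from
 * pPatch->uOpMode instead of being derived from PATMFL_CODE32.
 *
 * @returns true on success, false on failure.
 */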
682DECLINLINE(bool) patmR3DisInstrNoStrOpMode(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC,
683 uint32_t fReadFlags,
684 PDISCPUSTATE pCpu, uint32_t *pcbInstr)
685{
686 PATMDISASM disinfo;
687 disinfo.pVM = pVM;
688 disinfo.pPatchInfo = pPatch;
689 disinfo.pbInstrHC = pbInstrHC;
690 disinfo.pInstrGC = InstrGCPtr32;
691 disinfo.fReadFlags = fReadFlags;
692 return RT_SUCCESS(DISInstrWithReader(InstrGCPtr32, pPatch->uOpMode, patmReadBytes, &disinfo,
693 pCpu, pcbInstr));
694}
695
696#ifdef LOG_ENABLED
697# define PATM_LOG_ORG_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) \
698 PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, PATMREAD_ORGCODE, a_szComment, " patch:")
699# define PATM_LOG_RAW_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) \
700 PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, PATMREAD_RAWCODE, a_szComment, " patch:")
701
702# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) \
703 do { \
704 if (LogIsEnabled()) \
705 patmLogRawPatchInstr(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2); \
706 } while (0)
707
708static void patmLogRawPatchInstr(PVM pVM, PPATCHINFO pPatch, uint32_t fFlags,
709 const char *pszComment1, const char *pszComment2)
710{
711 DISCPUSTATE DisState;
712 char szOutput[128];
713 szOutput[0] = '\0';
714 patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC, NULL, fFlags,
715 &DisState, NULL, szOutput, sizeof(szOutput));
716 Log(("%s%s %s", pszComment1, pszComment2, szOutput));
717}
718
719#else
720# define PATM_LOG_ORG_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) do { } while (0)
721# define PATM_LOG_RAW_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) do { } while (0)
722# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) do { } while (0)
723#endif
724
725
726/**
727 * Callback function for RTAvloU32DoWithAll
728 *
729 * Updates all fixups in the patches
730 *
731 * @returns VBox status code.
732 * @param pNode Current node
733 * @param pParam Pointer to the VM.
734 */
735static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam)
736{
737 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
738 PVM pVM = (PVM)pParam;
739 RTRCINTPTR delta;
740 int rc;
741
742 /* Nothing to do if the patch is not active. */
743 if (pPatch->patch.uState == PATCH_REFUSED)
744 return 0;
745
746 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
747 PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Org patch jump:", "");
748
749 Log(("Nr of fixups %d\n", pPatch->patch.nrFixups));
750 delta = (RTRCINTPTR)pVM->patm.s.deltaReloc;
751
752 /*
753 * Apply fixups
754 */
755 PRELOCREC pRec = 0;
756 AVLPVKEY key = 0;
757
758 while (true)
759 {
760 /* Get the closest record with a key at or above the current search key. */
761 pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true);
762 if (pRec == 0)
763 break;
764
765 key = (AVLPVKEY)(pRec->pRelocPos + 1); /* search for the next record during the next round. */
766
767 switch (pRec->uType)
768 {
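            /* Absolute address fixup: addresses pointing into patch memory are
               simply shifted by the relocation delta; addresses written into
               patched guest code require re-reading, verifying and rewriting
               the guest instruction. */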
769 case FIXUP_ABSOLUTE:
770 Log(("Absolute fixup at %RRv %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
771 if ( !pRec->pSource
772 || PATMIsPatchGCAddr(pVM, pRec->pSource))
773 {
774 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
775 }
776 else
777 {
778 uint8_t curInstr[15];
779 uint8_t oldInstr[15];
780 Assert(pRec->pSource && pPatch->patch.cbPrivInstr <= 15);
781
782 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
783
784 memcpy(oldInstr, pPatch->patch.aPrivInstr, pPatch->patch.cbPrivInstr);
785 *(RTRCPTR *)&oldInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
786
787 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), curInstr, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPrivInstr);
788 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
789
790 pRec->pDest = (RTRCPTR)((RTRCUINTPTR)pRec->pDest + delta);
791
792 if ( rc == VERR_PAGE_NOT_PRESENT
793 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
794 {
795 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
796
797 Log(("PATM: Patch page not present -> check later!\n"));
798 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
799 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
800 }
801 else
802 if (memcmp(curInstr, oldInstr, pPatch->patch.cbPrivInstr))
803 {
804 Log(("PATM: Patch was overwritten -> disabling patch!!\n"));
805 /*
806 * Disable patch; this is not a good solution
807 */
808 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
809 pPatch->patch.uState = PATCH_DISABLED;
810 }
811 else
812 if (RT_SUCCESS(rc))
813 {
814 *(RTRCPTR *)&curInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
815 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pRec->pSource, curInstr, pPatch->patch.cbPrivInstr);
816 AssertRC(rc);
817 }
818 }
819 break;
820
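            /* Relative jump in guest code targeting patch memory: the patch
               code moved, so the displacement in the guest jump instruction is
               recomputed and written back (only while the patch is enabled). */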
821 case FIXUP_REL_JMPTOPATCH:
822 {
823 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
824
825 if ( pPatch->patch.uState == PATCH_ENABLED
826 && (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE))
827 {
828 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
829 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
830 RTRCPTR pJumpOffGC;
831 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
832 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
833
834#if 0 /** @todo '*(int32_t*)pRec->pRelocPos' crashes on restore of an XP VM here. pRelocPos=0x8000dbe2180a (bird) */
835 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
836#else
837 Log(("Relative fixup (g2p) ???????? -> %08X at %08X (source=%08x, target=%08x)\n", displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
838#endif
839
840 Assert(pRec->pSource - pPatch->patch.cbPatchJump == pPatch->patch.pPrivInstrGC);
841#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
842 if (pPatch->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
843 {
844 Assert(pPatch->patch.flags & PATMFL_JUMP_CONFLICT);
845
846 pJumpOffGC = pPatch->patch.pPrivInstrGC + 2; //two byte opcode
847 oldJump[0] = pPatch->patch.aPrivInstr[0];
848 oldJump[1] = pPatch->patch.aPrivInstr[1];
849 *(RTRCUINTPTR *)&oldJump[2] = displOld;
850 }
851 else
852#endif
853 if (pPatch->patch.cbPatchJump == SIZEOF_NEARJUMP32)
854 {
855 pJumpOffGC = pPatch->patch.pPrivInstrGC + 1; //one byte opcode
856 oldJump[0] = 0xE9;
857 *(RTRCUINTPTR *)&oldJump[1] = displOld;
858 }
859 else
860 {
861 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->patch.cbPatchJump));
862 continue; //this should never happen!!
863 }
864 Assert(pPatch->patch.cbPatchJump <= sizeof(temp));
865
866 /*
867 * Read old patch jump and compare it to the one we previously installed
868 */
869 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPatchJump);
870 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
871
872 if ( rc == VERR_PAGE_NOT_PRESENT
873 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
874 {
875 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
876
877 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
878 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
879 }
880 else
881 if (memcmp(temp, oldJump, pPatch->patch.cbPatchJump))
882 {
883 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
884 /*
885 * Disable patch; this is not a good solution
886 */
887 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
888 pPatch->patch.uState = PATCH_DISABLED;
889 }
890 else
891 if (RT_SUCCESS(rc))
892 {
893 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
894 AssertRC(rc);
895 }
896 else
897 AssertMsgFailed(("Unexpected error %d from MMR3PhysReadGCVirt\n", rc));
898 }
899 else
900 Log(("Skip the guest jump to patch code for this disabled patch %RGv - %08X\n", pPatch->patch.pPrivInstrGC, pRec->pRelocPos));
901
902 pRec->pDest = pTarget;
903 break;
904 }
905
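            /* Relative jump in patch code targeting guest code: only the patch
               side moved, so recompute the displacement from the relocated
               source address. */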
906 case FIXUP_REL_JMPTOGUEST:
907 {
908 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
909 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
910
911 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
912 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
913 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
914 pRec->pSource = pSource;
915 break;
916 }
917
918 default:
919 AssertMsg(0, ("Invalid fixup type!!\n"));
920 return VERR_INVALID_PARAMETER;
921 }
922 }
923
924 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
925 PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Rel patch jump:", "");
926 return 0;
927}
928
929/**
930 * \#PF Handler callback for virtual access handler ranges.
931 *
932 * Important to realize that a physical page in a range can have aliases, and
933 * for ALL and WRITE handlers these will also trigger.
934 *
935 * @returns VINF_SUCCESS if the handler has carried out the operation.
936 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
937 * @param pVM Pointer to the VM.
938 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
939 * @param pvPtr The HC mapping of that address.
940 * @param pvBuf What the guest is reading/writing.
941 * @param cbBuf How much it's reading/writing.
942 * @param enmAccessType The access type.
943 * @param pvUser User argument.
944 */
945DECLCALLBACK(int) patmVirtPageHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
946 PGMACCESSTYPE enmAccessType, void *pvUser)
947{
948 Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
949 NOREF(pvPtr); NOREF(pvBuf); NOREF(cbBuf); NOREF(pvUser);
950
951 /** @todo could be the wrong virtual address (alias) */
952 pVM->patm.s.pvFaultMonitor = GCPtr;
953 PATMR3HandleMonitoredPage(pVM);
954 return VINF_PGM_HANDLER_DO_DEFAULT;
955}
956
957#ifdef VBOX_WITH_DEBUGGER
958
959/**
960 * Callback function for RTAvloU32DoWithAll
961 *
962 * Enables the patch that's being enumerated
963 *
964 * @returns 0 (continue enumeration).
965 * @param pNode Current node
966 * @param pVM Pointer to the VM.
967 */
968static DECLCALLBACK(int) EnableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
969{
970 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
971
972 PATMR3EnablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
973 return 0;
974}
975
976
977/**
978 * Callback function for RTAvloU32DoWithAll
979 *
980 * Disables the patch that's being enumerated
981 *
982 * @returns 0 (continue enumeration).
983 * @param pNode Current node
984 * @param pVM Pointer to the VM.
985 */
986static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
987{
988 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
989
990 PATMR3DisablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
991 return 0;
992}
993
994#endif /* VBOX_WITH_DEBUGGER */
995#ifdef UNUSED_FUNCTIONS
996
997/**
998 * Returns the host context pointer and size of the patch memory block
999 *
1000 * @returns Host context pointer.
1001 * @param pVM Pointer to the VM.
1002 * @param pcb Size of the patch memory block
1003 * @internal
1004 */
1005VMMR3_INT_DECL(void *) PATMR3QueryPatchMemHC(PVM pVM, uint32_t *pcb)
1006{
1007 AssertReturn(!HMIsEnabled(pVM), NULL);
1008 if (pcb)
1009 *pcb = pVM->patm.s.cbPatchMem;
1010 return pVM->patm.s.pPatchMemHC;
1011}
1012
1013
1014/**
1015 * Returns the guest context pointer and size of the patch memory block
1016 *
1017 * @returns Guest context pointer.
1018 * @param pVM Pointer to the VM.
1019 * @param pcb Size of the patch memory block
1020 */
1021VMMR3_INT_DECL(RTRCPTR) PATMR3QueryPatchMemGC(PVM pVM, uint32_t *pcb)
1022{
1023 AssertReturn(!HMIsEnabled(pVM), NIL_RTRCPTR);
1024 if (pcb)
1025 *pcb = pVM->patm.s.cbPatchMem;
1026 return pVM->patm.s.pPatchMemGC;
1027}
1028
1029#endif /* UNUSED_FUNCTIONS */
1030
1031/**
1032 * Returns the host context pointer of the GC context structure
1033 *
1034 * @returns Host context pointer to the PATM GC state structure.
1035 * @param pVM Pointer to the VM.
1036 */
1037VMMR3_INT_DECL(PPATMGCSTATE) PATMR3QueryGCStateHC(PVM pVM)
1038{
1039 AssertReturn(!HMIsEnabled(pVM), NULL);
1040 return pVM->patm.s.pGCStateHC;
1041}
1042
1043
1044#ifdef UNUSED_FUNCTION
1045/**
1046 * Checks whether the HC address is part of our patch region
1047 *
1048 * @returns true/false.
1049 * @param pVM Pointer to the VM.
1050 * @param pAddrHC Host context ring-3 address to check.
1051 */
1052VMMR3_INT_DECL(bool) PATMR3IsPatchHCAddr(PVM pVM, void *pAddrHC)
1053{
1054 return (uintptr_t)pAddrHC >= (uintptr_t)pVM->patm.s.pPatchMemHC
1055 && (uintptr_t)pAddrHC < (uintptr_t)pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem;
1056}
1057#endif
1058
1059
1060/**
1061 * Allows or disallows patching of privileged instructions executed by the guest OS.
1062 *
1063 * @returns VBox status code.
1064 * @param pUVM The user mode VM handle.
1065 * @param fAllowPatching Allow/disallow patching
1066 */
1067VMMR3DECL(int) PATMR3AllowPatching(PUVM pUVM, bool fAllowPatching)
1068{
1069 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1070 PVM pVM = pUVM->pVM;
1071 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1072
1073 if (!HMIsEnabled(pVM))
1074 pVM->fPATMEnabled = fAllowPatching;
1075 else
1076 Assert(!pVM->fPATMEnabled);
1077 return VINF_SUCCESS;
1078}
1079
1080
1081/**
1082 * Checks if the patch manager is enabled or not.
1083 *
1084 * @returns true if enabled, false if not (or if invalid handle).
1085 * @param pUVM The user mode VM handle.
1086 */
1087VMMR3DECL(bool) PATMR3IsEnabled(PUVM pUVM)
1088{
1089 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
1090 PVM pVM = pUVM->pVM;
1091 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
1092 return PATMIsEnabled(pVM);
1093}
1094
1095
1096/**
1097 * Convert a GC patch block pointer to a HC patch pointer
1098 *
1099 * @returns HC pointer or NULL if it's not a GC patch pointer
1100 * @param pVM Pointer to the VM.
1101 * @param pAddrGC GC pointer
1102 */
1103VMMR3_INT_DECL(void *) PATMR3GCPtrToHCPtr(PVM pVM, RTRCPTR pAddrGC)
1104{
1105 AssertReturn(!HMIsEnabled(pVM), NULL);
1106 if (pVM->patm.s.pPatchMemGC <= pAddrGC && pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem > pAddrGC)
1107 return pVM->patm.s.pPatchMemHC + (pAddrGC - pVM->patm.s.pPatchMemGC);
1108 return NULL;
1109}
1110
1111
1112/**
1113 * Convert guest context address to host context pointer
1114 *
1115 *
1116 * @param pVM Pointer to the VM.
1117 * @param pCacheRec Address conversion cache record
1118 * @param pGCPtr Guest context pointer
1119 *
1120 * @returns Host context pointer or NULL in case of an error
1121 *
1122 */
1123R3PTRTYPE(uint8_t *) patmR3GCVirtToHCVirt(PVM pVM, PPATMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr)
1124{
1125 int rc;
1126 R3PTRTYPE(uint8_t *) pHCPtr;
1127 uint32_t offset;
1128
1129 if (PATMIsPatchGCAddr(pVM, pGCPtr))
1130 {
1131 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1132 Assert(pPatch);
1133 return PATCHCODE_PTR_HC(pPatch) + (pGCPtr - PATCHCODE_PTR_GC(pPatch));
1134 }
1135
1136 offset = pGCPtr & PAGE_OFFSET_MASK;
1137 if (pCacheRec->pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
1138 return pCacheRec->pPageLocStartHC + offset;
1139
1140 /* Release previous lock if any. */
1141 if (pCacheRec->Lock.pvMap)
1142 {
1143 PGMPhysReleasePageMappingLock(pVM, &pCacheRec->Lock);
1144 pCacheRec->Lock.pvMap = NULL;
1145 }
1146
1147 rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pGCPtr, (const void **)&pHCPtr, &pCacheRec->Lock);
1148 if (rc != VINF_SUCCESS)
1149 {
1150 AssertMsg(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("MMR3PhysGCVirt2HCVirtEx failed for %08X\n", pGCPtr));
1151 return NULL;
1152 }
1153 pCacheRec->pPageLocStartHC = (R3PTRTYPE(uint8_t*))((RTHCUINTPTR)pHCPtr & PAGE_BASE_HC_MASK);
1154 pCacheRec->pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
1155 return pHCPtr;
1156}
1157
1158
1159/**
1160 * Calculates and fills in all branch targets
1161 *
1162 * @returns VBox status code.
1163 * @param pVM Pointer to the VM.
1164 * @param pPatch Current patch block pointer
1165 *
1166 */
1167static int patmr3SetBranchTargets(PVM pVM, PPATCHINFO pPatch)
1168{
1169 int32_t displ;
1170
1171 PJUMPREC pRec = 0;
1172 unsigned nrJumpRecs = 0;
1173
1174 /*
1175 * Set all branch targets inside the patch block.
1176 * We remove all jump records as they are no longer needed afterwards.
1177 */
1178 while (true)
1179 {
1180 RCPTRTYPE(uint8_t *) pInstrGC;
1181 RCPTRTYPE(uint8_t *) pBranchTargetGC = 0;
1182
1183 pRec = (PJUMPREC)RTAvlPVRemoveBestFit(&pPatch->JumpTree, 0, true);
1184 if (pRec == 0)
1185 break;
1186
1187 nrJumpRecs++;
1188
1189 /* HC in patch block to GC in patch block. */
1190 pInstrGC = patmPatchHCPtr2PatchGCPtr(pVM, pRec->pJumpHC);
1191
1192 if (pRec->opcode == OP_CALL)
1193 {
1194 /* Special case: call function replacement patch from this patch block.
1195 */
1196 PPATMPATCHREC pFunctionRec = patmQueryFunctionPatch(pVM, pRec->pTargetGC);
1197 if (!pFunctionRec)
1198 {
1199 int rc;
1200
1201 if (PATMR3HasBeenPatched(pVM, pRec->pTargetGC) == false)
1202 rc = PATMR3InstallPatch(pVM, pRec->pTargetGC, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
1203 else
1204 rc = VERR_PATCHING_REFUSED; /* exists as a normal patch; can't use it */
1205
1206 if (RT_FAILURE(rc))
1207 {
1208 uint8_t *pPatchHC;
1209 RTRCPTR pPatchGC;
1210 RTRCPTR pOrgInstrGC;
1211
1212 pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pInstrGC, 0);
1213 Assert(pOrgInstrGC);
1214
1215 /* Failure for some reason -> mark exit point with int 3. */
1216 Log(("Failed to install function replacement patch (at %x) for reason %Rrc\n", pOrgInstrGC, rc));
1217
1218 pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pOrgInstrGC);
1219 Assert(pPatchGC);
1220
1221 pPatchHC = pVM->patm.s.pPatchMemHC + (pPatchGC - pVM->patm.s.pPatchMemGC);
1222
1223 /* Set a breakpoint at the very beginning of the recompiled instruction */
1224 *pPatchHC = 0xCC;
1225
1226 continue;
1227 }
1228 }
1229 else
1230 {
1231 Log(("Patch block %RRv called as function\n", pFunctionRec->patch.pPrivInstrGC));
1232 pFunctionRec->patch.flags |= PATMFL_CODE_REFERENCED;
1233 }
1234
1235 pBranchTargetGC = PATMR3QueryPatchGCPtr(pVM, pRec->pTargetGC);
1236 }
1237 else
1238 pBranchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pRec->pTargetGC);
1239
1240 if (pBranchTargetGC == 0)
1241 {
1242 AssertMsgFailed(("patmr3SetBranchTargets: patmGuestGCPtrToPatchGCPtr failed for %08X\n", pRec->pTargetGC));
1243 return VERR_PATCHING_REFUSED;
1244 }
1245 /* Our jumps *always* have a dword displacement (to make things easier). */
1246 Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
1247 displ = pBranchTargetGC - (pInstrGC + pRec->offDispl + sizeof(RTRCPTR));
1248 *(RTRCPTR *)(pRec->pJumpHC + pRec->offDispl) = displ;
1249 Log(("Set branch target %d to %08X : %08x - (%08x + %d + %d)\n", nrJumpRecs, displ, pBranchTargetGC, pInstrGC, pRec->offDispl, sizeof(RTRCPTR)));
1250 }
1251 Assert(nrJumpRecs == pPatch->nrJumpRecs);
1252 Assert(pPatch->JumpTree == 0);
1253 return VINF_SUCCESS;
1254}
1255
1256/**
1257 * Add an illegal instruction record
1258 *
1259 * @param pVM Pointer to the VM.
1260 * @param pPatch Patch structure ptr
1261 * @param pInstrGC Guest context pointer to privileged instruction
1262 *
1263 */
1264static void patmAddIllegalInstrRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1265{
1266 PAVLPVNODECORE pRec;
1267
1268 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1269 Assert(pRec);
1270 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
1271
1272 bool ret = RTAvlPVInsert(&pPatch->pTempInfo->IllegalInstrTree, pRec);
1273 Assert(ret); NOREF(ret);
1274 pPatch->pTempInfo->nrIllegalInstr++;
1275}
1276
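/**
 * Checks whether an instruction was previously recorded as illegal for this patch.
 *
 * @returns true if an illegal instruction record exists for pInstrGC, false otherwise.
 * @param pPatch Patch structure ptr
 * @param pInstrGC Guest context pointer to the instruction
 */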
1277static bool patmIsIllegalInstr(PPATCHINFO pPatch, RTRCPTR pInstrGC)
1278{
1279 PAVLPVNODECORE pRec;
1280
1281 pRec = RTAvlPVGet(&pPatch->pTempInfo->IllegalInstrTree, (AVLPVKEY)(uintptr_t)pInstrGC);
1282 if (pRec)
1283 return true;
1284 else
1285 return false;
1286}
1287
1288/**
1289 * Add a patch to guest lookup record
1290 *
1291 * @param pVM Pointer to the VM.
1292 * @param pPatch Patch structure ptr
1293 * @param pPatchInstrHC Host context pointer into the patch block
1294 * @param pInstrGC Guest context pointer to privileged instruction
1295 * @param enmType Lookup type
1296 * @param fDirty Dirty flag
1297 *
1298 * @note Be extremely careful with this function. Make absolutely sure the guest
1299 * address is correct! (to avoid executing instructions twice!)
1300 */
1301void patmR3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC, PATM_LOOKUP_TYPE enmType, bool fDirty)
1302{
1303 bool ret;
1304 PRECPATCHTOGUEST pPatchToGuestRec;
1305 PRECGUESTTOPATCH pGuestToPatchRec;
1306 uint32_t PatchOffset = pPatchInstrHC - pVM->patm.s.pPatchMemHC; /* Offset in memory reserved for PATM. */
1307
1308 LogFlowFunc(("pVM=%#p pPatch=%#p pPatchInstrHC=%#p pInstrGC=%#x enmType=%d fDirty=%RTbool\n",
1309 pVM, pPatch, pPatchInstrHC, pInstrGC, enmType, fDirty));
1310
1311 if (enmType == PATM_LOOKUP_PATCH2GUEST)
1312 {
1313 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1314 if (pPatchToGuestRec && pPatchToGuestRec->Core.Key == PatchOffset)
1315 return; /* already there */
1316
1317 Assert(!pPatchToGuestRec);
1318 }
1319#ifdef VBOX_STRICT
1320 else
1321 {
1322 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1323 Assert(!pPatchToGuestRec);
1324 }
1325#endif
1326
1327 pPatchToGuestRec = (PRECPATCHTOGUEST)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(RECPATCHTOGUEST) + sizeof(RECGUESTTOPATCH));
1328 Assert(pPatchToGuestRec);
1329 pPatchToGuestRec->Core.Key = PatchOffset;
1330 pPatchToGuestRec->pOrgInstrGC = pInstrGC;
1331 pPatchToGuestRec->enmType = enmType;
1332 pPatchToGuestRec->fDirty = fDirty;
1333
1334 ret = RTAvlU32Insert(&pPatch->Patch2GuestAddrTree, &pPatchToGuestRec->Core);
1335 Assert(ret);
1336
1337 /* GC to patch address */
1338 if (enmType == PATM_LOOKUP_BOTHDIR)
1339 {
1340 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
1341 if (!pGuestToPatchRec)
1342 {
1343 pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1344 pGuestToPatchRec->Core.Key = pInstrGC;
1345 pGuestToPatchRec->PatchOffset = PatchOffset;
1346
1347 ret = RTAvlU32Insert(&pPatch->Guest2PatchAddrTree, &pGuestToPatchRec->Core);
1348 Assert(ret);
1349 }
1350 }
1351
1352 pPatch->nrPatch2GuestRecs++;
1353}
1354
1355
1356/**
1357 * Removes a patch to guest lookup record
1358 *
1359 * @param pVM Pointer to the VM.
1360 * @param pPatch Patch structure ptr
1361 * @param pPatchInstrGC Guest context pointer to patch block
1362 */
1363void patmr3RemoveP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pPatchInstrGC)
1364{
1365 PAVLU32NODECORE pNode;
1366 PAVLU32NODECORE pNode2;
1367 PRECPATCHTOGUEST pPatchToGuestRec;
1368 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
1369
1370 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1371 Assert(pPatchToGuestRec);
1372 if (pPatchToGuestRec)
1373 {
1374 if (pPatchToGuestRec->enmType == PATM_LOOKUP_BOTHDIR)
1375 {
1376 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1377
1378 Assert(pGuestToPatchRec->Core.Key);
1379 pNode2 = RTAvlU32Remove(&pPatch->Guest2PatchAddrTree, pGuestToPatchRec->Core.Key);
1380 Assert(pNode2);
1381 }
1382 pNode = RTAvlU32Remove(&pPatch->Patch2GuestAddrTree, pPatchToGuestRec->Core.Key);
1383 Assert(pNode);
1384
1385 MMR3HeapFree(pPatchToGuestRec);
1386 pPatch->nrPatch2GuestRecs--;
1387 }
1388}
1389
1390
1391/**
1392 * RTAvlPVDestroy callback.
1393 */
1394static DECLCALLBACK(int) patmEmptyTreePVCallback(PAVLPVNODECORE pNode, void *)
1395{
1396 MMR3HeapFree(pNode);
1397 return 0;
1398}
1399
1400/**
1401 * Empty the specified tree (PV tree, MMR3 heap)
1402 *
1403 * @param pVM Pointer to the VM.
1404 * @param ppTree Tree to empty
1405 */
1406static void patmEmptyTree(PVM pVM, PAVLPVNODECORE *ppTree)
1407{
1408 NOREF(pVM);
1409 RTAvlPVDestroy(ppTree, patmEmptyTreePVCallback, NULL);
1410}
1411
1412
1413/**
1414 * RTAvlU32Destroy callback.
1415 */
1416static DECLCALLBACK(int) patmEmptyTreeU32Callback(PAVLU32NODECORE pNode, void *)
1417{
1418 MMR3HeapFree(pNode);
1419 return 0;
1420}
1421
1422/**
1423 * Empty the specified tree (U32 tree, MMR3 heap)
1424 *
1425 * @param pVM Pointer to the VM.
1426 * @param ppTree Tree to empty
1427 */
1428static void patmEmptyTreeU32(PVM pVM, PPAVLU32NODECORE ppTree)
1429{
1430 NOREF(pVM);
1431 RTAvlU32Destroy(ppTree, patmEmptyTreeU32Callback, NULL);
1432}
1433
1434
1435/**
1436 * Analyses the instructions following the cli for compliance with our heuristics for cli & pushf
1437 *
1438 * @returns VBox status code.
1439 * @param pVM Pointer to the VM.
1440 * @param pCpu CPU disassembly state
1441 * @param pInstrGC Guest context pointer to privileged instruction
1442 * @param pCurInstrGC Guest context pointer to the current instruction
1443 * @param pCacheRec Cache record ptr
1444 *
1445 */
1446static int patmAnalyseBlockCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1447{
1448 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1449 bool fIllegalInstr = false;
1450
1451 /*
1452 * Preliminary heuristics:
1453 * - no call instructions without a fixed displacement between cli and sti/popf
1454 * - no jumps in the instructions following cli (4+ bytes; enough for the replacement jump (5 bytes))
1455 * - no nested pushf/cli
1456 * - sti/popf should be the (eventual) target of all branches
1457 * - no near or far returns; no int xx, no into
1458 *
1459 * Note: Later on we can impose less strict guidelines if the need arises
1460 */
1461
1462 /* Bail out if the patch gets too big. */
1463 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1464 {
1465 Log(("Code block too big (%x) for patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1466 fIllegalInstr = true;
1467 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1468 }
1469 else
1470 {
1471 /* No unconditional jumps or calls without fixed displacements. */
1472 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1473 && (pCpu->pCurInstr->uOpcode == OP_JMP || pCpu->pCurInstr->uOpcode == OP_CALL)
1474 )
1475 {
1476 Assert(pCpu->Param1.cb <= 4 || pCpu->Param1.cb == 6);
1477 if ( pCpu->Param1.cb == 6 /* far call/jmp */
1478 || (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1479 || (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1480 )
1481 {
1482 fIllegalInstr = true;
1483 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1484 }
1485 }
1486
1487 /* An unconditional (short) jump right after a cli is a potential problem; we will overwrite whichever function comes afterwards */
1488 if (pPatch->opcode == OP_CLI && pCpu->pCurInstr->uOpcode == OP_JMP)
1489 {
1490 if ( pCurInstrGC > pPatch->pPrivInstrGC
1491 && pCurInstrGC + pCpu->cbInstr < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1492 {
1493 Log(("Dangerous unconditional jump ends in our generated patch jump!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1494 /* We turn this one into an int 3 callable patch. */
1495 pPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
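 /* (For an int 3 callable patch the guest code is left intact except for the first byte,
  *  which patmActivateInt3Patch later replaces with 0xCC; the patch is then entered via
  *  the breakpoint trap handler instead of an inline 5 byte jump.) */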
1496 }
1497 }
1498 else
1499 /* no nested pushfs just yet; nested cli is allowed for cli patches though. */
1500 if (pPatch->opcode == OP_PUSHF)
1501 {
1502 if (pCurInstrGC != pInstrGC && pCpu->pCurInstr->uOpcode == OP_PUSHF)
1503 {
1504 fIllegalInstr = true;
1505 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1506 }
1507 }
1508
1509 /* no far returns */
1510 if (pCpu->pCurInstr->uOpcode == OP_RETF)
1511 {
1512 pPatch->pTempInfo->nrRetInstr++;
1513 fIllegalInstr = true;
1514 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1515 }
1516 else if ( pCpu->pCurInstr->uOpcode == OP_INT3
1517 || pCpu->pCurInstr->uOpcode == OP_INT
1518 || pCpu->pCurInstr->uOpcode == OP_INTO)
1519 {
1520 /* No int xx or into either. */
1521 fIllegalInstr = true;
1522 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1523 }
1524 }
1525
1526 pPatch->cbPatchBlockSize += pCpu->cbInstr;
1527
1528 /* Illegal instruction -> end of analysis phase for this code block */
1529 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1530 return VINF_SUCCESS;
1531
1532 /* Check for exit points. */
1533 switch (pCpu->pCurInstr->uOpcode)
1534 {
1535 case OP_SYSEXIT:
1536 return VINF_SUCCESS; /* duplicate it; will fault or be emulated in GC. */
1537
1538 case OP_SYSENTER:
1539 case OP_ILLUD2:
1540 /* This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further. */
1541 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1542 return VINF_SUCCESS;
1543
1544 case OP_STI:
1545 case OP_POPF:
1546 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION)));
1547 /* If our exit point lies within the generated patch jump, then we have to refuse!! */
1548 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1549 {
1550 Log(("Exit point within patch jump itself!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1551 return VERR_PATCHING_REFUSED;
1552 }
1553 if (pPatch->opcode == OP_PUSHF)
1554 {
1555 if (pCpu->pCurInstr->uOpcode == OP_POPF)
1556 {
1557 if (pPatch->cbPatchBlockSize >= SIZEOF_NEARJUMP32)
1558 return VINF_SUCCESS;
1559
1560 /* Or else we need to duplicate more instructions, because we can't jump back yet! */
1561 Log(("WARNING: End of block reached, but we need to duplicate some extra instruction to avoid a conflict with the patch jump\n"));
1562 pPatch->flags |= PATMFL_CHECK_SIZE;
1563 }
1564 break; /* sti doesn't mark the end of a pushf block; only popf does. */
1565 }
1566 /* else: fall through. */
1567 case OP_RETN: /* exit point for function replacement */
1568 return VINF_SUCCESS;
1569
1570 case OP_IRET:
1571 return VINF_SUCCESS; /* exitpoint */
1572
1573 case OP_CPUID:
1574 case OP_CALL:
1575 case OP_JMP:
1576 break;
1577
1578#ifdef VBOX_WITH_SAFE_STR /** @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table */
1579 case OP_STR:
1580 break;
1581#endif
1582
1583 default:
1584 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
1585 {
1586 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1587 return VINF_SUCCESS; /* exit point */
1588 }
1589 break;
1590 }
1591
1592 /* If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump. */
1593 if ((pPatch->flags & PATMFL_CHECK_SIZE) && pPatch->cbPatchBlockSize > SIZEOF_NEARJUMP32 && !(pCpu->pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW))
1594 {
1595 /* The end marker for this kind of patch is any instruction at a location outside our patch jump. */
1596 Log(("End of block at %RRv size %d\n", pCurInstrGC, pCpu->cbInstr));
1597 return VINF_SUCCESS;
1598 }
1599
1600 return VWRN_CONTINUE_ANALYSIS;
1601}
1602
1603/**
1604 * Analyses the instructions inside a function for compliance
1605 *
1606 * @returns VBox status code.
1607 * @param pVM Pointer to the VM.
1608 * @param pCpu CPU disassembly state
1609 * @param pInstrGC Guest context pointer to privileged instruction
1610 * @param pCurInstrGC Guest context pointer to the current instruction
1611 * @param pCacheRec Cache record ptr
1612 *
1613 */
1614static int patmAnalyseFunctionCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1615{
1616 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1617 bool fIllegalInstr = false;
1618 NOREF(pInstrGC);
1619
1620 // Preliminary heuristics:
1621 // - no call instructions
1622 // - ret ends a block
1623
1624 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
1625
1626 // bail out if the patch gets too big
1627 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1628 {
1629 Log(("Code block too big (%x) for function patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1630 fIllegalInstr = true;
1631 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1632 }
1633 else
1634 {
1635 // no unconditional jumps or calls without fixed displacements
1636 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1637 && (pCpu->pCurInstr->uOpcode == OP_JMP || pCpu->pCurInstr->uOpcode == OP_CALL)
1638 )
1639 {
1640 Assert(pCpu->Param1.cb <= 4 || pCpu->Param1.cb == 6);
1641 if ( pCpu->Param1.cb == 6 /* far call/jmp */
1642 || (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1643 || (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1644 )
1645 {
1646 fIllegalInstr = true;
1647 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1648 }
1649 }
1650 else /* no far returns */
1651 if (pCpu->pCurInstr->uOpcode == OP_RETF)
1652 {
1653 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1654 fIllegalInstr = true;
1655 }
1656 else /* no int xx or into either */
1657 if (pCpu->pCurInstr->uOpcode == OP_INT3 || pCpu->pCurInstr->uOpcode == OP_INT || pCpu->pCurInstr->uOpcode == OP_INTO)
1658 {
1659 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1660 fIllegalInstr = true;
1661 }
1662
1663 #if 0
1664 ///@todo we can handle certain in/out and privileged instructions in the guest context
1665 if (pCpu->pCurInstr->fOpType & DISOPTYPE_PRIVILEGED && pCpu->pCurInstr->uOpcode != OP_STI)
1666 {
1667 Log(("Illegal instructions for function patch!!\n"));
1668 return VERR_PATCHING_REFUSED;
1669 }
1670 #endif
1671 }
1672
1673 pPatch->cbPatchBlockSize += pCpu->cbInstr;
1674
1675 /* Illegal instruction -> end of analysis phase for this code block */
1676 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1677 {
1678 return VINF_SUCCESS;
1679 }
1680
1681 // Check for exit points
1682 switch (pCpu->pCurInstr->uOpcode)
1683 {
1684 case OP_ILLUD2:
1685 // This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further
1686 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1687 return VINF_SUCCESS;
1688
1689 case OP_IRET:
1690 case OP_SYSEXIT: /* will fault or emulated in GC */
1691 case OP_RETN:
1692 return VINF_SUCCESS;
1693
1694#ifdef VBOX_WITH_SAFE_STR /** @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table */
1695 case OP_STR:
1696 break;
1697#endif
1698
1699 case OP_POPF:
1700 case OP_STI:
1701 return VWRN_CONTINUE_ANALYSIS;
1702 default:
1703 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
1704 {
1705 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1706 return VINF_SUCCESS; /* exit point */
1707 }
1708 return VWRN_CONTINUE_ANALYSIS;
1709 }
1710
1711 return VWRN_CONTINUE_ANALYSIS;
1712}
1713
1714/**
1715 * Recompiles the instructions in a code block
1716 *
1717 * @returns VBox status code.
1718 * @param pVM Pointer to the VM.
1719 * @param pCpu CPU disassembly state
1720 * @param pInstrGC Guest context pointer to privileged instruction
1721 * @param pCurInstrGC Guest context pointer to the current instruction
1722 * @param pCacheRec Cache record ptr
1723 *
1724 */
1725static int patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1726{
1727 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1728 int rc = VINF_SUCCESS;
1729 bool fInhibitIRQInstr = false; /* did the instruction cause PATMFL_INHIBITIRQS to be set? */
1730
1731 LogFlow(("patmRecompileCallback %RRv %RRv\n", pInstrGC, pCurInstrGC));
1732
1733 if ( patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pCurInstrGC) != 0
1734 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT)) /* do not do this when the next instruction *must* be executed! */
1735 {
1736 /*
1737 * Been there, done that; so insert a jump (we don't want to duplicate code)
1738 * no need to record this instruction as it's glue code that never crashes (it had better not!)
1739 */
1740 Log(("patmRecompileCallback: jump to code we've recompiled before %RRv!\n", pCurInstrGC));
1741 return patmPatchGenRelJump(pVM, pPatch, pCurInstrGC, OP_JMP, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1742 }
1743
1744 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1745 {
1746 rc = patmAnalyseFunctionCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1747 }
1748 else
1749 rc = patmAnalyseBlockCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1750
1751 if (RT_FAILURE(rc))
1752 return rc;
1753
1754 /* Note: Never do a direct return unless a failure is encountered! */
1755
1756 /* Clear recompilation of next instruction flag; we are doing that right here. */
1757 if (pPatch->flags & PATMFL_RECOMPILE_NEXT)
1758 pPatch->flags &= ~PATMFL_RECOMPILE_NEXT;
1759
1760 /* Add lookup record for patch to guest address translation */
1761 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
1762
1763 /* Update lowest and highest instruction address for this patch */
1764 if (pCurInstrGC < pPatch->pInstrGCLowest)
1765 pPatch->pInstrGCLowest = pCurInstrGC;
1766 else
1767 if (pCurInstrGC > pPatch->pInstrGCHighest)
1768 pPatch->pInstrGCHighest = pCurInstrGC + pCpu->cbInstr;
1769
1770 /* Illegal instruction -> end of recompile phase for this code block. */
1771 if (patmIsIllegalInstr(pPatch, pCurInstrGC))
1772 {
1773 Log(("Illegal instruction at %RRv -> mark with int 3\n", pCurInstrGC));
1774 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1775 goto end;
1776 }
1777
1778 /* For our first attempt, we'll handle only simple relative jumps (immediate offset coded in instruction).
1779 * Indirect calls are handled below.
1780 */
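 /* In encoding terms this accepts e.g. E8 rel32 (call), E9 rel32 / EB rel8 (jmp) and
  * 0F 8x rel32 (jcc), but not FF /2 or FF /4 (indirect call/jmp) nor far ptr16:32 forms,
  * since only OP_PARM_J operands carry the displacement in the instruction itself. */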
1781 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1782 && (pCpu->pCurInstr->uOpcode != OP_CALL || (pPatch->flags & PATMFL_SUPPORT_CALLS))
1783 && (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) == OP_PARM_J))
1784 {
1785 RCPTRTYPE(uint8_t *) pTargetGC = PATMResolveBranch(pCpu, pCurInstrGC);
1786 if (pTargetGC == 0)
1787 {
1788 Log(("We don't support far jumps here!! (%08X)\n", pCpu->Param1.fUse));
1789 return VERR_PATCHING_REFUSED;
1790 }
1791
1792 if (pCpu->pCurInstr->uOpcode == OP_CALL)
1793 {
1794 Assert(!PATMIsPatchGCAddr(pVM, pTargetGC));
1795 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, pTargetGC, false);
1796 if (RT_FAILURE(rc))
1797 goto end;
1798 }
1799 else
1800 rc = patmPatchGenRelJump(pVM, pPatch, pTargetGC, pCpu->pCurInstr->uOpcode, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1801
1802 if (RT_SUCCESS(rc))
1803 rc = VWRN_CONTINUE_RECOMPILE;
1804
1805 goto end;
1806 }
1807
1808 switch (pCpu->pCurInstr->uOpcode)
1809 {
1810 case OP_CLI:
1811 {
1812 /* If a cli is found while duplicating instructions for another patch, then it's of vital importance to continue
1813 * until we've found the proper exit point(s).
1814 */
1815 if ( pCurInstrGC != pInstrGC
1816 && !(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1817 )
1818 {
1819 Log(("cli instruction found in other instruction patch block; force it to continue & find an exit point\n"));
1820 pPatch->flags &= ~(PATMFL_CHECK_SIZE | PATMFL_SINGLE_INSTRUCTION);
1821 }
1822 /* Set by irq inhibition; no longer valid now. */
1823 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1824
1825 rc = patmPatchGenCli(pVM, pPatch);
1826 if (RT_SUCCESS(rc))
1827 rc = VWRN_CONTINUE_RECOMPILE;
1828 break;
1829 }
1830
1831 case OP_MOV:
1832 if (pCpu->pCurInstr->fOpType & DISOPTYPE_POTENTIALLY_DANGEROUS)
1833 {
1834 /* mov ss, src? */
1835 if ( (pCpu->Param1.fUse & DISUSE_REG_SEG)
1836 && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS))
1837 {
1838 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1839 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1840 /** @todo this could cause a fault (ring 0 selector being loaded in ring 1) */
1841 }
1842#if 0 /* necessary for Haiku */
1843 else
1844 if ( (pCpu->Param2.fUse & DISUSE_REG_SEG)
1845 && (pCpu->Param2.Base.idxSegReg == USE_REG_SS)
1846 && (pCpu->Param1.fUse & (DISUSE_REG_GEN32|DISUSE_REG_GEN16))) /** @todo memory operand must in theory be handled too */
1847 {
1848 /* mov GPR, ss */
1849 rc = patmPatchGenMovFromSS(pVM, pPatch, pCpu, pCurInstrGC);
1850 if (RT_SUCCESS(rc))
1851 rc = VWRN_CONTINUE_RECOMPILE;
1852 break;
1853 }
1854#endif
1855 }
1856 goto duplicate_instr;
1857
1858 case OP_POP:
1859 /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS)) */
1860 if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_SS)
1861 {
1862 Assert(pCpu->pCurInstr->fOpType & DISOPTYPE_INHIBIT_IRQS);
1863
1864 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1865 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1866 }
1867 goto duplicate_instr;
1868
1869 case OP_STI:
1870 {
1871 RTRCPTR pNextInstrGC = 0; /* by default no inhibit irq */
1872
1873 /* In a sequence of instructions that inhibit irqs, only the first one actually inhibits irqs. */
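 /* (On x86, sti enables interrupts only after the instruction following it has completed,
  *  so that next instruction must be kept inside the patch as well.) */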
1874 if (!(pPatch->flags & PATMFL_INHIBIT_IRQS))
1875 {
1876 pPatch->flags |= PATMFL_INHIBIT_IRQS | PATMFL_GENERATE_JUMPTOGUEST;
1877 fInhibitIRQInstr = true;
1878 pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
1879 Log(("Inhibit irqs for instruction OP_STI at %RRv\n", pCurInstrGC));
1880 }
1881 rc = patmPatchGenSti(pVM, pPatch, pCurInstrGC, pNextInstrGC);
1882
1883 if (RT_SUCCESS(rc))
1884 {
1885 DISCPUSTATE cpu = *pCpu;
1886 unsigned cbInstr;
1887 int disret;
1888 RCPTRTYPE(uint8_t *) pReturnInstrGC;
1889
1890 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1891
1892 pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
1893 { /* Force pNextInstrHC out of scope after using it */
1894 uint8_t *pNextInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
1895 if (pNextInstrHC == NULL)
1896 {
1897 AssertFailed();
1898 return VERR_PATCHING_REFUSED;
1899 }
1900
1901 // Disassemble the next instruction
1902 disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
1903 }
1904 if (disret == false)
1905 {
1906 AssertMsgFailed(("STI: Disassembly failed (probably page not present) -> return to caller\n"));
1907 return VERR_PATCHING_REFUSED;
1908 }
1909 pReturnInstrGC = pNextInstrGC + cbInstr;
1910
1911 if ( (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1912 || pReturnInstrGC <= pInstrGC
1913 || pReturnInstrGC - pInstrGC >= SIZEOF_NEARJUMP32
1914 )
1915 {
1916 /* Not an exit point for function duplication patches */
1917 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
1918 && RT_SUCCESS(rc))
1919 {
1920 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST; /* Don't generate a jump back */
1921 rc = VWRN_CONTINUE_RECOMPILE;
1922 }
1923 else
1924 rc = VINF_SUCCESS; //exit point
1925 }
1926 else {
1927 Log(("PATM: sti occurred too soon; refusing patch!\n"));
1928 rc = VERR_PATCHING_REFUSED; //not allowed!!
1929 }
1930 }
1931 break;
1932 }
1933
1934 case OP_POPF:
1935 {
1936 bool fGenerateJmpBack = (pCurInstrGC + pCpu->cbInstr - pInstrGC >= SIZEOF_NEARJUMP32);
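 /* Only jump back to guest code once we are past the 5 bytes that the patch jump
  * overwrites; otherwise we would land in the middle of our own jump. */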
1937
1938 /* Not an exit point for IDT handler or function replacement patches */
1939 /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
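 /* (With CPL > IOPL a guest popf silently leaves IF unchanged instead of faulting, which is
  *  presumably why PATMPopf32Replacement has to emulate the flag handling explicitly.) */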
1940 if (pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_DUPLICATE_FUNCTION))
1941 fGenerateJmpBack = false;
1942
1943 rc = patmPatchGenPopf(pVM, pPatch, pCurInstrGC + pCpu->cbInstr, !!(pCpu->fPrefix & DISPREFIX_OPSIZE), fGenerateJmpBack);
1944 if (RT_SUCCESS(rc))
1945 {
1946 if (fGenerateJmpBack == false)
1947 {
1948 /* Not an exit point for IDT handler or function replacement patches */
1949 rc = VWRN_CONTINUE_RECOMPILE;
1950 }
1951 else
1952 {
1953 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1954 rc = VINF_SUCCESS; /* exit point! */
1955 }
1956 }
1957 break;
1958 }
1959
1960 case OP_PUSHF:
1961 rc = patmPatchGenPushf(pVM, pPatch, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1962 if (RT_SUCCESS(rc))
1963 rc = VWRN_CONTINUE_RECOMPILE;
1964 break;
1965
1966 case OP_PUSH:
1967 /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_CS)) */
1968 if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_CS)
1969 {
1970 rc = patmPatchGenPushCS(pVM, pPatch);
1971 if (RT_SUCCESS(rc))
1972 rc = VWRN_CONTINUE_RECOMPILE;
1973 break;
1974 }
1975 goto duplicate_instr;
1976
1977 case OP_IRET:
1978 Log(("IRET at %RRv\n", pCurInstrGC));
1979 rc = patmPatchGenIret(pVM, pPatch, pCurInstrGC, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1980 if (RT_SUCCESS(rc))
1981 {
1982 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1983 rc = VINF_SUCCESS; /* exit point by definition */
1984 }
1985 break;
1986
1987 case OP_ILLUD2:
1988 /* This appears to be some kind of kernel panic in Linux 2.4; no point to continue */
1989 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1990 if (RT_SUCCESS(rc))
1991 rc = VINF_SUCCESS; /* exit point by definition */
1992 Log(("Illegal opcode (0xf 0xb)\n"));
1993 break;
1994
1995 case OP_CPUID:
1996 rc = patmPatchGenCpuid(pVM, pPatch, pCurInstrGC);
1997 if (RT_SUCCESS(rc))
1998 rc = VWRN_CONTINUE_RECOMPILE;
1999 break;
2000
2001 case OP_STR:
2002#ifdef VBOX_WITH_SAFE_STR /* @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table and move OP_STR into #ifndef */
2003 /* Now safe because our shadow TR entry is identical to the guest's. */
2004 goto duplicate_instr;
2005#endif
2006 case OP_SLDT:
2007 rc = patmPatchGenSldtStr(pVM, pPatch, pCpu, pCurInstrGC);
2008 if (RT_SUCCESS(rc))
2009 rc = VWRN_CONTINUE_RECOMPILE;
2010 break;
2011
2012 case OP_SGDT:
2013 case OP_SIDT:
2014 rc = patmPatchGenSxDT(pVM, pPatch, pCpu, pCurInstrGC);
2015 if (RT_SUCCESS(rc))
2016 rc = VWRN_CONTINUE_RECOMPILE;
2017 break;
2018
2019 case OP_RETN:
2020 /* retn is an exit point for function patches */
2021 rc = patmPatchGenRet(pVM, pPatch, pCpu, pCurInstrGC);
2022 if (RT_SUCCESS(rc))
2023 rc = VINF_SUCCESS; /* exit point by definition */
2024 break;
2025
2026 case OP_SYSEXIT:
2027 /* Duplicate it, so it can be emulated in GC (or fault). */
2028 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
2029 if (RT_SUCCESS(rc))
2030 rc = VINF_SUCCESS; /* exit point by definition */
2031 break;
2032
2033 case OP_CALL:
2034 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
2035 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
2036 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
2037 */
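 /* Note: the 0xDEADBEEF below appears to be just a placeholder; for indirect calls the real
  * destination is only known at run time and is resolved by the generated call helper code. */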
2038 Assert(pCpu->Param1.cb == 4 || pCpu->Param1.cb == 6);
2039 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->Param1.cb == 4 /* no far calls! */)
2040 {
2041 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, (RTRCPTR)0xDEADBEEF, true);
2042 if (RT_SUCCESS(rc))
2043 {
2044 rc = VWRN_CONTINUE_RECOMPILE;
2045 }
2046 break;
2047 }
2048 goto gen_illegal_instr;
2049
2050 case OP_JMP:
2051 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
2052 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
2053 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
2054 */
2055 Assert(pCpu->Param1.cb == 4 || pCpu->Param1.cb == 6);
2056 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->Param1.cb == 4 /* no far jumps! */)
2057 {
2058 rc = patmPatchGenJump(pVM, pPatch, pCpu, pCurInstrGC);
2059 if (RT_SUCCESS(rc))
2060 rc = VINF_SUCCESS; /* end of branch */
2061 break;
2062 }
2063 goto gen_illegal_instr;
2064
2065 case OP_INT3:
2066 case OP_INT:
2067 case OP_INTO:
2068 goto gen_illegal_instr;
2069
2070 case OP_MOV_DR:
2071 /* Note: currently we let DRx writes cause a trap 0d (#GP); our trap handler will decide whether to interpret it or not. */
2072 if (pCpu->pCurInstr->fParam2 == OP_PARM_Dd)
2073 {
2074 rc = patmPatchGenMovDebug(pVM, pPatch, pCpu);
2075 if (RT_SUCCESS(rc))
2076 rc = VWRN_CONTINUE_RECOMPILE;
2077 break;
2078 }
2079 goto duplicate_instr;
2080
2081 case OP_MOV_CR:
2082 /* Note: currently we let CRx writes cause a trap 0d (#GP); our trap handler will decide whether to interpret it or not. */
2083 if (pCpu->pCurInstr->fParam2 == OP_PARM_Cd)
2084 {
2085 rc = patmPatchGenMovControl(pVM, pPatch, pCpu);
2086 if (RT_SUCCESS(rc))
2087 rc = VWRN_CONTINUE_RECOMPILE;
2088 break;
2089 }
2090 goto duplicate_instr;
2091
2092 default:
2093 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_CONTROLFLOW | DISOPTYPE_PRIVILEGED_NOTRAP))
2094 {
2095gen_illegal_instr:
2096 rc = patmPatchGenIllegalInstr(pVM, pPatch);
2097 if (RT_SUCCESS(rc))
2098 rc = VINF_SUCCESS; /* exit point by definition */
2099 }
2100 else
2101 {
2102duplicate_instr:
2103 Log(("patmPatchGenDuplicate\n"));
2104 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
2105 if (RT_SUCCESS(rc))
2106 rc = VWRN_CONTINUE_RECOMPILE;
2107 }
2108 break;
2109 }
2110
2111end:
2112
2113 if ( !fInhibitIRQInstr
2114 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2115 {
2116 int rc2;
2117 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
2118
2119 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2120 Log(("Clear inhibit IRQ flag at %RRv\n", pCurInstrGC));
2121 if (pPatch->flags & PATMFL_GENERATE_JUMPTOGUEST)
2122 {
2123 Log(("patmRecompileCallback: generate jump back to guest (%RRv) after fused instruction\n", pNextInstrGC));
2124
2125 rc2 = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2126 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
2127 rc = VINF_SUCCESS; /* end of the line */
2128 }
2129 else
2130 {
2131 rc2 = patmPatchGenClearInhibitIRQ(pVM, pPatch, pNextInstrGC);
2132 }
2133 if (RT_FAILURE(rc2))
2134 rc = rc2;
2135 }
2136
2137 if (RT_SUCCESS(rc))
2138 {
2139 // If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
2140 if ( (pPatch->flags & PATMFL_CHECK_SIZE)
2141 && pCurInstrGC + pCpu->cbInstr - pInstrGC >= SIZEOF_NEARJUMP32
2142 && !(pCpu->pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW)
2143 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT) /* do not do this when the next instruction *must* be executed! */
2144 )
2145 {
2146 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
2147
2148 // The end marker for this kind of patch is any instruction at a location outside our patch jump
2149 Log(("patmRecompileCallback: end found for single instruction patch at %RRv cbInstr %d\n", pNextInstrGC, pCpu->cbInstr));
2150
2151 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC);
2152 AssertRC(rc);
2153 }
2154 }
2155 return rc;
2156}
2157
2158
2159#ifdef LOG_ENABLED
2160
2161/**
2162 * Adds a disasm jump record (temporary, to prevent duplicate analysis)
2163 *
2164 * @param pVM Pointer to the VM.
2165 * @param pPatch Patch structure ptr
2166 * @param pInstrGC Guest context pointer to privileged instruction
2167 *
2168 */
2169static void patmPatchAddDisasmJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
2170{
2171 PAVLPVNODECORE pRec;
2172
2173 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
2174 Assert(pRec);
2175 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
2176
2177 int ret = RTAvlPVInsert(&pPatch->pTempInfo->DisasmJumpTree, pRec);
2178 Assert(ret);
2179}
2180
2181/**
2182 * Checks if jump target has been analysed before.
2183 *
2184 * @returns true if the jump target has been analysed before, false otherwise.
2185 * @param pPatch Patch struct
2186 * @param pInstrGC Jump target
2187 *
2188 */
2189static bool patmIsKnownDisasmJump(PPATCHINFO pPatch, RTRCPTR pInstrGC)
2190{
2191 PAVLPVNODECORE pRec;
2192
2193 pRec = RTAvlPVGet(&pPatch->pTempInfo->DisasmJumpTree, (AVLPVKEY)(uintptr_t)pInstrGC);
2194 if (pRec)
2195 return true;
2196 return false;
2197}
2198
2199/**
2200 * For proper disassembly of the final patch block
2201 *
2202 * @returns VBox status code.
2203 * @param pVM Pointer to the VM.
2204 * @param pCpu CPU disassembly state
2205 * @param pInstrGC Guest context pointer to privileged instruction
2206 * @param pCurInstrGC Guest context pointer to the current instruction
2207 * @param pCacheRec Cache record ptr
2208 *
2209 */
2210int patmr3DisasmCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
2211{
2212 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2213 NOREF(pInstrGC);
2214
2215 if (pCpu->pCurInstr->uOpcode == OP_INT3)
2216 {
2217 /* Could be an int3 inserted in a call patch. Check to be sure */
2218 DISCPUSTATE cpu;
2219 RTRCPTR pOrgJumpGC;
2220
2221 pOrgJumpGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2222
2223 { /* Force pOrgJumpHC out of scope after using it */
2224 uint8_t *pOrgJumpHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pOrgJumpGC);
2225
2226 bool disret = patmR3DisInstr(pVM, pPatch, pOrgJumpGC, pOrgJumpHC, PATMREAD_ORGCODE, &cpu, NULL);
2227 if (!disret || cpu.pCurInstr->uOpcode != OP_CALL || cpu.Param1.cb != 4 /* only near calls */)
2228 return VINF_SUCCESS;
2229 }
2230 return VWRN_CONTINUE_ANALYSIS;
2231 }
2232
2233 if ( pCpu->pCurInstr->uOpcode == OP_ILLUD2
2234 && PATMIsPatchGCAddr(pVM, pCurInstrGC))
2235 {
2236 /* the indirect call patch contains a 0xF/0xB illegal instruction to call for assistance; check for this and continue */
2237 return VWRN_CONTINUE_ANALYSIS;
2238 }
2239
2240 if ( (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
2241 || pCpu->pCurInstr->uOpcode == OP_INT
2242 || pCpu->pCurInstr->uOpcode == OP_IRET
2243 || pCpu->pCurInstr->uOpcode == OP_RETN
2244 || pCpu->pCurInstr->uOpcode == OP_RETF
2245 )
2246 {
2247 return VINF_SUCCESS;
2248 }
2249
2250 if (pCpu->pCurInstr->uOpcode == OP_ILLUD2)
2251 return VINF_SUCCESS;
2252
2253 return VWRN_CONTINUE_ANALYSIS;
2254}
2255
2256
2257/**
2258 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2259 *
2260 * @returns VBox status code.
2261 * @param pVM Pointer to the VM.
2262 * @param pInstrGC Guest context pointer to the initial privileged instruction
2263 * @param pCurInstrGC Guest context pointer to the current instruction
2264 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2265 * @param pCacheRec Cache record ptr
2266 *
2267 */
2268int patmr3DisasmCode(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2269{
2270 DISCPUSTATE cpu;
2271 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2272 int rc = VWRN_CONTINUE_ANALYSIS;
2273 uint32_t cbInstr, delta;
2274 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2275 bool disret;
2276 char szOutput[256];
2277
2278 Assert(pCurInstrHC != PATCHCODE_PTR_HC(pPatch) || pPatch->pTempInfo->DisasmJumpTree == 0);
2279
2280 /* We need this to determine branch targets (and for disassembling). */
2281 delta = pVM->patm.s.pPatchMemGC - (uintptr_t)pVM->patm.s.pPatchMemHC;
2282
2283 while (rc == VWRN_CONTINUE_ANALYSIS)
2284 {
2285 pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2286 if (pCurInstrHC == NULL)
2287 {
2288 rc = VERR_PATCHING_REFUSED;
2289 goto end;
2290 }
2291
2292 disret = patmR3DisInstrToStr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_RAWCODE,
2293 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2294 if (PATMIsPatchGCAddr(pVM, pCurInstrGC))
2295 {
2296 RTRCPTR pOrgInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2297
2298 if (pOrgInstrGC != pPatch->pTempInfo->pLastDisasmInstrGC)
2299 Log(("DIS %RRv<-%s", pOrgInstrGC, szOutput));
2300 else
2301 Log(("DIS %s", szOutput));
2302
2303 pPatch->pTempInfo->pLastDisasmInstrGC = pOrgInstrGC;
2304 if (patmIsIllegalInstr(pPatch, pOrgInstrGC))
2305 {
2306 rc = VINF_SUCCESS;
2307 goto end;
2308 }
2309 }
2310 else
2311 Log(("DIS: %s", szOutput));
2312
2313 if (disret == false)
2314 {
2315 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2316 rc = VINF_SUCCESS;
2317 goto end;
2318 }
2319
2320 rc = pfnPATMR3Disasm(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2321 if (rc != VWRN_CONTINUE_ANALYSIS) {
2322 break; //done!
2323 }
2324
2325 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction) */
2326 if ( (cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2327 && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J)
2328 && cpu.pCurInstr->uOpcode != OP_CALL /* complete functions are replaced; don't bother here. */
2329 )
2330 {
2331 RTRCPTR pTargetGC = PATMResolveBranch(&cpu, pCurInstrGC);
2332 RTRCPTR pOrgTargetGC;
2333
2334 if (pTargetGC == 0)
2335 {
2336 Log(("We don't support far jumps here!! (%08X)\n", cpu.Param1.fUse));
2337 rc = VERR_PATCHING_REFUSED;
2338 break;
2339 }
2340
2341 if (!PATMIsPatchGCAddr(pVM, pTargetGC))
2342 {
2343 //jump back to guest code
2344 rc = VINF_SUCCESS;
2345 goto end;
2346 }
2347 pOrgTargetGC = PATMR3PatchToGCPtr(pVM, pTargetGC, 0);
2348
2349 if (patmIsCommonIDTHandlerPatch(pVM, pOrgTargetGC))
2350 {
2351 rc = VINF_SUCCESS;
2352 goto end;
2353 }
2354
2355 if (patmIsKnownDisasmJump(pPatch, pTargetGC) == false)
2356 {
2357 /* New jump, let's check it. */
2358 patmPatchAddDisasmJump(pVM, pPatch, pTargetGC);
2359
2360 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2361 rc = patmr3DisasmCode(pVM, pInstrGC, pTargetGC, pfnPATMR3Disasm, pCacheRec);
2362 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2363
2364 if (rc != VINF_SUCCESS) {
2365 break; //done!
2366 }
2367 }
2368 if (cpu.pCurInstr->uOpcode == OP_JMP)
2369 {
2370 /* Unconditional jump; return to caller. */
2371 rc = VINF_SUCCESS;
2372 goto end;
2373 }
2374
2375 rc = VWRN_CONTINUE_ANALYSIS;
2376 }
2377 pCurInstrGC += cbInstr;
2378 }
2379end:
2380 return rc;
2381}
2382
2383/**
2384 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2385 *
2386 * @returns VBox status code.
2387 * @param pVM Pointer to the VM.
2388 * @param pInstrGC Guest context pointer to the initial privileged instruction
2389 * @param pCurInstrGC Guest context pointer to the current instruction
2390 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2391 * @param pCacheRec Cache record ptr
2392 *
2393 */
2394int patmr3DisasmCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2395{
2396 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2397
2398 int rc = patmr3DisasmCode(pVM, pInstrGC, pCurInstrGC, pfnPATMR3Disasm, pCacheRec);
2399 /* Free all disasm jump records. */
2400 patmEmptyTree(pVM, &pPatch->pTempInfo->DisasmJumpTree);
2401 return rc;
2402}
2403
2404#endif /* LOG_ENABLED */
2405
2406/**
2407 * Detects if the specified address falls within a 5 byte jump generated for an active patch.
2408 * If so, this patch is permanently disabled.
2409 *
2410 * @param pVM Pointer to the VM.
2411 * @param pInstrGC Guest context pointer to instruction
2412 * @param pConflictGC Guest context pointer to check
2413 *
2414 * @note also checks for patch hints to make sure they can never be enabled if a conflict is present.
2415 *
2416 */
2417VMMR3_INT_DECL(int) PATMR3DetectConflict(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictGC)
2418{
2419 AssertReturn(!HMIsEnabled(pVM), VERR_PATCH_NO_CONFLICT);
2420 PPATCHINFO pTargetPatch = patmFindActivePatchByEntrypoint(pVM, pConflictGC, true /* include patch hints */);
2421 if (pTargetPatch)
2422 return patmDisableUnusablePatch(pVM, pInstrGC, pConflictGC, pTargetPatch);
2423 return VERR_PATCH_NO_CONFLICT;
2424}
2425
2426/**
2427 * Recompile the code stream until the callback function detects a failure or decides everything is acceptable
2428 *
2429 * @returns VBox status code.
2430 * @param pVM Pointer to the VM.
2431 * @param pInstrGC Guest context pointer to privileged instruction
2432 * @param pCurInstrGC Guest context pointer to the current instruction
2433 * @param pfnPATMR3Recompile Callback for testing the disassembled instruction
2434 * @param pCacheRec Cache record ptr
2435 *
2436 */
2437static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Recompile, PPATMP2GLOOKUPREC pCacheRec)
2438{
2439 DISCPUSTATE cpu;
2440 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2441 int rc = VWRN_CONTINUE_ANALYSIS;
2442 uint32_t cbInstr;
2443 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2444 bool disret;
2445#ifdef LOG_ENABLED
2446 char szOutput[256];
2447#endif
2448
2449 while (rc == VWRN_CONTINUE_RECOMPILE)
2450 {
2451 pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2452 if (pCurInstrHC == NULL)
2453 {
2454 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2455 goto end;
2456 }
2457#ifdef LOG_ENABLED
2458 disret = patmR3DisInstrToStr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE,
2459 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2460 Log(("Recompile: %s", szOutput));
2461#else
2462 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
2463#endif
2464 if (disret == false)
2465 {
2466 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2467
2468 /* Add lookup record for patch to guest address translation */
2469 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
2470 patmPatchGenIllegalInstr(pVM, pPatch);
2471 rc = VINF_SUCCESS; /* Note: don't fail here; we might refuse an important patch!! */
2472 goto end;
2473 }
2474
2475 rc = pfnPATMR3Recompile(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2476 if (rc != VWRN_CONTINUE_RECOMPILE)
2477 {
2478 /* If irqs are inhibited because of the current instruction, then we must make sure the next one is executed! */
2479 if ( rc == VINF_SUCCESS
2480 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2481 {
2482 DISCPUSTATE cpunext;
2483 uint32_t opsizenext;
2484 uint8_t *pNextInstrHC;
2485 RTRCPTR pNextInstrGC = pCurInstrGC + cbInstr;
2486
2487 Log(("patmRecompileCodeStream: irqs inhibited by instruction %RRv\n", pNextInstrGC));
2488
2489 /* Certain instructions (e.g. sti) force the next instruction to be executed before any interrupts can occur.
2490 * Recompile the next instruction as well
2491 */
2492 pNextInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
2493 if (pNextInstrHC == NULL)
2494 {
2495 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2496 goto end;
2497 }
2498 disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpunext, &opsizenext);
2499 if (disret == false)
2500 {
2501 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2502 goto end;
2503 }
2504 switch(cpunext.pCurInstr->uOpcode)
2505 {
2506 case OP_IRET: /* inhibit cleared in generated code */
2507 case OP_SYSEXIT: /* faults; inhibit should be cleared in HC handling */
2508 case OP_HLT:
2509 break; /* recompile these */
2510
2511 default:
2512 if (cpunext.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2513 {
2514 Log(("Unexpected control flow instruction after inhibit irq instruction\n"));
2515
2516 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2517 AssertRC(rc);
2518 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2519 goto end; /** @todo should be ok to ignore instruction fusing in this case */
2520 }
2521 break;
2522 }
2523
2524 /* Note: after a cli we must continue to a proper exit point */
2525 if (cpunext.pCurInstr->uOpcode != OP_CLI)
2526 {
2527 rc = pfnPATMR3Recompile(pVM, &cpunext, pInstrGC, pNextInstrGC, pCacheRec);
2528 if (RT_SUCCESS(rc))
2529 {
2530 rc = VINF_SUCCESS;
2531 goto end;
2532 }
2533 break;
2534 }
2535 else
2536 rc = VWRN_CONTINUE_RECOMPILE;
2537 }
2538 else
2539 break; /* done! */
2540 }
2541
2542 /** @todo continue with the instructions following the jump and then recompile the jump target code */
2543
2544
2545 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction). */
2546 if ( (cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2547 && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J)
2548 && cpu.pCurInstr->uOpcode != OP_CALL /* complete functions are replaced; don't bother here. */
2549 )
2550 {
2551 RCPTRTYPE(uint8_t *) addr = PATMResolveBranch(&cpu, pCurInstrGC);
2552 if (addr == 0)
2553 {
2554 Log(("We don't support far jumps here!! (%08X)\n", cpu.Param1.fUse));
2555 rc = VERR_PATCHING_REFUSED;
2556 break;
2557 }
2558
2559 Log(("Jump encountered target %RRv\n", addr));
2560
2561 /* We don't check if the branch target lies in a valid page as we've already done that in the analysis phase. */
2562 if (!(cpu.pCurInstr->fOpType & DISOPTYPE_UNCOND_CONTROLFLOW))
2563 {
2564 Log(("patmRecompileCodeStream continue passed conditional jump\n"));
2565 /* First we need to finish this linear code stream until the next exit point. */
2566 rc = patmRecompileCodeStream(pVM, pInstrGC, pCurInstrGC+cbInstr, pfnPATMR3Recompile, pCacheRec);
2567 if (RT_FAILURE(rc))
2568 {
2569 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2570 break; //fatal error
2571 }
2572 }
2573
2574 if (patmGuestGCPtrToPatchGCPtr(pVM, pPatch, addr) == 0)
2575 {
2576 /* New code; let's recompile it. */
2577 Log(("patmRecompileCodeStream continue with jump\n"));
2578
2579 /*
2580 * If we are jumping to an existing patch (or within 5 bytes of the entrypoint), then we must temporarily disable
2581 * this patch so we can continue our analysis
2582 *
2583 * We rely on CSAM to detect and resolve conflicts
2584 */
2585 PPATCHINFO pTargetPatch = patmFindActivePatchByEntrypoint(pVM, addr);
2586 if(pTargetPatch)
2587 {
2588 Log(("Found active patch at target %RRv (%RRv) -> temporarily disabling it!!\n", addr, pTargetPatch->pPrivInstrGC));
2589 PATMR3DisablePatch(pVM, pTargetPatch->pPrivInstrGC);
2590 }
2591
2592 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2593 rc = patmRecompileCodeStream(pVM, pInstrGC, addr, pfnPATMR3Recompile, pCacheRec);
2594 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2595
2596 if(pTargetPatch)
2597 PATMR3EnablePatch(pVM, pTargetPatch->pPrivInstrGC);
2598
2599 if (RT_FAILURE(rc))
2600 {
2601 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2602 break; //done!
2603 }
2604 }
2605 /* Always return to caller here; we're done! */
2606 rc = VINF_SUCCESS;
2607 goto end;
2608 }
2609 else
2610 if (cpu.pCurInstr->fOpType & DISOPTYPE_UNCOND_CONTROLFLOW)
2611 {
2612 rc = VINF_SUCCESS;
2613 goto end;
2614 }
2615 pCurInstrGC += cbInstr;
2616 }
2617end:
2618 Assert(!(pPatch->flags & PATMFL_RECOMPILE_NEXT));
2619 return rc;
2620}
2621
2622
2623/**
2624 * Generate the jump from guest to patch code
2625 *
2626 * @returns VBox status code.
2627 * @param pVM Pointer to the VM.
2628 * @param pPatch Patch record
2629 * @param pCacheRec Guest translation lookup cache record
 * @param fAddFixup Whether to add a relocation fixup record for the jump (defaults to true)
2630 */
2631static int patmGenJumpToPatch(PVM pVM, PPATCHINFO pPatch, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2632{
2633 uint8_t temp[8];
2634 uint8_t *pPB;
2635 int rc;
2636
2637 Assert(pPatch->cbPatchJump <= sizeof(temp));
2638 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
2639
2640 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2641 Assert(pPB);
2642
2643#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
2644 if (pPatch->flags & PATMFL_JUMP_CONFLICT)
2645 {
2646 Assert(pPatch->pPatchJumpDestGC);
2647
2648 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
2649 {
2650 // jmp [PatchCode]
2651 if (fAddFixup)
2652 {
2653 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2654 {
2655 Log(("Relocation failed for the jump in the guest code!!\n"));
2656 return VERR_PATCHING_REFUSED;
2657 }
2658 }
2659
2660 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2661 *(uint32_t *)&temp[1] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //relative displacement to the jump target
2662 }
2663 else
2664 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
2665 {
2666 // jmp [PatchCode]
2667 if (fAddFixup)
2668 {
2669 if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2670 {
2671 Log(("Relocation failed for the jump in the guest code!!\n"));
2672 return VERR_PATCHING_REFUSED;
2673 }
2674 }
2675
2676 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2677 temp[1] = pPatch->aPrivInstr[1]; //jump opcode copied from original instruction
2678 *(uint32_t *)&temp[2] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //relative displacement to the jump target
2679 }
2680 else
2681 {
2682 Assert(0);
2683 return VERR_PATCHING_REFUSED;
2684 }
2685 }
2686 else
2687#endif
2688 {
2689 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2690
2691 // jmp [PatchCode]
2692 if (fAddFixup)
2693 {
2694 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
2695 {
2696 Log(("Relocation failed for the jump in the guest code!!\n"));
2697 return VERR_PATCHING_REFUSED;
2698 }
2699 }
2700 temp[0] = 0xE9; //jmp
2701 *(uint32_t *)&temp[1] = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //relative displacement to the patch code
2702 }
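 /* temp[] now holds the jump that replaces the privileged guest instruction (e.g. E9 <rel32>
  * for the normal 5 byte case); PGMPhysSimpleDirtyWriteGCPtr writes it straight into guest memory. */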
2703 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2704 AssertRC(rc);
2705
2706 if (rc == VINF_SUCCESS)
2707 pPatch->flags |= PATMFL_PATCHED_GUEST_CODE;
2708
2709 return rc;
2710}
2711
2712/**
2713 * Remove the jump from guest to patch code
2714 *
2715 * @returns VBox status code.
2716 * @param pVM Pointer to the VM.
2717 * @param pPatch Patch record
2718 */
2719static int patmRemoveJumpToPatch(PVM pVM, PPATCHINFO pPatch)
2720{
2721#ifdef DEBUG
2722 DISCPUSTATE cpu;
2723 char szOutput[256];
2724 uint32_t cbInstr, i = 0;
2725 bool disret;
2726
2727 while (i < pPatch->cbPrivInstr)
2728 {
2729 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
2730 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2731 if (disret == false)
2732 break;
2733
2734 Log(("Org patch jump: %s", szOutput));
2735 Assert(cbInstr);
2736 i += cbInstr;
2737 }
2738#endif
2739
2740 /* Restore original code (privileged instruction + following instructions that were overwritten because of the 5/6 byte jmp). */
2741 int rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, pPatch->cbPatchJump);
2742#ifdef DEBUG
2743 if (rc == VINF_SUCCESS)
2744 {
2745 i = 0;
2746 while (i < pPatch->cbPrivInstr)
2747 {
2748 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
2749 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2750 if (disret == false)
2751 break;
2752
2753 Log(("Org instr: %s", szOutput));
2754 Assert(cbInstr);
2755 i += cbInstr;
2756 }
2757 }
2758#endif
2759 pPatch->flags &= ~PATMFL_PATCHED_GUEST_CODE;
2760 return rc;
2761}
2762
2763/**
2764 * Generate the call from guest to patch code
2765 *
2766 * @returns VBox status code.
2767 * @param pVM Pointer to the VM.
2768 * @param pPatch Patch record
2769 * @param pTargetGC Guest context address of the patch code to call/jump to
 * @param fAddFixup Whether to add a relocation fixup record for the jump (defaults to true)
2770 * @param pCacheRec Guest translation cache record
2771 */
2772static int patmGenCallToPatch(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTargetGC, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2773{
2774 uint8_t temp[8];
2775 uint8_t *pPB;
2776 int rc;
2777
2778 Assert(pPatch->cbPatchJump <= sizeof(temp));
2779
2780 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2781 Assert(pPB);
2782
2783 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2784
2785 // jmp [PatchCode]
2786 if (fAddFixup)
2787 {
2788 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
2789 {
2790 Log(("Relocation failed for the jump in the guest code!!\n"));
2791 return VERR_PATCHING_REFUSED;
2792 }
2793 }
2794
2795 Assert(pPatch->aPrivInstr[0] == 0xE8 || pPatch->aPrivInstr[0] == 0xE9); /* call or jmp */
2796 temp[0] = pPatch->aPrivInstr[0];
2797 *(uint32_t *)&temp[1] = (uint32_t)pTargetGC - ((uint32_t)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //relative displacement to the target
2798
2799 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2800 AssertRC(rc);
2801
2802 return rc;
2803}
2804
2805
2806/**
2807 * Patch cli/sti pushf/popf instruction block at specified location
2808 *
2809 * @returns VBox status code.
2810 * @param pVM Pointer to the VM.
2811 * @param pInstrGC Guest context pointer to privileged instruction
2812 * @param pInstrHC Host context pointer to privileged instruction
2813 * @param uOpcode Instruction opcode
2814 * @param uOpSize Size of starting instruction
2815 * @param pPatchRec Patch record
2816 *
2817 * @note returns failure if patching is not allowed or possible
2818 *
2819 */
2820static int patmR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2821 uint32_t uOpcode, uint32_t uOpSize, PPATMPATCHREC pPatchRec)
2822{
2823 PPATCHINFO pPatch = &pPatchRec->patch;
2824 int rc = VERR_PATCHING_REFUSED;
2825 uint32_t orgOffsetPatchMem = ~0;
2826 RTRCPTR pInstrStart;
2827 bool fInserted;
2828 NOREF(pInstrHC); NOREF(uOpSize);
2829
2830 /* Save original offset (in case of failures later on) */
2831 /** @todo use the hypervisor heap (that has quite a few consequences for save/restore though) */
2832 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2833
2834 Assert(!(pPatch->flags & (PATMFL_GUEST_SPECIFIC|PATMFL_USER_MODE|PATMFL_TRAPHANDLER)));
2835 switch (uOpcode)
2836 {
2837 case OP_MOV:
2838 break;
2839
2840 case OP_CLI:
2841 case OP_PUSHF:
2842 /* We can 'call' a cli or pushf patch. It will either return to the original guest code when IF is set again, or fault. */
2843 /* Note: special precautions are taken when disabling and enabling such patches. */
2844 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
2845 break;
2846
2847 default:
2848 if (!(pPatch->flags & PATMFL_IDTHANDLER))
2849 {
2850 AssertMsg(0, ("patmR3PatchBlock: Invalid opcode %x\n", uOpcode));
2851 return VERR_INVALID_PARAMETER;
2852 }
2853 }
2854
2855 if (!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)))
2856 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
2857
2858 /* If we're going to insert a patch jump, then the jump itself is not allowed to cross a page boundary. */
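 /* (Presumably because the 5 byte jump is monitored and written as one unit; letting it span
  *  two guest pages would complicate write monitoring and paging, so such cases are refused.) */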
2859 if ( (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2860 && PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + SIZEOF_NEARJUMP32)
2861 )
2862 {
2863 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
2864 Log(("Patch jump would cross page boundary -> refuse!!\n"));
2865 rc = VERR_PATCHING_REFUSED;
2866 goto failure;
2867 }
2868
2869 pPatch->nrPatch2GuestRecs = 0;
2870 pInstrStart = pInstrGC;
2871
2872#ifdef PATM_ENABLE_CALL
2873 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
2874#endif
2875
2876 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2877 pPatch->uCurPatchOffset = 0;
2878
2879 if ((pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER)) == PATMFL_IDTHANDLER)
2880 {
2881 Assert(pPatch->flags & PATMFL_INTHANDLER);
2882
2883 /* Install fake cli patch (to clear the virtual IF and check int xx parameters) */
2884 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2885 if (RT_FAILURE(rc))
2886 goto failure;
2887 }
2888
2889 /***************************************************************************************************************************/
2890 /* Note: We can't insert *any* code before a sysenter handler; some linux guests have an invalid stack at this point!!!!! */
2891 /***************************************************************************************************************************/
2892#ifdef VBOX_WITH_STATISTICS
2893 if (!(pPatch->flags & PATMFL_SYSENTER))
2894 {
2895 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2896 if (RT_FAILURE(rc))
2897 goto failure;
2898 }
2899#endif
2900
2901 PATMP2GLOOKUPREC cacheRec;
2902 RT_ZERO(cacheRec);
2903 cacheRec.pPatch = pPatch;
2904
2905 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, &cacheRec);
2906 /* Free leftover lock if any. */
2907 if (cacheRec.Lock.pvMap)
2908 {
2909 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2910 cacheRec.Lock.pvMap = NULL;
2911 }
2912 if (rc != VINF_SUCCESS)
2913 {
2914 Log(("PATMR3PatchCli: patmRecompileCodeStream failed with %d\n", rc));
2915 goto failure;
2916 }
2917
2918 /* Calculated during analysis. */
2919 if (pPatch->cbPatchBlockSize < SIZEOF_NEARJUMP32)
2920 {
2921 /* Most likely cause: we encountered an illegal instruction very early on. */
2922 /** @todo could turn it into an int3 callable patch. */
2923 Log(("patmR3PatchBlock: patch block too small -> refuse\n"));
2924 rc = VERR_PATCHING_REFUSED;
2925 goto failure;
2926 }
2927
2928 /* size of patch block */
2929 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2930
2931
2932 /* Update free pointer in patch memory. */
2933 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2934 /* Round to next 8 byte boundary. */
2935 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2936
2937 /*
2938 * Insert into patch to guest lookup tree
2939 */
2940 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2941 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2942 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2943 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
2944 if (!fInserted)
2945 {
2946 rc = VERR_PATCHING_REFUSED;
2947 goto failure;
2948 }
2949
2950 /* Note that patmr3SetBranchTargets can install additional patches!! */
2951 rc = patmr3SetBranchTargets(pVM, pPatch);
2952 if (rc != VINF_SUCCESS)
2953 {
2954 Log(("PATMR3PatchCli: patmr3SetBranchTargets failed with %d\n", rc));
2955 goto failure;
2956 }
2957
2958#ifdef LOG_ENABLED
2959 Log(("Patch code ----------------------------------------------------------\n"));
2960 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, &cacheRec);
2961 /* Free leftover lock if any. */
2962 if (cacheRec.Lock.pvMap)
2963 {
2964 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2965 cacheRec.Lock.pvMap = NULL;
2966 }
2967 Log(("Patch code ends -----------------------------------------------------\n"));
2968#endif
2969
2970 /* make a copy of the guest code bytes that will be overwritten */
2971 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
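 /* (SIZEOF_NEARJUMP32 is the 5 bytes of a jmp rel32: one 0xE9 opcode byte plus a 32-bit displacement.) */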
2972
2973 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
2974 AssertRC(rc);
2975
2976 if (pPatch->flags & PATMFL_INT3_REPLACEMENT_BLOCK)
2977 {
2978 /*uint8_t bASMInt3 = 0xCC; - unused */
2979
2980 Log(("patmR3PatchBlock %RRv -> int 3 callable patch.\n", pPatch->pPrivInstrGC));
2981 /* Replace first opcode byte with 'int 3'. */
2982 rc = patmActivateInt3Patch(pVM, pPatch);
2983 if (RT_FAILURE(rc))
2984 goto failure;
2985
2986 /* normal patch can be turned into an int3 patch -> clear patch jump installation flag. */
2987 pPatch->flags &= ~PATMFL_MUST_INSTALL_PATCHJMP;
2988
2989 pPatch->flags &= ~PATMFL_INSTR_HINT;
2990 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
2991 }
2992 else
2993 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2994 {
2995 Assert(!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)));
2996 /* now insert a jump in the guest code */
2997 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
2998 AssertRC(rc);
2999 if (RT_FAILURE(rc))
3000 goto failure;
3001
3002 }
3003
3004 patmR3DbgAddPatch(pVM, pPatchRec);
3005
3006 PATM_LOG_RAW_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
3007
3008 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3009 pPatch->pTempInfo->nrIllegalInstr = 0;
3010
3011 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
3012
3013 pPatch->uState = PATCH_ENABLED;
3014 return VINF_SUCCESS;
3015
3016failure:
3017 if (pPatchRec->CoreOffset.Key)
3018 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3019
3020 patmEmptyTree(pVM, &pPatch->FixupTree);
3021 pPatch->nrFixups = 0;
3022
3023 patmEmptyTree(pVM, &pPatch->JumpTree);
3024 pPatch->nrJumpRecs = 0;
3025
3026 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3027 pPatch->pTempInfo->nrIllegalInstr = 0;
3028
3029 /* Turn this cli patch into a dummy. */
3030 pPatch->uState = PATCH_REFUSED;
3031 pPatch->pPatchBlockOffset = 0;
3032
3033 // Give back the patch memory we no longer need
3034 Assert(orgOffsetPatchMem != (uint32_t)~0);
3035 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3036
3037 return rc;
3038}
3039
3040/**
3041 * Patch IDT handler
3042 *
3043 * @returns VBox status code.
3044 * @param pVM Pointer to the VM.
3045 * @param pInstrGC Guest context pointer to privileged instruction
3046 * @param uOpSize Size of starting instruction
3047 * @param pPatchRec Patch record
3048 * @param pCacheRec Cache record ptr
3049 *
3050 * @note returns failure if patching is not allowed or possible
3051 *
3052 */
3053static int patmIdtHandler(PVM pVM, RTRCPTR pInstrGC, uint32_t uOpSize, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3054{
3055 PPATCHINFO pPatch = &pPatchRec->patch;
3056 bool disret;
3057 DISCPUSTATE cpuPush, cpuJmp;
3058 uint32_t cbInstr;
3059 RTRCPTR pCurInstrGC = pInstrGC;
3060 uint8_t *pCurInstrHC, *pInstrHC;
3061 uint32_t orgOffsetPatchMem = ~0;
3062
3063 pInstrHC = pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
3064 AssertReturn(pCurInstrHC, VERR_PAGE_NOT_PRESENT);
3065
3066 /*
3067 * In Linux it's often the case that many interrupt handlers push a predefined value onto the stack
3068 * and then jump to a common entrypoint. In order not to waste a lot of memory, we will check for this
3069 * condition here and only patch the common entrypoint once.
3070 */
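 /* Illustrative guest pattern this is looking for (hypothetical labels):
  *     push <constant>           ; each per-vector stub pushes its own predefined value
  *     jmp  common_interrupt     ; all stubs share one entry point, which is patched only once
  */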
3071 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuPush, &cbInstr);
3072 Assert(disret);
3073 if (disret && cpuPush.pCurInstr->uOpcode == OP_PUSH)
3074 {
3075 RTRCPTR pJmpInstrGC;
3076 int rc;
3077 pCurInstrGC += cbInstr;
3078
3079 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuJmp, &cbInstr);
3080 if ( disret
3081 && cpuJmp.pCurInstr->uOpcode == OP_JMP
3082 && (pJmpInstrGC = PATMResolveBranch(&cpuJmp, pCurInstrGC))
3083 )
3084 {
3085 bool fInserted;
3086 PPATMPATCHREC pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
3087 if (pJmpPatch == 0)
3088 {
3089 /* Patch it first! */
3090 rc = PATMR3InstallPatch(pVM, pJmpInstrGC, pPatch->flags | PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT);
3091 if (rc != VINF_SUCCESS)
3092 goto failure;
3093 pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
3094 Assert(pJmpPatch);
3095 }
3096 if (pJmpPatch->patch.uState != PATCH_ENABLED)
3097 goto failure;
3098
3099 /* save original offset (in case of failures later on) */
3100 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3101
3102 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3103 pPatch->uCurPatchOffset = 0;
3104 pPatch->nrPatch2GuestRecs = 0;
3105
3106#ifdef VBOX_WITH_STATISTICS
3107 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3108 if (RT_FAILURE(rc))
3109 goto failure;
3110#endif
3111
3112 /* Install fake cli patch (to clear the virtual IF) */
3113 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
3114 if (RT_FAILURE(rc))
3115 goto failure;
3116
3117 /* Add lookup record for patch to guest address translation (for the push) */
3118 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pInstrGC, PATM_LOOKUP_BOTHDIR);
3119
3120 /* Duplicate push. */
3121 rc = patmPatchGenDuplicate(pVM, pPatch, &cpuPush, pInstrGC);
3122 if (RT_FAILURE(rc))
3123 goto failure;
3124
3125 /* Generate jump to common entrypoint. */
3126 rc = patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, PATCHCODE_PTR_GC(&pJmpPatch->patch));
3127 if (RT_FAILURE(rc))
3128 goto failure;
3129
3130 /* size of patch block */
3131 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3132
3133 /* Update free pointer in patch memory. */
3134 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3135 /* Round to next 8 byte boundary */
3136 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3137
3138 /* There's no jump from guest to patch code. */
3139 pPatch->cbPatchJump = 0;
3140
3141
3142#ifdef LOG_ENABLED
3143 Log(("Patch code ----------------------------------------------------------\n"));
3144 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3145 Log(("Patch code ends -----------------------------------------------------\n"));
3146#endif
3147 Log(("Successfully installed IDT handler patch at %RRv\n", pInstrGC));
3148
3149 /*
3150 * Insert into patch to guest lookup tree
3151 */
3152 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3153 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3154 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3155 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3156 patmR3DbgAddPatch(pVM, pPatchRec);
3157
3158 pPatch->uState = PATCH_ENABLED;
3159
3160 return VINF_SUCCESS;
3161 }
3162 }
3163failure:
3164 /* Give back the patch memory we no longer need */
3165 if (orgOffsetPatchMem != (uint32_t)~0)
3166 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3167
3168 return patmR3PatchBlock(pVM, pInstrGC, pInstrHC, OP_CLI, uOpSize, pPatchRec);
3169}
3170
3171/**
3172 * Install a trampoline to call a guest trap handler directly
3173 *
3174 * @returns VBox status code.
3175 * @param pVM Pointer to the VM.
3176 * @param pInstrGC Guest context pointer to privileged instruction
3177 * @param pPatchRec Patch record
3178 * @param pCacheRec Cache record ptr
3179 *
3180 */
3181static int patmInstallTrapTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3182{
3183 PPATCHINFO pPatch = &pPatchRec->patch;
3184 int rc = VERR_PATCHING_REFUSED;
3185 uint32_t orgOffsetPatchMem = ~0;
3186 bool fInserted;
3187
3188 // save original offset (in case of failures later on)
3189 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3190
3191 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3192 pPatch->uCurPatchOffset = 0;
3193 pPatch->nrPatch2GuestRecs = 0;
3194
3195#ifdef VBOX_WITH_STATISTICS
3196 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3197 if (RT_FAILURE(rc))
3198 goto failure;
3199#endif
3200
3201 rc = patmPatchGenTrapEntry(pVM, pPatch, pInstrGC);
3202 if (RT_FAILURE(rc))
3203 goto failure;
3204
3205 /* size of patch block */
3206 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3207
3208 /* Update free pointer in patch memory. */
3209 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3210 /* Round to next 8 byte boundary */
3211 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3212
3213 /* There's no jump from guest to patch code. */
3214 pPatch->cbPatchJump = 0;
3215
3216#ifdef LOG_ENABLED
3217 Log(("Patch code ----------------------------------------------------------\n"));
3218 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3219 Log(("Patch code ends -----------------------------------------------------\n"));
3220#endif
3221 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "TRAP handler");
3222 Log(("Successfully installed Trap Trampoline patch at %RRv\n", pInstrGC));
3223
3224 /*
3225 * Insert into patch to guest lookup tree
3226 */
3227 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3228 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3229 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3230 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3231 patmR3DbgAddPatch(pVM, pPatchRec);
3232
3233 pPatch->uState = PATCH_ENABLED;
3234 return VINF_SUCCESS;
3235
3236failure:
3237 AssertMsgFailed(("Failed to install trap handler trampoline!!\n"));
3238
3239 /* Turn this cli patch into a dummy. */
3240 pPatch->uState = PATCH_REFUSED;
3241 pPatch->pPatchBlockOffset = 0;
3242
3243 /* Give back the patch memory we no longer need */
3244 Assert(orgOffsetPatchMem != (uint32_t)~0);
3245 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3246
3247 return rc;
3248}
3249
3250
3251#ifdef LOG_ENABLED
3252/**
3253 * Check if the instruction is patched as a common idt handler
3254 *
3255 * @returns true or false
3256 * @param pVM Pointer to the VM.
3257 * @param pInstrGC Guest context pointer to the instruction
3258 *
3259 */
3260static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC)
3261{
3262 PPATMPATCHREC pRec;
3263
3264 pRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
3265 if (pRec && pRec->patch.flags & PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT)
3266 return true;
3267 return false;
3268}
3269#endif //LOG_ENABLED
3270
3271
3272/**
3273 * Duplicates a complete function
3274 *
3275 * @returns VBox status code.
3276 * @param pVM Pointer to the VM.
3277 * @param pInstrGC Guest context pointer to privileged instruction
3278 * @param pPatchRec Patch record
3279 * @param pCacheRec Cache record ptr
3280 *
3281 */
3282static int patmDuplicateFunction(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3283{
3284 PPATCHINFO pPatch = &pPatchRec->patch;
3285 int rc = VERR_PATCHING_REFUSED;
3286 uint32_t orgOffsetPatchMem = ~0;
3287 bool fInserted;
3288
3289 Log(("patmDuplicateFunction %RRv\n", pInstrGC));
3290 /* Save original offset (in case of failures later on). */
3291 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3292
3293 /* We will not go on indefinitely with call instruction handling. */
3294 if (pVM->patm.s.ulCallDepth > PATM_MAX_CALL_DEPTH)
3295 {
3296 Log(("patmDuplicateFunction: maximum callback depth reached!!\n"));
3297 return VERR_PATCHING_REFUSED;
3298 }
3299
3300 pVM->patm.s.ulCallDepth++;
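 /* The depth counter incremented above bounds recursion: duplicating a function can
  * install further patches (see the patmr3SetBranchTargets call below), and the counter
  * is decremented again on both the success and the failure exit paths. */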
3301
3302#ifdef PATM_ENABLE_CALL
3303 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
3304#endif
3305
3306 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
3307
3308 pPatch->nrPatch2GuestRecs = 0;
3309 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3310 pPatch->uCurPatchOffset = 0;
3311
3312 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3313 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3314 if (RT_FAILURE(rc))
3315 goto failure;
3316
3317#ifdef VBOX_WITH_STATISTICS
3318 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3319 if (RT_FAILURE(rc))
3320 goto failure;
3321#endif
3322
3323 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pCacheRec);
3324 if (rc != VINF_SUCCESS)
3325 {
3326 Log(("patmDuplicateFunction: patmRecompileCodeStream failed with %d\n", rc));
3327 goto failure;
3328 }
3329
3330 //size of patch block
3331 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3332
3333 //update free pointer in patch memory
3334 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3335 /* Round to next 8 byte boundary. */
3336 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3337
3338 pPatch->uState = PATCH_ENABLED;
3339
3340 /*
3341 * Insert into patch to guest lookup tree
3342 */
3343 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3344 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3345 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3346 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3347 if (!fInserted)
3348 {
3349 rc = VERR_PATCHING_REFUSED;
3350 goto failure;
3351 }
3352
3353 /* Note that patmr3SetBranchTargets can install additional patches!! */
3354 rc = patmr3SetBranchTargets(pVM, pPatch);
3355 if (rc != VINF_SUCCESS)
3356 {
3357 Log(("patmDuplicateFunction: patmr3SetBranchTargets failed with %d\n", rc));
3358 goto failure;
3359 }
3360
3361 patmR3DbgAddPatch(pVM, pPatchRec);
3362
3363#ifdef LOG_ENABLED
3364 Log(("Patch code ----------------------------------------------------------\n"));
3365 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3366 Log(("Patch code ends -----------------------------------------------------\n"));
3367#endif
3368
3369 Log(("Successfully installed function duplication patch at %RRv\n", pInstrGC));
3370
3371 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3372 pPatch->pTempInfo->nrIllegalInstr = 0;
3373
3374 pVM->patm.s.ulCallDepth--;
3375 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledFunctionPatches);
3376 return VINF_SUCCESS;
3377
3378failure:
3379 if (pPatchRec->CoreOffset.Key)
3380 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3381
3382 patmEmptyTree(pVM, &pPatch->FixupTree);
3383 pPatch->nrFixups = 0;
3384
3385 patmEmptyTree(pVM, &pPatch->JumpTree);
3386 pPatch->nrJumpRecs = 0;
3387
3388 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3389 pPatch->pTempInfo->nrIllegalInstr = 0;
3390
3391 /* Turn this cli patch into a dummy. */
3392 pPatch->uState = PATCH_REFUSED;
3393 pPatch->pPatchBlockOffset = 0;
3394
3395 // Give back the patch memory we no longer need
3396 Assert(orgOffsetPatchMem != (uint32_t)~0);
3397 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3398
3399 pVM->patm.s.ulCallDepth--;
3400 Log(("patmDuplicateFunction %RRv failed!!\n", pInstrGC));
3401 return rc;
3402}
3403
3404/**
3405 * Creates trampoline code to jump inside an existing patch
3406 *
3407 * @returns VBox status code.
3408 * @param pVM Pointer to the VM.
3409 * @param pInstrGC Guest context pointer to privileged instruction
3410 * @param pPatchRec Patch record
3411 *
3412 */
3413static int patmCreateTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3414{
3415 PPATCHINFO pPatch = &pPatchRec->patch;
3416 RTRCPTR pPage, pPatchTargetGC = 0;
3417 uint32_t orgOffsetPatchMem = ~0;
3418 int rc = VERR_PATCHING_REFUSED;
3419 PPATCHINFO pPatchToJmp = NULL; /**< Patch the trampoline jumps to. */
3420 PTRAMPREC pTrampRec = NULL; /**< Trampoline record used to find the patch. */
3421 bool fInserted = false;
3422
3423 Log(("patmCreateTrampoline %RRv\n", pInstrGC));
3424 /* Save original offset (in case of failures later on). */
3425 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3426
3427 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3428 /** @todo we already checked this before */
3429 pPage = pInstrGC & PAGE_BASE_GC_MASK;
3430
3431 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3432 if (pPatchPage)
3433 {
3434 uint32_t i;
3435
3436 for (i=0;i<pPatchPage->cCount;i++)
3437 {
3438 if (pPatchPage->papPatch[i])
3439 {
3440 pPatchToJmp = pPatchPage->papPatch[i];
3441
3442 if ( (pPatchToJmp->flags & PATMFL_DUPLICATE_FUNCTION)
3443 && pPatchToJmp->uState == PATCH_ENABLED)
3444 {
3445 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatchToJmp, pInstrGC);
3446 if (pPatchTargetGC)
3447 {
3448 uint32_t offsetPatch = pPatchTargetGC - pVM->patm.s.pPatchMemGC;
3449 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatchToJmp->Patch2GuestAddrTree, offsetPatch, false);
3450 Assert(pPatchToGuestRec);
3451
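 /* Flag the patch-to-guest record so PATM knows this spot inside the
  * target patch is entered directly via an external (trampoline) jump. */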
3452 pPatchToGuestRec->fJumpTarget = true;
3453 Assert(pPatchTargetGC != pPatchToJmp->pPrivInstrGC);
3454 Log(("patmCreateTrampoline: generating jump to code inside patch at %RRv (patch target %RRv)\n", pPatchToJmp->pPrivInstrGC, pPatchTargetGC));
3455 break;
3456 }
3457 }
3458 }
3459 }
3460 }
3461 AssertReturn(pPatchPage && pPatchTargetGC && pPatchToJmp, VERR_PATCHING_REFUSED);
3462
3463 /*
3464 * Only record the trampoline patch if this is the first patch to the target
3465 * or we recorded other patches already.
3466 * The goal is to refuse refreshing function duplicates if the guest
3467 * modifies code after a saved state was loaded because it is not possible
3468 * to save the relation between trampoline and target without changing the
3469 * saved state version.
3470 */
3471 if ( !(pPatchToJmp->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
3472 || pPatchToJmp->pTrampolinePatchesHead)
3473 {
3474 pPatchToJmp->flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
3475 pTrampRec = (PTRAMPREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pTrampRec));
3476 if (!pTrampRec)
3477 return VERR_NO_MEMORY; /* or better return VERR_PATCHING_REFUSED to let the VM continue? */
3478
3479 pTrampRec->pPatchTrampoline = pPatchRec;
3480 }
3481
3482 pPatch->nrPatch2GuestRecs = 0;
3483 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3484 pPatch->uCurPatchOffset = 0;
3485
3486 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3487 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3488 if (RT_FAILURE(rc))
3489 goto failure;
3490
3491#ifdef VBOX_WITH_STATISTICS
3492 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3493 if (RT_FAILURE(rc))
3494 goto failure;
3495#endif
3496
3497 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC);
3498 if (RT_FAILURE(rc))
3499 goto failure;
3500
3501 /*
3502 * Insert into patch to guest lookup tree
3503 */
3504 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3505 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3506 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3507 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3508 if (!fInserted)
3509 {
3510 rc = VERR_PATCHING_REFUSED;
3511 goto failure;
3512 }
3513 patmR3DbgAddPatch(pVM, pPatchRec);
3514
3515 /* size of patch block */
3516 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3517
3518 /* Update free pointer in patch memory. */
3519 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3520 /* Round to next 8 byte boundary */
3521 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3522
3523 /* There's no jump from guest to patch code. */
3524 pPatch->cbPatchJump = 0;
3525
3526 /* Enable the patch. */
3527 pPatch->uState = PATCH_ENABLED;
3528 /* We allow this patch to be called as a function. */
3529 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
3530
3531 if (pTrampRec)
3532 {
3533 pTrampRec->pNext = pPatchToJmp->pTrampolinePatchesHead;
3534 pPatchToJmp->pTrampolinePatchesHead = pTrampRec;
3535 }
3536 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledTrampoline);
3537 return VINF_SUCCESS;
3538
3539failure:
3540 if (pPatchRec->CoreOffset.Key)
3541 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3542
3543 patmEmptyTree(pVM, &pPatch->FixupTree);
3544 pPatch->nrFixups = 0;
3545
3546 patmEmptyTree(pVM, &pPatch->JumpTree);
3547 pPatch->nrJumpRecs = 0;
3548
3549 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3550 pPatch->pTempInfo->nrIllegalInstr = 0;
3551
3552 /* Turn this cli patch into a dummy. */
3553 pPatch->uState = PATCH_REFUSED;
3554 pPatch->pPatchBlockOffset = 0;
3555
3556 // Give back the patch memory we no longer need
3557 Assert(orgOffsetPatchMem != (uint32_t)~0);
3558 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3559
3560 if (pTrampRec)
3561 MMR3HeapFree(pTrampRec);
3562
3563 return rc;
3564}
3565
3566
3567/**
3568 * Patch branch target function for call/jump at specified location.
3569 * (in response to a VINF_PATM_DUPLICATE_FUNCTION GC exit reason)
3570 *
3571 * @returns VBox status code.
3572 * @param pVM Pointer to the VM.
3573 * @param pCtx Pointer to the guest CPU context.
3574 *
3575 */
3576VMMR3_INT_DECL(int) PATMR3DuplicateFunctionRequest(PVM pVM, PCPUMCTX pCtx)
3577{
3578 RTRCPTR pBranchTarget, pPage;
3579 int rc;
3580 RTRCPTR pPatchTargetGC = 0;
3581 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
3582
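 /* Register interface of this request, as visible from the code below:
  *   edx = flat branch target of the original call/jmp,
  *   edi = lookup cache slot inside patch memory (asserted further down),
  *   eax = result, an offset relative to the start of patch memory, or 0 on failure. */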
3583 pBranchTarget = pCtx->edx;
3584 pBranchTarget = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pBranchTarget);
3585
3586 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3587 pPage = pBranchTarget & PAGE_BASE_GC_MASK;
3588
3589 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3590 if (pPatchPage)
3591 {
3592 uint32_t i;
3593
3594 for (i=0;i<pPatchPage->cCount;i++)
3595 {
3596 if (pPatchPage->papPatch[i])
3597 {
3598 PPATCHINFO pPatch = pPatchPage->papPatch[i];
3599
3600 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
3601 && pPatch->uState == PATCH_ENABLED)
3602 {
3603 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pBranchTarget);
3604 if (pPatchTargetGC)
3605 {
3606 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateUseExisting);
3607 break;
3608 }
3609 }
3610 }
3611 }
3612 }
3613
3614 if (pPatchTargetGC)
3615 {
3616 /* Create a trampoline that also sets PATM_INTERRUPTFLAG. */
3617 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_TRAMPOLINE);
3618 }
3619 else
3620 {
3621 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
3622 }
3623
3624 if (rc == VINF_SUCCESS)
3625 {
3626 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pBranchTarget);
3627 Assert(pPatchTargetGC);
3628 }
3629
3630 if (pPatchTargetGC)
3631 {
3632 pCtx->eax = pPatchTargetGC;
3633 pCtx->eax = pCtx->eax - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC; /* make it relative */
3634 }
3635 else
3636 {
3637 /* We add a dummy entry into the lookup cache so we won't get bombarded with the same requests over and over again. */
3638 pCtx->eax = 0;
3639 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQFailed);
3640 }
3641 Assert(PATMIsPatchGCAddr(pVM, pCtx->edi));
3642 rc = patmAddBranchToLookupCache(pVM, pCtx->edi, pBranchTarget, pCtx->eax);
3643 AssertRC(rc);
3644
3645 pCtx->eip += PATM_ILLEGAL_INSTR_SIZE;
3646 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQSuccess);
3647 return VINF_SUCCESS;
3648}
3649
3650/**
3651 * Replaces a function call by a call to an existing function duplicate (or jmp -> jmp)
3652 *
3653 * @returns VBox status code.
3654 * @param pVM Pointer to the VM.
3655 * @param pCpu Disassembly CPU structure ptr
3656 * @param pInstrGC Guest context pointer to privileged instruction
3657 * @param pCacheRec Cache record ptr
3658 *
3659 */
3660static int patmReplaceFunctionCall(PVM pVM, DISCPUSTATE *pCpu, RTRCPTR pInstrGC, PPATMP2GLOOKUPREC pCacheRec)
3661{
3662 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3663 int rc = VERR_PATCHING_REFUSED;
3664 DISCPUSTATE cpu;
3665 RTRCPTR pTargetGC;
3666 PPATMPATCHREC pPatchFunction;
3667 uint32_t cbInstr;
3668 bool disret;
3669
3670 Assert(pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL);
3671 Assert((pCpu->pCurInstr->uOpcode == OP_CALL || pCpu->pCurInstr->uOpcode == OP_JMP) && pCpu->cbInstr == SIZEOF_NEARJUMP32);
3672
3673 if ((pCpu->pCurInstr->uOpcode != OP_CALL && pCpu->pCurInstr->uOpcode != OP_JMP) || pCpu->cbInstr != SIZEOF_NEARJUMP32)
3674 {
3675 rc = VERR_PATCHING_REFUSED;
3676 goto failure;
3677 }
3678
3679 pTargetGC = PATMResolveBranch(pCpu, pInstrGC);
3680 if (pTargetGC == 0)
3681 {
3682 Log(("We don't support far jumps here!! (%08X)\n", pCpu->Param1.fUse));
3683 rc = VERR_PATCHING_REFUSED;
3684 goto failure;
3685 }
3686
3687 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3688 if (pPatchFunction == NULL)
3689 {
3690 for(;;)
3691 {
3692 /* It could be an indirect call (call -> jmp dest).
3693 * Note that it's dangerous to assume the jump will never change...
3694 */
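 /* Note: the loop body below always breaks out, so at most one level of
  * jmp indirection is followed before giving up. */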
3695 uint8_t *pTmpInstrHC;
3696
3697 pTmpInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pTargetGC);
3698 Assert(pTmpInstrHC);
3699 if (pTmpInstrHC == 0)
3700 break;
3701
3702 disret = patmR3DisInstr(pVM, pPatch, pTargetGC, pTmpInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
3703 if (disret == false || cpu.pCurInstr->uOpcode != OP_JMP)
3704 break;
3705
3706 pTargetGC = PATMResolveBranch(&cpu, pTargetGC);
3707 if (pTargetGC == 0)
3708 {
3709 break;
3710 }
3711
3712 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3713 break;
3714 }
3715 if (pPatchFunction == 0)
3716 {
3717 AssertMsgFailed(("Unable to find duplicate function %RRv\n", pTargetGC));
3718 rc = VERR_PATCHING_REFUSED;
3719 goto failure;
3720 }
3721 }
3722
3723 // make a copy of the guest code bytes that will be overwritten
3724 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
3725
3726 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3727 AssertRC(rc);
3728
3729 /* Now replace the original call in the guest code */
3730 rc = patmGenCallToPatch(pVM, pPatch, PATCHCODE_PTR_GC(&pPatchFunction->patch), pCacheRec, true);
3731 AssertRC(rc);
3732 if (RT_FAILURE(rc))
3733 goto failure;
3734
3735 /* Lowest and highest address for write monitoring. */
3736 pPatch->pInstrGCLowest = pInstrGC;
3737 pPatch->pInstrGCHighest = pInstrGC + pCpu->cbInstr;
3738 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "Call");
3739
3740 Log(("Successfully installed function replacement patch at %RRv\n", pInstrGC));
3741
3742 pPatch->uState = PATCH_ENABLED;
3743 return VINF_SUCCESS;
3744
3745failure:
3746 /* Turn this patch into a dummy. */
3747 pPatch->uState = PATCH_REFUSED;
3748
3749 return rc;
3750}
3751
3752/**
3753 * Replace the address in an MMIO instruction with the cached version.
3754 *
3755 * @returns VBox status code.
3756 * @param pVM Pointer to the VM.
3757 * @param pInstrGC Guest context pointer to privileged instruction
3758 * @param pCpu Disassembly CPU structure ptr
3759 * @param pCacheRec Cache record ptr
3760 *
3761 * @note returns failure if patching is not allowed or possible
3762 *
3763 */
3764static int patmPatchMMIOInstr(PVM pVM, RTRCPTR pInstrGC, DISCPUSTATE *pCpu, PPATMP2GLOOKUPREC pCacheRec)
3765{
3766 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3767 uint8_t *pPB;
3768 int rc = VERR_PATCHING_REFUSED;
3769
3770 Assert(pVM->patm.s.mmio.pCachedData);
3771 if (!pVM->patm.s.mmio.pCachedData)
3772 goto failure;
3773
3774 if (pCpu->Param2.fUse != DISUSE_DISPLACEMENT32)
3775 goto failure;
3776
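 /* The code below assumes the 32-bit displacement is encoded in the last 4 bytes of the
  * instruction; both the fixup record and the guest write therefore target
  * offset cbInstr - sizeof(RTRCPTR). */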
3777 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
3778 if (pPB == 0)
3779 goto failure;
3780
3781 /* Add relocation record for cached data access. */
3782 if (patmPatchAddReloc32(pVM, pPatch, &pPB[pCpu->cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE, pPatch->pPrivInstrGC, pVM->patm.s.mmio.pCachedData) != VINF_SUCCESS)
3783 {
3784 Log(("Relocation failed for cached mmio address!!\n"));
3785 return VERR_PATCHING_REFUSED;
3786 }
3787 PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "MMIO patch old instruction:", "");
3788
3789 /* Save original instruction. */
3790 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3791 AssertRC(rc);
3792
3793 pPatch->cbPatchJump = pPatch->cbPrivInstr; /* bit of a misnomer in this case; size of replacement instruction. */
3794
3795 /* Replace address with that of the cached item. */
3796 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pInstrGC + pCpu->cbInstr - sizeof(RTRCPTR), &pVM->patm.s.mmio.pCachedData, sizeof(RTRCPTR));
3797 AssertRC(rc);
3798 if (RT_FAILURE(rc))
3799 {
3800 goto failure;
3801 }
3802
3803 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "MMIO");
3804 pVM->patm.s.mmio.pCachedData = 0;
3805 pVM->patm.s.mmio.GCPhys = 0;
3806 pPatch->uState = PATCH_ENABLED;
3807 return VINF_SUCCESS;
3808
3809failure:
3810 /* Turn this patch into a dummy. */
3811 pPatch->uState = PATCH_REFUSED;
3812
3813 return rc;
3814}
3815
3816
3817/**
3818 * Replace the address in an MMIO instruction with the cached version. (instruction is part of an existing patch)
3819 *
3820 * @returns VBox status code.
3821 * @param pVM Pointer to the VM.
3822 * @param pInstrGC Guest context pointer to privileged instruction
3823 * @param pPatch Patch record
3824 *
3825 * @note returns failure if patching is not allowed or possible
3826 *
3827 */
3828static int patmPatchPATMMMIOInstr(PVM pVM, RTRCPTR pInstrGC, PPATCHINFO pPatch)
3829{
3830 DISCPUSTATE cpu;
3831 uint32_t cbInstr;
3832 bool disret;
3833 uint8_t *pInstrHC;
3834
3835 AssertReturn(pVM->patm.s.mmio.pCachedData, VERR_INVALID_PARAMETER);
3836
3837 /* Convert GC to HC address. */
3838 pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pInstrGC);
3839 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
3840
3841 /* Disassemble mmio instruction. */
3842 disret = patmR3DisInstrNoStrOpMode(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE,
3843 &cpu, &cbInstr);
3844 if (disret == false)
3845 {
3846 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
3847 return VERR_PATCHING_REFUSED;
3848 }
3849
3850 AssertMsg(cbInstr <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", cbInstr));
3851 if (cbInstr > MAX_INSTR_SIZE)
3852 return VERR_PATCHING_REFUSED;
3853 if (cpu.Param2.fUse != DISUSE_DISPLACEMENT32)
3854 return VERR_PATCHING_REFUSED;
3855
3856 /* Add relocation record for cached data access. */
3857 if (patmPatchAddReloc32(pVM, pPatch, &pInstrHC[cpu.cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE) != VINF_SUCCESS)
3858 {
3859 Log(("Relocation failed for cached mmio address!!\n"));
3860 return VERR_PATCHING_REFUSED;
3861 }
3862 /* Replace address with that of the cached item. */
3863 *(RTRCPTR *)&pInstrHC[cpu.cbInstr - sizeof(RTRCPTR)] = pVM->patm.s.mmio.pCachedData;
3864
3865 /* Lowest and highest address for write monitoring. */
3866 pPatch->pInstrGCLowest = pInstrGC;
3867 pPatch->pInstrGCHighest = pInstrGC + cpu.cbInstr;
3868
3869 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "MMIO");
3870 pVM->patm.s.mmio.pCachedData = 0;
3871 pVM->patm.s.mmio.GCPhys = 0;
3872 return VINF_SUCCESS;
3873}
3874
3875/**
3876 * Activates an int3 patch
3877 *
3878 * @returns VBox status code.
3879 * @param pVM Pointer to the VM.
3880 * @param pPatch Patch record
3881 */
3882static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3883{
3884 uint8_t bASMInt3 = 0xCC;
3885 int rc;
3886
3887 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3888 Assert(pPatch->uState != PATCH_ENABLED);
3889
3890 /* Replace first opcode byte with 'int 3'. */
3891 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, &bASMInt3, sizeof(bASMInt3));
3892 AssertRC(rc);
3893
3894 pPatch->cbPatchJump = sizeof(bASMInt3);
3895
3896 return rc;
3897}
3898
3899/**
3900 * Deactivates an int3 patch
3901 *
3902 * @returns VBox status code.
3903 * @param pVM Pointer to the VM.
3904 * @param pPatch Patch record
3905 */
3906static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3907{
3908 uint8_t ASMInt3 = 0xCC;
3909 int rc;
3910
3911 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3912 Assert(pPatch->uState == PATCH_ENABLED || pPatch->uState == PATCH_DIRTY);
3913
3914 /* Restore first opcode byte. */
3915 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, sizeof(ASMInt3));
3916 AssertRC(rc);
3917 return rc;
3918}
3919
3920/**
3921 * Replace an instruction with a breakpoint (0xCC) that is handled dynamically
3922 * in the raw-mode context.
3923 *
3924 * @returns VBox status code.
3925 * @param pVM Pointer to the VM.
3926 * @param pInstrGC Guest context pointer to privileged instruction
3927 * @param pInstrHC Host context pointer to privileged instruction
3928 * @param pCpu Disassembly CPU structure ptr
3929 * @param pPatch Patch record
3930 *
3931 * @note returns failure if patching is not allowed or possible
3932 *
3933 */
3934int patmR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATCHINFO pPatch)
3935{
3936 uint8_t bASMInt3 = 0xCC;
3937 int rc;
3938
3939 /* Note: Do not use patch memory here! It might be called during patch installation too. */
3940 PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "patmR3PatchInstrInt3:", "");
3941
3942 /* Save the original instruction. */
3943 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3944 AssertRC(rc);
3945 pPatch->cbPatchJump = sizeof(bASMInt3); /* bit of a misnomer in this case; size of replacement instruction. */
3946
3947 pPatch->flags |= PATMFL_INT3_REPLACEMENT;
3948
3949 /* Replace first opcode byte with 'int 3'. */
3950 rc = patmActivateInt3Patch(pVM, pPatch);
3951 if (RT_FAILURE(rc))
3952 goto failure;
3953
3954 /* Lowest and highest address for write monitoring. */
3955 pPatch->pInstrGCLowest = pInstrGC;
3956 pPatch->pInstrGCHighest = pInstrGC + pCpu->cbInstr;
3957
3958 pPatch->uState = PATCH_ENABLED;
3959 return VINF_SUCCESS;
3960
3961failure:
3962 /* Turn this patch into a dummy. */
3963 return VERR_PATCHING_REFUSED;
3964}
3965
3966#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
3967/**
3968 * Patch a jump instruction at specified location
3969 *
3970 * @returns VBox status code.
3971 * @param pVM Pointer to the VM.
3972 * @param pInstrGC Guest context pointer to privileged instruction
3973 * @param pInstrHC Host context pointer to privileged instruction
3974 * @param pCpu Disassembly CPU structure ptr
3975 * @param pPatchRec Patch record
3976 *
3977 * @note returns failure if patching is not allowed or possible
3978 *
3979 */
3980int patmPatchJump(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATMPATCHREC pPatchRec)
3981{
3982 PPATCHINFO pPatch = &pPatchRec->patch;
3983 int rc = VERR_PATCHING_REFUSED;
3984
3985 pPatch->pPatchBlockOffset = 0; /* doesn't use patch memory */
3986 pPatch->uCurPatchOffset = 0;
3987 pPatch->cbPatchBlockSize = 0;
3988 pPatch->flags |= PATMFL_SINGLE_INSTRUCTION;
3989
3990 /*
3991 * Instruction replacements such as these should never be interrupted. I've added code to EM.cpp to
3992 * make sure this never happens. (unless a trap is triggered (intentionally or not))
3993 */
3994 switch (pCpu->pCurInstr->uOpcode)
3995 {
3996 case OP_JO:
3997 case OP_JNO:
3998 case OP_JC:
3999 case OP_JNC:
4000 case OP_JE:
4001 case OP_JNE:
4002 case OP_JBE:
4003 case OP_JNBE:
4004 case OP_JS:
4005 case OP_JNS:
4006 case OP_JP:
4007 case OP_JNP:
4008 case OP_JL:
4009 case OP_JNL:
4010 case OP_JLE:
4011 case OP_JNLE:
4012 case OP_JMP:
4013 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
4014 Assert(pCpu->Param1.fUse & DISUSE_IMMEDIATE32_REL);
4015 if (!(pCpu->Param1.fUse & DISUSE_IMMEDIATE32_REL))
4016 goto failure;
4017
4018 Assert(pCpu->cbInstr == SIZEOF_NEARJUMP32 || pCpu->cbInstr == SIZEOF_NEAR_COND_JUMP32);
4019 if (pCpu->cbInstr != SIZEOF_NEARJUMP32 && pCpu->cbInstr != SIZEOF_NEAR_COND_JUMP32)
4020 goto failure;
4021
4022 if (PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + pCpu->cbInstr))
4023 {
4024 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
4025 AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
4026 rc = VERR_PATCHING_REFUSED;
4027 goto failure;
4028 }
4029
4030 break;
4031
4032 default:
4033 goto failure;
4034 }
4035
4036 // make a copy of the guest code bytes that will be overwritten
4037 Assert(pCpu->cbInstr <= sizeof(pPatch->aPrivInstr));
4038 Assert(pCpu->cbInstr >= SIZEOF_NEARJUMP32);
4039 pPatch->cbPatchJump = pCpu->cbInstr;
4040
4041 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
4042 AssertRC(rc);
4043
4044 /* Now insert a jump in the guest code. */
4045 /*
4046 * A conflict jump patch needs to be treated differently; we'll just replace the relative jump address with one that
4047 * references the target instruction in the conflict patch.
4048 */
4049 RTRCPTR pJmpDest = patmR3GuestGCPtrToPatchGCPtrSimple(pVM, pInstrGC + pCpu->cbInstr + (int32_t)pCpu->Param1.uValue);
4050
4051 AssertMsg(pJmpDest, ("patmR3GuestGCPtrToPatchGCPtrSimple failed for %RRv\n", pInstrGC + pCpu->cbInstr + (int32_t)pCpu->Param1.uValue));
4052 pPatch->pPatchJumpDestGC = pJmpDest;
4053
4054 PATMP2GLOOKUPREC cacheRec;
4055 RT_ZERO(cacheRec);
4056 cacheRec.pPatch = pPatch;
4057
4058 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
4059 /* Free leftover lock if any. */
4060 if (cacheRec.Lock.pvMap)
4061 {
4062 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4063 cacheRec.Lock.pvMap = NULL;
4064 }
4065 AssertRC(rc);
4066 if (RT_FAILURE(rc))
4067 goto failure;
4068
4069 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
4070
4071 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
4072 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
4073
4074 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledJump);
4075
4076 /* Lowest and highest address for write monitoring. */
4077 pPatch->pInstrGCLowest = pInstrGC;
4078 pPatch->pInstrGCHighest = pInstrGC + pPatch->cbPatchJump;
4079
4080 pPatch->uState = PATCH_ENABLED;
4081 return VINF_SUCCESS;
4082
4083failure:
4084 /* Turn this cli patch into a dummy. */
4085 pPatch->uState = PATCH_REFUSED;
4086
4087 return rc;
4088}
4089#endif /* PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES */
4090
4091
4092/**
4093 * Gives hint to PATM about supervisor guest instructions
4094 *
4095 * @returns VBox status code.
4096 * @param pVM Pointer to the VM.
4097 * @param pInstrGC Guest context pointer to privileged instruction
4098 * @param flags Patch flags
4099 */
4100VMMR3_INT_DECL(int) PATMR3AddHint(PVM pVM, RTRCPTR pInstrGC, uint32_t flags)
4101{
4102 Assert(pInstrGC);
4103 Assert(flags == PATMFL_CODE32);
4104
4105 Log(("PATMR3AddHint %RRv\n", pInstrGC));
4106 return PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_INSTR_HINT);
4107}
4108
4109/**
4110 * Patch privileged instruction at specified location
4111 *
4112 * @returns VBox status code.
4113 * @param pVM Pointer to the VM.
4114 * @param pInstrGC Guest context pointer to privileged instruction (0:32 flat address)
4115 * @param flags Patch flags
4116 *
4117 * @note returns failure if patching is not allowed or possible
4118 */
4119VMMR3_INT_DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
4120{
4121 DISCPUSTATE cpu;
4122 R3PTRTYPE(uint8_t *) pInstrHC;
4123 uint32_t cbInstr;
4124 PPATMPATCHREC pPatchRec;
4125 PCPUMCTX pCtx = 0;
4126 bool disret;
4127 int rc;
4128 PVMCPU pVCpu = VMMGetCpu0(pVM);
4129 LogFlow(("PATMR3InstallPatch: %08x (%#llx)\n", pInstrGC, flags));
4130
4131 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
4132
4133 if ( !pVM
4134 || pInstrGC == 0
4135 || (flags & ~(PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_SYSENTER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_REPLACE_FUNCTION_CALL|PATMFL_GUEST_SPECIFIC|PATMFL_INT3_REPLACEMENT|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_MMIO_ACCESS|PATMFL_TRAMPOLINE|PATMFL_INSTR_HINT|PATMFL_JUMP_CONFLICT)))
4136 {
4137 AssertFailed();
4138 return VERR_INVALID_PARAMETER;
4139 }
4140
4141 if (PATMIsEnabled(pVM) == false)
4142 return VERR_PATCHING_REFUSED;
4143
4144 /* Test for patch conflict only with patches that actually change guest code. */
4145 if (!(flags & (PATMFL_GUEST_SPECIFIC|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAMPOLINE)))
4146 {
4147 PPATCHINFO pConflictPatch = patmFindActivePatchByEntrypoint(pVM, pInstrGC);
4148 AssertReleaseMsg(pConflictPatch == 0, ("Unable to patch overwritten instruction at %RRv (%RRv)\n", pInstrGC, pConflictPatch->pPrivInstrGC));
4149 if (pConflictPatch != 0)
4150 return VERR_PATCHING_REFUSED;
4151 }
4152
4153 if (!(flags & PATMFL_CODE32))
4154 {
4155 /** @todo Only 32 bits code right now */
4156 AssertMsgFailed(("PATMR3InstallPatch: We don't support 16 bits code at this moment!!\n"));
4157 return VERR_NOT_IMPLEMENTED;
4158 }
4159
4160 /* We ran out of patch memory; don't bother anymore. */
4161 if (pVM->patm.s.fOutOfMemory == true)
4162 return VERR_PATCHING_REFUSED;
4163
4164#if 1 /* DONT COMMIT ENABLED! */
4165 /* Blacklisted NT4SP1 areas - debugging why we sometimes crash early on. */
4166 if ( 0
4167 //|| (pInstrGC - 0x80010000U) < 0x10000U // NT4SP1 HAL
4168 //|| (pInstrGC - 0x80010000U) < 0x5000U // NT4SP1 HAL
4169 //|| (pInstrGC - 0x80013000U) < 0x2000U // NT4SP1 HAL
4170 //|| (pInstrGC - 0x80014000U) < 0x1000U // NT4SP1 HAL
4171 //|| (pInstrGC - 0x80014000U) < 0x800U // NT4SP1 HAL
4172 //|| (pInstrGC - 0x80014400U) < 0x400U // NT4SP1 HAL
4173 //|| (pInstrGC - 0x80014400U) < 0x200U // NT4SP1 HAL
4174 //|| (pInstrGC - 0x80014400U) < 0x100U // NT4SP1 HAL
4175 //|| (pInstrGC - 0x80014500U) < 0x100U // NT4SP1 HAL - negative
4176 //|| (pInstrGC - 0x80014400U) < 0x80U // NT4SP1 HAL
4177 //|| (pInstrGC - 0x80014400U) < 0x80U // NT4SP1 HAL
4178 //|| (pInstrGC - 0x80014440U) < 0x40U // NT4SP1 HAL
4179 //|| (pInstrGC - 0x80014440U) < 0x20U // NT4SP1 HAL
4180 || pInstrGC == 0x80014447 /* KfLowerIrql */
4181 || 0)
4182 {
4183 Log(("PATMR3InstallPatch: %08x is blacklisted\n", pInstrGC));
4184 return VERR_PATCHING_REFUSED;
4185 }
4186#endif
4187
4188 /* Make sure the code selector is wide open; otherwise refuse. */
4189 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
4190 if (CPUMGetGuestCPL(pVCpu) == 0)
4191 {
4192 RTRCPTR pInstrGCFlat = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pInstrGC);
4193 if (pInstrGCFlat != pInstrGC)
4194 {
4195 Log(("PATMR3InstallPatch: code selector not wide open: %04x:%RRv != %RRv eflags=%08x\n", pCtx->cs.Sel, pInstrGCFlat, pInstrGC, pCtx->eflags.u32));
4196 return VERR_PATCHING_REFUSED;
4197 }
4198 }
4199
4200 /* Note: the OpenBSD-specific check will break if we allow additional patches to be installed (int 3). */
4201 if (!(flags & PATMFL_GUEST_SPECIFIC))
4202 {
4203 /* New code. Make sure CSAM has a go at it first. */
4204 CSAMR3CheckCode(pVM, pInstrGC);
4205 }
4206
4207 /* Note: obsolete */
4208 if ( PATMIsPatchGCAddr(pVM, pInstrGC)
4209 && (flags & PATMFL_MMIO_ACCESS))
4210 {
4211 RTRCUINTPTR offset;
4212 void *pvPatchCoreOffset;
4213
4214 /* Find the patch record. */
4215 offset = pInstrGC - pVM->patm.s.pPatchMemGC;
4216 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
4217 if (pvPatchCoreOffset == NULL)
4218 {
4219 AssertMsgFailed(("PATMR3InstallPatch: patch not found at address %RRv!!\n", pInstrGC));
4220 return VERR_PATCH_NOT_FOUND; //fatal error
4221 }
4222 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
4223
4224 return patmPatchPATMMMIOInstr(pVM, pInstrGC, &pPatchRec->patch);
4225 }
4226
4227 AssertReturn(!PATMIsPatchGCAddr(pVM, pInstrGC), VERR_PATCHING_REFUSED);
4228
4229 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4230 if (pPatchRec)
4231 {
4232 Assert(!(flags & PATMFL_TRAMPOLINE));
4233
4234 /* Hints about existing patches are ignored. */
4235 if (flags & PATMFL_INSTR_HINT)
4236 return VERR_PATCHING_REFUSED;
4237
4238 if (pPatchRec->patch.uState == PATCH_DISABLE_PENDING)
4239 {
4240 Log(("PATMR3InstallPatch: disable operation is pending for patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
4241 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
4242 Assert(pPatchRec->patch.uState == PATCH_DISABLED);
4243 }
4244
4245 if (pPatchRec->patch.uState == PATCH_DISABLED)
4246 {
4247 /* A patch for which we previously received a hint will be enabled and turned into a normal patch. */
4248 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4249 {
4250 Log(("Enabling HINTED patch %RRv\n", pInstrGC));
4251 pPatchRec->patch.flags &= ~PATMFL_INSTR_HINT;
4252 }
4253 else
4254 Log(("Enabling patch %RRv again\n", pInstrGC));
4255
4256 /** @todo we shouldn't disable and enable patches too often (it's relatively cheap, but pointless if it always happens) */
4257 rc = PATMR3EnablePatch(pVM, pInstrGC);
4258 if (RT_SUCCESS(rc))
4259 return VWRN_PATCH_ENABLED;
4260
4261 return rc;
4262 }
4263 if ( pPatchRec->patch.uState == PATCH_ENABLED
4264 || pPatchRec->patch.uState == PATCH_DIRTY)
4265 {
4266 /*
4267 * The patch might have been overwritten.
4268 */
4269 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
4270 if (pPatchRec->patch.uState != PATCH_REFUSED && pPatchRec->patch.uState != PATCH_UNUSABLE)
4271 {
4272 /* Patch must have been overwritten; remove it and pretend nothing happened. */
4273 Log(("Patch an existing patched instruction?!? (%RRv)\n", pInstrGC));
4274 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_MMIO_ACCESS|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
4275 {
4276 if (flags & PATMFL_IDTHANDLER)
4277 pPatchRec->patch.flags |= (flags & (PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER|PATMFL_INTHANDLER)); /* update the type */
4278
4279 return VERR_PATM_ALREADY_PATCHED; /* already done once */
4280 }
4281 }
4282 rc = PATMR3RemovePatch(pVM, pInstrGC);
4283 if (RT_FAILURE(rc))
4284 return VERR_PATCHING_REFUSED;
4285 }
4286 else
4287 {
4288 AssertMsg(pPatchRec->patch.uState == PATCH_REFUSED || pPatchRec->patch.uState == PATCH_UNUSABLE, ("Patch an existing patched instruction?!? (%RRv, state=%d)\n", pInstrGC, pPatchRec->patch.uState));
4289 /* already tried it once! */
4290 return VERR_PATCHING_REFUSED;
4291 }
4292 }
4293
4294 RTGCPHYS GCPhys;
4295 rc = PGMGstGetPage(pVCpu, pInstrGC, NULL, &GCPhys);
4296 if (rc != VINF_SUCCESS)
4297 {
4298 Log(("PGMGstGetPage failed with %Rrc\n", rc));
4299 return rc;
4300 }
4301 /* Disallow patching instructions inside ROM code; complete function duplication is allowed though. */
4302 if ( !(flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAMPOLINE))
4303 && !PGMPhysIsGCPhysNormal(pVM, GCPhys))
4304 {
4305 Log(("Code at %RGv (phys %RGp) is in a ROM, MMIO or invalid page - refused\n", pInstrGC, GCPhys));
4306 return VERR_PATCHING_REFUSED;
4307 }
4308
4309 /* Initialize cache record for guest address translations. */
4310 bool fInserted;
4311 PATMP2GLOOKUPREC cacheRec;
4312 RT_ZERO(cacheRec);
4313
4314 pInstrHC = patmR3GCVirtToHCVirt(pVM, &cacheRec, pInstrGC);
4315 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
4316
4317 /* Allocate patch record. */
4318 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
4319 if (RT_FAILURE(rc))
4320 {
4321 Log(("Out of memory!!!!\n"));
4322 return VERR_NO_MEMORY;
4323 }
4324 pPatchRec->Core.Key = pInstrGC;
4325 pPatchRec->patch.uState = PATCH_REFUSED; /* default value */
4326 /* Insert patch record into the lookup tree. */
4327 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
4328 Assert(fInserted);
4329
4330 pPatchRec->patch.pPrivInstrGC = pInstrGC;
4331 pPatchRec->patch.flags = flags;
4332 pPatchRec->patch.uOpMode = (flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT;
4333 pPatchRec->patch.pTrampolinePatchesHead = NULL;
4334
4335 pPatchRec->patch.pInstrGCLowest = pInstrGC;
4336 pPatchRec->patch.pInstrGCHighest = pInstrGC;
4337
4338 if (!(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION | PATMFL_IDTHANDLER | PATMFL_SYSENTER | PATMFL_TRAMPOLINE)))
4339 {
4340 /*
4341 * Close proximity to an unusable patch is a possible hint that this patch would turn out to be dangerous too!
4342 */
4343 PPATMPATCHREC pPatchNear = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, (pInstrGC + SIZEOF_NEARJUMP32 - 1), false);
4344 if (pPatchNear)
4345 {
4346 if (pPatchNear->patch.uState == PATCH_UNUSABLE && pInstrGC < pPatchNear->patch.pPrivInstrGC && pInstrGC + SIZEOF_NEARJUMP32 > pPatchNear->patch.pPrivInstrGC)
4347 {
4348 Log(("Dangerous patch; would overwrite the unusable patch at %RRv\n", pPatchNear->patch.pPrivInstrGC));
4349
4350 pPatchRec->patch.uState = PATCH_UNUSABLE;
4351 /*
4352 * Leave the new patch active as it's marked unusable, to prevent us from checking it over and over again.
4353 */
4354 return VERR_PATCHING_REFUSED;
4355 }
4356 }
4357 }
4358
4359 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
4360 if (pPatchRec->patch.pTempInfo == 0)
4361 {
4362 Log(("Out of memory!!!!\n"));
4363 return VERR_NO_MEMORY;
4364 }
4365
4366 disret = patmR3DisInstrNoStrOpMode(pVM, &pPatchRec->patch, pInstrGC, NULL, PATMREAD_ORGCODE, &cpu, &cbInstr);
4367 if (disret == false)
4368 {
4369 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
4370 return VERR_PATCHING_REFUSED;
4371 }
4372
4373 AssertMsg(cbInstr <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", cbInstr));
4374 if (cbInstr > MAX_INSTR_SIZE)
4375 return VERR_PATCHING_REFUSED;
4376
4377 pPatchRec->patch.cbPrivInstr = cbInstr;
4378 pPatchRec->patch.opcode = cpu.pCurInstr->uOpcode;
4379
4380 /* Restricted hinting for now. */
4381 Assert(!(flags & PATMFL_INSTR_HINT) || cpu.pCurInstr->uOpcode == OP_CLI);
4382
4383 /* Initialize cache record patch pointer. */
4384 cacheRec.pPatch = &pPatchRec->patch;
4385
4386 /* Allocate statistics slot */
4387 if (pVM->patm.s.uCurrentPatchIdx < PATM_STAT_MAX_COUNTERS)
4388 {
4389 pPatchRec->patch.uPatchIdx = pVM->patm.s.uCurrentPatchIdx++;
4390 }
4391 else
4392 {
4393 Log(("WARNING: Patch index wrap around!!\n"));
4394 pPatchRec->patch.uPatchIdx = PATM_STAT_INDEX_DUMMY;
4395 }
4396
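 /* Dispatch to the specialised installer matching the patch flags; each installer
  * generates its own patch code (if any) and sets the patch state on success or failure. */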
4397 if (pPatchRec->patch.flags & PATMFL_TRAPHANDLER)
4398 {
4399 rc = patmInstallTrapTrampoline(pVM, pInstrGC, pPatchRec, &cacheRec);
4400 }
4401 else
4402 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION ))
4403 {
4404 rc = patmDuplicateFunction(pVM, pInstrGC, pPatchRec, &cacheRec);
4405 }
4406 else
4407 if (pPatchRec->patch.flags & PATMFL_TRAMPOLINE)
4408 {
4409 rc = patmCreateTrampoline(pVM, pInstrGC, pPatchRec);
4410 }
4411 else
4412 if (pPatchRec->patch.flags & PATMFL_REPLACE_FUNCTION_CALL)
4413 {
4414 rc = patmReplaceFunctionCall(pVM, &cpu, pInstrGC, &cacheRec);
4415 }
4416 else
4417 if (pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT)
4418 {
4419 rc = patmR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4420 }
4421 else
4422 if (pPatchRec->patch.flags & PATMFL_MMIO_ACCESS)
4423 {
4424 rc = patmPatchMMIOInstr(pVM, pInstrGC, &cpu, &cacheRec);
4425 }
4426 else
4427 if (pPatchRec->patch.flags & (PATMFL_IDTHANDLER|PATMFL_SYSENTER))
4428 {
4429 if (pPatchRec->patch.flags & PATMFL_SYSENTER)
4430 pPatchRec->patch.flags |= PATMFL_IDTHANDLER; /* we treat a sysenter handler as an IDT handler */
4431
4432 rc = patmIdtHandler(pVM, pInstrGC, cbInstr, pPatchRec, &cacheRec);
4433#ifdef VBOX_WITH_STATISTICS
4434 if ( rc == VINF_SUCCESS
4435 && (pPatchRec->patch.flags & PATMFL_SYSENTER))
4436 {
4437 pVM->patm.s.uSysEnterPatchIdx = pPatchRec->patch.uPatchIdx;
4438 }
4439#endif
4440 }
4441 else
4442 if (pPatchRec->patch.flags & PATMFL_GUEST_SPECIFIC)
4443 {
4444 switch (cpu.pCurInstr->uOpcode)
4445 {
4446 case OP_SYSENTER:
4447 case OP_PUSH:
4448 rc = patmR3InstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4449 if (rc == VINF_SUCCESS)
4450 {
4451 if (rc == VINF_SUCCESS)
4452 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4453 return rc;
4454 }
4455 break;
4456
4457 default:
4458 rc = VERR_NOT_IMPLEMENTED;
4459 break;
4460 }
4461 }
4462 else
4463 {
4464 switch (cpu.pCurInstr->uOpcode)
4465 {
4466 case OP_SYSENTER:
4467 rc = patmR3InstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4468 if (rc == VINF_SUCCESS)
4469 {
4470 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4471 return VINF_SUCCESS;
4472 }
4473 break;
4474
4475#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
4476 case OP_JO:
4477 case OP_JNO:
4478 case OP_JC:
4479 case OP_JNC:
4480 case OP_JE:
4481 case OP_JNE:
4482 case OP_JBE:
4483 case OP_JNBE:
4484 case OP_JS:
4485 case OP_JNS:
4486 case OP_JP:
4487 case OP_JNP:
4488 case OP_JL:
4489 case OP_JNL:
4490 case OP_JLE:
4491 case OP_JNLE:
4492 case OP_JECXZ:
4493 case OP_LOOP:
4494 case OP_LOOPNE:
4495 case OP_LOOPE:
4496 case OP_JMP:
4497 if (pPatchRec->patch.flags & PATMFL_JUMP_CONFLICT)
4498 {
4499 rc = patmPatchJump(pVM, pInstrGC, pInstrHC, &cpu, pPatchRec);
4500 break;
4501 }
4502 return VERR_NOT_IMPLEMENTED;
4503#endif
4504
4505 case OP_PUSHF:
4506 case OP_CLI:
4507 Log(("PATMR3InstallPatch %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4508 rc = patmR3PatchBlock(pVM, pInstrGC, pInstrHC, cpu.pCurInstr->uOpcode, cbInstr, pPatchRec);
4509 break;
4510
4511#ifndef VBOX_WITH_SAFE_STR
4512 case OP_STR:
4513#endif
4514 case OP_SGDT:
4515 case OP_SLDT:
4516 case OP_SIDT:
4517 case OP_CPUID:
4518 case OP_LSL:
4519 case OP_LAR:
4520 case OP_SMSW:
4521 case OP_VERW:
4522 case OP_VERR:
4523 case OP_IRET:
4524#ifdef VBOX_WITH_RAW_RING1
4525 case OP_MOV:
4526#endif
4527 rc = patmR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4528 break;
4529
4530 default:
4531 return VERR_NOT_IMPLEMENTED;
4532 }
4533 }
4534
4535 if (rc != VINF_SUCCESS)
4536 {
4537 if (pPatchRec && pPatchRec->patch.nrPatch2GuestRecs)
4538 {
4539 patmEmptyTreeU32(pVM, &pPatchRec->patch.Patch2GuestAddrTree);
4540 pPatchRec->patch.nrPatch2GuestRecs = 0;
4541 }
4542 pVM->patm.s.uCurrentPatchIdx--;
4543 }
4544 else
4545 {
4546 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
4547 AssertRCReturn(rc, rc);
4548
4549 /* Keep track of the upper and lower boundaries of patched instructions. */
4550 if (pPatchRec->patch.pInstrGCLowest < pVM->patm.s.pPatchedInstrGCLowest)
4551 pVM->patm.s.pPatchedInstrGCLowest = pPatchRec->patch.pInstrGCLowest;
4552 if (pPatchRec->patch.pInstrGCHighest > pVM->patm.s.pPatchedInstrGCHighest)
4553 pVM->patm.s.pPatchedInstrGCHighest = pPatchRec->patch.pInstrGCHighest;
4554
4555 Log(("Patch lowest %RRv highest %RRv\n", pPatchRec->patch.pInstrGCLowest, pPatchRec->patch.pInstrGCHighest));
4556 Log(("Global lowest %RRv highest %RRv\n", pVM->patm.s.pPatchedInstrGCLowest, pVM->patm.s.pPatchedInstrGCHighest));
4557
4558 STAM_COUNTER_ADD(&pVM->patm.s.StatInstalled, 1);
4559 STAM_COUNTER_ADD(&pVM->patm.s.StatPATMMemoryUsed, pPatchRec->patch.cbPatchBlockSize);
4560
4561 rc = VINF_SUCCESS;
4562
4563 /* Patch hints are not enabled by default; only when they are actually encountered. */
4564 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4565 {
4566 rc = PATMR3DisablePatch(pVM, pInstrGC);
4567 AssertRCReturn(rc, rc);
4568 }
4569
4570#ifdef VBOX_WITH_STATISTICS
4571 /* Register statistics counter */
4572 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
4573 {
4574 STAMR3RegisterCallback(pVM, &pPatchRec->patch, STAMVISIBILITY_NOT_GUI, STAMUNIT_GOOD_BAD, patmResetStat, patmPrintStat, "Patch statistics",
4575 "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4576#ifndef DEBUG_sandervl
4577 /* Full breakdown for the GUI. */
4578 STAMR3RegisterF(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx], STAMTYPE_RATIO_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_GOOD_BAD, PATMPatchType(pVM, &pPatchRec->patch),
4579 "/PATM/Stats/PatchBD/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4580 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchBlockSize,STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchBlockSize", pPatchRec->patch.pPrivInstrGC);
4581 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchJump, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchJump", pPatchRec->patch.pPrivInstrGC);
4582 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPrivInstr, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPrivInstr", pPatchRec->patch.pPrivInstrGC);
4583 STAMR3RegisterF(pVM, &pPatchRec->patch.cCodeWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cCodeWrites", pPatchRec->patch.pPrivInstrGC);
4584 STAMR3RegisterF(pVM, &pPatchRec->patch.cInvalidWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cInvalidWrites", pPatchRec->patch.pPrivInstrGC);
4585 STAMR3RegisterF(pVM, &pPatchRec->patch.cTraps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cTraps", pPatchRec->patch.pPrivInstrGC);
4586 STAMR3RegisterF(pVM, &pPatchRec->patch.flags, STAMTYPE_X32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/flags", pPatchRec->patch.pPrivInstrGC);
4587 STAMR3RegisterF(pVM, &pPatchRec->patch.nrJumpRecs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrJumpRecs", pPatchRec->patch.pPrivInstrGC);
4588 STAMR3RegisterF(pVM, &pPatchRec->patch.nrFixups, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrFixups", pPatchRec->patch.pPrivInstrGC);
4589 STAMR3RegisterF(pVM, &pPatchRec->patch.opcode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/opcode", pPatchRec->patch.pPrivInstrGC);
4590 STAMR3RegisterF(pVM, &pPatchRec->patch.uOldState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOldState", pPatchRec->patch.pPrivInstrGC);
4591 STAMR3RegisterF(pVM, &pPatchRec->patch.uOpMode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOpMode", pPatchRec->patch.pPrivInstrGC);
4592 /// @todo change the state to be a callback so we can get a state mnemonic instead.
4593 STAMR3RegisterF(pVM, &pPatchRec->patch.uState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uState", pPatchRec->patch.pPrivInstrGC);
4594#endif
4595 }
4596#endif
4597
4598 /* Add debug symbol. */
4599 patmR3DbgAddPatch(pVM, pPatchRec);
4600 }
4601 /* Free leftover lock if any. */
4602 if (cacheRec.Lock.pvMap)
4603 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4604 return rc;
4605}
4606
4607/**
4608 * Query instruction size
4609 *
4610 * @returns VBox status code.
4611 * @param pVM Pointer to the VM.
4612 * @param pPatch Patch record
4613 * @param pInstrGC Instruction address
4614 */
4615static uint32_t patmGetInstrSize(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
4616{
4617 uint8_t *pInstrHC;
4618 PGMPAGEMAPLOCK Lock;
4619
4620 int rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pInstrGC, (const void **)&pInstrHC, &Lock);
4621 if (rc == VINF_SUCCESS)
4622 {
4623 DISCPUSTATE cpu;
4624 bool disret;
4625 uint32_t cbInstr;
4626
4627 disret = patmR3DisInstr(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE | PATMREAD_NOCHECK, &cpu, &cbInstr);
4628 PGMPhysReleasePageMappingLock(pVM, &Lock);
4629 if (disret)
4630 return cbInstr;
4631 }
4632 return 0;
4633}
4634
4635/**
4636 * Add patch to page record
4637 *
4638 * @returns VBox status code.
4639 * @param pVM Pointer to the VM.
4640 * @param pPage Page address
4641 * @param pPatch Patch record
4642 */
4643int patmAddPatchToPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4644{
4645 PPATMPATCHPAGE pPatchPage;
4646 int rc;
4647
4648 Log(("patmAddPatchToPage: insert patch %RHv to page %RRv\n", pPatch, pPage));
4649
4650 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4651 if (pPatchPage)
4652 {
4653 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4654 if (pPatchPage->cCount == pPatchPage->cMaxPatches)
4655 {
4656 uint32_t cMaxPatchesOld = pPatchPage->cMaxPatches;
4657 PPATCHINFO *papPatchOld = pPatchPage->papPatch;
4658
4659 pPatchPage->cMaxPatches += PATMPATCHPAGE_PREALLOC_INCREMENT;
4660 rc = MMHyperAlloc(pVM, sizeof(pPatchPage->papPatch[0]) * pPatchPage->cMaxPatches, 0, MM_TAG_PATM_PATCH,
4661 (void **)&pPatchPage->papPatch);
4662 if (RT_FAILURE(rc))
4663 {
4664 Log(("Out of memory!!!!\n"));
4665 return VERR_NO_MEMORY;
4666 }
4667 memcpy(pPatchPage->papPatch, papPatchOld, cMaxPatchesOld * sizeof(pPatchPage->papPatch[0]));
4668 MMHyperFree(pVM, papPatchOld);
4669 }
4670 pPatchPage->papPatch[pPatchPage->cCount] = pPatch;
4671 pPatchPage->cCount++;
4672 }
4673 else
4674 {
4675 bool fInserted;
4676
4677 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHPAGE), 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage);
4678 if (RT_FAILURE(rc))
4679 {
4680 Log(("Out of memory!!!!\n"));
4681 return VERR_NO_MEMORY;
4682 }
4683 pPatchPage->Core.Key = pPage;
4684 pPatchPage->cCount = 1;
4685 pPatchPage->cMaxPatches = PATMPATCHPAGE_PREALLOC_INCREMENT;
4686
4687 rc = MMHyperAlloc(pVM, sizeof(pPatchPage->papPatch[0]) * PATMPATCHPAGE_PREALLOC_INCREMENT, 0, MM_TAG_PATM_PATCH,
4688 (void **)&pPatchPage->papPatch);
4689 if (RT_FAILURE(rc))
4690 {
4691 Log(("Out of memory!!!!\n"));
4692 MMHyperFree(pVM, pPatchPage);
4693 return VERR_NO_MEMORY;
4694 }
4695 pPatchPage->papPatch[0] = pPatch;
4696
4697 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, &pPatchPage->Core);
4698 Assert(fInserted);
4699 pVM->patm.s.cPageRecords++;
4700
4701 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageInserted);
4702 }
4703 CSAMR3MonitorPage(pVM, pPage, CSAM_TAG_PATM);
4704
4705 /* Get the closest guest instruction (from below) */
4706 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4707 Assert(pGuestToPatchRec);
4708 if (pGuestToPatchRec)
4709 {
4710 LogFlow(("patmAddPatchToPage: lowest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pLowestAddrGC));
4711 if ( pPatchPage->pLowestAddrGC == 0
4712 || pPatchPage->pLowestAddrGC > (RTRCPTR)pGuestToPatchRec->Core.Key)
4713 {
4714 RTRCUINTPTR offset;
4715
4716 pPatchPage->pLowestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4717
4718 offset = pPatchPage->pLowestAddrGC & PAGE_OFFSET_MASK;
4719 /* If we're too close to the page boundary, then make sure an
4720 instruction from the previous page doesn't cross the
4721 boundary itself. */
4722 if (offset && offset < MAX_INSTR_SIZE)
4723 {
4724 /* Get the closest guest instruction (from above) */
4725 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage-1, false);
4726
4727 if (pGuestToPatchRec)
4728 {
4729 uint32_t size = patmGetInstrSize(pVM, pPatch, (RTRCPTR)pGuestToPatchRec->Core.Key);
4730 if ((RTRCUINTPTR)pGuestToPatchRec->Core.Key + size > pPage)
4731 {
4732 pPatchPage->pLowestAddrGC = pPage;
4733 LogFlow(("patmAddPatchToPage: new lowest %RRv\n", pPatchPage->pLowestAddrGC));
4734 }
4735 }
4736 }
4737 }
4738 }
4739
4740 /* Get the closest guest instruction (from above) */
4741 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage+PAGE_SIZE-1, false);
4742 Assert(pGuestToPatchRec);
4743 if (pGuestToPatchRec)
4744 {
4745 LogFlow(("patmAddPatchToPage: highest patch page address %RRv current highest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pHighestAddrGC));
4746 if ( pPatchPage->pHighestAddrGC == 0
4747 || pPatchPage->pHighestAddrGC <= (RTRCPTR)pGuestToPatchRec->Core.Key)
4748 {
4749 pPatchPage->pHighestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4750 /* Increase by instruction size. */
4751 uint32_t size = patmGetInstrSize(pVM, pPatch, pPatchPage->pHighestAddrGC);
4752//// Assert(size);
4753 pPatchPage->pHighestAddrGC += size;
4754 LogFlow(("patmAddPatchToPage: new highest %RRv\n", pPatchPage->pHighestAddrGC));
4755 }
4756 }
4757
4758 return VINF_SUCCESS;
4759}
4760
4761/**
4762 * Remove patch from page record
4763 *
4764 * @returns VBox status code.
4765 * @param pVM Pointer to the VM.
4766 * @param pPage Page address
4767 * @param pPatch Patch record
4768 */
4769int patmRemovePatchFromPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4770{
4771 PPATMPATCHPAGE pPatchPage;
4772 int rc;
4773
4774 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4775 Assert(pPatchPage);
4776
4777 if (!pPatchPage)
4778 return VERR_INVALID_PARAMETER;
4779
4780 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4781
4782 Log(("patmRemovePatchPage: remove patch %RHv from page %RRv\n", pPatch, pPage));
4783 if (pPatchPage->cCount > 1)
4784 {
4785 uint32_t i;
4786
4787 /* Used by multiple patches */
4788 for (i = 0; i < pPatchPage->cCount; i++)
4789 {
4790 if (pPatchPage->papPatch[i] == pPatch)
4791 {
4792 /* close the gap between the remaining pointers. */
4793 uint32_t cNew = --pPatchPage->cCount;
4794 if (i < cNew)
4795 pPatchPage->papPatch[i] = pPatchPage->papPatch[cNew];
4796 pPatchPage->papPatch[cNew] = NULL;
4797 return VINF_SUCCESS;
4798 }
4799 }
4800 AssertMsgFailed(("Unable to find patch %RHv in page %RRv\n", pPatch, pPage));
4801 }
4802 else
4803 {
4804 PPATMPATCHPAGE pPatchNode;
4805
4806 Log(("patmRemovePatchFromPage %RRv\n", pPage));
4807
4808 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageRemoved);
4809 pPatchNode = (PPATMPATCHPAGE)RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4810 Assert(pPatchNode && pPatchNode == pPatchPage);
4811
4812 Assert(pPatchPage->papPatch);
4813 rc = MMHyperFree(pVM, pPatchPage->papPatch);
4814 AssertRC(rc);
4815 rc = MMHyperFree(pVM, pPatchPage);
4816 AssertRC(rc);
4817 pVM->patm.s.cPageRecords--;
4818 }
4819 return VINF_SUCCESS;
4820}
4821
4822/**
4823 * Insert page records for all guest pages that contain instructions that were recompiled for this patch
4824 *
4825 * @returns VBox status code.
4826 * @param pVM Pointer to the VM.
4827 * @param pPatch Patch record
4828 */
4829int patmInsertPatchPages(PVM pVM, PPATCHINFO pPatch)
4830{
4831 int rc;
4832 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4833
4834 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4835 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4836 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4837
4838 /** @todo optimize better (large gaps between current and next used page) */
4839 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4840 {
4841 /* Get the closest guest instruction (from above) */
4842 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4843 if ( pGuestToPatchRec
4844 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage)
4845 )
4846 {
4847 /* Code in page really patched -> add record */
4848 rc = patmAddPatchToPage(pVM, pPage, pPatch);
4849 AssertRC(rc);
4850 }
4851 }
4852 pPatch->flags |= PATMFL_CODE_MONITORED;
4853 return VINF_SUCCESS;
4854}
4855
4856/**
4857 * Remove page records for all guest pages that contain instructions that were recompiled for this patch
4858 *
4859 * @returns VBox status code.
4860 * @param pVM Pointer to the VM.
4861 * @param pPatch Patch record
4862 */
4863static int patmRemovePatchPages(PVM pVM, PPATCHINFO pPatch)
4864{
4865 int rc;
4866 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4867
4868 /* Remove the pages that contain patched instructions from the lookup tree used for detecting self-modifying code. */
4869 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4870 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4871
4872 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4873 {
4874 /* Get the closest guest instruction (from above) */
4875 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4876 if ( pGuestToPatchRec
4877 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage) /** @todo bird: PAGE_ADDRESS is for the current context really. check out these. */
4878 )
4879 {
4880 /* Code in page really patched -> remove record */
4881 rc = patmRemovePatchFromPage(pVM, pPage, pPatch);
4882 AssertRC(rc);
4883 }
4884 }
4885 pPatch->flags &= ~PATMFL_CODE_MONITORED;
4886 return VINF_SUCCESS;
4887}
4888
4889/**
4890 * Notifies PATM about a (potential) write to code that has been patched.
4891 *
4892 * @returns VBox status code.
4893 * @param pVM Pointer to the VM.
4894 * @param GCPtr GC pointer to the write address
4895 * @param cbWrite Number of bytes to write
4896 *
4897 */
4898VMMR3_INT_DECL(int) PATMR3PatchWrite(PVM pVM, RTRCPTR GCPtr, uint32_t cbWrite)
4899{
4900 RTRCUINTPTR pWritePageStart, pWritePageEnd, pPage;
4901
4902 Log(("PATMR3PatchWrite %RRv %x\n", GCPtr, cbWrite));
4903
4904 Assert(VM_IS_EMT(pVM));
4905 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
4906
4907 /* Quick boundary check */
4908 if ( GCPtr < pVM->patm.s.pPatchedInstrGCLowest
4909 || GCPtr > pVM->patm.s.pPatchedInstrGCHighest
4910 )
4911 return VINF_SUCCESS;
4912
4913 STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWrite, a);
4914
4915 pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
4916 pWritePageEnd = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;
4917
4918 for (pPage = pWritePageStart; pPage <= pWritePageEnd; pPage += PAGE_SIZE)
4919 {
4920loop_start:
4921 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4922 if (pPatchPage)
4923 {
4924 uint32_t i;
4925 bool fValidPatchWrite = false;
4926
4927 /* Quick check to see if the write is in the patched part of the page */
4928 if ( pPatchPage->pLowestAddrGC > (RTRCPTR)((RTRCUINTPTR)GCPtr + cbWrite - 1)
4929 || pPatchPage->pHighestAddrGC < GCPtr)
4930 {
4931 break;
4932 }
4933
4934 for (i=0;i<pPatchPage->cCount;i++)
4935 {
4936 if (pPatchPage->papPatch[i])
4937 {
4938 PPATCHINFO pPatch = pPatchPage->papPatch[i];
4939 RTRCPTR pPatchInstrGC;
4940 //unused: bool fForceBreak = false;
4941
4942 Assert(pPatchPage->papPatch[i]->flags & PATMFL_CODE_MONITORED);
4943 /** @todo inefficient and includes redundant checks for multiple pages. */
4944 for (uint32_t j=0; j<cbWrite; j++)
4945 {
4946 RTRCPTR pGuestPtrGC = (RTRCPTR)((RTRCUINTPTR)GCPtr + j);
4947
4948 if ( pPatch->cbPatchJump
4949 && pGuestPtrGC >= pPatch->pPrivInstrGC
4950 && pGuestPtrGC < pPatch->pPrivInstrGC + pPatch->cbPatchJump)
4951 {
4952 /* The guest is about to overwrite the 5 byte jump to patch code. Remove the patch. */
4953 Log(("PATMR3PatchWrite: overwriting jump to patch code -> remove patch.\n"));
4954 int rc = PATMR3RemovePatch(pVM, pPatch->pPrivInstrGC);
4955 if (rc == VINF_SUCCESS)
4956 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4957 goto loop_start;
4958
4959 continue;
4960 }
4961
4962 /* Find the closest instruction from below; the above quick check ensured that we are indeed in patched code */
4963 pPatchInstrGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4964 if (!pPatchInstrGC)
4965 {
4966 RTRCPTR pClosestInstrGC;
4967 uint32_t size;
4968
4969 pPatchInstrGC = patmGuestGCPtrToClosestPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4970 if (pPatchInstrGC)
4971 {
4972 pClosestInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchInstrGC);
4973 Assert(pClosestInstrGC <= pGuestPtrGC);
4974 size = patmGetInstrSize(pVM, pPatch, pClosestInstrGC);
4975 /* Check if this is not a write into a gap between two patches */
4976 if (pClosestInstrGC + size - 1 < pGuestPtrGC)
4977 pPatchInstrGC = 0;
4978 }
4979 }
4980 if (pPatchInstrGC)
4981 {
4982 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
4983
4984 fValidPatchWrite = true;
4985
4986 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
4987 Assert(pPatchToGuestRec);
4988 if (pPatchToGuestRec && !pPatchToGuestRec->fDirty)
4989 {
4990 Log(("PATMR3PatchWrite: Found patched instruction %RRv -> %RRv\n", pGuestPtrGC, pPatchInstrGC));
4991
4992 if (++pPatch->cCodeWrites > PATM_MAX_CODE_WRITES)
4993 {
4994 LogRel(("PATM: Disable block at %RRv - write %RRv-%RRv\n", pPatch->pPrivInstrGC, pGuestPtrGC, pGuestPtrGC+cbWrite));
4995
4996 patmR3MarkDirtyPatch(pVM, pPatch);
4997
4998 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4999 goto loop_start;
5000 }
5001 else
5002 {
5003 /* Replace the patch instruction with a breakpoint; when it's hit, then we'll attempt to recompile the instruction again. */
5004 uint8_t *pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pPatchInstrGC);
5005
5006 pPatchToGuestRec->u8DirtyOpcode = *pInstrHC;
5007 pPatchToGuestRec->fDirty = true;
5008
5009 *pInstrHC = 0xCC;
5010
5011 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirty);
5012 }
5013 }
5014 /* else already marked dirty */
5015 }
5016 }
5017 }
5018 } /* for each patch */
5019
5020 if (fValidPatchWrite == false)
5021 {
5022 /* Write to a part of the page that either:
5023 * - doesn't contain any code (shared code/data); rather unlikely
5024 * - belongs to an old code page that's no longer in active use.
5025 */
5026invalid_write_loop_start:
5027 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
5028
5029 if (pPatchPage)
5030 {
5031 for (i=0;i<pPatchPage->cCount;i++)
5032 {
5033 PPATCHINFO pPatch = pPatchPage->papPatch[i];
5034
5035 if (pPatch->cInvalidWrites > PATM_MAX_INVALID_WRITES)
5036 {
5037 /* Note: possibly dangerous assumption that all future writes will be harmless. */
5038 if (pPatch->flags & PATMFL_IDTHANDLER)
5039 {
5040 LogRel(("PATM: Stop monitoring IDT handler pages at %RRv - invalid write %RRv-%RRv (this is not a fatal error)\n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
5041
5042 Assert(pPatch->flags & PATMFL_CODE_MONITORED);
5043 int rc = patmRemovePatchPages(pVM, pPatch);
5044 AssertRC(rc);
5045 }
5046 else
5047 {
5048 LogRel(("PATM: Disable block at %RRv - invalid write %RRv-%RRv \n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
5049 patmR3MarkDirtyPatch(pVM, pPatch);
5050 }
5051 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
5052 goto invalid_write_loop_start;
5053 }
5054 } /* for */
5055 }
5056 }
5057 }
5058 }
5059 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWrite, a);
5060 return VINF_SUCCESS;
5061
5062}
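
/*
 * Illustrative sketch (not part of the original PATM sources): PATMR3PatchWrite
 * is a notification, so a hypothetical guest-code write path would call it
 * before committing the write.  The helper name and the use of
 * PGMPhysSimpleWriteGCPtr as the actual write primitive are assumptions for
 * illustration; the real notification sites live in PGM/CSAM.
 */
#if 0 /* illustrative sketch only, never compiled */
static int patmSketchWriteGuestCode(PVM pVM, PVMCPU pVCpu, RTRCPTR GCPtrDst, const void *pvSrc, uint32_t cbWrite)
{
    /* Let PATM mark overwritten patches dirty and remove clobbered patch jumps first. */
    int rc = PATMR3PatchWrite(pVM, GCPtrDst, cbWrite);
    AssertRCReturn(rc, rc);
    /* Now the write itself can be carried out. */
    return PGMPhysSimpleWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cbWrite);
}
#endif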
5063
5064/**
5065 * Disable all patches in a flushed page
5066 *
5067 * @returns VBox status code
5068 * @param pVM Pointer to the VM.
5069 * @param addr GC address of the page to flush
5070 * @note Currently only called by CSAMR3FlushPage; optimization to avoid
5071 * having to double check if the physical address has changed
5072 */
5073VMMR3_INT_DECL(int) PATMR3FlushPage(PVM pVM, RTRCPTR addr)
5074{
5075 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5076
5077 addr &= PAGE_BASE_GC_MASK;
5078
5079 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, addr);
5080 if (pPatchPage)
5081 {
5082 int i;
5083
5084 /* From top to bottom as the array is modified by patmR3MarkDirtyPatch. */
5085 for (i=(int)pPatchPage->cCount-1;i>=0;i--)
5086 {
5087 if (pPatchPage->papPatch[i])
5088 {
5089 PPATCHINFO pPatch = pPatchPage->papPatch[i];
5090
5091 Log(("PATMR3FlushPage %RRv remove patch at %RRv\n", addr, pPatch->pPrivInstrGC));
5092 patmR3MarkDirtyPatch(pVM, pPatch);
5093 }
5094 }
5095 STAM_COUNTER_INC(&pVM->patm.s.StatFlushed);
5096 }
5097 return VINF_SUCCESS;
5098}
5099
5100/**
5101 * Checks if the instruction at the specified address has already been patched.
5102 *
5103 * @returns boolean, patched or not
5104 * @param pVM Pointer to the VM.
5105 * @param pInstrGC Guest context pointer to instruction
5106 */
5107VMMR3_INT_DECL(bool) PATMR3HasBeenPatched(PVM pVM, RTRCPTR pInstrGC)
5108{
5109 Assert(!HMIsEnabled(pVM));
5110 PPATMPATCHREC pPatchRec;
5111 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5112 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
5113 return true;
5114 return false;
5115}
5116
5117/**
5118 * Query the opcode of the original code that was overwritten by the 5-byte patch jump
5119 *
5120 * @returns VBox status code.
5121 * @param pVM Pointer to the VM.
5122 * @param pInstrGC GC address of instr
5123 * @param pByte opcode byte pointer (OUT)
5124 *
5125 */
5126VMMR3DECL(int) PATMR3QueryOpcode(PVM pVM, RTRCPTR pInstrGC, uint8_t *pByte)
5127{
5128 PPATMPATCHREC pPatchRec;
5129
5130 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
5131
5132 /* Shortcut. */
5133 if (!PATMIsEnabled(pVM))
5134 return VERR_PATCH_NOT_FOUND;
5135 Assert(!HMIsEnabled(pVM));
5136 if ( pInstrGC < pVM->patm.s.pPatchedInstrGCLowest
5137 || pInstrGC > pVM->patm.s.pPatchedInstrGCHighest)
5138 return VERR_PATCH_NOT_FOUND;
5139
5140 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5141 // if the patch is enabled and the pointer lies within 5 bytes of this priv instr ptr, then we've got a hit!
5142 if ( pPatchRec
5143 && pPatchRec->patch.uState == PATCH_ENABLED
5144 && pInstrGC >= pPatchRec->patch.pPrivInstrGC
5145 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5146 {
5147 RTRCPTR offset = pInstrGC - pPatchRec->patch.pPrivInstrGC;
5148 *pByte = pPatchRec->patch.aPrivInstr[offset];
5149
5150 if (pPatchRec->patch.cbPatchJump == 1)
5151 {
5152 Log(("PATMR3QueryOpcode: returning opcode %2X for instruction at %RRv\n", *pByte, pInstrGC));
5153 }
5154 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5155 return VINF_SUCCESS;
5156 }
5157 return VERR_PATCH_NOT_FOUND;
5158}
5159
5160/**
5161 * Read instruction bytes of the original code that was overwritten by the
5162 * 5-byte patch jump.
5163 *
5164 * @returns VINF_SUCCESS or VERR_PATCH_NOT_FOUND.
5165 * @param pVM Pointer to the VM.
5166 * @param GCPtrInstr GC address of instr
5167 * @param pbDst The output buffer.
5168 * @param cbToRead The maximum number of bytes to read.
5169 * @param pcbRead Where to return the actual number of bytes read.
5170 */
5171VMMR3_INT_DECL(int) PATMR3ReadOrgInstr(PVM pVM, RTGCPTR32 GCPtrInstr, uint8_t *pbDst, size_t cbToRead, size_t *pcbRead)
5172{
5173 /* Shortcut. */
5174 if (!PATMIsEnabled(pVM))
5175 return VERR_PATCH_NOT_FOUND;
5176 Assert(!HMIsEnabled(pVM));
5177 if ( GCPtrInstr < pVM->patm.s.pPatchedInstrGCLowest
5178 || GCPtrInstr > pVM->patm.s.pPatchedInstrGCHighest)
5179 return VERR_PATCH_NOT_FOUND;
5180
5181 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
5182
5183 /*
5184 * If the patch is enabled and the pointer lies within 5 bytes of this
5185 * priv instr ptr, then we've got a hit!
5186 */
5187 RTGCPTR32 off;
5188 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree,
5189 GCPtrInstr, false /*fAbove*/);
5190 if ( pPatchRec
5191 && pPatchRec->patch.uState == PATCH_ENABLED
5192 && (off = GCPtrInstr - pPatchRec->patch.pPrivInstrGC) < pPatchRec->patch.cbPatchJump)
5193 {
5194 uint8_t const *pbSrc = &pPatchRec->patch.aPrivInstr[off];
5195 uint32_t const cbMax = pPatchRec->patch.cbPatchJump - off;
5196 if (cbToRead > cbMax)
5197 cbToRead = cbMax;
5198 switch (cbToRead)
5199 {
5200 case 5: pbDst[4] = pbSrc[4];
5201 case 4: pbDst[3] = pbSrc[3];
5202 case 3: pbDst[2] = pbSrc[2];
5203 case 2: pbDst[1] = pbSrc[1];
5204 case 1: pbDst[0] = pbSrc[0];
5205 break;
5206 default:
5207 memcpy(pbDst, pbSrc, cbToRead);
5208 }
5209 *pcbRead = cbToRead;
5210
5211 if (pPatchRec->patch.cbPatchJump == 1)
5212 Log(("PATMR3ReadOrgInstr: returning opcode %.*Rhxs for instruction at %RX32\n", cbToRead, pbSrc, GCPtrInstr));
5213 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5214 return VINF_SUCCESS;
5215 }
5216
5217 return VERR_PATCH_NOT_FOUND;
5218}
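
/*
 * Illustrative sketch (not part of the original PATM sources): a disassembler
 * read routine would try PATMR3ReadOrgInstr first so it sees the original guest
 * bytes instead of the 5-byte patch jump, and fall back to a plain guest read
 * otherwise.  The helper name and the PGMPhysSimpleReadGCPtr fallback are
 * assumptions for illustration.
 */
#if 0 /* illustrative sketch only, never compiled */
static int patmSketchReadGuestInstrBytes(PVM pVM, PVMCPU pVCpu, RTGCPTR32 GCPtrInstr, uint8_t *pbDst, size_t cbToRead)
{
    size_t cbRead = 0;
    int rc = PATMR3ReadOrgInstr(pVM, GCPtrInstr, pbDst, cbToRead, &cbRead);
    if (rc == VINF_SUCCESS && cbRead == cbToRead)
        return VINF_SUCCESS;    /* the whole request was covered by the saved original bytes */
    /* Not (fully) inside a patch jump: read the remainder straight from guest memory. */
    return PGMPhysSimpleReadGCPtr(pVCpu, pbDst + cbRead, GCPtrInstr + cbRead, cbToRead - cbRead);
}
#endif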
5219
5220/**
5221 * Disable patch for privileged instruction at specified location
5222 *
5223 * @returns VBox status code.
5224 * @param pVM Pointer to the VM.
5225 * @param pInstrGC Guest context pointer to the privileged instruction
5226 *
5227 * @note returns failure if patching is not allowed or possible
5228 *
5229 */
5230VMMR3_INT_DECL(int) PATMR3DisablePatch(PVM pVM, RTRCPTR pInstrGC)
5231{
5232 PPATMPATCHREC pPatchRec;
5233 PPATCHINFO pPatch;
5234
5235 Log(("PATMR3DisablePatch: %RRv\n", pInstrGC));
5236 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5237 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5238 if (pPatchRec)
5239 {
5240 int rc = VINF_SUCCESS;
5241
5242 pPatch = &pPatchRec->patch;
5243
5244 /* Already disabled? */
5245 if (pPatch->uState == PATCH_DISABLED)
5246 return VINF_SUCCESS;
5247
5248 /* Clear the IDT entries for the patch we're disabling. */
5249 /* Note: very important as we clear IF in the patch itself */
5250 /** @todo this needs to be changed */
5251 if (pPatch->flags & PATMFL_IDTHANDLER)
5252 {
5253 uint32_t iGate;
5254
5255 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
5256 if (iGate != (uint32_t)~0)
5257 {
5258 TRPMR3SetGuestTrapHandler(pVM, iGate, TRPM_INVALID_HANDLER);
5259 if (++cIDTHandlersDisabled < 256)
5260 LogRel(("PATM: Disabling IDT %x patch handler %RRv\n", iGate, pInstrGC));
5261 }
5262 }
5263
5264 /* Mark the entry with a breakpoint in case somebody else calls it later on (cli patch used as a function, trampoline or idt patch). */
5265 if ( pPatch->pPatchBlockOffset
5266 && pPatch->uState == PATCH_ENABLED)
5267 {
5268 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5269 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5270 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5271 }
5272
5273 /* IDT or function patches haven't changed any guest code. */
5274 if (pPatch->flags & PATMFL_PATCHED_GUEST_CODE)
5275 {
5276 Assert(pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP);
5277 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK)));
5278
5279 if (pPatch->uState != PATCH_REFUSED)
5280 {
5281 uint8_t temp[16];
5282
5283 Assert(pPatch->cbPatchJump < sizeof(temp));
5284
5285 /* Let's first check if the guest code is still the same. */
5286 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5287 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5288 if (rc == VINF_SUCCESS)
5289 {
5290 RTRCINTPTR displ = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);
5291
5292 if ( temp[0] != 0xE9 /* jmp opcode */
5293 || *(RTRCINTPTR *)(&temp[1]) != displ
5294 )
5295 {
5296 Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5297 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5298 /* Remove it completely */
5299 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5300 rc = PATMR3RemovePatch(pVM, pInstrGC);
5301 AssertRC(rc);
5302 return VWRN_PATCH_REMOVED;
5303 }
5304 patmRemoveJumpToPatch(pVM, pPatch);
5305 }
5306 else
5307 {
5308 Log(("PATMR3DisablePatch: unable to disable patch -> mark PATCH_DISABLE_PENDING\n"));
5309 pPatch->uState = PATCH_DISABLE_PENDING;
5310 }
5311 }
5312 else
5313 {
5314 AssertMsgFailed(("Patch was refused!\n"));
5315 return VERR_PATCH_ALREADY_DISABLED;
5316 }
5317 }
5318 else
5319 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5320 {
5321 uint8_t temp[16];
5322
5323 Assert(pPatch->cbPatchJump < sizeof(temp));
5324
5325 /* Let's first check if the guest code is still the same. */
5326 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5327 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5328 if (rc == VINF_SUCCESS)
5329 {
5330 if (temp[0] != 0xCC)
5331 {
5332 Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5333 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5334 /* Remove it completely */
5335 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5336 rc = PATMR3RemovePatch(pVM, pInstrGC);
5337 AssertRC(rc);
5338 return VWRN_PATCH_REMOVED;
5339 }
5340 patmDeactivateInt3Patch(pVM, pPatch);
5341 }
5342 }
5343
5344 if (rc == VINF_SUCCESS)
5345 {
5346 /* Save old state and mark this one as disabled (so it can be enabled later on). */
5347 if (pPatch->uState == PATCH_DISABLE_PENDING)
5348 {
5349 /* Just to be safe, let's make sure this one can never be reused; the patch might be marked dirty already (int3 at start) */
5350 pPatch->uState = PATCH_UNUSABLE;
5351 }
5352 else
5353 if (pPatch->uState != PATCH_DIRTY)
5354 {
5355 pPatch->uOldState = pPatch->uState;
5356 pPatch->uState = PATCH_DISABLED;
5357 }
5358 STAM_COUNTER_ADD(&pVM->patm.s.StatDisabled, 1);
5359 }
5360
5361 Log(("PATMR3DisablePatch: disabled patch at %RRv\n", pInstrGC));
5362 return VINF_SUCCESS;
5363 }
5364 Log(("Patch not found!\n"));
5365 return VERR_PATCH_NOT_FOUND;
5366}
5367
5368/**
5369 * Permanently disable patch for privileged instruction at specified location
5370 *
5371 * @returns VBox status code.
5372 * @param pVM Pointer to the VM.
5373 * @param pInstrGC Guest context pointer to the privileged instruction
5374 * @param pConflictAddr Guest context pointer which conflicts with specified patch
5375 * @param pConflictPatch Conflicting patch
5376 *
5377 */
5378static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pConflictPatch)
5379{
5380 NOREF(pConflictAddr);
5381#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
5382 PATCHINFO patch;
5383 DISCPUSTATE cpu;
5384 R3PTRTYPE(uint8_t *) pInstrHC;
5385 uint32_t cbInstr;
5386 bool disret;
5387 int rc;
5388
5389 RT_ZERO(patch);
5390 pInstrHC = patmR3GCVirtToHCVirt(pVM, &patch, pInstrGC);
5391 disret = patmR3DisInstr(pVM, &patch, pInstrGC, pInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
5392 /*
5393 * If it's a 5 byte relative jump, then we can work around the problem by replacing the 32 bits relative offset
5394 * with one that jumps right into the conflict patch.
5395 * Otherwise we must disable the conflicting patch to avoid serious problems.
5396 */
5397 if ( disret == true
5398 && (pConflictPatch->flags & PATMFL_CODE32)
5399 && (cpu.pCurInstr->uOpcode == OP_JMP || (cpu.pCurInstr->fOpType & DISOPTYPE_COND_CONTROLFLOW))
5400 && (cpu.Param1.fUse & DISUSE_IMMEDIATE32_REL))
5401 {
5402 /* Hint patches must be enabled first. */
5403 if (pConflictPatch->flags & PATMFL_INSTR_HINT)
5404 {
5405 Log(("Enabling HINTED patch %RRv\n", pConflictPatch->pPrivInstrGC));
5406 pConflictPatch->flags &= ~PATMFL_INSTR_HINT;
5407 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5408 Assert(rc == VINF_SUCCESS || rc == VERR_PATCH_NOT_FOUND);
5409 /* Enabling might fail if the patched code has changed in the meantime. */
5410 if (rc != VINF_SUCCESS)
5411 return rc;
5412 }
5413
5414 rc = PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_JUMP_CONFLICT);
5415 if (RT_SUCCESS(rc))
5416 {
5417 Log(("PATM -> CONFLICT: Installed JMP patch for patch conflict at %RRv\n", pInstrGC));
5418 STAM_COUNTER_INC(&pVM->patm.s.StatFixedConflicts);
5419 return VINF_SUCCESS;
5420 }
5421 }
5422#endif
5423
5424 if (pConflictPatch->opcode == OP_CLI)
5425 {
5426 /* Turn it into an int3 patch; our GC trap handler will call the generated code manually. */
5427 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> turn into int 3 patch!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5428 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5429 if (rc == VWRN_PATCH_REMOVED)
5430 return VINF_SUCCESS;
5431 if (RT_SUCCESS(rc))
5432 {
5433 pConflictPatch->flags &= ~(PATMFL_MUST_INSTALL_PATCHJMP|PATMFL_INSTR_HINT);
5434 pConflictPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
5435 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5436 if (rc == VERR_PATCH_NOT_FOUND)
5437 return VINF_SUCCESS; /* removed already */
5438
5439 AssertRC(rc);
5440 if (RT_SUCCESS(rc))
5441 {
5442 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
5443 return VINF_SUCCESS;
5444 }
5445 }
5446 /* else turned into unusable patch (see below) */
5447 }
5448 else
5449 {
5450 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> DISABLING it!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5451 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5452 if (rc == VWRN_PATCH_REMOVED)
5453 return VINF_SUCCESS;
5454 }
5455
5456 /* No need to monitor the code anymore. */
5457 if (pConflictPatch->flags & PATMFL_CODE_MONITORED)
5458 {
5459 int rc = patmRemovePatchPages(pVM, pConflictPatch);
5460 AssertRC(rc);
5461 }
5462 pConflictPatch->uState = PATCH_UNUSABLE;
5463 STAM_COUNTER_INC(&pVM->patm.s.StatUnusable);
5464 return VERR_PATCH_DISABLED;
5465}
5466
5467/**
5468 * Enable patch for privileged instruction at specified location
5469 *
5470 * @returns VBox status code.
5471 * @param pVM Pointer to the VM.
5472 * @param pInstrGC Guest context pointer to the privileged instruction
5473 *
5474 * @note returns failure if patching is not allowed or possible
5475 *
5476 */
5477VMMR3_INT_DECL(int) PATMR3EnablePatch(PVM pVM, RTRCPTR pInstrGC)
5478{
5479 PPATMPATCHREC pPatchRec;
5480 PPATCHINFO pPatch;
5481
5482 Log(("PATMR3EnablePatch %RRv\n", pInstrGC));
5483 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5484 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5485 if (pPatchRec)
5486 {
5487 int rc = VINF_SUCCESS;
5488
5489 pPatch = &pPatchRec->patch;
5490
5491 if (pPatch->uState == PATCH_DISABLED)
5492 {
5493 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
5494 {
5495 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
5496 uint8_t temp[16];
5497
5498 Assert(pPatch->cbPatchJump < sizeof(temp));
5499
5500 /* Let's first check if the guest code is still the same. */
5501 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5502 AssertRC(rc2);
5503 if (rc2 == VINF_SUCCESS)
5504 {
5505 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5506 {
5507 Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5508 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5509 /* Remove it completely */
5510 rc = PATMR3RemovePatch(pVM, pInstrGC);
5511 AssertRC(rc);
5512 return VERR_PATCH_NOT_FOUND;
5513 }
5514
5515 PATMP2GLOOKUPREC cacheRec;
5516 RT_ZERO(cacheRec);
5517 cacheRec.pPatch = pPatch;
5518
5519 rc2 = patmGenJumpToPatch(pVM, pPatch, &cacheRec, false);
5520 /* Free leftover lock if any. */
5521 if (cacheRec.Lock.pvMap)
5522 {
5523 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
5524 cacheRec.Lock.pvMap = NULL;
5525 }
5526 AssertRC(rc2);
5527 if (RT_FAILURE(rc2))
5528 return rc2;
5529
5530#ifdef DEBUG
5531 {
5532 DISCPUSTATE cpu;
5533 char szOutput[256];
5534 uint32_t cbInstr;
5535 uint32_t i = 0;
5536 bool disret;
5537 while(i < pPatch->cbPatchJump)
5538 {
5539 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
5540 &cpu, &cbInstr, szOutput, sizeof(szOutput));
5541 Log(("Renewed patch instr: %s", szOutput));
5542 i += cbInstr;
5543 }
5544 }
5545#endif
5546 }
5547 }
5548 else
5549 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5550 {
5551 uint8_t temp[16];
5552
5553 Assert(pPatch->cbPatchJump < sizeof(temp));
5554
5555 /* Let's first check if the guest code is still the same. */
5556 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5557 AssertRC(rc2);
5558
5559 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5560 {
5561 Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5562 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5563 rc = PATMR3RemovePatch(pVM, pInstrGC);
5564 AssertRC(rc);
5565 return VERR_PATCH_NOT_FOUND;
5566 }
5567
5568 rc2 = patmActivateInt3Patch(pVM, pPatch);
5569 if (RT_FAILURE(rc2))
5570 return rc2;
5571 }
5572
5573 pPatch->uState = pPatch->uOldState; //restore state
5574
5575 /* Restore the entry breakpoint with the original opcode (see PATMR3DisablePatch). */
5576 if (pPatch->pPatchBlockOffset)
5577 *PATCHCODE_PTR_HC(pPatch) = pPatch->bDirtyOpcode;
5578
5579 STAM_COUNTER_ADD(&pVM->patm.s.StatEnabled, 1);
5580 }
5581 else
5582 Log(("PATMR3EnablePatch: Unable to enable patch %RRv with state %d\n", pInstrGC, pPatch->uState));
5583
5584 return rc;
5585 }
5586 return VERR_PATCH_NOT_FOUND;
5587}
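
/*
 * Illustrative sketch (not part of the original PATM sources): the
 * disable/enable pair is meant to bracket temporary changes to patched guest
 * code, and the caller must cope with the patch disappearing underneath it
 * (VWRN_PATCH_REMOVED / VERR_PATCH_NOT_FOUND) when the guest code no longer
 * matches.  The scenario below is hypothetical.
 */
#if 0 /* illustrative sketch only, never compiled */
static int patmSketchToggle(PVM pVM, RTRCPTR pInstrGC)
{
    int rc = PATMR3DisablePatch(pVM, pInstrGC);
    if (rc == VWRN_PATCH_REMOVED)
        return VINF_SUCCESS;    /* the guest code changed; the patch is gone, nothing to re-enable */
    AssertRCReturn(rc, rc);

    /* ... the caller temporarily works on the original guest code here ... */

    rc = PATMR3EnablePatch(pVM, pInstrGC);
    if (rc == VERR_PATCH_NOT_FOUND)
        rc = VINF_SUCCESS;      /* removed in the meantime (e.g. the code changed again) */
    return rc;
}
#endif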
5588
5589/**
5590 * Remove patch for privileged instruction at specified location
5591 *
5592 * @returns VBox status code.
5593 * @param pVM Pointer to the VM.
5594 * @param pPatchRec Patch record
5595 * @param fForceRemove Force removal even of referenced or duplicated-function patches
5596 */
5597int patmR3RemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove)
5598{
5599 PPATCHINFO pPatch;
5600
5601 pPatch = &pPatchRec->patch;
5602
5603 /* Strictly forbidden to remove such patches. There can be dependencies!! */
5604 if (!fForceRemove && (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED)))
5605 {
5606 Log(("PATMRemovePatch %RRv REFUSED!\n", pPatch->pPrivInstrGC));
5607 return VERR_ACCESS_DENIED;
5608 }
5609 Log(("PATMRemovePatch %RRv\n", pPatch->pPrivInstrGC));
5610
5611 /* Note: NEVER EVER REUSE PATCH MEMORY */
5612 /* Note: PATMR3DisablePatch puts a breakpoint (0xCC) at the entry of this patch */
5613
5614 if (pPatchRec->patch.pPatchBlockOffset)
5615 {
5616 PAVLOU32NODECORE pNode;
5617
5618 pNode = RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->patch.pPatchBlockOffset);
5619 Assert(pNode);
5620 }
5621
5622 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
5623 {
5624 int rc = patmRemovePatchPages(pVM, &pPatchRec->patch);
5625 AssertRC(rc);
5626 }
5627
5628#ifdef VBOX_WITH_STATISTICS
5629 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5630 {
5631 STAMR3Deregister(pVM, &pPatchRec->patch);
5632#ifndef DEBUG_sandervl
5633 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5634 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5635 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5636 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5637 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5638 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5639 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5640 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5641 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5642 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5643 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5644 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5645 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5646 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5647#endif
5648 }
5649#endif
5650
5651 /* Note: no need to free Guest2PatchAddrTree as those records share memory with Patch2GuestAddrTree records. */
5652 patmEmptyTreeU32(pVM, &pPatch->Patch2GuestAddrTree);
5653 pPatch->nrPatch2GuestRecs = 0;
5654 Assert(pPatch->Patch2GuestAddrTree == 0);
5655
5656 patmEmptyTree(pVM, &pPatch->FixupTree);
5657 pPatch->nrFixups = 0;
5658 Assert(pPatch->FixupTree == 0);
5659
5660 if (pPatchRec->patch.pTempInfo)
5661 MMR3HeapFree(pPatchRec->patch.pTempInfo);
5662
5663 /* Note: might fail, because it has already been removed (e.g. during reset). */
5664 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5665
5666 /* Free the patch record */
5667 MMHyperFree(pVM, pPatchRec);
5668 return VINF_SUCCESS;
5669}
5670
5671/**
5672 * RTAvlU32DoWithAll() worker.
5673 * Checks whether the current trampoline instruction is the jump to the target patch
5674 * and updates the displacement to jump to the new target.
5675 *
5676 * @returns VBox status code.
5677 * @retval VERR_ALREADY_EXISTS if the jump was found.
5678 * @param pNode The current patch to guest record to check.
5679 * @param pvUser The refresh state.
5680 */
5681static int patmR3PatchRefreshFindTrampolinePatch(PAVLU32NODECORE pNode, void *pvUser)
5682{
5683 PRECPATCHTOGUEST pPatch2GuestRec = (PRECPATCHTOGUEST)pNode;
5684 PPATMREFRESHPATCH pRefreshPatchState = (PPATMREFRESHPATCH)pvUser;
5685 PVM pVM = pRefreshPatchState->pVM;
5686
5687 uint8_t *pPatchInstr = (uint8_t *)(pVM->patm.s.pPatchMemHC + pPatch2GuestRec->Core.Key);
5688
5689 /*
5690 * Check if the patch instruction starts with a jump.
5691 * ASSUMES that there is no other patch to guest record that starts
5692 * with a jump.
5693 */
5694 if (*pPatchInstr == 0xE9)
5695 {
5696 /* Jump found, update the displacement. */
5697 RTRCPTR pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pRefreshPatchState->pPatchRec,
5698 pRefreshPatchState->pPatchTrampoline->pPrivInstrGC);
5699 int32_t displ = pPatchTargetGC - (pVM->patm.s.pPatchMemGC + pPatch2GuestRec->Core.Key + SIZEOF_NEARJUMP32);
5700
5701 LogFlow(("Updating trampoline patch new patch target %RRv, new displacement %d (old was %d)\n",
5702 pPatchTargetGC, displ, *(uint32_t *)&pPatchInstr[1]));
5703
5704 *(uint32_t *)&pPatchInstr[1] = displ;
5705 return VERR_ALREADY_EXISTS; /** @todo better return code */
5706 }
5707
5708 return VINF_SUCCESS;
5709}
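
/*
 * Illustrative sketch (not part of the original PATM sources): the displacement
 * arithmetic used above.  A 5-byte near jump (0xE9 rel32) encodes its target
 * relative to the address of the instruction that follows it, so the stored
 * displacement is target - (jump address + SIZEOF_NEARJUMP32).  The helper name
 * is an assumption for illustration.
 */
#if 0 /* illustrative sketch only, never compiled */
/** Computes the rel32 displacement for a 5-byte near jump at pJumpGC targeting pTargetGC. */
static int32_t patmSketchCalcNearJumpDispl(RTRCPTR pJumpGC, RTRCPTR pTargetGC)
{
    return (int32_t)(pTargetGC - (pJumpGC + SIZEOF_NEARJUMP32));
}
#endif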
5710
5711/**
5712 * Attempt to refresh the patch by recompiling its entire code block
5713 *
5714 * @returns VBox status code.
5715 * @param pVM Pointer to the VM.
5716 * @param pPatchRec Patch record
5717 */
5718int patmR3RefreshPatch(PVM pVM, PPATMPATCHREC pPatchRec)
5719{
5720 PPATCHINFO pPatch;
5721 int rc;
5722 RTRCPTR pInstrGC = pPatchRec->patch.pPrivInstrGC;
5723 PTRAMPREC pTrampolinePatchesHead = NULL;
5724
5725 Log(("patmR3RefreshPatch: attempt to refresh patch at %RRv\n", pInstrGC));
5726
5727 pPatch = &pPatchRec->patch;
5728 AssertReturn(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER), VERR_PATCHING_REFUSED);
5729 if (pPatch->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
5730 {
5731 if (!pPatch->pTrampolinePatchesHead)
5732 {
5733 /*
5734 * It is sometimes possible that there are trampoline patches to this patch
5735 * but they are not recorded (after a saved state load for example).
5736 * Refuse to refresh those patches.
5737 * Can hurt performance in theory if the patched code is modified by the guest
5738 * and is executed often. However most of the time states are saved after the guest
5739 * code was modified and is not updated anymore afterwards so this shouldn't be a
5740 * big problem.
5741 */
5742 Log(("patmR3RefreshPatch: refused because external jumps to this patch exist but the jumps are not recorded\n"));
5743 return VERR_PATCHING_REFUSED;
5744 }
5745 Log(("patmR3RefreshPatch: external jumps to this patch exist, updating\n"));
5746 pTrampolinePatchesHead = pPatch->pTrampolinePatchesHead;
5747 }
5748
5749 /** Note: quite ugly to enable/disable/remove/insert old and new patches, but there's no easy way around it. */
5750
5751 rc = PATMR3DisablePatch(pVM, pInstrGC);
5752 AssertRC(rc);
5753
5754 /** Kick it out of the lookup tree to make sure PATMR3InstallPatch doesn't fail (hack alert) */
5755 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5756#ifdef VBOX_WITH_STATISTICS
5757 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5758 {
5759 STAMR3Deregister(pVM, &pPatchRec->patch);
5760#ifndef DEBUG_sandervl
5761 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5762 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5763 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5764 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5765 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5766 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5767 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5768 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5769 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5770 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5771 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5772 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5773 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5774 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5775#endif
5776 }
5777#endif
5778
5779 /** Note: We don't attempt to reuse patch memory here as it's quite common that the new code block requires more memory. */
5780
5781 /* Attempt to install a new patch. */
5782 rc = PATMR3InstallPatch(pVM, pInstrGC, pPatch->flags & (PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT));
5783 if (RT_SUCCESS(rc))
5784 {
5785 RTRCPTR pPatchTargetGC;
5786 PPATMPATCHREC pNewPatchRec;
5787
5788 /* Determine target address in new patch */
5789 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pInstrGC);
5790 Assert(pPatchTargetGC);
5791 if (!pPatchTargetGC)
5792 {
5793 rc = VERR_PATCHING_REFUSED;
5794 goto failure;
5795 }
5796
5797 /* Reset offset into patch memory to put the next code blocks right at the beginning. */
5798 pPatch->uCurPatchOffset = 0;
5799
5800 /* insert jump to new patch in old patch block */
5801 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC, false /* no lookup record */);
5802 if (RT_FAILURE(rc))
5803 goto failure;
5804
5805 pNewPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5806 Assert(pNewPatchRec); /* can't fail */
5807
5808 /* Remove old patch (only do that when everything is finished) */
5809 int rc2 = patmR3RemovePatch(pVM, pPatchRec, true /* force removal */);
5810 AssertRC(rc2);
5811
5812 /* Put the new patch back into the tree, because removing the old one kicked this one out. (hack alert) */
5813 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pNewPatchRec->Core);
5814 Assert(fInserted); NOREF(fInserted);
5815
5816 Log(("PATM: patmR3RefreshPatch: successfully refreshed patch at %RRv\n", pInstrGC));
5817 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshSuccess);
5818
5819 /* Used by another patch, so don't remove it! */
5820 pNewPatchRec->patch.flags |= PATMFL_CODE_REFERENCED;
5821
5822 if (pTrampolinePatchesHead)
5823 {
5824 /* Update all trampoline patches to jump to the new patch. */
5825 PTRAMPREC pTrampRec = NULL;
5826 PATMREFRESHPATCH RefreshPatch;
5827
5828 RefreshPatch.pVM = pVM;
5829 RefreshPatch.pPatchRec = &pNewPatchRec->patch;
5830
5831 pTrampRec = pTrampolinePatchesHead;
5832
5833 while (pTrampRec)
5834 {
5835 PPATCHINFO pPatchTrampoline = &pTrampRec->pPatchTrampoline->patch;
5836
5837 RefreshPatch.pPatchTrampoline = pPatchTrampoline;
5838 /*
5839 * We have to find the right patch2guest record because there might be others
5840 * for statistics.
5841 */
5842 rc = RTAvlU32DoWithAll(&pPatchTrampoline->Patch2GuestAddrTree, true,
5843 patmR3PatchRefreshFindTrampolinePatch, &RefreshPatch);
5844 Assert(rc == VERR_ALREADY_EXISTS);
5845 rc = VINF_SUCCESS;
5846 pTrampRec = pTrampRec->pNext;
5847 }
5848 pNewPatchRec->patch.pTrampolinePatchesHead = pTrampolinePatchesHead;
5849 pNewPatchRec->patch.flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
5850 /* Clear the list of trampoline patches for the old patch (safety precaution). */
5851 pPatchRec->patch.pTrampolinePatchesHead = NULL;
5852 }
5853 }
5854
5855failure:
5856 if (RT_FAILURE(rc))
5857 {
5858 LogRel(("PATM: patmR3RefreshPatch: failed to refresh patch at %RRv. Reactivating the old one.\n", pInstrGC));
5859
5860 /* Remove the new inactive patch */
5861 rc = PATMR3RemovePatch(pVM, pInstrGC);
5862 AssertRC(rc);
5863
5864 /* Put the old patch back into the tree (or else it won't be saved) (hack alert) */
5865 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
5866 Assert(fInserted); NOREF(fInserted);
5867
5868 /* Enable again in case the dirty instruction is near the end and there are safe code paths. */
5869 int rc2 = PATMR3EnablePatch(pVM, pInstrGC);
5870 AssertRC(rc2);
5871
5872 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshFailed);
5873 }
5874 return rc;
5875}
5876
5877/**
5878 * Find patch for privileged instruction at specified location
5879 *
5880 * @returns Patch structure pointer if found; else NULL
5881 * @param pVM Pointer to the VM.
5882 * @param pInstrGC Guest context pointer to an instruction that might lie within 5 bytes of an existing patch jump
5883 * @param fIncludeHints Include hinted patches or not
5884 *
5885 */
5886PPATCHINFO patmFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncludeHints)
5887{
5888 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5889 /* if the patch is enabled, the pointer is not identical to the privileged patch ptr and it lies within 5 bytes of this priv instr ptr, then we've got a hit! */
5890 if (pPatchRec)
5891 {
5892 if ( pPatchRec->patch.uState == PATCH_ENABLED
5893 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE)
5894 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5895 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5896 {
5897 Log(("Found active patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5898 return &pPatchRec->patch;
5899 }
5900 else
5901 if ( fIncludeHints
5902 && pPatchRec->patch.uState == PATCH_DISABLED
5903 && (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
5904 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5905 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5906 {
5907 Log(("Found HINT patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5908 return &pPatchRec->patch;
5909 }
5910 }
5911 return NULL;
5912}
5913
5914/**
5915 * Checks whether the GC address is inside a generated patch jump
5916 *
5917 * @returns true -> yes, false -> no
5918 * @param pVM Pointer to the VM.
5919 * @param pAddr Guest context address.
5920 * @param pPatchAddr Guest context patch address (if true).
5921 */
5922VMMR3_INT_DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatchAddr)
5923{
5924 RTRCPTR addr;
5925 PPATCHINFO pPatch;
5926
5927 Assert(!HMIsEnabled(pVM));
5928 if (PATMIsEnabled(pVM) == false)
5929 return false;
5930
5931 if (pPatchAddr == NULL)
5932 pPatchAddr = &addr;
5933
5934 *pPatchAddr = 0;
5935
5936 pPatch = patmFindActivePatchByEntrypoint(pVM, pAddr);
5937 if (pPatch)
5938 *pPatchAddr = pPatch->pPrivInstrGC;
5939
5940 return *pPatchAddr == 0 ? false : true;
5941}
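
/*
 * Illustrative sketch (not part of the original PATM sources): a typical
 * consumer rewinds a faulting EIP to the start of the overwritten instruction
 * when it lands in the middle of a patch jump.  The helper name and calling
 * context are assumptions for illustration.
 */
#if 0 /* illustrative sketch only, never compiled */
static RTRCPTR patmSketchCanonicalizeEip(PVM pVM, RTRCPTR GCPtrEip)
{
    RTGCPTR32 GCPtrPatchStart;
    if (PATMR3IsInsidePatchJump(pVM, GCPtrEip, &GCPtrPatchStart))
        return GCPtrPatchStart; /* point at the privileged instruction that was overwritten */
    return GCPtrEip;            /* not inside a patch jump; leave it untouched */
}
#endif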
5942
5943/**
5944 * Remove patch for privileged instruction at specified location
5945 *
5946 * @returns VBox status code.
5947 * @param pVM Pointer to the VM.
5948 * @param pInstrGC Guest context pointer to the privileged instruction
5949 *
5950 * @note returns failure if patching is not allowed or possible
5951 *
5952 */
5953VMMR3_INT_DECL(int) PATMR3RemovePatch(PVM pVM, RTRCPTR pInstrGC)
5954{
5955 PPATMPATCHREC pPatchRec;
5956
5957 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5958 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5959 if (pPatchRec)
5960 {
5961 int rc = PATMR3DisablePatch(pVM, pInstrGC);
5962 if (rc == VWRN_PATCH_REMOVED)
5963 return VINF_SUCCESS;
5964
5965 return patmR3RemovePatch(pVM, pPatchRec, false);
5966 }
5967 AssertFailed();
5968 return VERR_PATCH_NOT_FOUND;
5969}
5970
5971/**
5972 * Mark patch as dirty
5973 *
5974 * @returns VBox status code.
5975 * @param pVM Pointer to the VM.
5976 * @param pPatch Patch record
5977 *
5978 * @note returns failure if patching is not allowed or possible
5979 *
5980 */
5981static int patmR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch)
5982{
5983 if (pPatch->pPatchBlockOffset)
5984 {
5985 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5986 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5987 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5988 }
5989
5990 STAM_COUNTER_INC(&pVM->patm.s.StatDirty);
5991 /* Put back the replaced instruction. */
5992 int rc = PATMR3DisablePatch(pVM, pPatch->pPrivInstrGC);
5993 if (rc == VWRN_PATCH_REMOVED)
5994 return VINF_SUCCESS;
5995
5996 /* Note: we don't restore patch pages for patches that are not enabled! */
5997 /* Note: be careful when changing this behaviour!! */
5998
5999 /* The patch pages are no longer marked for self-modifying code detection */
6000 if (pPatch->flags & PATMFL_CODE_MONITORED)
6001 {
6002 rc = patmRemovePatchPages(pVM, pPatch);
6003 AssertRCReturn(rc, rc);
6004 }
6005 pPatch->uState = PATCH_DIRTY;
6006
6007 /* Paranoia; make sure this patch is not somewhere in the callchain, so prevent ret instructions from succeeding. */
6008 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6009
6010 return VINF_SUCCESS;
6011}
6012
6013/**
6014 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
6015 *
6016 * @returns Corresponding guest context instruction pointer, or 0 if not found.
6017 * @param pVM Pointer to the VM.
6018 * @param pPatch Patch block structure pointer
6019 * @param pPatchGC GC address in patch block
6020 */
6021RTRCPTR patmPatchGCPtr2GuestGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pPatchGC)
6022{
6023 Assert(pPatch->Patch2GuestAddrTree);
6024 /* Get the closest record from below. */
6025 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->Patch2GuestAddrTree, pPatchGC - pVM->patm.s.pPatchMemGC, false);
6026 if (pPatchToGuestRec)
6027 return pPatchToGuestRec->pOrgInstrGC;
6028
6029 return 0;
6030}
6031
6032/**
6033 * Converts Guest code GC ptr to Patch code GC ptr (if found)
6034 *
6035 * @returns corresponding GC pointer in patch block
6036 * @param pVM Pointer to the VM.
6037 * @param pPatch Current patch block pointer
6038 * @param pInstrGC Guest context pointer to privileged instruction
6039 *
6040 */
6041RTRCPTR patmGuestGCPtrToPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
6042{
6043 if (pPatch->Guest2PatchAddrTree)
6044 {
6045 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
6046 if (pGuestToPatchRec)
6047 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
6048 }
6049
6050 return 0;
6051}
6052
6053/**
6054 * Converts Guest code GC ptr to Patch code GC ptr (if found)
6055 *
6056 * @returns corresponding GC pointer in patch block
6057 * @param pVM Pointer to the VM.
6058 * @param pInstrGC Guest context pointer to privileged instruction
6059 */
6060static RTRCPTR patmR3GuestGCPtrToPatchGCPtrSimple(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC)
6061{
6062 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
6063 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && pInstrGC >= pPatchRec->patch.pPrivInstrGC)
6064 return patmGuestGCPtrToPatchGCPtr(pVM, &pPatchRec->patch, pInstrGC);
6065 return NIL_RTRCPTR;
6066}
6067
6068/**
6069 * Converts Guest code GC ptr to Patch code GC ptr (or nearest from below if no
6070 * identical match)
6071 *
6072 * @returns corresponding GC pointer in patch block
6073 * @param pVM Pointer to the VM.
6074 * @param pPatch Current patch block pointer
6075 * @param pInstrGC Guest context pointer to privileged instruction
6076 *
6077 */
6078RTRCPTR patmGuestGCPtrToClosestPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
6079{
6080 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pInstrGC, false);
6081 if (pGuestToPatchRec)
6082 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
6083 return NIL_RTRCPTR;
6084}
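
/*
 * Illustrative sketch (not part of the original PATM sources): the Guest2Patch
 * and Patch2Guest trees are two views of the same mapping, so a round trip
 * through both translators should end up on the original guest address again.
 * The check below is a hypothetical debugging aid, not existing PATM code.
 */
#if 0 /* illustrative sketch only, never compiled */
static void patmSketchCheckAddrRoundTrip(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
{
    RTRCPTR pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pInstrGC);
    if (pPatchGC)
    {
        RTRCPTR pGuestGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchGC);
        Assert(pGuestGC == pInstrGC);
    }
}
#endif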
6085
6086/**
6087 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
6088 *
6089 * @returns original GC instruction pointer or 0 if not found
6090 * @param pVM Pointer to the VM.
6091 * @param pPatchGC GC address in patch block
6092 * @param pEnmState State of the translated address (out)
6093 *
6094 */
6095VMMR3_INT_DECL(RTRCPTR) PATMR3PatchToGCPtr(PVM pVM, RTRCPTR pPatchGC, PATMTRANSSTATE *pEnmState)
6096{
6097 PPATMPATCHREC pPatchRec;
6098 void *pvPatchCoreOffset;
6099 RTRCPTR pPrivInstrGC;
6100
6101 Assert(PATMIsPatchGCAddr(pVM, pPatchGC));
6102 Assert(!HMIsEnabled(pVM));
6103 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchGC - pVM->patm.s.pPatchMemGC, false);
6104 if (pvPatchCoreOffset == 0)
6105 {
6106 Log(("PATMR3PatchToGCPtr failed for %RRv offset %x\n", pPatchGC, pPatchGC - pVM->patm.s.pPatchMemGC));
6107 return 0;
6108 }
6109 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6110 pPrivInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, &pPatchRec->patch, pPatchGC);
6111 if (pEnmState)
6112 {
6113 AssertMsg(pPrivInstrGC && ( pPatchRec->patch.uState == PATCH_ENABLED
6114 || pPatchRec->patch.uState == PATCH_DIRTY
6115 || pPatchRec->patch.uState == PATCH_DISABLE_PENDING
6116 || pPatchRec->patch.uState == PATCH_UNUSABLE),
6117 ("pPrivInstrGC=%RRv uState=%d\n", pPrivInstrGC, pPatchRec->patch.uState));
6118
6119 if ( !pPrivInstrGC
6120 || pPatchRec->patch.uState == PATCH_UNUSABLE
6121 || pPatchRec->patch.uState == PATCH_REFUSED)
6122 {
6123 pPrivInstrGC = 0;
6124 *pEnmState = PATMTRANS_FAILED;
6125 }
6126 else
6127 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pPrivInstrGC)
6128 {
6129 *pEnmState = PATMTRANS_INHIBITIRQ;
6130 }
6131 else
6132 if ( pPatchRec->patch.uState == PATCH_ENABLED
6133 && !(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE))
6134 && pPrivInstrGC > pPatchRec->patch.pPrivInstrGC
6135 && pPrivInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
6136 {
6137 *pEnmState = PATMTRANS_OVERWRITTEN;
6138 }
6139 else
6140 if (patmFindActivePatchByEntrypoint(pVM, pPrivInstrGC))
6141 {
6142 *pEnmState = PATMTRANS_OVERWRITTEN;
6143 }
6144 else
6145 if (pPrivInstrGC == pPatchRec->patch.pPrivInstrGC)
6146 {
6147 *pEnmState = PATMTRANS_PATCHSTART;
6148 }
6149 else
6150 *pEnmState = PATMTRANS_SAFE;
6151 }
6152 return pPrivInstrGC;
6153}
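
/*
 * Illustrative sketch (not part of the original PATM sources): code that traps
 * with EIP inside patch memory uses PATMR3PatchToGCPtr to map it back to guest
 * code and then branches on the translation state.  The switch below only
 * illustrates that pattern; the actions taken per state are assumptions.
 */
#if 0 /* illustrative sketch only, never compiled */
static RTRCPTR patmSketchTranslatePatchEip(PVM pVM, RTRCPTR GCPtrEip)
{
    if (!PATMIsPatchGCAddr(pVM, GCPtrEip))
        return GCPtrEip;            /* already guest code */

    PATMTRANSSTATE enmState;
    RTRCPTR GCPtrGuest = PATMR3PatchToGCPtr(pVM, GCPtrEip, &enmState);
    switch (enmState)
    {
        case PATMTRANS_SAFE:
        case PATMTRANS_PATCHSTART:
            return GCPtrGuest;      /* resume at the original guest instruction */
        case PATMTRANS_OVERWRITTEN:
        case PATMTRANS_INHIBITIRQ:
        case PATMTRANS_FAILED:
        default:
            return NIL_RTRCPTR;     /* the caller must handle these cases specially */
    }
}
#endif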
6154
6155/**
6156 * Returns the GC pointer of the patch for the specified GC address
6157 *
6158 * @returns GC pointer of the patch code, or NIL_RTRCPTR if no enabled or dirty patch exists for the address.
6159 * @param pVM Pointer to the VM.
6160 * @param pAddrGC Guest context address
6161 */
6162VMMR3_INT_DECL(RTRCPTR) PATMR3QueryPatchGCPtr(PVM pVM, RTRCPTR pAddrGC)
6163{
6164 PPATMPATCHREC pPatchRec;
6165
6166 Assert(!HMIsEnabled(pVM));
6167
6168 /* Find the patch record. */
6169 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pAddrGC);
6170 /** @todo we should only use patches that are enabled! always did this, but it's incorrect! */
6171 if (pPatchRec && (pPatchRec->patch.uState == PATCH_ENABLED || pPatchRec->patch.uState == PATCH_DIRTY))
6172 return PATCHCODE_PTR_GC(&pPatchRec->patch);
6173 return NIL_RTRCPTR;
6174}
6175
6176/**
6177 * Attempt to recover dirty instructions
6178 *
6179 * @returns VBox status code.
6180 * @param pVM Pointer to the VM.
6181 * @param pCtx Pointer to the guest CPU context.
6182 * @param pPatch Patch record.
6183 * @param pPatchToGuestRec Patch to guest address record.
6184 * @param pEip GC pointer of trapping instruction.
6185 */
6186static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch, PRECPATCHTOGUEST pPatchToGuestRec, RTRCPTR pEip)
6187{
6188 DISCPUSTATE CpuOld, CpuNew;
6189 uint8_t *pPatchInstrHC, *pCurPatchInstrHC;
6190 int rc;
6191 RTRCPTR pCurInstrGC, pCurPatchInstrGC;
6192 uint32_t cbDirty;
6193 PRECPATCHTOGUEST pRec;
6194 RTRCPTR const pOrgInstrGC = pPatchToGuestRec->pOrgInstrGC;
6195 PVMCPU pVCpu = VMMGetCpu0(pVM);
6196 Log(("patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv)\n", pEip, pOrgInstrGC));
6197
6198 pRec = pPatchToGuestRec;
6199 pCurInstrGC = pOrgInstrGC;
6200 pCurPatchInstrGC = pEip;
6201 cbDirty = 0;
6202 pPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6203
6204 /* Find all adjacent dirty instructions */
6205 while (true)
6206 {
6207 if (pRec->fJumpTarget)
6208 {
6209 LogRel(("PATM: patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv) ignored, because instruction in function was reused as target of jump\n", pEip, pOrgInstrGC));
6210 pRec->fDirty = false;
6211 return VERR_PATCHING_REFUSED;
6212 }
6213
6214 /* Restore original instruction opcode byte so we can check if the write was indeed safe. */
6215 pCurPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6216 *pCurPatchInstrHC = pRec->u8DirtyOpcode;
6217
6218 /* Only harmless instructions are acceptable. */
6219 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurPatchInstrGC, &CpuOld, 0);
6220 if ( RT_FAILURE(rc)
6221 || !(CpuOld.pCurInstr->fOpType & DISOPTYPE_HARMLESS))
6222 {
6223 if (RT_SUCCESS(rc))
6224 cbDirty += CpuOld.cbInstr;
6225 else
6226 if (!cbDirty)
6227 cbDirty = 1;
6228 break;
6229 }
6230
6231#ifdef DEBUG
6232 char szBuf[256];
6233 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6234 szBuf, sizeof(szBuf), NULL);
6235 Log(("DIRTY: %s\n", szBuf));
6236#endif
6237 /* Mark as clean; if we fail we'll let it always fault. */
6238 pRec->fDirty = false;
6239
6240 /* Remove old lookup record. */
6241 patmr3RemoveP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrGC);
6242 pPatchToGuestRec = NULL;
6243
6244 pCurPatchInstrGC += CpuOld.cbInstr;
6245 cbDirty += CpuOld.cbInstr;
6246
6247 /* Let's see if there's another dirty instruction right after. */
6248 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6249 if (!pRec || !pRec->fDirty)
6250 break; /* no more dirty instructions */
6251
6252 /* In case of complex instructions the next guest instruction could be quite far off. */
6253 pCurPatchInstrGC = pRec->Core.Key + pVM->patm.s.pPatchMemGC;
6254 }
6255
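    /* Second pass: if the scan above ended on a harmless instruction, copy the current guest
       code back over the dirty patch instructions, one instruction at a time. */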
6256 if ( RT_SUCCESS(rc)
6257 && (CpuOld.pCurInstr->fOpType & DISOPTYPE_HARMLESS)
6258 )
6259 {
6260 uint32_t cbLeft;
6261
6262 pCurPatchInstrHC = pPatchInstrHC;
6263 pCurPatchInstrGC = pEip;
6264 cbLeft = cbDirty;
6265
6266 while (cbLeft && RT_SUCCESS(rc))
6267 {
6268 bool fValidInstr;
6269
6270 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurInstrGC, &CpuNew, 0);
6271
6272 fValidInstr = !!(CpuNew.pCurInstr->fOpType & DISOPTYPE_HARMLESS);
6273 if ( !fValidInstr
6274 && (CpuNew.pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW)
6275 )
6276 {
6277 RTRCPTR pTargetGC = PATMResolveBranch(&CpuNew, pCurInstrGC);
6278
6279 if ( pTargetGC >= pOrgInstrGC
6280 && pTargetGC <= pOrgInstrGC + cbDirty
6281 )
6282 {
6283 /* A relative jump to an instruction inside or to the end of the dirty block is acceptable. */
6284 fValidInstr = true;
6285 }
6286 }
6287
6288 /* If the instruction is completely harmless (which implies a 1:1 patch copy). */
6289 if ( rc == VINF_SUCCESS
6290 && CpuNew.cbInstr <= cbLeft /* must still fit */
6291 && fValidInstr
6292 )
6293 {
6294#ifdef DEBUG
6295 char szBuf[256];
6296 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6297 szBuf, sizeof(szBuf), NULL);
6298 Log(("NEW: %s\n", szBuf));
6299#endif
6300
6301 /* Copy the new instruction. */
6302 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pCurPatchInstrHC, pCurInstrGC, CpuNew.cbInstr);
6303 AssertRC(rc);
6304
6305 /* Add a new lookup record for the duplicated instruction. */
6306 patmR3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6307 }
6308 else
6309 {
6310#ifdef DEBUG
6311 char szBuf[256];
6312 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6313 szBuf, sizeof(szBuf), NULL);
6314 Log(("NEW: %s (FAILED)\n", szBuf));
6315#endif
6316 /* Restore the old lookup record for the duplicated instruction. */
6317 patmR3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6318
6319 /** @todo in theory we need to restore the lookup records for the remaining dirty instructions too! */
6320 rc = VERR_PATCHING_REFUSED;
6321 break;
6322 }
6323 pCurInstrGC += CpuNew.cbInstr;
6324 pCurPatchInstrHC += CpuNew.cbInstr;
6325 pCurPatchInstrGC += CpuNew.cbInstr;
6326 cbLeft -= CpuNew.cbInstr;
6327
6328 /* Check if we expanded a complex guest instruction into a patch stream (e.g. call) */
6329 if (!cbLeft)
6330 {
6331 /* If the next patch instruction doesn't correspond to the next guest instruction, then we have some extra room to fill. */
6332 if (RTAvlU32Get(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC) == NULL)
6333 {
6334 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6335 if (pRec)
6336 {
6337 unsigned cbFiller = pRec->Core.Key + pVM->patm.s.pPatchMemGC - pCurPatchInstrGC;
6338 uint8_t *pPatchFillHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6339
6340 Assert(!pRec->fDirty);
6341
6342 Log(("Room left in patched instruction stream (%d bytes)\n", cbFiller));
6343 if (cbFiller >= SIZEOF_NEARJUMP32)
6344 {
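                            /* Emit a near jmp rel32 (opcode 0xE9) across the leftover bytes; the
                               displacement skips the remaining filler, counted from the end of
                               the 5 byte jump itself. */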
6345 pPatchFillHC[0] = 0xE9;
6346 *(uint32_t *)&pPatchFillHC[1] = cbFiller - SIZEOF_NEARJUMP32;
6347#ifdef DEBUG
6348 char szBuf[256];
6349 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC,
6350 DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6351 Log(("FILL: %s\n", szBuf));
6352#endif
6353 }
6354 else
6355 {
6356 for (unsigned i = 0; i < cbFiller; i++)
6357 {
6358 pPatchFillHC[i] = 0x90; /* NOP */
6359#ifdef DEBUG
6360 char szBuf[256];
6361 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC + i,
6362 DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6363 Log(("FILL: %s\n", szBuf));
6364#endif
6365 }
6366 }
6367 }
6368 }
6369 }
6370 }
6371 }
6372 else
6373 rc = VERR_PATCHING_REFUSED;
6374
6375 if (RT_SUCCESS(rc))
6376 {
6377 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyGood);
6378 }
6379 else
6380 {
6381 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyBad);
6382 Assert(cbDirty);
6383
6384 /* Mark the whole instruction stream with breakpoints. */
6385 if (cbDirty)
6386 memset(pPatchInstrHC, 0xCC, cbDirty);
6387
6388 if ( pVM->patm.s.fOutOfMemory == false
6389 && (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER)))
6390 {
6391 rc = patmR3RefreshPatch(pVM, pPatch);
6392 if (RT_FAILURE(rc))
6393 {
6394 LogRel(("PATM: Failed to refresh dirty patch at %RRv. Disabling it.\n", pPatch->patch.pPrivInstrGC));
6395 }
6396 /* Even if we succeed, we must go back to the original instruction as the patched one could be invalid. */
6397 rc = VERR_PATCHING_REFUSED;
6398 }
6399 }
6400 return rc;
6401}
6402
6403/**
6404 * Handle trap inside patch code
6405 *
6406 * @returns VBox status code.
6407 * @param pVM Pointer to the VM.
6408 * @param pCtx Pointer to the guest CPU context.
6409 * @param pEip GC pointer of trapping instruction.
6410 * @param ppNewEip GC pointer to new instruction.
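 *
 * @remarks On the success paths *ppNewEip receives the address (made relative to the CS base) at
 *          which execution should resume; VINF_PATCH_CONTINUE means execution can continue at
 *          the trapping patch instruction itself.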
6411 */
6412VMMR3_INT_DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *ppNewEip)
6413{
6414 PPATMPATCHREC pPatch = 0;
6415 void *pvPatchCoreOffset;
6416 RTRCUINTPTR offset;
6417 RTRCPTR pNewEip;
6418    int rc;
6419 PRECPATCHTOGUEST pPatchToGuestRec = 0;
6420 PVMCPU pVCpu = VMMGetCpu0(pVM);
6421
6422 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
6423 Assert(pVM->cCpus == 1);
6424
6425 pNewEip = 0;
6426 *ppNewEip = 0;
6427
6428 STAM_PROFILE_ADV_START(&pVM->patm.s.StatHandleTrap, a);
6429
6430 /* Find the patch record. */
6431 /* Note: there might not be a patch to guest translation record (global function) */
6432 offset = pEip - pVM->patm.s.pPatchMemGC;
6433 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
6434 if (pvPatchCoreOffset)
6435 {
6436 pPatch = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6437
6438 Assert(offset >= pPatch->patch.pPatchBlockOffset && offset < pPatch->patch.pPatchBlockOffset + pPatch->patch.cbPatchBlockSize);
6439
6440 if (pPatch->patch.uState == PATCH_DIRTY)
6441 {
6442 Log(("PATMR3HandleTrap: trap in dirty patch at %RRv\n", pEip));
6443 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6444 {
6445 /* Function duplication patches set fPIF to 1 on entry */
6446 pVM->patm.s.pGCStateHC->fPIF = 1;
6447 }
6448 }
6449 else
6450 if (pPatch->patch.uState == PATCH_DISABLED)
6451 {
6452 Log(("PATMR3HandleTrap: trap in disabled patch at %RRv\n", pEip));
6453 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6454 {
6455 /* Function duplication patches set fPIF to 1 on entry */
6456 pVM->patm.s.pGCStateHC->fPIF = 1;
6457 }
6458 }
6459 else
6460 if (pPatch->patch.uState == PATCH_DISABLE_PENDING)
6461 {
6462 RTRCPTR pPrivInstrGC = pPatch->patch.pPrivInstrGC;
6463
6464 Log(("PATMR3HandleTrap: disable operation is pending for patch at %RRv\n", pPatch->patch.pPrivInstrGC));
6465 rc = PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6466 AssertReleaseMsg(rc != VWRN_PATCH_REMOVED, ("PATMR3DisablePatch removed patch at %RRv\n", pPrivInstrGC));
6467 AssertMsg(pPatch->patch.uState == PATCH_DISABLED || pPatch->patch.uState == PATCH_UNUSABLE, ("Unexpected failure to disable patch state=%d rc=%Rrc\n", pPatch->patch.uState, rc));
6468 }
6469
6470 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, offset, false);
6471 AssertReleaseMsg(pPatchToGuestRec, ("PATMR3HandleTrap: Unable to find corresponding guest address for %RRv (offset %x)\n", pEip, offset));
6472
6473 pNewEip = pPatchToGuestRec->pOrgInstrGC;
6474 pPatch->patch.cTraps++;
6475 PATM_STAT_FAULT_INC(&pPatch->patch);
6476 }
6477 else
6478 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 0, ("PATMR3HandleTrap: Unable to find translation record for %RRv (PIF=0)\n", pEip));
6479
6480 /* Check if we were interrupted in PATM generated instruction code. */
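    /* fPIF clear means the trap hit in the middle of a PATM generated instruction sequence where
       the guest state may be inconsistent; only the faulting pushf/push/call cases below and a
       call into an invalidated patch can be recovered, anything else is treated as fatal. */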
6481 if (pVM->patm.s.pGCStateHC->fPIF == 0)
6482 {
6483 DISCPUSTATE Cpu;
6484 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pEip, &Cpu, "PIF Trap: ");
6485 AssertRC(rc);
6486
6487 if ( rc == VINF_SUCCESS
6488 && ( Cpu.pCurInstr->uOpcode == OP_PUSHF
6489 || Cpu.pCurInstr->uOpcode == OP_PUSH
6490 || Cpu.pCurInstr->uOpcode == OP_CALL)
6491 )
6492 {
6493 uint64_t fFlags;
6494
6495 STAM_COUNTER_INC(&pVM->patm.s.StatPushTrap);
6496
6497 if (Cpu.pCurInstr->uOpcode == OP_PUSH)
6498 {
6499 rc = PGMShwGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6500 if ( rc == VINF_SUCCESS
6501 && ((fFlags & (X86_PTE_P|X86_PTE_RW)) == (X86_PTE_P|X86_PTE_RW)) )
6502 {
6503 /* The stack address is fine, so the push argument is a pointer -> emulate this instruction */
6504
6505 /* Reset the PATM stack. */
6506 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6507
6508 pVM->patm.s.pGCStateHC->fPIF = 1;
6509
6510 Log(("Faulting push -> go back to the original instruction\n"));
6511
6512 /* continue at the original instruction */
6513 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6514 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6515 return VINF_SUCCESS;
6516 }
6517 }
6518
6519 /* Typical pushf (most patches)/push (call patch) trap because of a monitored page. */
6520 rc = PGMShwMakePageWritable(pVCpu, pCtx->esp, 0 /*fFlags*/);
6521 AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
6522 if (rc == VINF_SUCCESS)
6523 {
6524 /* The guest page *must* be present. */
6525 rc = PGMGstGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6526 if ( rc == VINF_SUCCESS
6527 && (fFlags & X86_PTE_P))
6528 {
6529 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6530 return VINF_PATCH_CONTINUE;
6531 }
6532 }
6533 }
6534 else
6535        if (pPatch && pPatch->patch.pPrivInstrGC == pNewEip)
6536 {
6537 /* Invalidated patch or first instruction overwritten.
6538 * We can ignore the fPIF state in this case.
6539 */
6540 /* Reset the PATM stack. */
6541 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6542
6543 Log(("Call to invalidated patch -> go back to the original instruction\n"));
6544
6545 pVM->patm.s.pGCStateHC->fPIF = 1;
6546
6547 /* continue at the original instruction */
6548 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6549 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6550 return VINF_SUCCESS;
6551 }
6552
6553 char szBuf[256];
6554 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pEip, DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6555
6556 /* Very bad. We crashed in emitted code. Probably stack? */
6557 if (pPatch)
6558 {
6559 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6560 ("Crash in patch code %RRv (%RRv) esp=%RX32\nPatch state=%x flags=%RX64 fDirty=%d\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), pPatch->patch.uState, pPatch->patch.flags, pPatchToGuestRec->fDirty, szBuf));
6561 }
6562 else
6563 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6564 ("Crash in patch code %RRv (%RRv) esp=%RX32\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), szBuf));
6565 EMR3FatalError(pVCpu, VERR_INTERNAL_ERROR);
6566 }
6567
6568 /* From here on, we must have a valid patch to guest translation. */
6569 if (pvPatchCoreOffset == 0)
6570 {
6571 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6572 AssertMsgFailed(("PATMR3HandleTrap: patch not found at address %RRv!!\n", pEip));
6573 return VERR_PATCH_NOT_FOUND;
6574 }
6575
6576 /* Take care of dirty/changed instructions. */
6577 if (pPatchToGuestRec->fDirty)
6578 {
6579 Assert(pPatchToGuestRec->Core.Key == offset);
6580 Assert(pVM->patm.s.pGCStateHC->fPIF == 1);
6581
6582 rc = patmR3HandleDirtyInstr(pVM, pCtx, pPatch, pPatchToGuestRec, pEip);
6583 if (RT_SUCCESS(rc))
6584 {
6585 /* Retry the current instruction. */
6586 pNewEip = pEip;
6587 rc = VINF_PATCH_CONTINUE; /* Continue at current patch instruction. */
6588 }
6589 else
6590 {
6591 /* Reset the PATM stack. */
6592 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6593
6594 rc = VINF_SUCCESS; /* Continue at original instruction. */
6595 }
6596
6597 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6598 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6599 return rc;
6600 }
6601
6602#ifdef VBOX_STRICT
6603 if (pPatch->patch.flags & PATMFL_DUPLICATE_FUNCTION)
6604 {
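        /* Strict builds only: when a duplicated function traps on a 'retn', log the return
           address found on the guest stack next to the one recorded on the PATM stack (purely a
           debugging aid, no state is changed here). */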
6605 DISCPUSTATE cpu;
6606 bool disret;
6607 uint32_t cbInstr;
6608 PATMP2GLOOKUPREC cacheRec;
6609 RT_ZERO(cacheRec);
6610 cacheRec.pPatch = &pPatch->patch;
6611
6612 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
6613 &cpu, &cbInstr);
6614 if (cacheRec.Lock.pvMap)
6615 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6616
6617 if (disret && cpu.pCurInstr->uOpcode == OP_RETN)
6618 {
6619 RTRCPTR retaddr;
6620 PCPUMCTX pCtx2;
6621
6622 pCtx2 = CPUMQueryGuestCtxPtr(pVCpu);
6623
6624 rc = PGMPhysSimpleReadGCPtr(pVCpu, &retaddr, pCtx2->esp, sizeof(retaddr));
6625 AssertRC(rc);
6626
6627 Log(("Return failed at %RRv (%RRv)\n", pEip, pNewEip));
6628 Log(("Expected return address %RRv found address %RRv Psp=%x\n", pVM->patm.s.pGCStackHC[(pVM->patm.s.pGCStateHC->Psp+PATM_STACK_SIZE)/sizeof(RTRCPTR)], retaddr, pVM->patm.s.pGCStateHC->Psp));
6629 }
6630 }
6631#endif
6632
6633    /* Return the original address, corrected by subtracting the CS base address. */
6634 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6635
6636 /* Reset the PATM stack. */
6637 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6638
6639 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pNewEip)
6640 {
6641 /* Must be a faulting instruction after sti; currently only sysexit, hlt or iret */
6642 Log(("PATMR3HandleTrap %RRv -> inhibit irqs set!\n", pEip));
6643#ifdef VBOX_STRICT
6644 DISCPUSTATE cpu;
6645 bool disret;
6646 uint32_t cbInstr;
6647 PATMP2GLOOKUPREC cacheRec;
6648 RT_ZERO(cacheRec);
6649 cacheRec.pPatch = &pPatch->patch;
6650
6651 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_ORGCODE,
6652 &cpu, &cbInstr);
6653 if (cacheRec.Lock.pvMap)
6654 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6655
6656 if (disret && (cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_INT3))
6657 {
6658 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
6659 &cpu, &cbInstr);
6660 if (cacheRec.Lock.pvMap)
6661 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6662
6663 Assert(cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_IRET);
6664 }
6665#endif
6666 EMSetInhibitInterruptsPC(pVCpu, pNewEip);
6667 pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts = 0;
6668 }
6669
6670 Log2(("pPatchBlockGC %RRv - pEip %RRv corresponding GC address %RRv\n", PATCHCODE_PTR_GC(&pPatch->patch), pEip, pNewEip));
6671 DBGFR3_DISAS_INSTR_LOG(pVCpu, pCtx->cs.Sel, pNewEip, "PATCHRET: ");
6672 if (pNewEip >= pPatch->patch.pPrivInstrGC && pNewEip < pPatch->patch.pPrivInstrGC + pPatch->patch.cbPatchJump)
6673 {
6674 /* We can't jump back to code that we've overwritten with a 5 byte jump! */
6675        Log(("Disabling patch at location %RRv due to a trap too close to the privileged instruction\n", pPatch->patch.pPrivInstrGC));
6676 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6677 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6678 return VERR_PATCH_DISABLED;
6679 }
6680
6681#ifdef PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
6682 /** @todo compare to nr of successful runs. add some aging algorithm and determine the best time to disable the patch */
6683 if (pPatch->patch.cTraps > MAX_PATCH_TRAPS)
6684 {
6685 Log(("Disabling patch at location %RRv due to too many traps inside patch code\n", pPatch->patch.pPrivInstrGC));
6686        /* We are only wasting time; back out the patch. */
6687        PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6689 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6690 return VERR_PATCH_DISABLED;
6691 }
6692#endif
6693
6694 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6695 return VINF_SUCCESS;
6696}
6697
6698
6699/**
6700 * Handle page-fault in monitored page
6701 *
6702 * @returns VBox status code.
6703 * @param pVM Pointer to the VM.
6704 */
6705VMMR3_INT_DECL(int) PATMR3HandleMonitoredPage(PVM pVM)
6706{
6707 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
6708
6709 RTRCPTR addr = pVM->patm.s.pvFaultMonitor;
6710 addr &= PAGE_BASE_GC_MASK;
6711
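    /* A fault on the monitored page means something touched a page containing patched guest
       code: drop the virtual handler and refresh (disable + re-enable) every enabled patch that
       lives on that page. */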
6712 int rc = PGMHandlerVirtualDeregister(pVM, addr);
6713 AssertRC(rc); NOREF(rc);
6714
6715 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, false);
6716 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) == PAGE_ADDRESS(addr))
6717 {
6718 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6719 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6720 rc = PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6721 if (rc == VWRN_PATCH_REMOVED)
6722 return VINF_SUCCESS;
6723
6724 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6725
6726 if (addr == pPatchRec->patch.pPrivInstrGC)
6727 addr++;
6728 }
6729
6730    for (;;)
6731 {
6732 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, true);
6733
6734 if (!pPatchRec || PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) != PAGE_ADDRESS(addr))
6735 break;
6736
6737 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
6738 {
6739 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6740 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6741 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6742 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6743 }
6744 addr = pPatchRec->patch.pPrivInstrGC + 1;
6745 }
6746
6747 pVM->patm.s.pvFaultMonitor = 0;
6748 return VINF_SUCCESS;
6749}
6750
6751
6752#ifdef VBOX_WITH_STATISTICS
6753
6754static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch)
6755{
6756 if (pPatch->flags & PATMFL_SYSENTER)
6757 {
6758 return "SYSENT";
6759 }
6760 else
6761 if (pPatch->flags & (PATMFL_TRAPHANDLER|PATMFL_INTHANDLER))
6762 {
6763 static char szTrap[16];
6764 uint32_t iGate;
6765
6766 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
6767 if (iGate < 256)
6768 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-%2X" : "TRAP-%2X", iGate);
6769 else
6770 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-??" : "TRAP-??");
6771 return szTrap;
6772 }
6773 else
6774 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
6775 return "DUPFUNC";
6776 else
6777 if (pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL)
6778 return "FUNCCALL";
6779 else
6780 if (pPatch->flags & PATMFL_TRAMPOLINE)
6781 return "TRAMP";
6782 else
6783 return patmGetInstructionString(pPatch->opcode, pPatch->flags);
6784}
6785
6786static const char *PATMPatchState(PVM pVM, PPATCHINFO pPatch)
6787{
6788 NOREF(pVM);
6789    switch (pPatch->uState)
6790 {
6791 case PATCH_ENABLED:
6792 return "ENA";
6793 case PATCH_DISABLED:
6794 return "DIS";
6795 case PATCH_DIRTY:
6796 return "DIR";
6797 case PATCH_UNUSABLE:
6798 return "UNU";
6799 case PATCH_REFUSED:
6800 return "REF";
6801 case PATCH_DISABLE_PENDING:
6802 return "DIP";
6803 default:
6804 AssertFailed();
6805 return " ";
6806 }
6807}
6808
6809/**
6810 * Resets the sample.
6811 * @param pVM Pointer to the VM.
6812 * @param pvSample The sample registered using STAMR3RegisterCallback.
6813 */
6814static void patmResetStat(PVM pVM, void *pvSample)
6815{
6816 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6817 Assert(pPatch);
6818
6819 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A = 0;
6820 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B = 0;
6821}
6822
6823/**
6824 * Prints the sample into the buffer.
6825 *
6826 * @param pVM Pointer to the VM.
6827 * @param pvSample The sample registered using STAMR3RegisterCallback.
6828 * @param pszBuf The buffer to print into.
6829 * @param cchBuf The size of the buffer.
6830 */
6831static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf)
6832{
6833 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6834 Assert(pPatch);
6835
6836 Assert(pPatch->uState != PATCH_REFUSED);
6837 Assert(!(pPatch->flags & (PATMFL_REPLACE_FUNCTION_CALL|PATMFL_MMIO_ACCESS)));
6838
6839 RTStrPrintf(pszBuf, cchBuf, "size %04x ->%3s %8s - %08d - %08d",
6840 pPatch->cbPatchBlockSize, PATMPatchState(pVM, pPatch), PATMPatchType(pVM, pPatch),
6841 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A, pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B);
6842}
6843
6844/**
6845 * Returns the GC address of the corresponding patch statistics counter
6846 *
6847 * @returns Stat address
6848 * @param pVM Pointer to the VM.
6849 * @param pPatch Patch structure
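 *
 * @remarks The returned address points at the u32A member of the patch's STAMRATIOU32 slot in
 *          the guest context statistics array; u32B follows it (see patmPrintStat above).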
6850 */
6851RTRCPTR patmPatchQueryStatAddress(PVM pVM, PPATCHINFO pPatch)
6852{
6853 Assert(pPatch->uPatchIdx != PATM_STAT_INDEX_NONE);
6854 return pVM->patm.s.pStatsGC + sizeof(STAMRATIOU32) * pPatch->uPatchIdx + RT_OFFSETOF(STAMRATIOU32, u32A);
6855}
6856
6857#endif /* VBOX_WITH_STATISTICS */
6858#ifdef VBOX_WITH_DEBUGGER
6859
6860/**
6861 * The '.patmoff' command.
6862 *
6863 * @returns VBox status.
6864 * @param pCmd Pointer to the command descriptor (as registered).
6865 * @param pCmdHlp Pointer to command helper functions.
6866 * @param pUVM The user mode VM handle.
6867 * @param paArgs Pointer to (readonly) array of arguments.
6868 * @param cArgs Number of arguments in the array.
6869 */
6870static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
6871{
6872 /*
6873 * Validate input.
6874 */
6875 NOREF(cArgs); NOREF(paArgs);
6876 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
6877 PVM pVM = pUVM->pVM;
6878 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
6879
6880 if (HMIsEnabled(pVM))
6881 return DBGCCmdHlpPrintf(pCmdHlp, "PATM is permanently disabled by HM.\n");
6882
6883 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, DisableAllPatches, pVM);
6884 PATMR3AllowPatching(pVM->pUVM, false);
6885 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching disabled\n");
6886}
6887
6888/**
6889 * The '.patmon' command.
6890 *
6891 * @returns VBox status.
6892 * @param pCmd Pointer to the command descriptor (as registered).
6893 * @param pCmdHlp Pointer to command helper functions.
6894 * @param pUVM The user mode VM handle.
6895 * @param paArgs Pointer to (readonly) array of arguments.
6896 * @param cArgs Number of arguments in the array.
6897 */
6898static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
6899{
6900 /*
6901 * Validate input.
6902 */
6903 NOREF(cArgs); NOREF(paArgs);
6904 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
6905 PVM pVM = pUVM->pVM;
6906 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
6907
6908 if (HMIsEnabled(pVM))
6909 return DBGCCmdHlpPrintf(pCmdHlp, "PATM is permanently disabled by HM.\n");
6910
6911 PATMR3AllowPatching(pVM->pUVM, true);
6912 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, EnableAllPatches, pVM);
6913 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching enabled\n");
6914}
6915
6916#endif /* VBOX_WITH_DEBUGGER */