VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PATM.cpp@45714

Last change on this file since 45714 was 45620, checked in by vboxsync on 2013-04-18:

CSAM,PATM: Don't bother initializing anything if HMIsEnabled(). Also, don't allow the components to be enabled.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 261.8 KB
1/* $Id: PATM.cpp 45620 2013-04-18 20:07:14Z vboxsync $ */
2/** @file
3 * PATM - Dynamic Guest OS Patching Manager
4 *
5 * @note Never ever reuse patch memory!!
6 */
7
8/*
9 * Copyright (C) 2006-2013 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_PATM
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/stam.h>
26#include <VBox/vmm/pgm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/cpumdis.h>
29#include <VBox/vmm/iom.h>
30#include <VBox/vmm/mm.h>
31#include <VBox/vmm/em.h>
32#include <VBox/vmm/hm.h>
33#include <VBox/vmm/ssm.h>
34#include <VBox/vmm/trpm.h>
35#include <VBox/vmm/cfgm.h>
36#include <VBox/param.h>
37#include <VBox/vmm/selm.h>
38#include <VBox/vmm/csam.h>
39#include <iprt/avl.h>
40#include "PATMInternal.h"
41#include "PATMPatch.h"
42#include <VBox/vmm/vm.h>
43#include <VBox/vmm/uvm.h>
44#include <VBox/dbg.h>
45#include <VBox/err.h>
46#include <VBox/log.h>
47#include <iprt/assert.h>
48#include <iprt/asm.h>
49#include <VBox/dis.h>
50#include <VBox/disopcode.h>
51#include "internal/pgm.h"
52
53#include <iprt/string.h>
54#include "PATMA.h"
55
56//#define PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
57//#define PATM_DISABLE_ALL
58
59/**
60 * Refresh trampoline patch state.
61 */
62typedef struct PATMREFRESHPATCH
63{
64 /** Pointer to the VM structure. */
65 PVM pVM;
66 /** The trampoline patch record. */
67 PPATCHINFO pPatchTrampoline;
68 /** The new patch we want to jump to. */
69 PPATCHINFO pPatchRec;
70} PATMREFRESHPATCH, *PPATMREFRESHPATCH;
71
72
73#define PATMREAD_RAWCODE 1 /* read code as-is */
74#define PATMREAD_ORGCODE 2 /* read original guest opcode bytes; not the patched bytes */
75#define PATMREAD_NOCHECK 4 /* don't check for patch conflicts */
76
77/*
78 * Private structure used during disassembly
79 */
80typedef struct
81{
82 PVM pVM;
83 PPATCHINFO pPatchInfo;
84 R3PTRTYPE(uint8_t *) pbInstrHC;
85 RTRCPTR pInstrGC;
86 uint32_t fReadFlags;
87} PATMDISASM, *PPATMDISASM;
88
89
90/*******************************************************************************
91* Internal Functions *
92*******************************************************************************/
93
94static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pPatch);
95static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
96static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
97
98#ifdef LOG_ENABLED // keep gcc quiet
99static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC);
100#endif
101#ifdef VBOX_WITH_STATISTICS
102static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch);
103static void patmResetStat(PVM pVM, void *pvSample);
104static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf);
105#endif
106
107#define patmPatchHCPtr2PatchGCPtr(pVM, pHC) (pVM->patm.s.pPatchMemGC + (pHC - pVM->patm.s.pPatchMemHC))
108#define patmPatchGCPtr2PatchHCPtr(pVM, pGC) (pVM->patm.s.pPatchMemHC + (pGC - pVM->patm.s.pPatchMemGC))
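/* Illustrative sketch of the two conversions above: both mappings cover the same
 * patch memory block, so translating is simply rebasing the offset onto the other
 * base address (pVM and the PATM fields are as used throughout this file; offPatch
 * is a hypothetical offset into the block):
 *
 *     uint8_t *pbPatchHC = pVM->patm.s.pPatchMemHC + offPatch;
 *     RTRCPTR  PatchGC   = patmPatchHCPtr2PatchGCPtr(pVM, pbPatchHC);
 *     Assert(PatchGC == pVM->patm.s.pPatchMemGC + offPatch);
 *     Assert(patmPatchGCPtr2PatchHCPtr(pVM, PatchGC) == pbPatchHC);
 */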
109
110static int patmReinit(PVM pVM);
111static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam);
112static RTRCPTR patmR3GuestGCPtrToPatchGCPtrSimple(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC);
113static int patmR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch);
114
115#ifdef VBOX_WITH_DEBUGGER
116static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM);
117static FNDBGCCMD patmr3CmdOn;
118static FNDBGCCMD patmr3CmdOff;
119
120/** Command descriptors. */
121static const DBGCCMD g_aCmds[] =
122{
123 /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, fFlags, pfnHandler, pszSyntax, pszDescription */
124 { "patmon", 0, 0, NULL, 0, 0, patmr3CmdOn, "", "Enable patching." },
125 { "patmoff", 0, 0, NULL, 0, 0, patmr3CmdOff, "", "Disable patching." },
126};
127#endif
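/* Hypothetical debugger-console session using the two commands registered above
 * (the prompt is an assumption; only the command names come from the table):
 *
 *     VBoxDbg> patmoff
 *     VBoxDbg> patmon
 */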
128
129/* Don't want to break saved states, so put it here as a global variable. */
130static unsigned int cIDTHandlersDisabled = 0;
131
132/**
133 * Initializes the PATM.
134 *
135 * @returns VBox status code.
136 * @param pVM Pointer to the VM.
137 */
138VMMR3_INT_DECL(int) PATMR3Init(PVM pVM)
139{
140 int rc;
141
142 /*
143 * We only need a saved state dummy loader if HM is enabled.
144 */
145 if (HMIsEnabled(pVM))
146 {
147 pVM->fPATMEnabled = false;
148 return SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SSM_VERSION, 0,
149 NULL, NULL, NULL,
150 NULL, NULL, NULL,
151 NULL, patmR3LoadDummy, NULL);
152 }
153
154 /*
155 * Raw-mode.
156 */
157 Log(("PATMR3Init: Patch record size %d\n", sizeof(PATCHINFO)));
158
159 /* These values can't change as they are hardcoded in patch code (old saved states!) */
160 AssertCompile(VMCPU_FF_TIMER == RT_BIT_32(2));
161 AssertCompile(VM_FF_REQUEST == VMCPU_FF_REQUEST);
162 AssertCompile(VMCPU_FF_INTERRUPT_APIC == RT_BIT_32(0));
163 AssertCompile(VMCPU_FF_INTERRUPT_PIC == RT_BIT_32(1));
164
165 AssertReleaseMsg(PATMInterruptFlag == (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST),
166 ("Interrupt flags out of sync!! PATMInterruptFlag=%#x expected %#x. broken assembler?\n", PATMInterruptFlag, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST));
167
168 /* Allocate patch memory and GC patch state memory. */
169 pVM->patm.s.cbPatchMem = PATCH_MEMORY_SIZE;
170 /* Add another page in case the generated code is much larger than expected. */
171 /** @todo bad safety precaution */
172 rc = MMR3HyperAllocOnceNoRel(pVM, PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE + PATM_STAT_MEMSIZE, PAGE_SIZE, MM_TAG_PATM, (void **)&pVM->patm.s.pPatchMemHC);
173 if (RT_FAILURE(rc))
174 {
175 Log(("MMHyperAlloc failed with %Rrc\n", rc));
176 return rc;
177 }
178 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
179
180 /* PATM stack page for call instruction execution. (2 parts: one for our private stack and one to store the original return address) */
181 pVM->patm.s.pGCStackHC = (RTRCPTR *)(pVM->patm.s.pPatchMemHC + PATCH_MEMORY_SIZE + PAGE_SIZE);
182 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
183
184 /*
185 * Hypervisor memory for GC status data (read/write)
186 *
187 * Note1: This is non-critical data; if trashed by the guest, then it will only cause problems for itself
188 * Note2: This doesn't really belong here, but we need access to it for relocation purposes
189 *
190 */
191 Assert(sizeof(PATMGCSTATE) < PAGE_SIZE); /* Note: hardcoded dependencies on this exist. */
192 pVM->patm.s.pGCStateHC = (PPATMGCSTATE)((uint8_t *)pVM->patm.s.pGCStackHC + PATM_STACK_TOTAL_SIZE);
193 pVM->patm.s.pGCStateGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
194
195 /* Hypervisor memory for patch statistics */
196 pVM->patm.s.pStatsHC = (PSTAMRATIOU32)((uint8_t *)pVM->patm.s.pGCStateHC + PAGE_SIZE);
197 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
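    /* Layout sketch of the single hyper-heap block allocated above (offsets follow
     * directly from the pointer assignments; sizes are the build-time constants
     * passed to MMR3HyperAllocOnceNoRel):
     *
     *   +0                                  patch code      (pPatchMemHC / pPatchMemGC)
     *   +PATCH_MEMORY_SIZE                  slack page      (safety margin for oversized code)
     *   +PATCH_MEMORY_SIZE+PAGE_SIZE        PATM stack      (pGCStackHC / pGCStackGC)
     *   +...+PATM_STACK_TOTAL_SIZE          GC state page   (pGCStateHC / pGCStateGC)
     *   +...+PAGE_SIZE                      statistics      (pStatsHC / pStatsGC, PATM_STAT_MEMSIZE bytes)
     */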
198
199 /* Memory for patch lookup trees. */
200 rc = MMHyperAlloc(pVM, sizeof(*pVM->patm.s.PatchLookupTreeHC), 0, MM_TAG_PATM, (void **)&pVM->patm.s.PatchLookupTreeHC);
201 AssertRCReturn(rc, rc);
202 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
203
204#ifdef RT_ARCH_AMD64 /* see patmReinit(). */
205 /* Check CFGM option. */
206 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "PATMEnabled", &pVM->fPATMEnabled);
207 if (RT_FAILURE(rc))
208# ifdef PATM_DISABLE_ALL
209 pVM->fPATMEnabled = false;
210# else
211 pVM->fPATMEnabled = true;
212# endif
213#endif
214
215 rc = patmReinit(pVM);
216 AssertRC(rc);
217 if (RT_FAILURE(rc))
218 return rc;
219
220 /*
221 * Register save and load state notifiers.
222 */
223 rc = SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SSM_VERSION, sizeof(pVM->patm.s) + PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE,
224 NULL, NULL, NULL,
225 NULL, patmR3Save, NULL,
226 NULL, patmR3Load, NULL);
227 AssertRCReturn(rc, rc);
228
229#ifdef VBOX_WITH_DEBUGGER
230 /*
231 * Debugger commands.
232 */
233 static bool s_fRegisteredCmds = false;
234 if (!s_fRegisteredCmds)
235 {
236 int rc2 = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
237 if (RT_SUCCESS(rc2))
238 s_fRegisteredCmds = true;
239 }
240#endif
241
242#ifdef VBOX_WITH_STATISTICS
243 STAM_REG(pVM, &pVM->patm.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/PATM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
244 STAM_REG(pVM, &pVM->patm.s.StatPATMMemoryUsed,STAMTYPE_COUNTER, "/PATM/MemoryUsed", STAMUNIT_OCCURENCES, "The amount of hypervisor heap used for patches.");
245 STAM_REG(pVM, &pVM->patm.s.StatDisabled, STAMTYPE_COUNTER, "/PATM/Patch/Disabled", STAMUNIT_OCCURENCES, "Number of times patches were disabled.");
246 STAM_REG(pVM, &pVM->patm.s.StatEnabled, STAMTYPE_COUNTER, "/PATM/Patch/Enabled", STAMUNIT_OCCURENCES, "Number of times patches were enabled.");
247 STAM_REG(pVM, &pVM->patm.s.StatDirty, STAMTYPE_COUNTER, "/PATM/Patch/Dirty", STAMUNIT_OCCURENCES, "Number of times patches were marked dirty.");
248 STAM_REG(pVM, &pVM->patm.s.StatUnusable, STAMTYPE_COUNTER, "/PATM/Patch/Unusable", STAMUNIT_OCCURENCES, "Number of unusable patches (conflicts).");
249 STAM_REG(pVM, &pVM->patm.s.StatInstalled, STAMTYPE_COUNTER, "/PATM/Patch/Installed", STAMUNIT_OCCURENCES, "Number of installed patches.");
250 STAM_REG(pVM, &pVM->patm.s.StatInt3Callable, STAMTYPE_COUNTER, "/PATM/Patch/Int3Callable", STAMUNIT_OCCURENCES, "Number of cli patches turned into int3 patches.");
251
252 STAM_REG(pVM, &pVM->patm.s.StatInt3BlockRun, STAMTYPE_COUNTER, "/PATM/Patch/Run/Int3", STAMUNIT_OCCURENCES, "Number of times an int3 block patch was executed.");
253 STAMR3RegisterF(pVM, &pVM->patm.s.pGCStateHC->uPatchCalls, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Patch/Run/Normal");
254
255 STAM_REG(pVM, &pVM->patm.s.StatInstalledFunctionPatches, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Function", STAMUNIT_OCCURENCES, "Number of installed function duplication patches.");
256 STAM_REG(pVM, &pVM->patm.s.StatInstalledTrampoline, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Trampoline", STAMUNIT_OCCURENCES, "Number of installed trampoline patches.");
257 STAM_REG(pVM, &pVM->patm.s.StatInstalledJump, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Jump", STAMUNIT_OCCURENCES, "Number of installed jump patches.");
258
259 STAM_REG(pVM, &pVM->patm.s.StatOverwritten, STAMTYPE_COUNTER, "/PATM/Patch/Overwritten", STAMUNIT_OCCURENCES, "Number of overwritten patches.");
260 STAM_REG(pVM, &pVM->patm.s.StatFixedConflicts,STAMTYPE_COUNTER, "/PATM/Patch/ConflictFixed", STAMUNIT_OCCURENCES, "Number of fixed conflicts.");
261 STAM_REG(pVM, &pVM->patm.s.StatFlushed, STAMTYPE_COUNTER, "/PATM/Patch/Flushed", STAMUNIT_OCCURENCES, "Number of flushes of pages with patch jumps.");
262 STAM_REG(pVM, &pVM->patm.s.StatMonitored, STAMTYPE_COUNTER, "/PATM/Patch/Monitored", STAMUNIT_OCCURENCES, "Number of patches in monitored patch pages.");
263 STAM_REG(pVM, &pVM->patm.s.StatPageBoundaryCrossed, STAMTYPE_COUNTER, "/PATM/Patch/BoundaryCross", STAMUNIT_OCCURENCES, "Number of refused patches due to patch jump crossing page boundary.");
264
265 STAM_REG(pVM, &pVM->patm.s.StatHandleTrap, STAMTYPE_PROFILE, "/PATM/HandleTrap", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3HandleTrap");
266 STAM_REG(pVM, &pVM->patm.s.StatPushTrap, STAMTYPE_COUNTER, "/PATM/HandleTrap/PushWP", STAMUNIT_OCCURENCES, "Number of traps due to monitored stack pages.");
267
268 STAM_REG(pVM, &pVM->patm.s.StatSwitchBack, STAMTYPE_COUNTER, "/PATM/SwitchBack", STAMUNIT_OCCURENCES, "Switch back to original guest code when IF=1 & executing PATM instructions");
269 STAM_REG(pVM, &pVM->patm.s.StatSwitchBackFail,STAMTYPE_COUNTER, "/PATM/SwitchBackFail", STAMUNIT_OCCURENCES, "Failed switch back to original guest code when IF=1 & executing PATM instructions");
270
271 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQFailed, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Failed", STAMUNIT_OCCURENCES, "Nr of failed PATMR3DuplicateFunctionRequest calls");
272 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQSuccess, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Success", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls");
273 STAM_REG(pVM, &pVM->patm.s.StatDuplicateUseExisting,STAMTYPE_COUNTER, "/PATM/Function/DupREQ/UseExist", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls when using an existing patch");
274
275 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupInsert, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Insert", STAMUNIT_OCCURENCES, "Nr of successful function address insertions");
276 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupReplace, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Replace", STAMUNIT_OCCURENCES, "Nr of successful function address replacements");
277 STAM_REG(pVM, &pVM->patm.s.StatU32FunctionMaxSlotsUsed, STAMTYPE_U32_RESET,"/PATM/Function/Lookup/MaxSlots", STAMUNIT_OCCURENCES, "Maximum nr of lookup slots used in all call patches");
278
279 STAM_REG(pVM, &pVM->patm.s.StatFunctionFound, STAMTYPE_COUNTER, "/PATM/Function/Found", STAMUNIT_OCCURENCES, "Nr of successful function patch lookups in GC");
280 STAM_REG(pVM, &pVM->patm.s.StatFunctionNotFound, STAMTYPE_COUNTER, "/PATM/Function/NotFound", STAMUNIT_OCCURENCES, "Nr of failed function patch lookups in GC");
281
282 STAM_REG(pVM, &pVM->patm.s.StatPatchWrite, STAMTYPE_PROFILE, "/PATM/Write/Handle", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3PatchWrite");
283 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteDetect, STAMTYPE_PROFILE, "/PATM/Write/Detect", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMIsWriteToPatchPage");
284 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpreted, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Success", STAMUNIT_OCCURENCES, "Nr of interpreted patch writes.");
285 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpretedFailed, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Failed", STAMUNIT_OCCURENCES, "Nr of failed interpreted patch writes.");
286
287 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshSuccess, STAMTYPE_COUNTER, "/PATM/Refresh/Success", STAMUNIT_OCCURENCES, "Successful patch refreshes");
288 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshFailed, STAMTYPE_COUNTER, "/PATM/Refresh/Failure", STAMUNIT_OCCURENCES, "Failed patch refreshes");
289
290 STAM_REG(pVM, &pVM->patm.s.StatPatchPageInserted, STAMTYPE_COUNTER, "/PATM/Page/Inserted", STAMUNIT_OCCURENCES, "Nr of inserted guest pages that were patched");
291 STAM_REG(pVM, &pVM->patm.s.StatPatchPageRemoved, STAMTYPE_COUNTER, "/PATM/Page/Removed", STAMUNIT_OCCURENCES, "Nr of removed guest pages that were patched");
292
293 STAM_REG(pVM, &pVM->patm.s.StatInstrDirty, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Detected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty.");
294 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyGood, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Corrected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and corrected later on.");
295 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyBad, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Failed", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and we were not able to correct them.");
296
297 STAM_REG(pVM, &pVM->patm.s.StatSysEnter, STAMTYPE_COUNTER, "/PATM/Emul/SysEnter", STAMUNIT_OCCURENCES, "Number of times sysenter was emulated.");
298 STAM_REG(pVM, &pVM->patm.s.StatSysExit, STAMTYPE_COUNTER, "/PATM/Emul/SysExit" , STAMUNIT_OCCURENCES, "Number of times sysexit was emulated.");
299 STAM_REG(pVM, &pVM->patm.s.StatEmulIret, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Success", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
300 STAM_REG(pVM, &pVM->patm.s.StatEmulIretFailed, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Failed", STAMUNIT_OCCURENCES, "Number of times iret emulation failed.");
301
302 STAM_REG(pVM, &pVM->patm.s.StatGenRet, STAMTYPE_COUNTER, "/PATM/Gen/Ret" , STAMUNIT_OCCURENCES, "Number of generated ret instructions.");
303 STAM_REG(pVM, &pVM->patm.s.StatGenRetReused, STAMTYPE_COUNTER, "/PATM/Gen/RetReused" , STAMUNIT_OCCURENCES, "Number of reused ret instructions.");
304 STAM_REG(pVM, &pVM->patm.s.StatGenCall, STAMTYPE_COUNTER, "/PATM/Gen/Call", STAMUNIT_OCCURENCES, "Number of generated call instructions.");
305 STAM_REG(pVM, &pVM->patm.s.StatGenJump, STAMTYPE_COUNTER, "/PATM/Gen/Jmp" , STAMUNIT_OCCURENCES, "Number of generated indirect jump instructions.");
306 STAM_REG(pVM, &pVM->patm.s.StatGenPopf, STAMTYPE_COUNTER, "/PATM/Gen/Popf" , STAMUNIT_OCCURENCES, "Number of generated popf instructions.");
307
308 STAM_REG(pVM, &pVM->patm.s.StatCheckPendingIRQ, STAMTYPE_COUNTER, "/PATM/GC/CheckIRQ" , STAMUNIT_OCCURENCES, "Number of traps that ask to check for pending irqs.");
309#endif /* VBOX_WITH_STATISTICS */
310
311 Log(("PATMCallRecord.size %d\n", PATMCallRecord.size));
312 Log(("PATMCallIndirectRecord.size %d\n", PATMCallIndirectRecord.size));
313 Log(("PATMRetRecord.size %d\n", PATMRetRecord.size));
314 Log(("PATMJumpIndirectRecord.size %d\n", PATMJumpIndirectRecord.size));
315 Log(("PATMPopf32Record.size %d\n", PATMPopf32Record.size));
316 Log(("PATMIretRecord.size %d\n", PATMIretRecord.size));
317 Log(("PATMStiRecord.size %d\n", PATMStiRecord.size));
318 Log(("PATMCheckIFRecord.size %d\n", PATMCheckIFRecord.size));
319
320 return rc;
321}
322
323/**
324 * Finalizes HMA page attributes.
325 *
326 * @returns VBox status code.
327 * @param pVM Pointer to the VM.
328 */
329VMMR3_INT_DECL(int) PATMR3InitFinalize(PVM pVM)
330{
331 if (HMIsEnabled(pVM))
332 return VINF_SUCCESS;
333
334 /* The GC state, stack and statistics must be read/write for the guest (supervisor only of course). */
335 int rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStateGC, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
336 if (RT_FAILURE(rc))
337 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
338
339 rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStackGC, PATM_STACK_TOTAL_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
340 if (RT_FAILURE(rc))
341 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
342
343 rc = PGMMapSetPage(pVM, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
344 if (RT_FAILURE(rc))
345 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
346
347 return rc;
348}
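/* Note on the PTE flags used above (standard x86 page-table semantics): X86_PTE_P
 * marks the page present, X86_PTE_A/X86_PTE_D pre-set the accessed/dirty bits and
 * X86_PTE_RW makes it writable; the user bit (X86_PTE_US) is not set, which is
 * what keeps these pages supervisor-only for the guest, matching the remark in
 * the comment above. */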
349
350/**
351 * (Re)initializes PATM
352 *
353 * @param pVM The VM.
354 */
355static int patmReinit(PVM pVM)
356{
357 int rc;
358
359 /*
360 * Assert alignment and sizes.
361 */
362 AssertRelease(!(RT_OFFSETOF(VM, patm.s) & 31));
363 AssertRelease(sizeof(pVM->patm.s) <= sizeof(pVM->patm.padding));
364
365 /*
366 * Setup any fixed pointers and offsets.
367 */
368 pVM->patm.s.offVM = RT_OFFSETOF(VM, patm);
369
370#ifndef RT_ARCH_AMD64 /* would be nice if this was changed everywhere. was driving me crazy on AMD64. */
371#ifndef PATM_DISABLE_ALL
372 pVM->fPATMEnabled = true;
373#endif
374#endif
375
376 Assert(pVM->patm.s.pGCStateHC);
377 memset(pVM->patm.s.pGCStateHC, 0, PAGE_SIZE);
378 AssertReleaseMsg(pVM->patm.s.pGCStateGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStateGC));
379
380 Log(("Patch memory allocated at %p - %RRv\n", pVM->patm.s.pPatchMemHC, pVM->patm.s.pPatchMemGC));
381 pVM->patm.s.pGCStateHC->uVMFlags = X86_EFL_IF;
382
383 Assert(pVM->patm.s.pGCStackHC);
384 memset(pVM->patm.s.pGCStackHC, 0, PAGE_SIZE);
385 AssertReleaseMsg(pVM->patm.s.pGCStackGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStackGC));
386 pVM->patm.s.pGCStateHC->Psp = PATM_STACK_SIZE;
387 pVM->patm.s.pGCStateHC->fPIF = 1; /* PATM Interrupt Flag */
388
389 Assert(pVM->patm.s.pStatsHC);
390 memset(pVM->patm.s.pStatsHC, 0, PATM_STAT_MEMSIZE);
391 AssertReleaseMsg(pVM->patm.s.pStatsGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pStatsGC));
392
393 Assert(pVM->patm.s.pPatchMemHC);
394 Assert(pVM->patm.s.pPatchMemGC == MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC));
395 memset(pVM->patm.s.pPatchMemHC, 0, PATCH_MEMORY_SIZE);
396 AssertReleaseMsg(pVM->patm.s.pPatchMemGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pPatchMemHC));
397
398 /* Needed for future patching of sldt/sgdt/sidt/str etc. */
399 pVM->patm.s.pCPUMCtxGC = VM_RC_ADDR(pVM, CPUMQueryGuestCtxPtr(VMMGetCpu(pVM)));
400
401 Assert(pVM->patm.s.PatchLookupTreeHC);
402 Assert(pVM->patm.s.PatchLookupTreeGC == MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC));
403
404 /*
405 * (Re)Initialize PATM structure
406 */
407 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
408 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr);
409 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
410 pVM->patm.s.offPatchMem = 16; /* don't start with zero here */
411 pVM->patm.s.uCurrentPatchIdx = 1; /* Index zero is a dummy */
412 pVM->patm.s.pvFaultMonitor = 0;
413 pVM->patm.s.deltaReloc = 0;
414
415 /* Lowest and highest patched instruction */
416 pVM->patm.s.pPatchedInstrGCLowest = ~0;
417 pVM->patm.s.pPatchedInstrGCHighest = 0;
418
419 pVM->patm.s.PatchLookupTreeHC->PatchTree = 0;
420 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
421 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
422
423 pVM->patm.s.pfnSysEnterPatchGC = 0;
424 pVM->patm.s.pfnSysEnterGC = 0;
425
426 pVM->patm.s.fOutOfMemory = false;
427
428 pVM->patm.s.pfnHelperCallGC = 0;
429
430 /* Generate all global functions to be used by future patches. */
431 /* We generate a fake patch in order to use the existing code for relocation. */
432 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pVM->patm.s.pGlobalPatchRec);
433 if (RT_FAILURE(rc))
434 {
435 Log(("Out of memory!!!!\n"));
436 return VERR_NO_MEMORY;
437 }
438 pVM->patm.s.pGlobalPatchRec->patch.flags = PATMFL_GLOBAL_FUNCTIONS;
439 pVM->patm.s.pGlobalPatchRec->patch.uState = PATCH_ENABLED;
440 pVM->patm.s.pGlobalPatchRec->patch.pPatchBlockOffset = pVM->patm.s.offPatchMem;
441
442 rc = patmPatchGenGlobalFunctions(pVM, &pVM->patm.s.pGlobalPatchRec->patch);
443 AssertRC(rc);
444
445 /* Update free pointer in patch memory. */
446 pVM->patm.s.offPatchMem += pVM->patm.s.pGlobalPatchRec->patch.uCurPatchOffset;
447 /* Round to next 8 byte boundary. */
448 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
449 return rc;
450}
451
452
453/**
454 * Applies relocations to data and code managed by this
455 * component. This function will be called at init and
456 * whenever the VMM needs to relocate itself inside the GC.
457 *
458 * The PATM will update the addresses used by the switcher.
459 *
460 * @param pVM The VM.
461 */
462VMMR3_INT_DECL(void) PATMR3Relocate(PVM pVM)
463{
464 if (HMIsEnabled(pVM))
465 return;
466
467 RTRCPTR GCPtrNew = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
468 RTRCINTPTR delta = GCPtrNew - pVM->patm.s.pGCStateGC;
469
470 Log(("PATMR3Relocate from %RRv to %RRv - delta %08X\n", pVM->patm.s.pGCStateGC, GCPtrNew, delta));
471 if (delta)
472 {
473 PCPUMCTX pCtx;
474
475 /* Update CPUMCTX guest context pointer. */
476 pVM->patm.s.pCPUMCtxGC += delta;
477
478 pVM->patm.s.deltaReloc = delta;
479
480 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, RelocatePatches, (void *)pVM);
481
482 pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));
483
484 /* If we are running patch code right now, then also adjust EIP. */
485 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
486 pCtx->eip += delta;
487
488 pVM->patm.s.pGCStateGC = GCPtrNew;
489 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
490
491 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
492
493 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
494
495 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
496
497 if (pVM->patm.s.pfnSysEnterPatchGC)
498 pVM->patm.s.pfnSysEnterPatchGC += delta;
499
500 /* Deal with the global patch functions. */
501 pVM->patm.s.pfnHelperCallGC += delta;
502 pVM->patm.s.pfnHelperRetGC += delta;
503 pVM->patm.s.pfnHelperIretGC += delta;
504 pVM->patm.s.pfnHelperJumpGC += delta;
505
506 RelocatePatches(&pVM->patm.s.pGlobalPatchRec->Core, (void *)pVM);
507 }
508}
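/* Worked example (hypothetical addresses): if the hypervisor area moves so that
 * the GC state page goes from 0xa0400000 to 0xa0500000, then delta = 0x00100000
 * and every raw-mode pointer into PATM memory (patch memory, stack, statistics,
 * helper entry points and, via RelocatePatches, all per-patch fixups) is shifted
 * by that same amount; EIP is adjusted too if the guest happens to be executing
 * patch code at that moment. */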
509
510
511/**
512 * Terminates the PATM.
513 *
514 * Termination means cleaning up and freeing all resources,
515 * the VM itself is at this point powered off or suspended.
516 *
517 * @returns VBox status code.
518 * @param pVM Pointer to the VM.
519 */
520VMMR3_INT_DECL(int) PATMR3Term(PVM pVM)
521{
522 if (HMIsEnabled(pVM))
523 return VINF_SUCCESS;
524
525 /* Memory was all allocated from the two MM heaps and requires no freeing. */
526 return VINF_SUCCESS;
527}
528
529
530/**
531 * PATM reset callback.
532 *
533 * @returns VBox status code.
534 * @param pVM The VM which is reset.
535 */
536VMMR3_INT_DECL(int) PATMR3Reset(PVM pVM)
537{
538 Log(("PATMR3Reset\n"));
539 if (HMIsEnabled(pVM))
540 return VINF_SUCCESS;
541
542 /* Free all patches. */
543 for (;;)
544 {
545 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32RemoveBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, 0, true);
546 if (pPatchRec)
547 patmR3RemovePatch(pVM, pPatchRec, true);
548 else
549 break;
550 }
551 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
552 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
553 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
554 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
555
556 int rc = patmReinit(pVM);
557 if (RT_SUCCESS(rc))
558 rc = PATMR3InitFinalize(pVM); /* paranoia */
559
560 return rc;
561}
562
563/**
564 * @callback_method_impl{FNDISREADBYTES}
565 */
566static DECLCALLBACK(int) patmReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
567{
568 PATMDISASM *pDisInfo = (PATMDISASM *)pDis->pvUser;
569
570/** @todo change this to read more! */
571 /*
572 * Trap/interrupt handlers typically call common code on entry, which might already have patches inserted.
573 * As we currently don't support calling patch code from patch code, we'll let it read the original opcode bytes instead.
574 */
575 /** @todo could change in the future! */
576 if (pDisInfo->fReadFlags & PATMREAD_ORGCODE)
577 {
578 size_t cbRead = cbMaxRead;
579 RTUINTPTR uSrcAddr = pDis->uInstrAddr + offInstr;
580 int rc = PATMR3ReadOrgInstr(pDisInfo->pVM, pDis->uInstrAddr + offInstr, &pDis->abInstr[offInstr], cbRead, &cbRead);
581 if (RT_SUCCESS(rc))
582 {
583 if (cbRead >= cbMinRead)
584 {
585 pDis->cbCachedInstr = offInstr + (uint8_t)cbRead;
586 return VINF_SUCCESS;
587 }
588
589 cbMinRead -= (uint8_t)cbRead;
590 cbMaxRead -= (uint8_t)cbRead;
591 offInstr += (uint8_t)cbRead;
592 uSrcAddr += cbRead;
593 }
594
595#ifdef VBOX_STRICT
596 if ( !(pDisInfo->pPatchInfo->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER))
597 && !(pDisInfo->fReadFlags & PATMREAD_NOCHECK))
598 {
599 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pDis->uInstrAddr + offInstr, NULL) == false);
600 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pDis->uInstrAddr + offInstr + cbMinRead-1, NULL) == false);
601 }
602#endif
603 }
604
605 int rc = VINF_SUCCESS;
606 RTGCPTR32 uSrcAddr = (RTGCPTR32)pDis->uInstrAddr + offInstr;
607 if ( !pDisInfo->pbInstrHC
608 || ( PAGE_ADDRESS(pDisInfo->pInstrGC) != PAGE_ADDRESS(uSrcAddr + cbMinRead - 1)
609 && !PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr)))
610 {
611 Assert(!PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr));
612 rc = PGMPhysSimpleReadGCPtr(&pDisInfo->pVM->aCpus[0], &pDis->abInstr[offInstr], uSrcAddr, cbMinRead);
613 offInstr += cbMinRead;
614 }
615 else
616 {
617 /*
618 * pbInstrHC is the base address; adjust according to the GC pointer.
619 *
620 * Try to read the max number of bytes here. Since the disassembler only
621 * ever uses these bytes for the current instruction, it doesn't matter
622 * much if we accidentally read the start of the next instruction even
623 * if it happens to be a patch jump or int3.
624 */
625 uint8_t const *pbInstrHC = pDisInfo->pbInstrHC; AssertPtr(pbInstrHC);
626 pbInstrHC += uSrcAddr - pDisInfo->pInstrGC;
627
628 size_t cbMaxRead1 = PAGE_SIZE - (uSrcAddr & PAGE_OFFSET_MASK);
629 size_t cbMaxRead2 = PAGE_SIZE - ((uintptr_t)pbInstrHC & PAGE_OFFSET_MASK);
630 size_t cbToRead = RT_MIN(cbMaxRead1, RT_MAX(cbMaxRead2, cbMinRead));
631 if (cbToRead > cbMaxRead)
632 cbToRead = cbMaxRead;
633
634 memcpy(&pDis->abInstr[offInstr], pbInstrHC, cbToRead);
635 offInstr += (uint8_t)cbToRead;
636 }
637
638 pDis->cbCachedInstr = offInstr;
639 return rc;
640}
641
642
643DECLINLINE(bool) patmR3DisInstrToStr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
644 PDISCPUSTATE pCpu, uint32_t *pcbInstr, char *pszOutput, size_t cbOutput)
645{
646 PATMDISASM disinfo;
647 disinfo.pVM = pVM;
648 disinfo.pPatchInfo = pPatch;
649 disinfo.pbInstrHC = pbInstrHC;
650 disinfo.pInstrGC = InstrGCPtr32;
651 disinfo.fReadFlags = fReadFlags;
652 return RT_SUCCESS(DISInstrToStrWithReader(InstrGCPtr32,
653 (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
654 patmReadBytes, &disinfo,
655 pCpu, pcbInstr, pszOutput, cbOutput));
656}
657
658
659DECLINLINE(bool) patmR3DisInstr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
660 PDISCPUSTATE pCpu, uint32_t *pcbInstr)
661{
662 PATMDISASM disinfo;
663 disinfo.pVM = pVM;
664 disinfo.pPatchInfo = pPatch;
665 disinfo.pbInstrHC = pbInstrHC;
666 disinfo.pInstrGC = InstrGCPtr32;
667 disinfo.fReadFlags = fReadFlags;
668 return RT_SUCCESS(DISInstrWithReader(InstrGCPtr32,
669 (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
670 patmReadBytes, &disinfo,
671 pCpu, pcbInstr));
672}
673
674
675DECLINLINE(bool) patmR3DisInstrNoStrOpMode(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC,
676 uint32_t fReadFlags,
677 PDISCPUSTATE pCpu, uint32_t *pcbInstr)
678{
679 PATMDISASM disinfo;
680 disinfo.pVM = pVM;
681 disinfo.pPatchInfo = pPatch;
682 disinfo.pbInstrHC = pbInstrHC;
683 disinfo.pInstrGC = InstrGCPtr32;
684 disinfo.fReadFlags = fReadFlags;
685 return RT_SUCCESS(DISInstrWithReader(InstrGCPtr32, pPatch->uOpMode, patmReadBytes, &disinfo,
686 pCpu, pcbInstr));
687}
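/* Hypothetical usage sketch of the wrappers above: disassemble the original guest
 * bytes at an instruction (ignoring any patch jump already written over it) and
 * log its length. The function and flag names come from this file; pInstrGC and
 * the locals are placeholders:
 *
 *     DISCPUSTATE cpu;
 *     uint32_t    cbInstr;
 *     if (patmR3DisInstr(pVM, pPatch, pInstrGC, NULL, PATMREAD_ORGCODE, &cpu, &cbInstr))
 *         Log(("%RRv: %d byte instruction\n", pInstrGC, cbInstr));
 */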
688
689#ifdef LOG_ENABLED
690# define PATM_LOG_ORG_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) \
691 PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, PATMREAD_ORGCODE, a_szComment, " patch:")
692# define PATM_LOG_RAW_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) \
693 PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, PATMREAD_RAWCODE, a_szComment, " patch:")
694
695# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) \
696 do { \
697 if (LogIsEnabled()) \
698 patmLogRawPatchInstr(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2); \
699 } while (0)
700
701static void patmLogRawPatchInstr(PVM pVM, PPATCHINFO pPatch, uint32_t fFlags,
702 const char *pszComment1, const char *pszComment2)
703{
704 DISCPUSTATE DisState;
705 char szOutput[128];
706 szOutput[0] = '\0';
707 patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC, NULL, fFlags,
708 &DisState, NULL, szOutput, sizeof(szOutput));
709 Log(("%s%s %s", pszComment1, pszComment2, szOutput));
710}
711
712#else
713# define PATM_LOG_ORG_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) do { } while (0)
714# define PATM_LOG_RAW_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) do { } while (0)
715# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) do { } while (0)
716#endif
717
718
719/**
720 * Callback function for RTAvloU32DoWithAll
721 *
722 * Updates all fixups in the patches
723 *
724 * @returns VBox status code.
725 * @param pNode Current node
726 * @param pParam Pointer to the VM.
727 */
728static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam)
729{
730 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
731 PVM pVM = (PVM)pParam;
732 RTRCINTPTR delta;
733 int rc;
734
735 /* Nothing to do if the patch is not active. */
736 if (pPatch->patch.uState == PATCH_REFUSED)
737 return 0;
738
739 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
740 PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Org patch jump:", "");
741
742 Log(("Nr of fixups %d\n", pPatch->patch.nrFixups));
743 delta = (RTRCINTPTR)pVM->patm.s.deltaReloc;
744
745 /*
746 * Apply fixups
747 */
748 PRELOCREC pRec = 0;
749 AVLPVKEY key = 0;
750
751 while (true)
752 {
753 /* Get the record that's closest from above */
754 pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true);
755 if (pRec == 0)
756 break;
757
758 key = (AVLPVKEY)(pRec->pRelocPos + 1); /* search for the next record during the next round. */
759
760 switch (pRec->uType)
761 {
762 case FIXUP_ABSOLUTE:
763 Log(("Absolute fixup at %RRv %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
764 if ( !pRec->pSource
765 || PATMIsPatchGCAddr(pVM, pRec->pSource))
766 {
767 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
768 }
769 else
770 {
771 uint8_t curInstr[15];
772 uint8_t oldInstr[15];
773 Assert(pRec->pSource && pPatch->patch.cbPrivInstr <= 15);
774
775 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
776
777 memcpy(oldInstr, pPatch->patch.aPrivInstr, pPatch->patch.cbPrivInstr);
778 *(RTRCPTR *)&oldInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
779
780 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), curInstr, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPrivInstr);
781 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
782
783 pRec->pDest = (RTRCPTR)((RTRCUINTPTR)pRec->pDest + delta);
784
785 if ( rc == VERR_PAGE_NOT_PRESENT
786 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
787 {
788 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
789
790 Log(("PATM: Patch page not present -> check later!\n"));
791 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
792 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
793 }
794 else
795 if (memcmp(curInstr, oldInstr, pPatch->patch.cbPrivInstr))
796 {
797 Log(("PATM: Patch was overwritten -> disabling patch!!\n"));
798 /*
799 * Disable patch; this is not a good solution
800 */
801 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
802 pPatch->patch.uState = PATCH_DISABLED;
803 }
804 else
805 if (RT_SUCCESS(rc))
806 {
807 *(RTRCPTR *)&curInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
808 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pRec->pSource, curInstr, pPatch->patch.cbPrivInstr);
809 AssertRC(rc);
810 }
811 }
812 break;
813
814 case FIXUP_REL_JMPTOPATCH:
815 {
816 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
817
818 if ( pPatch->patch.uState == PATCH_ENABLED
819 && (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE))
820 {
821 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
822 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
823 RTRCPTR pJumpOffGC;
824 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
825 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
826
827#if 0 /** @todo '*(int32_t*)pRec->pRelocPos' crashes on restore of an XP VM here. pRelocPos=0x8000dbe2180a (bird) */
828 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
829#else
830 Log(("Relative fixup (g2p) ???????? -> %08X at %08X (source=%08x, target=%08x)\n", displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
831#endif
832
833 Assert(pRec->pSource - pPatch->patch.cbPatchJump == pPatch->patch.pPrivInstrGC);
834#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
835 if (pPatch->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
836 {
837 Assert(pPatch->patch.flags & PATMFL_JUMP_CONFLICT);
838
839 pJumpOffGC = pPatch->patch.pPrivInstrGC + 2; //two byte opcode
840 oldJump[0] = pPatch->patch.aPrivInstr[0];
841 oldJump[1] = pPatch->patch.aPrivInstr[1];
842 *(RTRCUINTPTR *)&oldJump[2] = displOld;
843 }
844 else
845#endif
846 if (pPatch->patch.cbPatchJump == SIZEOF_NEARJUMP32)
847 {
848 pJumpOffGC = pPatch->patch.pPrivInstrGC + 1; //one byte opcode
849 oldJump[0] = 0xE9;
850 *(RTRCUINTPTR *)&oldJump[1] = displOld;
851 }
852 else
853 {
854 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->patch.cbPatchJump));
855 continue; //this should never happen!!
856 }
857 Assert(pPatch->patch.cbPatchJump <= sizeof(temp));
858
859 /*
860 * Read old patch jump and compare it to the one we previously installed
861 */
862 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPatchJump);
863 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
864
865 if ( rc == VERR_PAGE_NOT_PRESENT
866 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
867 {
868 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
869
870 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
871 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
872 }
873 else
874 if (memcmp(temp, oldJump, pPatch->patch.cbPatchJump))
875 {
876 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
877 /*
878 * Disable patch; this is not a good solution
879 */
880 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
881 pPatch->patch.uState = PATCH_DISABLED;
882 }
883 else
884 if (RT_SUCCESS(rc))
885 {
886 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
887 AssertRC(rc);
888 }
889 else
890 AssertMsgFailed(("Unexpected error %d from PGMPhysSimpleReadGCPtr\n", rc));
891 }
892 else
893 Log(("Skip the guest jump to patch code for this disabled patch %RGv - %08X\n", pPatch->patch.pPrivInstrGC, pRec->pRelocPos));
894
895 pRec->pDest = pTarget;
896 break;
897 }
898
899 case FIXUP_REL_JMPTOGUEST:
900 {
901 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
902 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
903
904 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
905 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
906 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
907 pRec->pSource = pSource;
908 break;
909 }
910
911 default:
912 AssertMsg(0, ("Invalid fixup type!!\n"));
913 return VERR_INVALID_PARAMETER;
914 }
915 }
916
917 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
918 PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Rel patch jump:", "");
919 return 0;
920}
921
922/**
923 * \#PF Handler callback for virtual access handler ranges.
924 *
925 * Important to realize that a physical page in a range can have aliases, and
926 * for ALL and WRITE handlers these will also trigger.
927 *
928 * @returns VINF_SUCCESS if the handler has carried out the operation.
929 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
930 * @param pVM Pointer to the VM.
931 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
932 * @param pvPtr The HC mapping of that address.
933 * @param pvBuf What the guest is reading/writing.
934 * @param cbBuf How much it's reading/writing.
935 * @param enmAccessType The access type.
936 * @param pvUser User argument.
937 */
938DECLCALLBACK(int) patmVirtPageHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
939 PGMACCESSTYPE enmAccessType, void *pvUser)
940{
941 Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
942 NOREF(pvPtr); NOREF(pvBuf); NOREF(cbBuf); NOREF(pvUser);
943
944 /** @todo could be the wrong virtual address (alias) */
945 pVM->patm.s.pvFaultMonitor = GCPtr;
946 PATMR3HandleMonitoredPage(pVM);
947 return VINF_PGM_HANDLER_DO_DEFAULT;
948}
949
950#ifdef VBOX_WITH_DEBUGGER
951
952/**
953 * Callback function for RTAvloU32DoWithAll
954 *
955 * Enables the patch that's being enumerated
956 *
957 * @returns 0 (continue enumeration).
958 * @param pNode Current node
959 * @param pVM Pointer to the VM.
960 */
961static DECLCALLBACK(int) EnableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
962{
963 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
964
965 PATMR3EnablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
966 return 0;
967}
968
969
970/**
971 * Callback function for RTAvloU32DoWithAll
972 *
973 * Disables the patch that's being enumerated
974 *
975 * @returns 0 (continue enumeration).
976 * @param pNode Current node
977 * @param pVM Pointer to the VM.
978 */
979static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
980{
981 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
982
983 PATMR3DisablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
984 return 0;
985}
986
987#endif /* VBOX_WITH_DEBUGGER */
988#ifdef UNUSED_FUNCTIONS
989
990/**
991 * Returns the host context pointer and size of the patch memory block
992 *
993 * @returns Host context pointer.
994 * @param pVM Pointer to the VM.
995 * @param pcb Size of the patch memory block
996 * @internal
997 */
998VMMR3_INT_DECL(void *) PATMR3QueryPatchMemHC(PVM pVM, uint32_t *pcb)
999{
1000 AssertReturn(!HMIsEnabled(pVM), NULL);
1001 if (pcb)
1002 *pcb = pVM->patm.s.cbPatchMem;
1003 return pVM->patm.s.pPatchMemHC;
1004}
1005
1006
1007/**
1008 * Returns the guest context pointer and size of the patch memory block
1009 *
1010 * @returns Guest context pointer.
1011 * @param pVM Pointer to the VM.
1012 * @param pcb Size of the patch memory block
1013 */
1014VMMR3_INT_DECL(RTRCPTR) PATMR3QueryPatchMemGC(PVM pVM, uint32_t *pcb)
1015{
1016 AssertReturn(!HMIsEnabled(pVM), NIL_RTRCPTR);
1017 if (pcb)
1018 *pcb = pVM->patm.s.cbPatchMem;
1019 return pVM->patm.s.pPatchMemGC;
1020}
1021
1022#endif /* UNUSED_FUNCTIONS */
1023
1024/**
1025 * Returns the host context pointer of the GC context structure
1026 *
1027 * @returns VBox status code.
1028 * @param pVM Pointer to the VM.
1029 */
1030VMMR3_INT_DECL(PPATMGCSTATE) PATMR3QueryGCStateHC(PVM pVM)
1031{
1032 AssertReturn(!HMIsEnabled(pVM), NULL);
1033 return pVM->patm.s.pGCStateHC;
1034}
1035
1036
1037#ifdef UNUSED_FUNCTION
1038/**
1039 * Checks whether the HC address is part of our patch region
1040 *
1041 * @returns true/false.
1042 * @param pVM Pointer to the VM.
1043 * @param pAddrHC Host context ring-3 address to check.
1044 */
1045VMMR3_INT_DECL(bool) PATMR3IsPatchHCAddr(PVM pVM, void *pAddrHC)
1046{
1047 return (uintptr_t)pAddrHC >= (uintptr_t)pVM->patm.s.pPatchMemHC
1048 && (uintptr_t)pAddrHC < (uintptr_t)pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem;
1049}
1050#endif
1051
1052
1053/**
1054 * Allows or disallows patching of privileged instructions executed by the guest OS
1055 *
1056 * @returns VBox status code.
1057 * @param pUVM The user mode VM handle.
1058 * @param fAllowPatching Allow/disallow patching
1059 */
1060VMMR3DECL(int) PATMR3AllowPatching(PUVM pUVM, bool fAllowPatching)
1061{
1062 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1063 PVM pVM = pUVM->pVM;
1064 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1065
1066 if (!HMIsEnabled(pVM))
1067 pVM->fPATMEnabled = fAllowPatching;
1068 else
1069 Assert(!pVM->fPATMEnabled);
1070 return VINF_SUCCESS;
1071}
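/* Hypothetical caller sketch (pUVM is assumed to be a valid user mode VM handle):
 * turn patching off and check the resulting state through the query API below:
 *
 *     int rc = PATMR3AllowPatching(pUVM, false);
 *     AssertRC(rc);
 *     Assert(!PATMR3IsEnabled(pUVM));
 */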
1072
1073
1074/**
1075 * Checks if the patch manager is enabled or not.
1076 *
1077 * @returns true if enabled, false if not (or if invalid handle).
1078 * @param pUVM The user mode VM handle.
1079 */
1080VMMR3DECL(bool) PATMR3IsEnabled(PUVM pUVM)
1081{
1082 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
1083 PVM pVM = pUVM->pVM;
1084 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
1085 return PATMIsEnabled(pVM);
1086}
1087
1088
1089/**
1090 * Convert a GC patch block pointer to a HC patch pointer
1091 *
1092 * @returns HC pointer or NULL if it's not a GC patch pointer
1093 * @param pVM Pointer to the VM.
1094 * @param pAddrGC GC pointer
1095 */
1096VMMR3_INT_DECL(void *) PATMR3GCPtrToHCPtr(PVM pVM, RTRCPTR pAddrGC)
1097{
1098 AssertReturn(!HMIsEnabled(pVM), NULL);
1099 if (pVM->patm.s.pPatchMemGC <= pAddrGC && pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem > pAddrGC)
1100 return pVM->patm.s.pPatchMemHC + (pAddrGC - pVM->patm.s.pPatchMemGC);
1101 return NULL;
1102}
1103
1104
1105/**
1106 * Convert guest context address to host context pointer
1107 *
1108 * @returns Host context pointer or NULL in case of an error.
1109 * @param pVM Pointer to the VM.
1110 * @param pCacheRec Address conversion cache record
1111 * @param pGCPtr Guest context pointer
1112 *
1113 *
1114 *
1115 */
1116R3PTRTYPE(uint8_t *) patmR3GCVirtToHCVirt(PVM pVM, PPATMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr)
1117{
1118 int rc;
1119 R3PTRTYPE(uint8_t *) pHCPtr;
1120 uint32_t offset;
1121
1122 if (PATMIsPatchGCAddr(pVM, pGCPtr))
1123 {
1124 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1125 Assert(pPatch);
1126 return PATCHCODE_PTR_HC(pPatch) + (pGCPtr - PATCHCODE_PTR_GC(pPatch));
1127 }
1128
1129 offset = pGCPtr & PAGE_OFFSET_MASK;
1130 if (pCacheRec->pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
1131 return pCacheRec->pPageLocStartHC + offset;
1132
1133 /* Release previous lock if any. */
1134 if (pCacheRec->Lock.pvMap)
1135 {
1136 PGMPhysReleasePageMappingLock(pVM, &pCacheRec->Lock);
1137 pCacheRec->Lock.pvMap = NULL;
1138 }
1139
1140 rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pGCPtr, (const void **)&pHCPtr, &pCacheRec->Lock);
1141 if (rc != VINF_SUCCESS)
1142 {
1143 AssertMsg(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("PGMPhysGCPtr2CCPtrReadOnly failed for %08X\n", pGCPtr));
1144 return NULL;
1145 }
1146 pCacheRec->pPageLocStartHC = (R3PTRTYPE(uint8_t*))((RTHCUINTPTR)pHCPtr & PAGE_BASE_HC_MASK);
1147 pCacheRec->pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
1148 return pHCPtr;
1149}
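/* Hypothetical usage sketch: resolve a guest address through a cache record and
 * release the page mapping lock once done with the translation(s). The record
 * fields come from the function above; RT_ZERO is the usual IPRT zero-initializer:
 *
 *     PATMP2GLOOKUPREC cacheRec;
 *     RT_ZERO(cacheRec);
 *     cacheRec.pPatch = pPatch;
 *     uint8_t *pbInstrHC = patmR3GCVirtToHCVirt(pVM, &cacheRec, pInstrGC);
 *     ...
 *     if (cacheRec.Lock.pvMap)
 *         PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
 */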
1150
1151
1152/**
1153 * Calculates and fills in all branch targets
1154 *
1155 * @returns VBox status code.
1156 * @param pVM Pointer to the VM.
1157 * @param pPatch Current patch block pointer
1158 *
1159 */
1160static int patmr3SetBranchTargets(PVM pVM, PPATCHINFO pPatch)
1161{
1162 int32_t displ;
1163
1164 PJUMPREC pRec = 0;
1165 unsigned nrJumpRecs = 0;
1166
1167 /*
1168 * Set all branch targets inside the patch block.
1169 * We remove all jump records as they are no longer needed afterwards.
1170 */
1171 while (true)
1172 {
1173 RCPTRTYPE(uint8_t *) pInstrGC;
1174 RCPTRTYPE(uint8_t *) pBranchTargetGC = 0;
1175
1176 pRec = (PJUMPREC)RTAvlPVRemoveBestFit(&pPatch->JumpTree, 0, true);
1177 if (pRec == 0)
1178 break;
1179
1180 nrJumpRecs++;
1181
1182 /* HC in patch block to GC in patch block. */
1183 pInstrGC = patmPatchHCPtr2PatchGCPtr(pVM, pRec->pJumpHC);
1184
1185 if (pRec->opcode == OP_CALL)
1186 {
1187 /* Special case: call function replacement patch from this patch block.
1188 */
1189 PPATMPATCHREC pFunctionRec = patmQueryFunctionPatch(pVM, pRec->pTargetGC);
1190 if (!pFunctionRec)
1191 {
1192 int rc;
1193
1194 if (PATMR3HasBeenPatched(pVM, pRec->pTargetGC) == false)
1195 rc = PATMR3InstallPatch(pVM, pRec->pTargetGC, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
1196 else
1197 rc = VERR_PATCHING_REFUSED; /* exists as a normal patch; can't use it */
1198
1199 if (RT_FAILURE(rc))
1200 {
1201 uint8_t *pPatchHC;
1202 RTRCPTR pPatchGC;
1203 RTRCPTR pOrgInstrGC;
1204
1205 pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pInstrGC, 0);
1206 Assert(pOrgInstrGC);
1207
1208 /* Failure for some reason -> mark exit point with int 3. */
1209 Log(("Failed to install function replacement patch (at %x) for reason %Rrc\n", pOrgInstrGC, rc));
1210
1211 pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pOrgInstrGC);
1212 Assert(pPatchGC);
1213
1214 pPatchHC = pVM->patm.s.pPatchMemHC + (pPatchGC - pVM->patm.s.pPatchMemGC);
1215
1216 /* Set a breakpoint at the very beginning of the recompiled instruction */
1217 *pPatchHC = 0xCC;
1218
1219 continue;
1220 }
1221 }
1222 else
1223 {
1224 Log(("Patch block %RRv called as function\n", pFunctionRec->patch.pPrivInstrGC));
1225 pFunctionRec->patch.flags |= PATMFL_CODE_REFERENCED;
1226 }
1227
1228 pBranchTargetGC = PATMR3QueryPatchGCPtr(pVM, pRec->pTargetGC);
1229 }
1230 else
1231 pBranchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pRec->pTargetGC);
1232
1233 if (pBranchTargetGC == 0)
1234 {
1235 AssertMsgFailed(("patmr3SetBranchTargets: patmGuestGCPtrToPatchGCPtr failed for %08X\n", pRec->pTargetGC));
1236 return VERR_PATCHING_REFUSED;
1237 }
1238 /* Our jumps *always* have a dword displacement (to make things easier). */
1239 Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
1240 displ = pBranchTargetGC - (pInstrGC + pRec->offDispl + sizeof(RTRCPTR));
1241 *(RTRCPTR *)(pRec->pJumpHC + pRec->offDispl) = displ;
1242 Log(("Set branch target %d to %08X : %08x - (%08x + %d + %d)\n", nrJumpRecs, displ, pBranchTargetGC, pInstrGC, pRec->offDispl, sizeof(RTRCPTR)));
1243 }
1244 Assert(nrJumpRecs == pPatch->nrJumpRecs);
1245 Assert(pPatch->JumpTree == 0);
1246 return VINF_SUCCESS;
1247}
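/* Worked example (hypothetical addresses) for the displacement computed above: a
 * jump whose opcode sits at patch GC address 0xa0002000 with its 32-bit
 * displacement field at offset 1, targeting 0xa0002100, gets
 *
 *     displ = 0xa0002100 - (0xa0002000 + 1 + 4) = 0xfb
 *
 * i.e. the displacement is always relative to the byte following the displacement
 * field (the end of the 5-byte jump). */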
1248
1249/**
1250 * Add an illegal instruction record
1251 *
1252 * @param pVM Pointer to the VM.
1253 * @param pPatch Patch structure ptr
1254 * @param pInstrGC Guest context pointer to privileged instruction
1255 *
1256 */
1257static void patmAddIllegalInstrRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1258{
1259 PAVLPVNODECORE pRec;
1260
1261 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1262 Assert(pRec);
1263 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
1264
1265 bool ret = RTAvlPVInsert(&pPatch->pTempInfo->IllegalInstrTree, pRec);
1266 Assert(ret); NOREF(ret);
1267 pPatch->pTempInfo->nrIllegalInstr++;
1268}
1269
1270static bool patmIsIllegalInstr(PPATCHINFO pPatch, RTRCPTR pInstrGC)
1271{
1272 PAVLPVNODECORE pRec;
1273
1274 pRec = RTAvlPVGet(&pPatch->pTempInfo->IllegalInstrTree, (AVLPVKEY)(uintptr_t)pInstrGC);
1275 if (pRec)
1276 return true;
1277 else
1278 return false;
1279}
1280
1281/**
1282 * Add a patch to guest lookup record
1283 *
1284 * @param pVM Pointer to the VM.
1285 * @param pPatch Patch structure ptr
1286 * @param pPatchInstrHC Host context pointer to the patch instruction
1287 * @param pInstrGC Guest context pointer to privileged instruction
1288 * @param enmType Lookup type
1289 * @param fDirty Dirty flag
1290 *
1291 * @note Be extremely careful with this function. Make absolutely sure the guest
1292 * address is correct! (to avoid executing instructions twice!)
1293 */
1294void patmR3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC, PATM_LOOKUP_TYPE enmType, bool fDirty)
1295{
1296 bool ret;
1297 PRECPATCHTOGUEST pPatchToGuestRec;
1298 PRECGUESTTOPATCH pGuestToPatchRec;
1299 uint32_t PatchOffset = pPatchInstrHC - pVM->patm.s.pPatchMemHC; /* Offset in memory reserved for PATM. */
1300
1301 LogFlowFunc(("pVM=%#p pPatch=%#p pPatchInstrHC=%#p pInstrGC=%#x enmType=%d fDirty=%RTbool\n",
1302 pVM, pPatch, pPatchInstrHC, pInstrGC, enmType, fDirty));
1303
1304 if (enmType == PATM_LOOKUP_PATCH2GUEST)
1305 {
1306 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1307 if (pPatchToGuestRec && pPatchToGuestRec->Core.Key == PatchOffset)
1308 return; /* already there */
1309
1310 Assert(!pPatchToGuestRec);
1311 }
1312#ifdef VBOX_STRICT
1313 else
1314 {
1315 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1316 Assert(!pPatchToGuestRec);
1317 }
1318#endif
1319
1320 pPatchToGuestRec = (PRECPATCHTOGUEST)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(RECPATCHTOGUEST) + sizeof(RECGUESTTOPATCH));
1321 Assert(pPatchToGuestRec);
1322 pPatchToGuestRec->Core.Key = PatchOffset;
1323 pPatchToGuestRec->pOrgInstrGC = pInstrGC;
1324 pPatchToGuestRec->enmType = enmType;
1325 pPatchToGuestRec->fDirty = fDirty;
1326
1327 ret = RTAvlU32Insert(&pPatch->Patch2GuestAddrTree, &pPatchToGuestRec->Core);
1328 Assert(ret);
1329
1330 /* GC to patch address */
1331 if (enmType == PATM_LOOKUP_BOTHDIR)
1332 {
1333 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
1334 if (!pGuestToPatchRec)
1335 {
1336 pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1337 pGuestToPatchRec->Core.Key = pInstrGC;
1338 pGuestToPatchRec->PatchOffset = PatchOffset;
1339
1340 ret = RTAvlU32Insert(&pPatch->Guest2PatchAddrTree, &pGuestToPatchRec->Core);
1341 Assert(ret);
1342 }
1343 }
1344
1345 pPatch->nrPatch2GuestRecs++;
1346}
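/* Bookkeeping sketch (derived from the function above): Patch2GuestAddrTree is
 * keyed by the instruction's offset inside the PATM memory block, while
 * Guest2PatchAddrTree (maintained only for PATM_LOOKUP_BOTHDIR records) is keyed
 * by the guest address; both nodes come from one MMR3HeapAllocZ allocation, so
 * freeing the patch-to-guest record releases its companion guest-to-patch record
 * as well. */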
1347
1348
1349/**
1350 * Removes a patch to guest lookup record
1351 *
1352 * @param pVM Pointer to the VM.
1353 * @param pPatch Patch structure ptr
1354 * @param pPatchInstrGC Guest context pointer to patch block
1355 */
1356void patmr3RemoveP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pPatchInstrGC)
1357{
1358 PAVLU32NODECORE pNode;
1359 PAVLU32NODECORE pNode2;
1360 PRECPATCHTOGUEST pPatchToGuestRec;
1361 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
1362
1363 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1364 Assert(pPatchToGuestRec);
1365 if (pPatchToGuestRec)
1366 {
1367 if (pPatchToGuestRec->enmType == PATM_LOOKUP_BOTHDIR)
1368 {
1369 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1370
1371 Assert(pGuestToPatchRec->Core.Key);
1372 pNode2 = RTAvlU32Remove(&pPatch->Guest2PatchAddrTree, pGuestToPatchRec->Core.Key);
1373 Assert(pNode2);
1374 }
1375 pNode = RTAvlU32Remove(&pPatch->Patch2GuestAddrTree, pPatchToGuestRec->Core.Key);
1376 Assert(pNode);
1377
1378 MMR3HeapFree(pPatchToGuestRec);
1379 pPatch->nrPatch2GuestRecs--;
1380 }
1381}
1382
1383
1384/**
1385 * RTAvlPVDestroy callback.
1386 */
1387static DECLCALLBACK(int) patmEmptyTreePVCallback(PAVLPVNODECORE pNode, void *)
1388{
1389 MMR3HeapFree(pNode);
1390 return 0;
1391}
1392
1393/**
1394 * Empty the specified tree (PV tree, MMR3 heap)
1395 *
1396 * @param pVM Pointer to the VM.
1397 * @param ppTree Tree to empty
1398 */
1399static void patmEmptyTree(PVM pVM, PAVLPVNODECORE *ppTree)
1400{
1401 NOREF(pVM);
1402 RTAvlPVDestroy(ppTree, patmEmptyTreePVCallback, NULL);
1403}
1404
1405
1406/**
1407 * RTAvlU32Destroy callback.
1408 */
1409static DECLCALLBACK(int) patmEmptyTreeU32Callback(PAVLU32NODECORE pNode, void *)
1410{
1411 MMR3HeapFree(pNode);
1412 return 0;
1413}
1414
1415/**
1416 * Empty the specified tree (U32 tree, MMR3 heap)
1417 *
1418 * @param pVM Pointer to the VM.
1419 * @param ppTree Tree to empty
1420 */
1421static void patmEmptyTreeU32(PVM pVM, PPAVLU32NODECORE ppTree)
1422{
1423 NOREF(pVM);
1424 RTAvlU32Destroy(ppTree, patmEmptyTreeU32Callback, NULL);
1425}
1426
1427
1428/**
1429 * Analyses the instructions following the cli for compliance with our heuristics for cli & pushf
1430 *
1431 * @returns VBox status code.
1432 * @param pVM Pointer to the VM.
1433 * @param pCpu CPU disassembly state
1434 * @param pInstrGC Guest context pointer to privileged instruction
1435 * @param pCurInstrGC Guest context pointer to the current instruction
1436 * @param pCacheRec Cache record ptr
1437 *
1438 */
1439static int patmAnalyseBlockCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1440{
1441 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1442 bool fIllegalInstr = false;
1443
1444 /*
1445 * Preliminary heuristics:
1446 *  - no call instructions without a fixed displacement between cli and sti/popf
1447 *  - no jumps in the instructions following cli (4+ bytes; enough for the replacement jump (5 bytes))
1448 *  - no nested pushf/cli
1449 *  - sti/popf should be the (eventual) target of all branches
1450 *  - no near or far returns; no int xx, no into
1451 *
1452 * Note: Later on we can impose less strict guidelines if the need arises
1453 */
1454
1455 /* Bail out if the patch gets too big. */
1456 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1457 {
1458 Log(("Code block too big (%x) for patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1459 fIllegalInstr = true;
1460 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1461 }
1462 else
1463 {
1464 /* No unconditional jumps or calls without fixed displacements. */
1465 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1466 && (pCpu->pCurInstr->uOpcode == OP_JMP || pCpu->pCurInstr->uOpcode == OP_CALL)
1467 )
1468 {
1469 Assert(pCpu->Param1.cb <= 4 || pCpu->Param1.cb == 6);
1470 if ( pCpu->Param1.cb == 6 /* far call/jmp */
1471 || (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1472 || (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1473 )
1474 {
1475 fIllegalInstr = true;
1476 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1477 }
1478 }
1479
1480 /* An unconditional (short) jump right after a cli is a potential problem; we will overwrite whichever function comes afterwards */
1481 if (pPatch->opcode == OP_CLI && pCpu->pCurInstr->uOpcode == OP_JMP)
1482 {
1483 if ( pCurInstrGC > pPatch->pPrivInstrGC
1484 && pCurInstrGC + pCpu->cbInstr < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1485 {
1486 Log(("Dangerous unconditional jump ends in our generated patch jump!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1487 /* We turn this one into a int 3 callable patch. */
1488 pPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
1489 }
1490 }
1491 else
1492 /* no nested pushfs just yet; nested cli is allowed for cli patches though. */
1493 if (pPatch->opcode == OP_PUSHF)
1494 {
1495 if (pCurInstrGC != pInstrGC && pCpu->pCurInstr->uOpcode == OP_PUSHF)
1496 {
1497 fIllegalInstr = true;
1498 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1499 }
1500 }
1501
1502 /* no far returns */
1503 if (pCpu->pCurInstr->uOpcode == OP_RETF)
1504 {
1505 pPatch->pTempInfo->nrRetInstr++;
1506 fIllegalInstr = true;
1507 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1508 }
1509 else if ( pCpu->pCurInstr->uOpcode == OP_INT3
1510 || pCpu->pCurInstr->uOpcode == OP_INT
1511 || pCpu->pCurInstr->uOpcode == OP_INTO)
1512 {
1513 /* No int xx or into either. */
1514 fIllegalInstr = true;
1515 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1516 }
1517 }
1518
1519 pPatch->cbPatchBlockSize += pCpu->cbInstr;
1520
1521 /* Illegal instruction -> end of analysis phase for this code block */
1522 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1523 return VINF_SUCCESS;
1524
1525 /* Check for exit points. */
1526 switch (pCpu->pCurInstr->uOpcode)
1527 {
1528 case OP_SYSEXIT:
1529            return VINF_SUCCESS;    /* duplicate it; will fault or be emulated in GC. */
1530
1531 case OP_SYSENTER:
1532 case OP_ILLUD2:
1533            /* This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further. */
1534 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1535 return VINF_SUCCESS;
1536
1537 case OP_STI:
1538 case OP_POPF:
1539 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION)));
1540            /* If our exit point lies within the generated patch jump, then we have to refuse!! */
1541 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1542 {
1543 Log(("Exit point within patch jump itself!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1544 return VERR_PATCHING_REFUSED;
1545 }
1546 if (pPatch->opcode == OP_PUSHF)
1547 {
1548 if (pCpu->pCurInstr->uOpcode == OP_POPF)
1549 {
1550 if (pPatch->cbPatchBlockSize >= SIZEOF_NEARJUMP32)
1551 return VINF_SUCCESS;
1552
1553 /* Or else we need to duplicate more instructions, because we can't jump back yet! */
1554 Log(("WARNING: End of block reached, but we need to duplicate some extra instruction to avoid a conflict with the patch jump\n"));
1555 pPatch->flags |= PATMFL_CHECK_SIZE;
1556 }
1557 break; /* sti doesn't mark the end of a pushf block; only popf does. */
1558 }
1559 /* else: fall through. */
1560 case OP_RETN: /* exit point for function replacement */
1561 return VINF_SUCCESS;
1562
1563 case OP_IRET:
1564 return VINF_SUCCESS; /* exitpoint */
1565
1566 case OP_CPUID:
1567 case OP_CALL:
1568 case OP_JMP:
1569 break;
1570
1571#ifdef VBOX_WITH_SAFE_STR /** @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table */
1572 case OP_STR:
1573 break;
1574#endif
1575
1576 default:
1577 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
1578 {
1579 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1580 return VINF_SUCCESS; /* exit point */
1581 }
1582 break;
1583 }
1584
1585 /* If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump. */
1586 if ((pPatch->flags & PATMFL_CHECK_SIZE) && pPatch->cbPatchBlockSize > SIZEOF_NEARJUMP32 && !(pCpu->pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW))
1587 {
1588 /* The end marker for this kind of patch is any instruction at a location outside our patch jump. */
1589 Log(("End of block at %RRv size %d\n", pCurInstrGC, pCpu->cbInstr));
1590 return VINF_SUCCESS;
1591 }
1592
1593 return VWRN_CONTINUE_ANALYSIS;
1594}
1595
1596/**
1597 * Analyses the instructions inside a function for compliance
1598 *
1599 * @returns VBox status code.
1600 * @param pVM Pointer to the VM.
1601 * @param pCpu CPU disassembly state
1602 * @param pInstrGC Guest context pointer to privileged instruction
1603 * @param pCurInstrGC Guest context pointer to the current instruction
1604 * @param pCacheRec Cache record ptr
1605 *
1606 */
1607static int patmAnalyseFunctionCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1608{
1609 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1610 bool fIllegalInstr = false;
1611 NOREF(pInstrGC);
1612
1613    // Preliminary heuristics:
1614    // - no call instructions
1615    // - ret ends a block
1616
1617 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
1618
1619 // bail out if the patch gets too big
1620 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1621 {
1622 Log(("Code block too big (%x) for function patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1623 fIllegalInstr = true;
1624 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1625 }
1626 else
1627 {
1628 // no unconditional jumps or calls without fixed displacements
1629 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1630 && (pCpu->pCurInstr->uOpcode == OP_JMP || pCpu->pCurInstr->uOpcode == OP_CALL)
1631 )
1632 {
1633 Assert(pCpu->Param1.cb <= 4 || pCpu->Param1.cb == 6);
1634 if ( pCpu->Param1.cb == 6 /* far call/jmp */
1635 || (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1636 || (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1637 )
1638 {
1639 fIllegalInstr = true;
1640 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1641 }
1642 }
1643 else /* no far returns */
1644 if (pCpu->pCurInstr->uOpcode == OP_RETF)
1645 {
1646 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1647 fIllegalInstr = true;
1648 }
1649 else /* no int xx or into either */
1650 if (pCpu->pCurInstr->uOpcode == OP_INT3 || pCpu->pCurInstr->uOpcode == OP_INT || pCpu->pCurInstr->uOpcode == OP_INTO)
1651 {
1652 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1653 fIllegalInstr = true;
1654 }
1655
1656 #if 0
1657 ///@todo we can handle certain in/out and privileged instructions in the guest context
1658 if (pCpu->pCurInstr->fOpType & DISOPTYPE_PRIVILEGED && pCpu->pCurInstr->uOpcode != OP_STI)
1659 {
1660 Log(("Illegal instructions for function patch!!\n"));
1661 return VERR_PATCHING_REFUSED;
1662 }
1663 #endif
1664 }
1665
1666 pPatch->cbPatchBlockSize += pCpu->cbInstr;
1667
1668 /* Illegal instruction -> end of analysis phase for this code block */
1669 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1670 {
1671 return VINF_SUCCESS;
1672 }
1673
1674 // Check for exit points
1675 switch (pCpu->pCurInstr->uOpcode)
1676 {
1677 case OP_ILLUD2:
1678            // This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further.
1679 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1680 return VINF_SUCCESS;
1681
1682 case OP_IRET:
1683 case OP_SYSEXIT: /* will fault or emulated in GC */
1684 case OP_RETN:
1685 return VINF_SUCCESS;
1686
1687#ifdef VBOX_WITH_SAFE_STR /** @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table */
1688 case OP_STR:
1689 break;
1690#endif
1691
1692 case OP_POPF:
1693 case OP_STI:
1694 return VWRN_CONTINUE_ANALYSIS;
1695 default:
1696 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
1697 {
1698 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1699 return VINF_SUCCESS; /* exit point */
1700 }
1701 return VWRN_CONTINUE_ANALYSIS;
1702 }
1703
1704 return VWRN_CONTINUE_ANALYSIS;
1705}
1706
1707/**
1708 * Recompiles the instructions in a code block
1709 *
1710 * @returns VBox status code.
1711 * @param pVM Pointer to the VM.
1712 * @param pCpu CPU disassembly state
1713 * @param pInstrGC Guest context pointer to privileged instruction
1714 * @param pCurInstrGC Guest context pointer to the current instruction
1715 * @param pCacheRec Cache record ptr
1716 *
1717 */
1718static int patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1719{
1720 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1721 int rc = VINF_SUCCESS;
1722 bool fInhibitIRQInstr = false; /* did the instruction cause PATMFL_INHIBITIRQS to be set? */
1723
1724 LogFlow(("patmRecompileCallback %RRv %RRv\n", pInstrGC, pCurInstrGC));
1725
1726 if ( patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pCurInstrGC) != 0
1727 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT)) /* do not do this when the next instruction *must* be executed! */
1728 {
1729 /*
1730 * Been there, done that; so insert a jump (we don't want to duplicate code)
1731         * Been there, done that; so insert a jump instead (we don't want to duplicate code).
1732         * There's no need to record this instruction, as it's glue code that never crashes (it had better not!).
1733 Log(("patmRecompileCallback: jump to code we've recompiled before %RRv!\n", pCurInstrGC));
1734 return patmPatchGenRelJump(pVM, pPatch, pCurInstrGC, OP_JMP, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1735 }
1736
1737 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1738 {
1739 rc = patmAnalyseFunctionCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1740 }
1741 else
1742 rc = patmAnalyseBlockCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1743
1744 if (RT_FAILURE(rc))
1745 return rc;
1746
1747 /* Note: Never do a direct return unless a failure is encountered! */
1748
1749 /* Clear recompilation of next instruction flag; we are doing that right here. */
1750 if (pPatch->flags & PATMFL_RECOMPILE_NEXT)
1751 pPatch->flags &= ~PATMFL_RECOMPILE_NEXT;
1752
1753 /* Add lookup record for patch to guest address translation */
1754 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
1755
1756 /* Update lowest and highest instruction address for this patch */
1757 if (pCurInstrGC < pPatch->pInstrGCLowest)
1758 pPatch->pInstrGCLowest = pCurInstrGC;
1759 else
1760 if (pCurInstrGC > pPatch->pInstrGCHighest)
1761 pPatch->pInstrGCHighest = pCurInstrGC + pCpu->cbInstr;
1762
1763 /* Illegal instruction -> end of recompile phase for this code block. */
1764 if (patmIsIllegalInstr(pPatch, pCurInstrGC))
1765 {
1766 Log(("Illegal instruction at %RRv -> mark with int 3\n", pCurInstrGC));
1767 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1768 goto end;
1769 }
1770
1771 /* For our first attempt, we'll handle only simple relative jumps (immediate offset coded in instruction).
1772 * Indirect calls are handled below.
1773 */
1774 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1775 && (pCpu->pCurInstr->uOpcode != OP_CALL || (pPatch->flags & PATMFL_SUPPORT_CALLS))
1776 && (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) == OP_PARM_J))
1777 {
1778 RCPTRTYPE(uint8_t *) pTargetGC = PATMResolveBranch(pCpu, pCurInstrGC);
1779 if (pTargetGC == 0)
1780 {
1781 Log(("We don't support far jumps here!! (%08X)\n", pCpu->Param1.fUse));
1782 return VERR_PATCHING_REFUSED;
1783 }
1784
1785 if (pCpu->pCurInstr->uOpcode == OP_CALL)
1786 {
1787 Assert(!PATMIsPatchGCAddr(pVM, pTargetGC));
1788 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, pTargetGC, false);
1789 if (RT_FAILURE(rc))
1790 goto end;
1791 }
1792 else
1793 rc = patmPatchGenRelJump(pVM, pPatch, pTargetGC, pCpu->pCurInstr->uOpcode, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1794
1795 if (RT_SUCCESS(rc))
1796 rc = VWRN_CONTINUE_RECOMPILE;
1797
1798 goto end;
1799 }
1800
1801 switch (pCpu->pCurInstr->uOpcode)
1802 {
1803 case OP_CLI:
1804 {
1805 /* If a cli is found while duplicating instructions for another patch, then it's of vital importance to continue
1806 * until we've found the proper exit point(s).
1807 */
1808 if ( pCurInstrGC != pInstrGC
1809 && !(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1810 )
1811 {
1812 Log(("cli instruction found in other instruction patch block; force it to continue & find an exit point\n"));
1813 pPatch->flags &= ~(PATMFL_CHECK_SIZE | PATMFL_SINGLE_INSTRUCTION);
1814 }
1815 /* Set by irq inhibition; no longer valid now. */
1816 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1817
1818 rc = patmPatchGenCli(pVM, pPatch);
1819 if (RT_SUCCESS(rc))
1820 rc = VWRN_CONTINUE_RECOMPILE;
1821 break;
1822 }
1823
1824 case OP_MOV:
1825 if (pCpu->pCurInstr->fOpType & DISOPTYPE_POTENTIALLY_DANGEROUS)
1826 {
1827 /* mov ss, src? */
1828 if ( (pCpu->Param1.fUse & DISUSE_REG_SEG)
1829 && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS))
1830 {
1831 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1832 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1833 /** @todo this could cause a fault (ring 0 selector being loaded in ring 1) */
1834 }
1835#if 0 /* necessary for Haiku */
1836 else
1837 if ( (pCpu->Param2.fUse & DISUSE_REG_SEG)
1838 && (pCpu->Param2.Base.idxSegReg == USE_REG_SS)
1839 && (pCpu->Param1.fUse & (DISUSE_REG_GEN32|DISUSE_REG_GEN16))) /** @todo memory operand must in theory be handled too */
1840 {
1841 /* mov GPR, ss */
1842 rc = patmPatchGenMovFromSS(pVM, pPatch, pCpu, pCurInstrGC);
1843 if (RT_SUCCESS(rc))
1844 rc = VWRN_CONTINUE_RECOMPILE;
1845 break;
1846 }
1847#endif
1848 }
1849 goto duplicate_instr;
1850
1851 case OP_POP:
1852 /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS)) */
1853 if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_SS)
1854 {
1855 Assert(pCpu->pCurInstr->fOpType & DISOPTYPE_INHIBIT_IRQS);
1856
1857            Log(("Force recompilation of next instruction for OP_POP at %RRv\n", pCurInstrGC));
1858 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1859 }
1860 goto duplicate_instr;
1861
1862 case OP_STI:
1863 {
1864 RTRCPTR pNextInstrGC = 0; /* by default no inhibit irq */
1865
1866 /* In a sequence of instructions that inhibit irqs, only the first one actually inhibits irqs. */
1867 if (!(pPatch->flags & PATMFL_INHIBIT_IRQS))
1868 {
1869 pPatch->flags |= PATMFL_INHIBIT_IRQS | PATMFL_GENERATE_JUMPTOGUEST;
1870 fInhibitIRQInstr = true;
1871 pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
1872 Log(("Inhibit irqs for instruction OP_STI at %RRv\n", pCurInstrGC));
1873 }
1874 rc = patmPatchGenSti(pVM, pPatch, pCurInstrGC, pNextInstrGC);
1875
1876 if (RT_SUCCESS(rc))
1877 {
1878 DISCPUSTATE cpu = *pCpu;
1879 unsigned cbInstr;
1880 int disret;
1881 RCPTRTYPE(uint8_t *) pReturnInstrGC;
1882
1883 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1884
1885 pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
1886 { /* Force pNextInstrHC out of scope after using it */
1887 uint8_t *pNextInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
1888 if (pNextInstrHC == NULL)
1889 {
1890 AssertFailed();
1891 return VERR_PATCHING_REFUSED;
1892 }
1893
1894 // Disassemble the next instruction
1895 disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
1896 }
1897 if (disret == false)
1898 {
1899 AssertMsgFailed(("STI: Disassembly failed (probably page not present) -> return to caller\n"));
1900 return VERR_PATCHING_REFUSED;
1901 }
1902 pReturnInstrGC = pNextInstrGC + cbInstr;
1903
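                /* The sti can only be an exit point if the instruction following it ends outside the
                   5-byte region overwritten by the patch jump (or if this is a function duplication
                   patch, which doesn't use a patch jump); otherwise the sti came too soon. */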
1904 if ( (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1905 || pReturnInstrGC <= pInstrGC
1906 || pReturnInstrGC - pInstrGC >= SIZEOF_NEARJUMP32
1907 )
1908 {
1909 /* Not an exit point for function duplication patches */
1910 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
1911 && RT_SUCCESS(rc))
1912 {
1913 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST; /* Don't generate a jump back */
1914 rc = VWRN_CONTINUE_RECOMPILE;
1915 }
1916 else
1917 rc = VINF_SUCCESS; //exit point
1918 }
1919 else {
1920 Log(("PATM: sti occurred too soon; refusing patch!\n"));
1921 rc = VERR_PATCHING_REFUSED; //not allowed!!
1922 }
1923 }
1924 break;
1925 }
1926
1927 case OP_POPF:
1928 {
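            /* A jump back to the guest is only possible once we're past the bytes overwritten by the
               5-byte patch jump; otherwise we must keep recompiling. */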
1929 bool fGenerateJmpBack = (pCurInstrGC + pCpu->cbInstr - pInstrGC >= SIZEOF_NEARJUMP32);
1930
1931 /* Not an exit point for IDT handler or function replacement patches */
1932 /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
1933 if (pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_DUPLICATE_FUNCTION))
1934 fGenerateJmpBack = false;
1935
1936 rc = patmPatchGenPopf(pVM, pPatch, pCurInstrGC + pCpu->cbInstr, !!(pCpu->fPrefix & DISPREFIX_OPSIZE), fGenerateJmpBack);
1937 if (RT_SUCCESS(rc))
1938 {
1939 if (fGenerateJmpBack == false)
1940 {
1941 /* Not an exit point for IDT handler or function replacement patches */
1942 rc = VWRN_CONTINUE_RECOMPILE;
1943 }
1944 else
1945 {
1946 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1947 rc = VINF_SUCCESS; /* exit point! */
1948 }
1949 }
1950 break;
1951 }
1952
1953 case OP_PUSHF:
1954 rc = patmPatchGenPushf(pVM, pPatch, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1955 if (RT_SUCCESS(rc))
1956 rc = VWRN_CONTINUE_RECOMPILE;
1957 break;
1958
1959 case OP_PUSH:
1960        /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_CS)) */
1961 if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_CS)
1962 {
1963 rc = patmPatchGenPushCS(pVM, pPatch);
1964 if (RT_SUCCESS(rc))
1965 rc = VWRN_CONTINUE_RECOMPILE;
1966 break;
1967 }
1968 goto duplicate_instr;
1969
1970 case OP_IRET:
1971 Log(("IRET at %RRv\n", pCurInstrGC));
1972 rc = patmPatchGenIret(pVM, pPatch, pCurInstrGC, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1973 if (RT_SUCCESS(rc))
1974 {
1975 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1976 rc = VINF_SUCCESS; /* exit point by definition */
1977 }
1978 break;
1979
1980 case OP_ILLUD2:
1981            /* This appears to be some kind of kernel panic in Linux 2.4; no point in continuing. */
1982 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1983 if (RT_SUCCESS(rc))
1984 rc = VINF_SUCCESS; /* exit point by definition */
1985 Log(("Illegal opcode (0xf 0xb)\n"));
1986 break;
1987
1988 case OP_CPUID:
1989 rc = patmPatchGenCpuid(pVM, pPatch, pCurInstrGC);
1990 if (RT_SUCCESS(rc))
1991 rc = VWRN_CONTINUE_RECOMPILE;
1992 break;
1993
1994 case OP_STR:
1995#ifdef VBOX_WITH_SAFE_STR /* @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table and move OP_STR into #ifndef */
1996 /* Now safe because our shadow TR entry is identical to the guest's. */
1997 goto duplicate_instr;
1998#endif
1999 case OP_SLDT:
2000 rc = patmPatchGenSldtStr(pVM, pPatch, pCpu, pCurInstrGC);
2001 if (RT_SUCCESS(rc))
2002 rc = VWRN_CONTINUE_RECOMPILE;
2003 break;
2004
2005 case OP_SGDT:
2006 case OP_SIDT:
2007 rc = patmPatchGenSxDT(pVM, pPatch, pCpu, pCurInstrGC);
2008 if (RT_SUCCESS(rc))
2009 rc = VWRN_CONTINUE_RECOMPILE;
2010 break;
2011
2012 case OP_RETN:
2013 /* retn is an exit point for function patches */
2014 rc = patmPatchGenRet(pVM, pPatch, pCpu, pCurInstrGC);
2015 if (RT_SUCCESS(rc))
2016 rc = VINF_SUCCESS; /* exit point by definition */
2017 break;
2018
2019 case OP_SYSEXIT:
2020 /* Duplicate it, so it can be emulated in GC (or fault). */
2021 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
2022 if (RT_SUCCESS(rc))
2023 rc = VINF_SUCCESS; /* exit point by definition */
2024 break;
2025
2026 case OP_CALL:
2027 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
2028 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
2029 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
2030 */
2031 Assert(pCpu->Param1.cb == 4 || pCpu->Param1.cb == 6);
2032 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->Param1.cb == 4 /* no far calls! */)
2033 {
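                /* Indirect call: the real target is only known at run time, so pass a dummy address here;
                   the generated patch code looks up the actual target when executed. */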
2034 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, (RTRCPTR)0xDEADBEEF, true);
2035 if (RT_SUCCESS(rc))
2036 {
2037 rc = VWRN_CONTINUE_RECOMPILE;
2038 }
2039 break;
2040 }
2041 goto gen_illegal_instr;
2042
2043 case OP_JMP:
2044 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
2045 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
2046 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
2047 */
2048 Assert(pCpu->Param1.cb == 4 || pCpu->Param1.cb == 6);
2049 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->Param1.cb == 4 /* no far jumps! */)
2050 {
2051 rc = patmPatchGenJump(pVM, pPatch, pCpu, pCurInstrGC);
2052 if (RT_SUCCESS(rc))
2053 rc = VINF_SUCCESS; /* end of branch */
2054 break;
2055 }
2056 goto gen_illegal_instr;
2057
2058 case OP_INT3:
2059 case OP_INT:
2060 case OP_INTO:
2061 goto gen_illegal_instr;
2062
2063 case OP_MOV_DR:
2064            /* Note: currently we let DRx writes cause a #GP (trap 0d); our trap handler will decide whether to interpret it or not. */
2065 if (pCpu->pCurInstr->fParam2 == OP_PARM_Dd)
2066 {
2067 rc = patmPatchGenMovDebug(pVM, pPatch, pCpu);
2068 if (RT_SUCCESS(rc))
2069 rc = VWRN_CONTINUE_RECOMPILE;
2070 break;
2071 }
2072 goto duplicate_instr;
2073
2074 case OP_MOV_CR:
2075            /* Note: currently we let CRx writes cause a #GP (trap 0d); our trap handler will decide whether to interpret it or not. */
2076 if (pCpu->pCurInstr->fParam2 == OP_PARM_Cd)
2077 {
2078 rc = patmPatchGenMovControl(pVM, pPatch, pCpu);
2079 if (RT_SUCCESS(rc))
2080 rc = VWRN_CONTINUE_RECOMPILE;
2081 break;
2082 }
2083 goto duplicate_instr;
2084
2085 default:
2086 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_CONTROLFLOW | DISOPTYPE_PRIVILEGED_NOTRAP))
2087 {
2088gen_illegal_instr:
2089 rc = patmPatchGenIllegalInstr(pVM, pPatch);
2090 if (RT_SUCCESS(rc))
2091 rc = VINF_SUCCESS; /* exit point by definition */
2092 }
2093 else
2094 {
2095duplicate_instr:
2096 Log(("patmPatchGenDuplicate\n"));
2097 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
2098 if (RT_SUCCESS(rc))
2099 rc = VWRN_CONTINUE_RECOMPILE;
2100 }
2101 break;
2102 }
2103
2104end:
2105
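    /* IRQ inhibition only lasts for a single instruction. If it was set by a preceding instruction
       (i.e. not by the one just processed), clear it now and either jump back to the guest or emit
       code that clears the inhibit-IRQ state. */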
2106 if ( !fInhibitIRQInstr
2107 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2108 {
2109 int rc2;
2110 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
2111
2112 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2113 Log(("Clear inhibit IRQ flag at %RRv\n", pCurInstrGC));
2114 if (pPatch->flags & PATMFL_GENERATE_JUMPTOGUEST)
2115 {
2116 Log(("patmRecompileCallback: generate jump back to guest (%RRv) after fused instruction\n", pNextInstrGC));
2117
2118 rc2 = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2119 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
2120 rc = VINF_SUCCESS; /* end of the line */
2121 }
2122 else
2123 {
2124 rc2 = patmPatchGenClearInhibitIRQ(pVM, pPatch, pNextInstrGC);
2125 }
2126 if (RT_FAILURE(rc2))
2127 rc = rc2;
2128 }
2129
2130 if (RT_SUCCESS(rc))
2131 {
2132 // If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
2133 if ( (pPatch->flags & PATMFL_CHECK_SIZE)
2134 && pCurInstrGC + pCpu->cbInstr - pInstrGC >= SIZEOF_NEARJUMP32
2135 && !(pCpu->pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW)
2136 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT) /* do not do this when the next instruction *must* be executed! */
2137 )
2138 {
2139 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
2140
2141 // The end marker for this kind of patch is any instruction at a location outside our patch jump
2142 Log(("patmRecompileCallback: end found for single instruction patch at %RRv cbInstr %d\n", pNextInstrGC, pCpu->cbInstr));
2143
2144 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC);
2145 AssertRC(rc);
2146 }
2147 }
2148 return rc;
2149}
2150
2151
2152#ifdef LOG_ENABLED
2153
2154/**
2155 * Adds a disasm jump record (temporary, to prevent duplicate analysis of the same jump target)
2156 *
2157 * @param pVM Pointer to the VM.
2158 * @param pPatch Patch structure ptr
2159 * @param pInstrGC Guest context pointer to privileged instruction
2160 *
2161 */
2162static void patmPatchAddDisasmJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
2163{
2164 PAVLPVNODECORE pRec;
2165
2166 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
2167 Assert(pRec);
2168 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
2169
2170 int ret = RTAvlPVInsert(&pPatch->pTempInfo->DisasmJumpTree, pRec);
2171 Assert(ret);
2172}
2173
2174/**
2175 * Checks if jump target has been analysed before.
2176 *
2177 * @returns true if the jump target has been analysed before, false otherwise.
2178 * @param pPatch Patch struct
2179 * @param pInstrGC Jump target
2180 *
2181 */
2182static bool patmIsKnownDisasmJump(PPATCHINFO pPatch, RTRCPTR pInstrGC)
2183{
2184 PAVLPVNODECORE pRec;
2185
2186 pRec = RTAvlPVGet(&pPatch->pTempInfo->DisasmJumpTree, (AVLPVKEY)(uintptr_t)pInstrGC);
2187 if (pRec)
2188 return true;
2189 return false;
2190}
2191
2192/**
2193 * For proper disassembly of the final patch block
2194 *
2195 * @returns VBox status code.
2196 * @param pVM Pointer to the VM.
2197 * @param pCpu CPU disassembly state
2198 * @param pInstrGC Guest context pointer to privileged instruction
2199 * @param pCurInstrGC Guest context pointer to the current instruction
2200 * @param pCacheRec Cache record ptr
2201 *
2202 */
2203int patmr3DisasmCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
2204{
2205 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2206 NOREF(pInstrGC);
2207
2208 if (pCpu->pCurInstr->uOpcode == OP_INT3)
2209 {
2210 /* Could be an int3 inserted in a call patch. Check to be sure */
2211 DISCPUSTATE cpu;
2212 RTRCPTR pOrgJumpGC;
2213
2214 pOrgJumpGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2215
2216 { /* Force pOrgJumpHC out of scope after using it */
2217 uint8_t *pOrgJumpHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pOrgJumpGC);
2218
2219 bool disret = patmR3DisInstr(pVM, pPatch, pOrgJumpGC, pOrgJumpHC, PATMREAD_ORGCODE, &cpu, NULL);
2220 if (!disret || cpu.pCurInstr->uOpcode != OP_CALL || cpu.Param1.cb != 4 /* only near calls */)
2221 return VINF_SUCCESS;
2222 }
2223 return VWRN_CONTINUE_ANALYSIS;
2224 }
2225
2226 if ( pCpu->pCurInstr->uOpcode == OP_ILLUD2
2227 && PATMIsPatchGCAddr(pVM, pCurInstrGC))
2228 {
2229        /* The indirect call patch contains a 0x0F 0x0B (ud2) illegal instruction to call for assistance; check for this and continue. */
2230 return VWRN_CONTINUE_ANALYSIS;
2231 }
2232
2233 if ( (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
2234 || pCpu->pCurInstr->uOpcode == OP_INT
2235 || pCpu->pCurInstr->uOpcode == OP_IRET
2236 || pCpu->pCurInstr->uOpcode == OP_RETN
2237 || pCpu->pCurInstr->uOpcode == OP_RETF
2238 )
2239 {
2240 return VINF_SUCCESS;
2241 }
2242
2243 if (pCpu->pCurInstr->uOpcode == OP_ILLUD2)
2244 return VINF_SUCCESS;
2245
2246 return VWRN_CONTINUE_ANALYSIS;
2247}
2248
2249
2250/**
2251 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2252 *
2253 * @returns VBox status code.
2254 * @param pVM Pointer to the VM.
2255 * @param pInstrGC Guest context pointer to the initial privileged instruction
2256 * @param pCurInstrGC Guest context pointer to the current instruction
2257 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2258 * @param pCacheRec Cache record ptr
2259 *
2260 */
2261int patmr3DisasmCode(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2262{
2263 DISCPUSTATE cpu;
2264 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2265 int rc = VWRN_CONTINUE_ANALYSIS;
2266 uint32_t cbInstr, delta;
2267 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2268 bool disret;
2269 char szOutput[256];
2270
2271 Assert(pCurInstrHC != PATCHCODE_PTR_HC(pPatch) || pPatch->pTempInfo->DisasmJumpTree == 0);
2272
2273 /* We need this to determine branch targets (and for disassembling). */
2274 delta = pVM->patm.s.pPatchMemGC - (uintptr_t)pVM->patm.s.pPatchMemHC;
2275
2276 while (rc == VWRN_CONTINUE_ANALYSIS)
2277 {
2278 pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2279 if (pCurInstrHC == NULL)
2280 {
2281 rc = VERR_PATCHING_REFUSED;
2282 goto end;
2283 }
2284
2285 disret = patmR3DisInstrToStr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_RAWCODE,
2286 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2287 if (PATMIsPatchGCAddr(pVM, pCurInstrGC))
2288 {
2289 RTRCPTR pOrgInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2290
2291 if (pOrgInstrGC != pPatch->pTempInfo->pLastDisasmInstrGC)
2292 Log(("DIS %RRv<-%s", pOrgInstrGC, szOutput));
2293 else
2294 Log(("DIS %s", szOutput));
2295
2296 pPatch->pTempInfo->pLastDisasmInstrGC = pOrgInstrGC;
2297 if (patmIsIllegalInstr(pPatch, pOrgInstrGC))
2298 {
2299 rc = VINF_SUCCESS;
2300 goto end;
2301 }
2302 }
2303 else
2304 Log(("DIS: %s", szOutput));
2305
2306 if (disret == false)
2307 {
2308 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2309 rc = VINF_SUCCESS;
2310 goto end;
2311 }
2312
2313 rc = pfnPATMR3Disasm(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2314 if (rc != VWRN_CONTINUE_ANALYSIS) {
2315 break; //done!
2316 }
2317
2318 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction) */
2319 if ( (cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2320 && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J)
2321 && cpu.pCurInstr->uOpcode != OP_CALL /* complete functions are replaced; don't bother here. */
2322 )
2323 {
2324 RTRCPTR pTargetGC = PATMResolveBranch(&cpu, pCurInstrGC);
2325 RTRCPTR pOrgTargetGC;
2326
2327 if (pTargetGC == 0)
2328 {
2329 Log(("We don't support far jumps here!! (%08X)\n", cpu.Param1.fUse));
2330 rc = VERR_PATCHING_REFUSED;
2331 break;
2332 }
2333
2334 if (!PATMIsPatchGCAddr(pVM, pTargetGC))
2335 {
2336 //jump back to guest code
2337 rc = VINF_SUCCESS;
2338 goto end;
2339 }
2340 pOrgTargetGC = PATMR3PatchToGCPtr(pVM, pTargetGC, 0);
2341
2342 if (patmIsCommonIDTHandlerPatch(pVM, pOrgTargetGC))
2343 {
2344 rc = VINF_SUCCESS;
2345 goto end;
2346 }
2347
2348 if (patmIsKnownDisasmJump(pPatch, pTargetGC) == false)
2349 {
2350 /* New jump, let's check it. */
2351 patmPatchAddDisasmJump(pVM, pPatch, pTargetGC);
2352
2353 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2354 rc = patmr3DisasmCode(pVM, pInstrGC, pTargetGC, pfnPATMR3Disasm, pCacheRec);
2355 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2356
2357 if (rc != VINF_SUCCESS) {
2358 break; //done!
2359 }
2360 }
2361 if (cpu.pCurInstr->uOpcode == OP_JMP)
2362 {
2363 /* Unconditional jump; return to caller. */
2364 rc = VINF_SUCCESS;
2365 goto end;
2366 }
2367
2368 rc = VWRN_CONTINUE_ANALYSIS;
2369 }
2370 pCurInstrGC += cbInstr;
2371 }
2372end:
2373 return rc;
2374}
2375
2376/**
2377 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2378 *
2379 * @returns VBox status code.
2380 * @param pVM Pointer to the VM.
2381 * @param pInstrGC Guest context pointer to the initial privileged instruction
2382 * @param pCurInstrGC Guest context pointer to the current instruction
2383 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2384 * @param pCacheRec Cache record ptr
2385 *
2386 */
2387int patmr3DisasmCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2388{
2389 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2390
2391 int rc = patmr3DisasmCode(pVM, pInstrGC, pCurInstrGC, pfnPATMR3Disasm, pCacheRec);
2392 /* Free all disasm jump records. */
2393 patmEmptyTree(pVM, &pPatch->pTempInfo->DisasmJumpTree);
2394 return rc;
2395}
2396
2397#endif /* LOG_ENABLED */
2398
2399/**
2400 * Detects whether the specified address falls within the 5-byte jump generated for an active patch.
2401 * If so, this patch is permanently disabled.
2402 *
2403 * @param pVM Pointer to the VM.
2404 * @param pInstrGC Guest context pointer to instruction
2405 * @param pConflictGC Guest context pointer to check
2406 *
2407 * @note also checks for patch hints to make sure they can never be enabled if a conflict is present.
2408 *
2409 */
2410VMMR3_INT_DECL(int) PATMR3DetectConflict(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictGC)
2411{
2412 AssertReturn(!HMIsEnabled(pVM), VERR_PATCH_NO_CONFLICT);
2413 PPATCHINFO pTargetPatch = patmFindActivePatchByEntrypoint(pVM, pConflictGC, true /* include patch hints */);
2414 if (pTargetPatch)
2415 return patmDisableUnusablePatch(pVM, pInstrGC, pConflictGC, pTargetPatch);
2416 return VERR_PATCH_NO_CONFLICT;
2417}
2418
2419/**
2420 * Recompiles the code stream until the callback function detects a failure or decides everything is acceptable
2421 *
2422 * @returns VBox status code.
2423 * @param pVM Pointer to the VM.
2424 * @param pInstrGC Guest context pointer to privileged instruction
2425 * @param pCurInstrGC Guest context pointer to the current instruction
2426 * @param pfnPATMR3Recompile Callback for testing the disassembled instruction
2427 * @param pCacheRec Cache record ptr
2428 *
2429 */
2430static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Recompile, PPATMP2GLOOKUPREC pCacheRec)
2431{
2432 DISCPUSTATE cpu;
2433 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2434 int rc = VWRN_CONTINUE_ANALYSIS;
2435 uint32_t cbInstr;
2436 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2437 bool disret;
2438#ifdef LOG_ENABLED
2439 char szOutput[256];
2440#endif
2441
2442 while (rc == VWRN_CONTINUE_RECOMPILE)
2443 {
2444 pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2445 if (pCurInstrHC == NULL)
2446 {
2447 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2448 goto end;
2449 }
2450#ifdef LOG_ENABLED
2451 disret = patmR3DisInstrToStr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE,
2452 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2453 Log(("Recompile: %s", szOutput));
2454#else
2455 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
2456#endif
2457 if (disret == false)
2458 {
2459 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2460
2461 /* Add lookup record for patch to guest address translation */
2462 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
2463 patmPatchGenIllegalInstr(pVM, pPatch);
2464 rc = VINF_SUCCESS; /* Note: don't fail here; we might refuse an important patch!! */
2465 goto end;
2466 }
2467
2468 rc = pfnPATMR3Recompile(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2469 if (rc != VWRN_CONTINUE_RECOMPILE)
2470 {
2471 /* If irqs are inhibited because of the current instruction, then we must make sure the next one is executed! */
2472 if ( rc == VINF_SUCCESS
2473 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2474 {
2475 DISCPUSTATE cpunext;
2476 uint32_t opsizenext;
2477 uint8_t *pNextInstrHC;
2478 RTRCPTR pNextInstrGC = pCurInstrGC + cbInstr;
2479
2480 Log(("patmRecompileCodeStream: irqs inhibited by instruction %RRv\n", pNextInstrGC));
2481
2482 /* Certain instructions (e.g. sti) force the next instruction to be executed before any interrupts can occur.
2483 * Recompile the next instruction as well
2484 */
2485 pNextInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
2486 if (pNextInstrHC == NULL)
2487 {
2488 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2489 goto end;
2490 }
2491 disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpunext, &opsizenext);
2492 if (disret == false)
2493 {
2494 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2495 goto end;
2496 }
2497 switch(cpunext.pCurInstr->uOpcode)
2498 {
2499 case OP_IRET: /* inhibit cleared in generated code */
2500 case OP_SYSEXIT: /* faults; inhibit should be cleared in HC handling */
2501 case OP_HLT:
2502 break; /* recompile these */
2503
2504 default:
2505 if (cpunext.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2506 {
2507 Log(("Unexpected control flow instruction after inhibit irq instruction\n"));
2508
2509 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2510 AssertRC(rc);
2511 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2512 goto end; /** @todo should be ok to ignore instruction fusing in this case */
2513 }
2514 break;
2515 }
2516
2517 /* Note: after a cli we must continue to a proper exit point */
2518 if (cpunext.pCurInstr->uOpcode != OP_CLI)
2519 {
2520 rc = pfnPATMR3Recompile(pVM, &cpunext, pInstrGC, pNextInstrGC, pCacheRec);
2521 if (RT_SUCCESS(rc))
2522 {
2523 rc = VINF_SUCCESS;
2524 goto end;
2525 }
2526 break;
2527 }
2528 else
2529 rc = VWRN_CONTINUE_RECOMPILE;
2530 }
2531 else
2532 break; /* done! */
2533 }
2534
2535 /** @todo continue with the instructions following the jump and then recompile the jump target code */
2536
2537
2538 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction). */
2539 if ( (cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2540 && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J)
2541 && cpu.pCurInstr->uOpcode != OP_CALL /* complete functions are replaced; don't bother here. */
2542 )
2543 {
2544 RCPTRTYPE(uint8_t *) addr = PATMResolveBranch(&cpu, pCurInstrGC);
2545 if (addr == 0)
2546 {
2547 Log(("We don't support far jumps here!! (%08X)\n", cpu.Param1.fUse));
2548 rc = VERR_PATCHING_REFUSED;
2549 break;
2550 }
2551
2552 Log(("Jump encountered target %RRv\n", addr));
2553
2554 /* We don't check if the branch target lies in a valid page as we've already done that in the analysis phase. */
2555 if (!(cpu.pCurInstr->fOpType & DISOPTYPE_UNCOND_CONTROLFLOW))
2556 {
2557 Log(("patmRecompileCodeStream continue passed conditional jump\n"));
2558 /* First we need to finish this linear code stream until the next exit point. */
2559 rc = patmRecompileCodeStream(pVM, pInstrGC, pCurInstrGC+cbInstr, pfnPATMR3Recompile, pCacheRec);
2560 if (RT_FAILURE(rc))
2561 {
2562 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2563 break; //fatal error
2564 }
2565 }
2566
2567 if (patmGuestGCPtrToPatchGCPtr(pVM, pPatch, addr) == 0)
2568 {
2569 /* New code; let's recompile it. */
2570 Log(("patmRecompileCodeStream continue with jump\n"));
2571
2572 /*
2573 * If we are jumping to an existing patch (or within 5 bytes of the entrypoint), then we must temporarily disable
2574 * this patch so we can continue our analysis
2575 *
2576 * We rely on CSAM to detect and resolve conflicts
2577 */
2578 PPATCHINFO pTargetPatch = patmFindActivePatchByEntrypoint(pVM, addr);
2579                if (pTargetPatch)
2580 {
2581 Log(("Found active patch at target %RRv (%RRv) -> temporarily disabling it!!\n", addr, pTargetPatch->pPrivInstrGC));
2582 PATMR3DisablePatch(pVM, pTargetPatch->pPrivInstrGC);
2583 }
2584
2585 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2586 rc = patmRecompileCodeStream(pVM, pInstrGC, addr, pfnPATMR3Recompile, pCacheRec);
2587 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2588
2589                if (pTargetPatch)
2590 PATMR3EnablePatch(pVM, pTargetPatch->pPrivInstrGC);
2591
2592 if (RT_FAILURE(rc))
2593 {
2594 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2595 break; //done!
2596 }
2597 }
2598 /* Always return to caller here; we're done! */
2599 rc = VINF_SUCCESS;
2600 goto end;
2601 }
2602 else
2603 if (cpu.pCurInstr->fOpType & DISOPTYPE_UNCOND_CONTROLFLOW)
2604 {
2605 rc = VINF_SUCCESS;
2606 goto end;
2607 }
2608 pCurInstrGC += cbInstr;
2609 }
2610end:
2611 Assert(!(pPatch->flags & PATMFL_RECOMPILE_NEXT));
2612 return rc;
2613}
2614
2615
2616/**
2617 * Generates the jump from guest to patch code.
2618 *
2619 * @returns VBox status code.
2620 * @param pVM Pointer to the VM.
2621 * @param pPatch Patch record
2622 * @param pCacheRec Guest translation lookup cache record
 * @param fAddFixup Whether to add a fixup record for the jump displacement (defaults to true)
2623 */
2624static int patmGenJumpToPatch(PVM pVM, PPATCHINFO pPatch, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2625{
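    /* Scratch buffer for the code bytes that will overwrite the original guest instruction(s). */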
2626 uint8_t temp[8];
2627 uint8_t *pPB;
2628 int rc;
2629
2630 Assert(pPatch->cbPatchJump <= sizeof(temp));
2631 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
2632
2633 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2634 Assert(pPB);
2635
2636#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
2637 if (pPatch->flags & PATMFL_JUMP_CONFLICT)
2638 {
2639 Assert(pPatch->pPatchJumpDestGC);
2640
2641 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
2642 {
2643 // jmp [PatchCode]
2644 if (fAddFixup)
2645 {
2646 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2647 {
2648 Log(("Relocation failed for the jump in the guest code!!\n"));
2649 return VERR_PATCHING_REFUSED;
2650 }
2651 }
2652
2653 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2654            *(uint32_t *)&temp[1] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump);    //relative displacement to the jump target
2655 }
2656 else
2657 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
2658 {
2659 // jmp [PatchCode]
2660 if (fAddFixup)
2661 {
2662 if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2663 {
2664 Log(("Relocation failed for the jump in the guest code!!\n"));
2665 return VERR_PATCHING_REFUSED;
2666 }
2667 }
2668
2669 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2670 temp[1] = pPatch->aPrivInstr[1]; //jump opcode copied from original instruction
2671            *(uint32_t *)&temp[2] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump);    //relative displacement to the jump target
2672 }
2673 else
2674 {
2675 Assert(0);
2676 return VERR_PATCHING_REFUSED;
2677 }
2678 }
2679 else
2680#endif
2681 {
2682 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2683
2684 // jmp [PatchCode]
2685 if (fAddFixup)
2686 {
2687 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
2688 {
2689 Log(("Relocation failed for the jump in the guest code!!\n"));
2690 return VERR_PATCHING_REFUSED;
2691 }
2692 }
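        /* Encode a near jmp rel32 (0xE9); the 32-bit operand is the displacement from the byte
           following the 5-byte jump to the start of the patch code. */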
2693 temp[0] = 0xE9; //jmp
2694        *(uint32_t *)&temp[1] = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);    //relative displacement to the patch code
2695 }
2696 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2697 AssertRC(rc);
2698
2699 if (rc == VINF_SUCCESS)
2700 pPatch->flags |= PATMFL_PATCHED_GUEST_CODE;
2701
2702 return rc;
2703}
2704
2705/**
2706 * Remove the jump from guest to patch code
2707 *
2708 * @returns VBox status code.
2709 * @param pVM Pointer to the VM.
2710 * @param pPatch Patch record
2711 */
2712static int patmRemoveJumpToPatch(PVM pVM, PPATCHINFO pPatch)
2713{
2714#ifdef DEBUG
2715 DISCPUSTATE cpu;
2716 char szOutput[256];
2717 uint32_t cbInstr, i = 0;
2718 bool disret;
2719
2720 while (i < pPatch->cbPrivInstr)
2721 {
2722 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
2723 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2724 if (disret == false)
2725 break;
2726
2727 Log(("Org patch jump: %s", szOutput));
2728 Assert(cbInstr);
2729 i += cbInstr;
2730 }
2731#endif
2732
2733 /* Restore original code (privileged instruction + following instructions that were overwritten because of the 5/6 byte jmp). */
2734 int rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, pPatch->cbPatchJump);
2735#ifdef DEBUG
2736 if (rc == VINF_SUCCESS)
2737 {
2738 i = 0;
2739 while (i < pPatch->cbPrivInstr)
2740 {
2741 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
2742 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2743 if (disret == false)
2744 break;
2745
2746 Log(("Org instr: %s", szOutput));
2747 Assert(cbInstr);
2748 i += cbInstr;
2749 }
2750 }
2751#endif
2752 pPatch->flags &= ~PATMFL_PATCHED_GUEST_CODE;
2753 return rc;
2754}
2755
2756/**
2757 * Generates the call/jump from guest to patch code.
2758 *
2759 * @returns VBox status code.
2760 * @param pVM Pointer to the VM.
2761 * @param pPatch Patch record
2762 * @param pTargetGC Guest context address of the patch code to call/jump to
2763 * @param pCacheRec Guest translation cache record
 * @param fAddFixup Whether to add a fixup record for the displacement (defaults to true)
2764 */
2765static int patmGenCallToPatch(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTargetGC, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2766{
2767 uint8_t temp[8];
2768 uint8_t *pPB;
2769 int rc;
2770
2771 Assert(pPatch->cbPatchJump <= sizeof(temp));
2772
2773 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2774 Assert(pPB);
2775
2776 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2777
2778 // jmp [PatchCode]
2779 if (fAddFixup)
2780 {
2781 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
2782 {
2783 Log(("Relocation failed for the jump in the guest code!!\n"));
2784 return VERR_PATCHING_REFUSED;
2785 }
2786 }
2787
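    /* Reuse the original near call/jmp opcode and merely repoint its 32-bit displacement at the patch code. */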
2788 Assert(pPatch->aPrivInstr[0] == 0xE8 || pPatch->aPrivInstr[0] == 0xE9); /* call or jmp */
2789 temp[0] = pPatch->aPrivInstr[0];
2790    *(uint32_t *)&temp[1] = (uint32_t)pTargetGC - ((uint32_t)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);    //relative displacement to the target
2791
2792 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2793 AssertRC(rc);
2794
2795 return rc;
2796}
2797
2798
2799/**
2800 * Patches a cli/sti or pushf/popf instruction block at the specified location.
2801 *
2802 * @returns VBox status code.
2803 * @param pVM Pointer to the VM.
2804 * @param pInstrGC Guest context pointer to privileged instruction
2805 * @param pInstrHC Host context pointer to privileged instruction
2806 * @param uOpcode Instruction opcode
2807 * @param uOpSize Size of starting instruction
2808 * @param pPatchRec Patch record
2809 *
2810 * @note returns failure if patching is not allowed or possible
2811 *
2812 */
2813static int patmR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2814 uint32_t uOpcode, uint32_t uOpSize, PPATMPATCHREC pPatchRec)
2815{
2816 PPATCHINFO pPatch = &pPatchRec->patch;
2817 int rc = VERR_PATCHING_REFUSED;
2818 uint32_t orgOffsetPatchMem = ~0;
2819 RTRCPTR pInstrStart;
2820 bool fInserted;
2821 NOREF(pInstrHC); NOREF(uOpSize);
2822
2823 /* Save original offset (in case of failures later on) */
2824 /** @todo use the hypervisor heap (that has quite a few consequences for save/restore though) */
2825 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2826
2827 Assert(!(pPatch->flags & (PATMFL_GUEST_SPECIFIC|PATMFL_USER_MODE|PATMFL_TRAPHANDLER)));
2828 switch (uOpcode)
2829 {
2830 case OP_MOV:
2831 break;
2832
2833 case OP_CLI:
2834 case OP_PUSHF:
2835 /* We can 'call' a cli or pushf patch. It will either return to the original guest code when IF is set again, or fault. */
2836 /* Note: special precautions are taken when disabling and enabling such patches. */
2837 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
2838 break;
2839
2840 default:
2841 if (!(pPatch->flags & PATMFL_IDTHANDLER))
2842 {
2843 AssertMsg(0, ("patmR3PatchBlock: Invalid opcode %x\n", uOpcode));
2844 return VERR_INVALID_PARAMETER;
2845 }
2846 }
2847
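    /* Patches that aren't entered via the IDT, sysenter or an int 3 replacement need a jump written
       into the guest code to transfer control to the patch. */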
2848 if (!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)))
2849 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
2850
2851 /* If we're going to insert a patch jump, then the jump itself is not allowed to cross a page boundary. */
2852 if ( (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2853 && PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + SIZEOF_NEARJUMP32)
2854 )
2855 {
2856 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
2857 Log(("Patch jump would cross page boundary -> refuse!!\n"));
2858 rc = VERR_PATCHING_REFUSED;
2859 goto failure;
2860 }
2861
2862 pPatch->nrPatch2GuestRecs = 0;
2863 pInstrStart = pInstrGC;
2864
2865#ifdef PATM_ENABLE_CALL
2866 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
2867#endif
2868
2869 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2870 pPatch->uCurPatchOffset = 0;
2871
2872 if ((pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER)) == PATMFL_IDTHANDLER)
2873 {
2874 Assert(pPatch->flags & PATMFL_INTHANDLER);
2875
2876 /* Install fake cli patch (to clear the virtual IF and check int xx parameters) */
2877 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2878 if (RT_FAILURE(rc))
2879 goto failure;
2880 }
2881
2882 /***************************************************************************************************************************/
2883 /* Note: We can't insert *any* code before a sysenter handler; some linux guests have an invalid stack at this point!!!!! */
2884 /***************************************************************************************************************************/
2885#ifdef VBOX_WITH_STATISTICS
2886 if (!(pPatch->flags & PATMFL_SYSENTER))
2887 {
2888 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2889 if (RT_FAILURE(rc))
2890 goto failure;
2891 }
2892#endif
2893
2894 PATMP2GLOOKUPREC cacheRec;
2895 RT_ZERO(cacheRec);
2896 cacheRec.pPatch = pPatch;
2897
2898 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, &cacheRec);
2899 /* Free leftover lock if any. */
2900 if (cacheRec.Lock.pvMap)
2901 {
2902 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2903 cacheRec.Lock.pvMap = NULL;
2904 }
2905 if (rc != VINF_SUCCESS)
2906 {
2907        Log(("patmR3PatchBlock: patmRecompileCodeStream failed with %d\n", rc));
2908 goto failure;
2909 }
2910
2911 /* Calculated during analysis. */
2912 if (pPatch->cbPatchBlockSize < SIZEOF_NEARJUMP32)
2913 {
2914 /* Most likely cause: we encountered an illegal instruction very early on. */
2915 /** @todo could turn it into an int3 callable patch. */
2916 Log(("patmR3PatchBlock: patch block too small -> refuse\n"));
2917 rc = VERR_PATCHING_REFUSED;
2918 goto failure;
2919 }
2920
2921    /* Size of the patch block: from here on this holds the amount of patch code actually emitted (during analysis it tracked the guest code scanned). */
2922 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2923
2924
2925 /* Update free pointer in patch memory. */
2926 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2927 /* Round to next 8 byte boundary. */
2928 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2929
2930 /*
2931 * Insert into patch to guest lookup tree
2932 */
2933 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2934 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2935 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2936    AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
2937 if (!fInserted)
2938 {
2939 rc = VERR_PATCHING_REFUSED;
2940 goto failure;
2941 }
2942
2943 /* Note that patmr3SetBranchTargets can install additional patches!! */
2944 rc = patmr3SetBranchTargets(pVM, pPatch);
2945 if (rc != VINF_SUCCESS)
2946 {
2947        Log(("patmR3PatchBlock: patmr3SetBranchTargets failed with %d\n", rc));
2948 goto failure;
2949 }
2950
2951#ifdef LOG_ENABLED
2952 Log(("Patch code ----------------------------------------------------------\n"));
2953 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, &cacheRec);
2954 /* Free leftover lock if any. */
2955 if (cacheRec.Lock.pvMap)
2956 {
2957 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2958 cacheRec.Lock.pvMap = NULL;
2959 }
2960 Log(("Patch code ends -----------------------------------------------------\n"));
2961#endif
2962
2963 /* make a copy of the guest code bytes that will be overwritten */
2964 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
2965
2966 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
2967 AssertRC(rc);
2968
2969 if (pPatch->flags & PATMFL_INT3_REPLACEMENT_BLOCK)
2970 {
2971 /*uint8_t bASMInt3 = 0xCC; - unused */
2972
2973 Log(("patmR3PatchBlock %RRv -> int 3 callable patch.\n", pPatch->pPrivInstrGC));
2974 /* Replace first opcode byte with 'int 3'. */
2975 rc = patmActivateInt3Patch(pVM, pPatch);
2976 if (RT_FAILURE(rc))
2977 goto failure;
2978
2979 /* normal patch can be turned into an int3 patch -> clear patch jump installation flag. */
2980 pPatch->flags &= ~PATMFL_MUST_INSTALL_PATCHJMP;
2981
2982 pPatch->flags &= ~PATMFL_INSTR_HINT;
2983 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
2984 }
2985 else
2986 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2987 {
2988 Assert(!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)));
2989 /* now insert a jump in the guest code */
2990 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
2991 AssertRC(rc);
2992 if (RT_FAILURE(rc))
2993 goto failure;
2994
2995 }
2996
2997 PATM_LOG_RAW_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
2998
2999 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3000 pPatch->pTempInfo->nrIllegalInstr = 0;
3001
3002 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
3003
3004 pPatch->uState = PATCH_ENABLED;
3005 return VINF_SUCCESS;
3006
3007failure:
3008 if (pPatchRec->CoreOffset.Key)
3009 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3010
3011 patmEmptyTree(pVM, &pPatch->FixupTree);
3012 pPatch->nrFixups = 0;
3013
3014 patmEmptyTree(pVM, &pPatch->JumpTree);
3015 pPatch->nrJumpRecs = 0;
3016
3017 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3018 pPatch->pTempInfo->nrIllegalInstr = 0;
3019
3020 /* Turn this cli patch into a dummy. */
3021 pPatch->uState = PATCH_REFUSED;
3022 pPatch->pPatchBlockOffset = 0;
3023
3024 // Give back the patch memory we no longer need
3025 Assert(orgOffsetPatchMem != (uint32_t)~0);
3026 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3027
3028 return rc;
3029}
3030
3031/**
3032 * Patch IDT handler
3033 *
3034 * @returns VBox status code.
3035 * @param pVM Pointer to the VM.
3036 * @param pInstrGC Guest context pointer to privileged instruction
3037 * @param uOpSize Size of starting instruction
3038 * @param pPatchRec Patch record
3039 * @param pCacheRec Cache record ptr
3040 *
3041 * @note returns failure if patching is not allowed or possible
3042 *
3043 */
3044static int patmIdtHandler(PVM pVM, RTRCPTR pInstrGC, uint32_t uOpSize, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3045{
3046 PPATCHINFO pPatch = &pPatchRec->patch;
3047 bool disret;
3048 DISCPUSTATE cpuPush, cpuJmp;
3049 uint32_t cbInstr;
3050 RTRCPTR pCurInstrGC = pInstrGC;
3051 uint8_t *pCurInstrHC, *pInstrHC;
3052 uint32_t orgOffsetPatchMem = ~0;
3053
3054 pInstrHC = pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
3055 AssertReturn(pCurInstrHC, VERR_PAGE_NOT_PRESENT);
3056
3057 /*
3058 * In Linux it's often the case that many interrupt handlers push a predefined value onto the stack
3059 * and then jump to a common entrypoint. In order not to waste a lot of memory, we will check for this
3060     * condition here and only patch the common entrypoint once.
3061 */
3062 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuPush, &cbInstr);
3063 Assert(disret);
3064 if (disret && cpuPush.pCurInstr->uOpcode == OP_PUSH)
3065 {
3066 RTRCPTR pJmpInstrGC;
3067 int rc;
3068 pCurInstrGC += cbInstr;
3069
3070 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuJmp, &cbInstr);
3071 if ( disret
3072 && cpuJmp.pCurInstr->uOpcode == OP_JMP
3073 && (pJmpInstrGC = PATMResolveBranch(&cpuJmp, pCurInstrGC))
3074 )
3075 {
3076 bool fInserted;
3077 PPATMPATCHREC pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
3078 if (pJmpPatch == 0)
3079 {
3080 /* Patch it first! */
3081 rc = PATMR3InstallPatch(pVM, pJmpInstrGC, pPatch->flags | PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT);
3082 if (rc != VINF_SUCCESS)
3083 goto failure;
3084 pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
3085 Assert(pJmpPatch);
3086 }
3087 if (pJmpPatch->patch.uState != PATCH_ENABLED)
3088 goto failure;
3089
3090 /* save original offset (in case of failures later on) */
3091 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3092
3093 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3094 pPatch->uCurPatchOffset = 0;
3095 pPatch->nrPatch2GuestRecs = 0;
3096
3097#ifdef VBOX_WITH_STATISTICS
3098 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3099 if (RT_FAILURE(rc))
3100 goto failure;
3101#endif
3102
3103 /* Install fake cli patch (to clear the virtual IF) */
3104 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
3105 if (RT_FAILURE(rc))
3106 goto failure;
3107
3108 /* Add lookup record for patch to guest address translation (for the push) */
3109 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pInstrGC, PATM_LOOKUP_BOTHDIR);
3110
3111 /* Duplicate push. */
3112 rc = patmPatchGenDuplicate(pVM, pPatch, &cpuPush, pInstrGC);
3113 if (RT_FAILURE(rc))
3114 goto failure;
3115
3116 /* Generate jump to common entrypoint. */
3117 rc = patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, PATCHCODE_PTR_GC(&pJmpPatch->patch));
3118 if (RT_FAILURE(rc))
3119 goto failure;
3120
3121 /* size of patch block */
3122 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3123
3124 /* Update free pointer in patch memory. */
3125 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3126 /* Round to next 8 byte boundary */
3127 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3128
3129 /* There's no jump from guest to patch code. */
3130 pPatch->cbPatchJump = 0;
3131
3132
3133#ifdef LOG_ENABLED
3134 Log(("Patch code ----------------------------------------------------------\n"));
3135 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3136 Log(("Patch code ends -----------------------------------------------------\n"));
3137#endif
3138 Log(("Successfully installed IDT handler patch at %RRv\n", pInstrGC));
3139
3140 /*
3141 * Insert into patch to guest lookup tree
3142 */
3143 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3144 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3145 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3146 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3147
3148 pPatch->uState = PATCH_ENABLED;
3149
3150 return VINF_SUCCESS;
3151 }
3152 }
3153failure:
3154 /* Give back the patch memory we no longer need */
3155 if (orgOffsetPatchMem != (uint32_t)~0)
3156 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3157
3158 return patmR3PatchBlock(pVM, pInstrGC, pInstrHC, OP_CLI, uOpSize, pPatchRec);
3159}
3160
3161/**
3162 * Install a trampoline to call a guest trap handler directly
3163 *
3164 * @returns VBox status code.
3165 * @param pVM Pointer to the VM.
3166 * @param pInstrGC Guest context pointer to privileged instruction
3167 * @param pPatchRec Patch record
3168 * @param pCacheRec Cache record ptr
3169 *
3170 */
3171static int patmInstallTrapTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3172{
3173 PPATCHINFO pPatch = &pPatchRec->patch;
3174 int rc = VERR_PATCHING_REFUSED;
3175 uint32_t orgOffsetPatchMem = ~0;
3176 bool fInserted;
3177
3178 // save original offset (in case of failures later on)
3179 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3180
3181 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3182 pPatch->uCurPatchOffset = 0;
3183 pPatch->nrPatch2GuestRecs = 0;
3184
3185#ifdef VBOX_WITH_STATISTICS
3186 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3187 if (RT_FAILURE(rc))
3188 goto failure;
3189#endif
3190
3191 rc = patmPatchGenTrapEntry(pVM, pPatch, pInstrGC);
3192 if (RT_FAILURE(rc))
3193 goto failure;
3194
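    /* The entire patch body is the sequence emitted by patmPatchGenTrapEntry above
       (plus the optional statistics code); no jump is written into guest code
       (cbPatchJump stays 0 below), so control presumably reaches this trampoline
       via the trap gate / IDT plumbing rather than by rewriting the guest handler
       itself. */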
3195 /* size of patch block */
3196 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3197
3198 /* Update free pointer in patch memory. */
3199 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3200 /* Round to next 8 byte boundary */
3201 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3202
3203 /* There's no jump from guest to patch code. */
3204 pPatch->cbPatchJump = 0;
3205
3206#ifdef LOG_ENABLED
3207 Log(("Patch code ----------------------------------------------------------\n"));
3208 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3209 Log(("Patch code ends -----------------------------------------------------\n"));
3210#endif
3211 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "TRAP handler");
3212 Log(("Successfully installed Trap Trampoline patch at %RRv\n", pInstrGC));
3213
3214 /*
3215 * Insert into patch to guest lookup tree
3216 */
3217 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3218 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3219 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3220 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3221
3222 pPatch->uState = PATCH_ENABLED;
3223 return VINF_SUCCESS;
3224
3225failure:
3226 AssertMsgFailed(("Failed to install trap handler trampoline!!\n"));
3227
3228 /* Turn this patch into a dummy. */
3229 pPatch->uState = PATCH_REFUSED;
3230 pPatch->pPatchBlockOffset = 0;
3231
3232 /* Give back the patch memory we no longer need */
3233 Assert(orgOffsetPatchMem != (uint32_t)~0);
3234 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3235
3236 return rc;
3237}
3238
3239
3240#ifdef LOG_ENABLED
3241/**
3242 * Check if the instruction is patched as a common idt handler
3243 *
3244 * @returns true or false
3245 * @param pVM Pointer to the VM.
3246 * @param pInstrGC Guest context pointer to the instruction
3247 *
3248 */
3249static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC)
3250{
3251 PPATMPATCHREC pRec;
3252
3253 pRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
3254 if (pRec && pRec->patch.flags & PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT)
3255 return true;
3256 return false;
3257}
3258#endif /* LOG_ENABLED */
3259
3260
3261/**
3262 * Duplicates a complete function
3263 *
3264 * @returns VBox status code.
3265 * @param pVM Pointer to the VM.
3266 * @param pInstrGC Guest context pointer to privileged instruction
3267 * @param pPatchRec Patch record
3268 * @param pCacheRec Cache record ptr
3269 *
3270 */
3271static int patmDuplicateFunction(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3272{
3273 PPATCHINFO pPatch = &pPatchRec->patch;
3274 int rc = VERR_PATCHING_REFUSED;
3275 uint32_t orgOffsetPatchMem = ~0;
3276 bool fInserted;
3277
3278 Log(("patmDuplicateFunction %RRv\n", pInstrGC));
3279 /* Save original offset (in case of failures later on). */
3280 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3281
3282 /* We will not go on indefinitely with call instruction handling. */
3283 if (pVM->patm.s.ulCallDepth > PATM_MAX_CALL_DEPTH)
3284 {
3285 Log(("patmDuplicateFunction: maximum call depth reached!!\n"));
3286 return VERR_PATCHING_REFUSED;
3287 }
3288
3289 pVM->patm.s.ulCallDepth++;
3290
3291#ifdef PATM_ENABLE_CALL
3292 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
3293#endif
3294
3295 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
3296
3297 pPatch->nrPatch2GuestRecs = 0;
3298 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3299 pPatch->uCurPatchOffset = 0;
3300
3301 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3302 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3303 if (RT_FAILURE(rc))
3304 goto failure;
3305
3306#ifdef VBOX_WITH_STATISTICS
3307 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3308 if (RT_FAILURE(rc))
3309 goto failure;
3310#endif
3311
3312 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pCacheRec);
3313 if (rc != VINF_SUCCESS)
3314 {
3315 Log(("patmDuplicateFunction: patmRecompileCodeStream failed with %d\n", rc));
3316 goto failure;
3317 }
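    /* At this point the guest function has been recompiled into patch memory.
       Roughly: patmRecompileCodeStream walks the code from pInstrGC, emits a
       translated copy of each instruction into the patch block and records branch
       targets; patmr3SetBranchTargets (further down) resolves those targets and
       may install additional patches while doing so. */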
3318
3319 //size of patch block
3320 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3321
3322 //update free pointer in patch memory
3323 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3324 /* Round to next 8 byte boundary. */
3325 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3326
3327 pPatch->uState = PATCH_ENABLED;
3328
3329 /*
3330 * Insert into patch to guest lookup tree
3331 */
3332 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3333 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3334 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3335 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3336 if (!fInserted)
3337 {
3338 rc = VERR_PATCHING_REFUSED;
3339 goto failure;
3340 }
3341
3342 /* Note that patmr3SetBranchTargets can install additional patches!! */
3343 rc = patmr3SetBranchTargets(pVM, pPatch);
3344 if (rc != VINF_SUCCESS)
3345 {
3346 Log(("patmDuplicateFunction: patmr3SetBranchTargets failed with %d\n", rc));
3347 goto failure;
3348 }
3349
3350#ifdef LOG_ENABLED
3351 Log(("Patch code ----------------------------------------------------------\n"));
3352 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3353 Log(("Patch code ends -----------------------------------------------------\n"));
3354#endif
3355
3356 Log(("Successfully installed function duplication patch at %RRv\n", pInstrGC));
3357
3358 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3359 pPatch->pTempInfo->nrIllegalInstr = 0;
3360
3361 pVM->patm.s.ulCallDepth--;
3362 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledFunctionPatches);
3363 return VINF_SUCCESS;
3364
3365failure:
3366 if (pPatchRec->CoreOffset.Key)
3367 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3368
3369 patmEmptyTree(pVM, &pPatch->FixupTree);
3370 pPatch->nrFixups = 0;
3371
3372 patmEmptyTree(pVM, &pPatch->JumpTree);
3373 pPatch->nrJumpRecs = 0;
3374
3375 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3376 pPatch->pTempInfo->nrIllegalInstr = 0;
3377
3378 /* Turn this patch into a dummy. */
3379 pPatch->uState = PATCH_REFUSED;
3380 pPatch->pPatchBlockOffset = 0;
3381
3382 // Give back the patch memory we no longer need
3383 Assert(orgOffsetPatchMem != (uint32_t)~0);
3384 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3385
3386 pVM->patm.s.ulCallDepth--;
3387 Log(("patmDuplicateFunction %RRv failed!!\n", pInstrGC));
3388 return rc;
3389}
3390
3391/**
3392 * Creates trampoline code to jump inside an existing patch
3393 *
3394 * @returns VBox status code.
3395 * @param pVM Pointer to the VM.
3396 * @param pInstrGC Guest context pointer to privileged instruction
3397 * @param pPatchRec Patch record
3398 *
3399 */
3400static int patmCreateTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3401{
3402 PPATCHINFO pPatch = &pPatchRec->patch;
3403 RTRCPTR pPage, pPatchTargetGC = 0;
3404 uint32_t orgOffsetPatchMem = ~0;
3405 int rc = VERR_PATCHING_REFUSED;
3406 PPATCHINFO pPatchToJmp = NULL; /**< Patch the trampoline jumps to. */
3407 PTRAMPREC pTrampRec = NULL; /**< Trampoline record used to find the patch. */
3408 bool fInserted = false;
3409
3410 Log(("patmCreateTrampoline %RRv\n", pInstrGC));
3411 /* Save original offset (in case of failures later on). */
3412 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3413
3414 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3415 /** @todo we already checked this before */
3416 pPage = pInstrGC & PAGE_BASE_GC_MASK;
3417
3418 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3419 if (pPatchPage)
3420 {
3421 uint32_t i;
3422
3423 for (i=0;i<pPatchPage->cCount;i++)
3424 {
3425 if (pPatchPage->papPatch[i])
3426 {
3427 pPatchToJmp = pPatchPage->papPatch[i];
3428
3429 if ( (pPatchToJmp->flags & PATMFL_DUPLICATE_FUNCTION)
3430 && pPatchToJmp->uState == PATCH_ENABLED)
3431 {
3432 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatchToJmp, pInstrGC);
3433 if (pPatchTargetGC)
3434 {
3435 uint32_t offsetPatch = pPatchTargetGC - pVM->patm.s.pPatchMemGC;
3436 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatchToJmp->Patch2GuestAddrTree, offsetPatch, false);
3437 Assert(pPatchToGuestRec);
3438
3439 pPatchToGuestRec->fJumpTarget = true;
3440 Assert(pPatchTargetGC != pPatchToJmp->pPrivInstrGC);
3441 Log(("patmCreateTrampoline: generating jump to code inside patch at %RRv (patch target %RRv)\n", pPatchToJmp->pPrivInstrGC, pPatchTargetGC));
3442 break;
3443 }
3444 }
3445 }
3446 }
3447 }
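    /* A trampoline only makes sense if pInstrGC already lives inside an enabled
       function duplicate; pPatchTargetGC is then the corresponding address in
       patch memory, and the code generated below is essentially just "set the
       PATM interrupt flag and jump to pPatchTargetGC". */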
3448 AssertReturn(pPatchPage && pPatchTargetGC && pPatchToJmp, VERR_PATCHING_REFUSED);
3449
3450 /*
3451 * Only record the trampoline patch if this is the first patch to the target
3452 * or we recorded other patches already.
3453 * The goal is to refuse refreshing function duplicates if the guest
3454 * modifies code after a saved state was loaded because it is not possible
3455 * to save the relation between trampoline and target without changing the
3456 * saved state version.
3457 */
3458 if ( !(pPatchToJmp->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
3459 || pPatchToJmp->pTrampolinePatchesHead)
3460 {
3461 pPatchToJmp->flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
3462 pTrampRec = (PTRAMPREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pTrampRec));
3463 if (!pTrampRec)
3464 return VERR_NO_MEMORY; /* or better return VERR_PATCHING_REFUSED to let the VM continue? */
3465
3466 pTrampRec->pPatchTrampoline = pPatchRec;
3467 }
3468
3469 pPatch->nrPatch2GuestRecs = 0;
3470 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3471 pPatch->uCurPatchOffset = 0;
3472
3473 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3474 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3475 if (RT_FAILURE(rc))
3476 goto failure;
3477
3478#ifdef VBOX_WITH_STATISTICS
3479 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3480 if (RT_FAILURE(rc))
3481 goto failure;
3482#endif
3483
3484 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC);
3485 if (RT_FAILURE(rc))
3486 goto failure;
3487
3488 /*
3489 * Insert into patch to guest lookup tree
3490 */
3491 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3492 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3493 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3494 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3495 if (!fInserted)
3496 {
3497 rc = VERR_PATCHING_REFUSED;
3498 goto failure;
3499 }
3500
3501 /* size of patch block */
3502 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3503
3504 /* Update free pointer in patch memory. */
3505 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3506 /* Round to next 8 byte boundary */
3507 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3508
3509 /* There's no jump from guest to patch code. */
3510 pPatch->cbPatchJump = 0;
3511
3512 /* Enable the patch. */
3513 pPatch->uState = PATCH_ENABLED;
3514 /* We allow this patch to be called as a function. */
3515 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
3516
3517 if (pTrampRec)
3518 {
3519 pTrampRec->pNext = pPatchToJmp->pTrampolinePatchesHead;
3520 pPatchToJmp->pTrampolinePatchesHead = pTrampRec;
3521 }
3522 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledTrampoline);
3523 return VINF_SUCCESS;
3524
3525failure:
3526 if (pPatchRec->CoreOffset.Key)
3527 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3528
3529 patmEmptyTree(pVM, &pPatch->FixupTree);
3530 pPatch->nrFixups = 0;
3531
3532 patmEmptyTree(pVM, &pPatch->JumpTree);
3533 pPatch->nrJumpRecs = 0;
3534
3535 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3536 pPatch->pTempInfo->nrIllegalInstr = 0;
3537
3538 /* Turn this patch into a dummy. */
3539 pPatch->uState = PATCH_REFUSED;
3540 pPatch->pPatchBlockOffset = 0;
3541
3542 // Give back the patch memory we no longer need
3543 Assert(orgOffsetPatchMem != (uint32_t)~0);
3544 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3545
3546 if (pTrampRec)
3547 MMR3HeapFree(pTrampRec);
3548
3549 return rc;
3550}
3551
3552
3553/**
3554 * Patch branch target function for call/jump at specified location.
3555 * (in response to a VINF_PATM_DUPLICATE_FUNCTION GC exit reason)
3556 *
3557 * @returns VBox status code.
3558 * @param pVM Pointer to the VM.
3559 * @param pCtx Pointer to the guest CPU context.
3560 *
3561 */
3562VMMR3_INT_DECL(int) PATMR3DuplicateFunctionRequest(PVM pVM, PCPUMCTX pCtx)
3563{
3564 RTRCPTR pBranchTarget, pPage;
3565 int rc;
3566 RTRCPTR pPatchTargetGC = 0;
3567 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
3568
3569 pBranchTarget = pCtx->edx;
3570 pBranchTarget = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pBranchTarget);
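    /* Register based interface with the patch code that raised this request (as
       used by this function): edx holds the branch target (flattened against CS
       above), edi holds the patch memory address of the lookup cache slot to
       update, and eax is set on return to the patch address relative to the patch
       memory base, or 0 if no duplicate could be found or created. */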
3571
3572 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3573 pPage = pBranchTarget & PAGE_BASE_GC_MASK;
3574
3575 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3576 if (pPatchPage)
3577 {
3578 uint32_t i;
3579
3580 for (i=0;i<pPatchPage->cCount;i++)
3581 {
3582 if (pPatchPage->papPatch[i])
3583 {
3584 PPATCHINFO pPatch = pPatchPage->papPatch[i];
3585
3586 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
3587 && pPatch->uState == PATCH_ENABLED)
3588 {
3589 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pBranchTarget);
3590 if (pPatchTargetGC)
3591 {
3592 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateUseExisting);
3593 break;
3594 }
3595 }
3596 }
3597 }
3598 }
3599
3600 if (pPatchTargetGC)
3601 {
3602 /* Create a trampoline that also sets PATM_INTERRUPTFLAG. */
3603 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_TRAMPOLINE);
3604 }
3605 else
3606 {
3607 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
3608 }
3609
3610 if (rc == VINF_SUCCESS)
3611 {
3612 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pBranchTarget);
3613 Assert(pPatchTargetGC);
3614 }
3615
3616 if (pPatchTargetGC)
3617 {
3618 pCtx->eax = pPatchTargetGC;
3619 pCtx->eax = pCtx->eax - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC; /* make it relative */
3620 }
3621 else
3622 {
3623 /* We add a dummy entry into the lookup cache so we won't get bombarded with the same requests over and over again. */
3624 pCtx->eax = 0;
3625 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQFailed);
3626 }
3627 Assert(PATMIsPatchGCAddr(pVM, pCtx->edi));
3628 rc = patmAddBranchToLookupCache(pVM, pCtx->edi, pBranchTarget, pCtx->eax);
3629 AssertRC(rc);
3630
3631 pCtx->eip += PATM_ILLEGAL_INSTR_SIZE;
3632 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQSuccess);
3633 return VINF_SUCCESS;
3634}
3635
3636/**
3637 * Replaces a function call by a call to an existing function duplicate (or jmp -> jmp)
3638 *
3639 * @returns VBox status code.
3640 * @param pVM Pointer to the VM.
3641 * @param pCpu Disassembly CPU structure ptr
3642 * @param pInstrGC Guest context pointer to privileged instruction
3643 * @param pCacheRec Cache record ptr
3644 *
3645 */
3646static int patmReplaceFunctionCall(PVM pVM, DISCPUSTATE *pCpu, RTRCPTR pInstrGC, PPATMP2GLOOKUPREC pCacheRec)
3647{
3648 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3649 int rc = VERR_PATCHING_REFUSED;
3650 DISCPUSTATE cpu;
3651 RTRCPTR pTargetGC;
3652 PPATMPATCHREC pPatchFunction;
3653 uint32_t cbInstr;
3654 bool disret;
3655
3656 Assert(pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL);
3657 Assert((pCpu->pCurInstr->uOpcode == OP_CALL || pCpu->pCurInstr->uOpcode == OP_JMP) && pCpu->cbInstr == SIZEOF_NEARJUMP32);
3658
3659 if ((pCpu->pCurInstr->uOpcode != OP_CALL && pCpu->pCurInstr->uOpcode != OP_JMP) || pCpu->cbInstr != SIZEOF_NEARJUMP32)
3660 {
3661 rc = VERR_PATCHING_REFUSED;
3662 goto failure;
3663 }
3664
3665 pTargetGC = PATMResolveBranch(pCpu, pInstrGC);
3666 if (pTargetGC == 0)
3667 {
3668 Log(("We don't support far jumps here!! (%08X)\n", pCpu->Param1.fUse));
3669 rc = VERR_PATCHING_REFUSED;
3670 goto failure;
3671 }
3672
3673 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3674 if (pPatchFunction == NULL)
3675 {
3676 for(;;)
3677 {
3678 /* It could be an indirect call (call -> jmp dest).
3679 * Note that it's dangerous to assume the jump will never change...
3680 */
3681 uint8_t *pTmpInstrHC;
3682
3683 pTmpInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pTargetGC);
3684 Assert(pTmpInstrHC);
3685 if (pTmpInstrHC == 0)
3686 break;
3687
3688 disret = patmR3DisInstr(pVM, pPatch, pTargetGC, pTmpInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
3689 if (disret == false || cpu.pCurInstr->uOpcode != OP_JMP)
3690 break;
3691
3692 pTargetGC = PATMResolveBranch(&cpu, pTargetGC);
3693 if (pTargetGC == 0)
3694 {
3695 break;
3696 }
3697
3698 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3699 break;
3700 }
3701 if (pPatchFunction == 0)
3702 {
3703 AssertMsgFailed(("Unable to find duplicate function %RRv\n", pTargetGC));
3704 rc = VERR_PATCHING_REFUSED;
3705 goto failure;
3706 }
3707 }
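    /* pPatchFunction now refers to an existing function duplicate. The original
       near call/jmp (always SIZEOF_NEARJUMP32 bytes, checked above) is rewritten
       in place below to target the duplicate in patch memory directly, so no
       separate trampoline block is needed for this case. */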
3708
3709 // make a copy of the guest code bytes that will be overwritten
3710 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
3711
3712 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3713 AssertRC(rc);
3714
3715 /* Now replace the original call in the guest code */
3716 rc = patmGenCallToPatch(pVM, pPatch, PATCHCODE_PTR_GC(&pPatchFunction->patch), pCacheRec, true);
3717 AssertRC(rc);
3718 if (RT_FAILURE(rc))
3719 goto failure;
3720
3721 /* Lowest and highest address for write monitoring. */
3722 pPatch->pInstrGCLowest = pInstrGC;
3723 pPatch->pInstrGCHighest = pInstrGC + pCpu->cbInstr;
3724 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "Call");
3725
3726 Log(("Successfully installed function replacement patch at %RRv\n", pInstrGC));
3727
3728 pPatch->uState = PATCH_ENABLED;
3729 return VINF_SUCCESS;
3730
3731failure:
3732 /* Turn this patch into a dummy. */
3733 pPatch->uState = PATCH_REFUSED;
3734
3735 return rc;
3736}
3737
3738/**
3739 * Replace the address in an MMIO instruction with the cached version.
3740 *
3741 * @returns VBox status code.
3742 * @param pVM Pointer to the VM.
3743 * @param pInstrGC Guest context pointer to privileged instruction
3744 * @param pCpu Disassembly CPU structure ptr
3745 * @param pCacheRec Cache record ptr
3746 *
3747 * @note returns failure if patching is not allowed or possible
3748 *
3749 */
3750static int patmPatchMMIOInstr(PVM pVM, RTRCPTR pInstrGC, DISCPUSTATE *pCpu, PPATMP2GLOOKUPREC pCacheRec)
3751{
3752 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3753 uint8_t *pPB;
3754 int rc = VERR_PATCHING_REFUSED;
3755
3756 Assert(pVM->patm.s.mmio.pCachedData);
3757 if (!pVM->patm.s.mmio.pCachedData)
3758 goto failure;
3759
3760 if (pCpu->Param2.fUse != DISUSE_DISPLACEMENT32)
3761 goto failure;
3762
3763 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
3764 if (pPB == 0)
3765 goto failure;
3766
3767 /* Add relocation record for cached data access. */
3768 if (patmPatchAddReloc32(pVM, pPatch, &pPB[pCpu->cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE, pPatch->pPrivInstrGC, pVM->patm.s.mmio.pCachedData) != VINF_SUCCESS)
3769 {
3770 Log(("Relocation failed for cached mmio address!!\n"));
3771 return VERR_PATCHING_REFUSED;
3772 }
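    /* Only the last 4 bytes of the instruction are rewritten below, i.e. the
       32-bit displacement (Param2 was checked to be DISUSE_DISPLACEMENT32): the
       MMIO address is replaced with the address of the cached data so the access
       no longer has to trap, and the FIXUP_ABSOLUTE record keeps that address
       valid across relocation. */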
3773 PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "MMIO patch old instruction:", "");
3774
3775 /* Save original instruction. */
3776 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3777 AssertRC(rc);
3778
3779 pPatch->cbPatchJump = pPatch->cbPrivInstr; /* bit of a misnomer in this case; size of replacement instruction. */
3780
3781 /* Replace address with that of the cached item. */
3782 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pInstrGC + pCpu->cbInstr - sizeof(RTRCPTR), &pVM->patm.s.mmio.pCachedData, sizeof(RTRCPTR));
3783 AssertRC(rc);
3784 if (RT_FAILURE(rc))
3785 {
3786 goto failure;
3787 }
3788
3789 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "MMIO");
3790 pVM->patm.s.mmio.pCachedData = 0;
3791 pVM->patm.s.mmio.GCPhys = 0;
3792 pPatch->uState = PATCH_ENABLED;
3793 return VINF_SUCCESS;
3794
3795failure:
3796 /* Turn this patch into a dummy. */
3797 pPatch->uState = PATCH_REFUSED;
3798
3799 return rc;
3800}
3801
3802
3803/**
3804 * Replace the address in an MMIO instruction with the cached version. (instruction is part of an existing patch)
3805 *
3806 * @returns VBox status code.
3807 * @param pVM Pointer to the VM.
3808 * @param pInstrGC Guest context pointer to privileged instruction
3809 * @param pPatch Patch record
3810 *
3811 * @note returns failure if patching is not allowed or possible
3812 *
3813 */
3814static int patmPatchPATMMMIOInstr(PVM pVM, RTRCPTR pInstrGC, PPATCHINFO pPatch)
3815{
3816 DISCPUSTATE cpu;
3817 uint32_t cbInstr;
3818 bool disret;
3819 uint8_t *pInstrHC;
3820
3821 AssertReturn(pVM->patm.s.mmio.pCachedData, VERR_INVALID_PARAMETER);
3822
3823 /* Convert GC to HC address. */
3824 pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pInstrGC);
3825 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
3826
3827 /* Disassemble mmio instruction. */
3828 disret = patmR3DisInstrNoStrOpMode(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE,
3829 &cpu, &cbInstr);
3830 if (disret == false)
3831 {
3832 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
3833 return VERR_PATCHING_REFUSED;
3834 }
3835
3836 AssertMsg(cbInstr <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", cbInstr));
3837 if (cbInstr > MAX_INSTR_SIZE)
3838 return VERR_PATCHING_REFUSED;
3839 if (cpu.Param2.fUse != DISUSE_DISPLACEMENT32)
3840 return VERR_PATCHING_REFUSED;
3841
3842 /* Add relocation record for cached data access. */
3843 if (patmPatchAddReloc32(pVM, pPatch, &pInstrHC[cpu.cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE) != VINF_SUCCESS)
3844 {
3845 Log(("Relocation failed for cached mmio address!!\n"));
3846 return VERR_PATCHING_REFUSED;
3847 }
3848 /* Replace address with that of the cached item. */
3849 *(RTRCPTR *)&pInstrHC[cpu.cbInstr - sizeof(RTRCPTR)] = pVM->patm.s.mmio.pCachedData;
3850
3851 /* Lowest and highest address for write monitoring. */
3852 pPatch->pInstrGCLowest = pInstrGC;
3853 pPatch->pInstrGCHighest = pInstrGC + cpu.cbInstr;
3854
3855 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "MMIO");
3856 pVM->patm.s.mmio.pCachedData = 0;
3857 pVM->patm.s.mmio.GCPhys = 0;
3858 return VINF_SUCCESS;
3859}
3860
3861/**
3862 * Activates an int3 patch
3863 *
3864 * @returns VBox status code.
3865 * @param pVM Pointer to the VM.
3866 * @param pPatch Patch record
3867 */
3868static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3869{
3870 uint8_t bASMInt3 = 0xCC;
3871 int rc;
3872
3873 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3874 Assert(pPatch->uState != PATCH_ENABLED);
3875
3876 /* Replace first opcode byte with 'int 3'. */
3877 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, &bASMInt3, sizeof(bASMInt3));
3878 AssertRC(rc);
3879
3880 pPatch->cbPatchJump = sizeof(bASMInt3);
3881
3882 return rc;
3883}
3884
3885/**
3886 * Deactivates an int3 patch
3887 *
3888 * @returns VBox status code.
3889 * @param pVM Pointer to the VM.
3890 * @param pPatch Patch record
3891 */
3892static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3893{
3894 uint8_t ASMInt3 = 0xCC;
3895 int rc;
3896
3897 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3898 Assert(pPatch->uState == PATCH_ENABLED || pPatch->uState == PATCH_DIRTY);
3899
3900 /* Restore first opcode byte. */
3901 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, sizeof(ASMInt3));
3902 AssertRC(rc);
3903 return rc;
3904}
3905
3906/**
3907 * Replace an instruction with a breakpoint (0xCC), that is handled dynamically
3908 * in the raw-mode context.
3909 *
3910 * @returns VBox status code.
3911 * @param pVM Pointer to the VM.
3912 * @param pInstrGC Guest context pointer to privileged instruction
3913 * @param pInstrHC Host context pointer to privileged instruction
3914 * @param pCpu Disassembly CPU structure ptr
3915 * @param pPatch Patch record
3916 *
3917 * @note returns failure if patching is not allowed or possible
3918 *
3919 */
3920int patmR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATCHINFO pPatch)
3921{
3922 uint8_t bASMInt3 = 0xCC;
3923 int rc;
3924
3925 /* Note: Do not use patch memory here! It might be called during patch installation too. */
3926 PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "patmR3PatchInstrInt3:", "");
3927
3928 /* Save the original instruction. */
3929 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3930 AssertRC(rc);
3931 pPatch->cbPatchJump = sizeof(bASMInt3); /* bit of a misnomer in this case; size of replacement instruction. */
3932
3933 pPatch->flags |= PATMFL_INT3_REPLACEMENT;
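    /* Once patmActivateInt3Patch below has run, the guest sees 0xCC at
       pPrivInstrGC while aPrivInstr (saved above) keeps the bytes needed to undo
       the replacement; the resulting #BP is handled dynamically in the raw-mode
       context (see the function header), presumably by looking the faulting
       address up in the patch trees. */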
3934
3935 /* Replace first opcode byte with 'int 3'. */
3936 rc = patmActivateInt3Patch(pVM, pPatch);
3937 if (RT_FAILURE(rc))
3938 goto failure;
3939
3940 /* Lowest and highest address for write monitoring. */
3941 pPatch->pInstrGCLowest = pInstrGC;
3942 pPatch->pInstrGCHighest = pInstrGC + pCpu->cbInstr;
3943
3944 pPatch->uState = PATCH_ENABLED;
3945 return VINF_SUCCESS;
3946
3947failure:
3948 /* Turn this patch into a dummy. */
3949 return VERR_PATCHING_REFUSED;
3950}
3951
3952#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
3953/**
3954 * Patch a jump instruction at specified location
3955 *
3956 * @returns VBox status code.
3957 * @param pVM Pointer to the VM.
3958 * @param pInstrGC Guest context pointer to privileged instruction
3959 * @param pInstrHC Host context pointer to privileged instruction
3960 * @param pCpu Disassembly CPU structure ptr
3961 * @param pPatchRec Patch record
3962 *
3963 * @note returns failure if patching is not allowed or possible
3964 *
3965 */
3966int patmPatchJump(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATMPATCHREC pPatchRec)
3967{
3968 PPATCHINFO pPatch = &pPatchRec->patch;
3969 int rc = VERR_PATCHING_REFUSED;
3970
3971 pPatch->pPatchBlockOffset = 0; /* doesn't use patch memory */
3972 pPatch->uCurPatchOffset = 0;
3973 pPatch->cbPatchBlockSize = 0;
3974 pPatch->flags |= PATMFL_SINGLE_INSTRUCTION;
3975
3976 /*
3977 * Instruction replacements such as these should never be interrupted. I've added code to EM.cpp to
3978 * make sure this never happens. (unless a trap is triggered (intentionally or not))
3979 */
3980 switch (pCpu->pCurInstr->uOpcode)
3981 {
3982 case OP_JO:
3983 case OP_JNO:
3984 case OP_JC:
3985 case OP_JNC:
3986 case OP_JE:
3987 case OP_JNE:
3988 case OP_JBE:
3989 case OP_JNBE:
3990 case OP_JS:
3991 case OP_JNS:
3992 case OP_JP:
3993 case OP_JNP:
3994 case OP_JL:
3995 case OP_JNL:
3996 case OP_JLE:
3997 case OP_JNLE:
3998 case OP_JMP:
3999 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
4000 Assert(pCpu->Param1.fUse & DISUSE_IMMEDIATE32_REL);
4001 if (!(pCpu->Param1.fUse & DISUSE_IMMEDIATE32_REL))
4002 goto failure;
4003
4004 Assert(pCpu->cbInstr == SIZEOF_NEARJUMP32 || pCpu->cbInstr == SIZEOF_NEAR_COND_JUMP32);
4005 if (pCpu->cbInstr != SIZEOF_NEARJUMP32 && pCpu->cbInstr != SIZEOF_NEAR_COND_JUMP32)
4006 goto failure;
4007
4008 if (PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + pCpu->cbInstr))
4009 {
4010 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
4011 AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
4012 rc = VERR_PATCHING_REFUSED;
4013 goto failure;
4014 }
4015
4016 break;
4017
4018 default:
4019 goto failure;
4020 }
4021
4022 // make a copy of the guest code bytes that will be overwritten
4023 Assert(pCpu->cbInstr <= sizeof(pPatch->aPrivInstr));
4024 Assert(pCpu->cbInstr >= SIZEOF_NEARJUMP32);
4025 pPatch->cbPatchJump = pCpu->cbInstr;
4026
4027 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
4028 AssertRC(rc);
4029
4030 /* Now insert a jump in the guest code. */
4031 /*
4032 * A conflict jump patch needs to be treated differently; we'll just replace the relative jump address with one that
4033 * references the target instruction in the conflict patch.
4034 */
4035 RTRCPTR pJmpDest = patmR3GuestGCPtrToPatchGCPtrSimple(pVM, pInstrGC + pCpu->cbInstr + (int32_t)pCpu->Param1.uValue);
4036
4037 AssertMsg(pJmpDest, ("patmR3GuestGCPtrToPatchGCPtrSimple failed for %RRv\n", pInstrGC + pCpu->cbInstr + (int32_t)pCpu->Param1.uValue));
4038 pPatch->pPatchJumpDestGC = pJmpDest;
4039
4040 PATMP2GLOOKUPREC cacheRec;
4041 RT_ZERO(cacheRec);
4042 cacheRec.pPatch = pPatch;
4043
4044 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
4045 /* Free leftover lock if any. */
4046 if (cacheRec.Lock.pvMap)
4047 {
4048 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4049 cacheRec.Lock.pvMap = NULL;
4050 }
4051 AssertRC(rc);
4052 if (RT_FAILURE(rc))
4053 goto failure;
4054
4055 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
4056
4057 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
4058 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
4059
4060 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledJump);
4061
4062 /* Lowest and highest address for write monitoring. */
4063 pPatch->pInstrGCLowest = pInstrGC;
4064 pPatch->pInstrGCHighest = pInstrGC + pPatch->cbPatchJump;
4065
4066 pPatch->uState = PATCH_ENABLED;
4067 return VINF_SUCCESS;
4068
4069failure:
4070 /* Turn this patch into a dummy. */
4071 pPatch->uState = PATCH_REFUSED;
4072
4073 return rc;
4074}
4075#endif /* PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES */
4076
4077
4078/**
4079 * Gives hint to PATM about supervisor guest instructions
4080 *
4081 * @returns VBox status code.
4082 * @param pVM Pointer to the VM.
4083 * @param pInstrGC Guest context pointer to privileged instruction
4084 * @param flags Patch flags
4085 */
4086VMMR3_INT_DECL(int) PATMR3AddHint(PVM pVM, RTRCPTR pInstrGC, uint32_t flags)
4087{
4088 Assert(pInstrGC);
4089 Assert(flags == PATMFL_CODE32);
4090
4091 Log(("PATMR3AddHint %RRv\n", pInstrGC));
4092 return PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_INSTR_HINT);
4093}
4094
4095/**
4096 * Patch privileged instruction at specified location
4097 *
4098 * @returns VBox status code.
4099 * @param pVM Pointer to the VM.
4100 * @param pInstrGC Guest context pointer to privileged instruction (0:32 flat address)
4101 * @param flags Patch flags
4102 *
4103 * @note returns failure if patching is not allowed or possible
4104 */
4105VMMR3_INT_DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
4106{
4107 DISCPUSTATE cpu;
4108 R3PTRTYPE(uint8_t *) pInstrHC;
4109 uint32_t cbInstr;
4110 PPATMPATCHREC pPatchRec;
4111 PCPUMCTX pCtx = 0;
4112 bool disret;
4113 int rc;
4114 PVMCPU pVCpu = VMMGetCpu0(pVM);
4115 LogFlow(("PATMR3InstallPatch: %08x (%#llx)\n", pInstrGC, flags));
4116
4117 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
4118
4119 if ( !pVM
4120 || pInstrGC == 0
4121 || (flags & ~(PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_SYSENTER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_REPLACE_FUNCTION_CALL|PATMFL_GUEST_SPECIFIC|PATMFL_INT3_REPLACEMENT|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_MMIO_ACCESS|PATMFL_TRAMPOLINE|PATMFL_INSTR_HINT|PATMFL_JUMP_CONFLICT)))
4122 {
4123 AssertFailed();
4124 return VERR_INVALID_PARAMETER;
4125 }
4126
4127 if (PATMIsEnabled(pVM) == false)
4128 return VERR_PATCHING_REFUSED;
4129
4130 /* Test for patch conflict only with patches that actually change guest code. */
4131 if (!(flags & (PATMFL_GUEST_SPECIFIC|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAMPOLINE)))
4132 {
4133 PPATCHINFO pConflictPatch = patmFindActivePatchByEntrypoint(pVM, pInstrGC);
4134 AssertReleaseMsg(pConflictPatch == 0, ("Unable to patch overwritten instruction at %RRv (%RRv)\n", pInstrGC, pConflictPatch->pPrivInstrGC));
4135 if (pConflictPatch != 0)
4136 return VERR_PATCHING_REFUSED;
4137 }
4138
4139 if (!(flags & PATMFL_CODE32))
4140 {
4141 /** @todo Only 32 bits code right now */
4142 AssertMsgFailed(("PATMR3InstallPatch: We don't support 16 bits code at this moment!!\n"));
4143 return VERR_NOT_IMPLEMENTED;
4144 }
4145
4146 /* We ran out of patch memory; don't bother anymore. */
4147 if (pVM->patm.s.fOutOfMemory == true)
4148 return VERR_PATCHING_REFUSED;
4149
4150#if 0 /* DONT COMMIT ENABLED! */
4151 /* Blacklisted NT4SP1 areas - debugging why we sometimes crash early on, */
4152 if ( 0
4153 //|| (pInstrGC - 0x80010000U) < 0x10000U // NT4SP1 HAL
4154 //|| (pInstrGC - 0x80010000U) < 0x5000U // NT4SP1 HAL
4155 //|| (pInstrGC - 0x80013000U) < 0x2000U // NT4SP1 HAL
4156 //|| (pInstrGC - 0x80014000U) < 0x1000U // NT4SP1 HAL
4157 //|| (pInstrGC - 0x80014000U) < 0x800U // NT4SP1 HAL
4158 //|| (pInstrGC - 0x80014400U) < 0x400U // NT4SP1 HAL
4159 //|| (pInstrGC - 0x80014400U) < 0x200U // NT4SP1 HAL
4160 //|| (pInstrGC - 0x80014400U) < 0x100U // NT4SP1 HAL
4161 //|| (pInstrGC - 0x80014500U) < 0x100U // NT4SP1 HAL - negative
4162 //|| (pInstrGC - 0x80014400U) < 0x80U // NT4SP1 HAL
4163 //|| (pInstrGC - 0x80014400U) < 0x80U // NT4SP1 HAL
4164 //|| (pInstrGC - 0x80014440U) < 0x40U // NT4SP1 HAL
4165 //|| (pInstrGC - 0x80014440U) < 0x20U // NT4SP1 HAL
4166 || pInstrGC == 0x80014447 /* KfLowerIrql */
4167 || 0)
4168 {
4169 Log(("PATMR3InstallPatch: %08x is blacklisted\n", pInstrGC));
4170 return VERR_PATCHING_REFUSED;
4171 }
4172#endif
4173
4174 /* Make sure the code selector is wide open; otherwise refuse. */
4175 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
4176 if (CPUMGetGuestCPL(pVCpu) == 0)
4177 {
4178 RTRCPTR pInstrGCFlat = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pInstrGC);
4179 if (pInstrGCFlat != pInstrGC)
4180 {
4181 Log(("PATMR3InstallPatch: code selector not wide open: %04x:%RRv != %RRv eflags=%08x\n", pCtx->cs.Sel, pInstrGCFlat, pInstrGC, pCtx->eflags.u32));
4182 return VERR_PATCHING_REFUSED;
4183 }
4184 }
4185
4186 /* Note: the OpenBSD specific check will break if we allow additional patches to be installed (int 3). */
4187 if (!(flags & PATMFL_GUEST_SPECIFIC))
4188 {
4189 /* New code. Make sure CSAM has a go at it first. */
4190 CSAMR3CheckCode(pVM, pInstrGC);
4191 }
4192
4193 /* Note: obsolete */
4194 if ( PATMIsPatchGCAddr(pVM, pInstrGC)
4195 && (flags & PATMFL_MMIO_ACCESS))
4196 {
4197 RTRCUINTPTR offset;
4198 void *pvPatchCoreOffset;
4199
4200 /* Find the patch record. */
4201 offset = pInstrGC - pVM->patm.s.pPatchMemGC;
4202 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
4203 if (pvPatchCoreOffset == NULL)
4204 {
4205 AssertMsgFailed(("PATMR3InstallPatch: patch not found at address %RRv!!\n", pInstrGC));
4206 return VERR_PATCH_NOT_FOUND; //fatal error
4207 }
4208 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
4209
4210 return patmPatchPATMMMIOInstr(pVM, pInstrGC, &pPatchRec->patch);
4211 }
4212
4213 AssertReturn(!PATMIsPatchGCAddr(pVM, pInstrGC), VERR_PATCHING_REFUSED);
4214
4215 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4216 if (pPatchRec)
4217 {
4218 Assert(!(flags & PATMFL_TRAMPOLINE));
4219
4220 /* Hints about existing patches are ignored. */
4221 if (flags & PATMFL_INSTR_HINT)
4222 return VERR_PATCHING_REFUSED;
4223
4224 if (pPatchRec->patch.uState == PATCH_DISABLE_PENDING)
4225 {
4226 Log(("PATMR3InstallPatch: disable operation is pending for patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
4227 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
4228 Assert(pPatchRec->patch.uState == PATCH_DISABLED);
4229 }
4230
4231 if (pPatchRec->patch.uState == PATCH_DISABLED)
4232 {
4233 /* A patch, for which we previously received a hint, will be enabled and turned into a normal patch. */
4234 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4235 {
4236 Log(("Enabling HINTED patch %RRv\n", pInstrGC));
4237 pPatchRec->patch.flags &= ~PATMFL_INSTR_HINT;
4238 }
4239 else
4240 Log(("Enabling patch %RRv again\n", pInstrGC));
4241
4242 /** @todo we shouldn't disable and enable patches too often (it's relatively cheap, but pointless if it always happens) */
4243 rc = PATMR3EnablePatch(pVM, pInstrGC);
4244 if (RT_SUCCESS(rc))
4245 return VWRN_PATCH_ENABLED;
4246
4247 return rc;
4248 }
4249 if ( pPatchRec->patch.uState == PATCH_ENABLED
4250 || pPatchRec->patch.uState == PATCH_DIRTY)
4251 {
4252 /*
4253 * The patch might have been overwritten.
4254 */
4255 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
4256 if (pPatchRec->patch.uState != PATCH_REFUSED && pPatchRec->patch.uState != PATCH_UNUSABLE)
4257 {
4258 /* Patch must have been overwritten; remove it and pretend nothing happened. */
4259 Log(("Patch an existing patched instruction?!? (%RRv)\n", pInstrGC));
4260 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_MMIO_ACCESS|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
4261 {
4262 if (flags & PATMFL_IDTHANDLER)
4263 pPatchRec->patch.flags |= (flags & (PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER|PATMFL_INTHANDLER)); /* update the type */
4264
4265 return VERR_PATM_ALREADY_PATCHED; /* already done once */
4266 }
4267 }
4268 rc = PATMR3RemovePatch(pVM, pInstrGC);
4269 if (RT_FAILURE(rc))
4270 return VERR_PATCHING_REFUSED;
4271 }
4272 else
4273 {
4274 AssertMsg(pPatchRec->patch.uState == PATCH_REFUSED || pPatchRec->patch.uState == PATCH_UNUSABLE, ("Patch an existing patched instruction?!? (%RRv, state=%d)\n", pInstrGC, pPatchRec->patch.uState));
4275 /* already tried it once! */
4276 return VERR_PATCHING_REFUSED;
4277 }
4278 }
4279
4280 RTGCPHYS GCPhys;
4281 rc = PGMGstGetPage(pVCpu, pInstrGC, NULL, &GCPhys);
4282 if (rc != VINF_SUCCESS)
4283 {
4284 Log(("PGMGstGetPage failed with %Rrc\n", rc));
4285 return rc;
4286 }
4287 /* Disallow patching instructions inside ROM code; complete function duplication is allowed though. */
4288 if ( !(flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAMPOLINE))
4289 && !PGMPhysIsGCPhysNormal(pVM, GCPhys))
4290 {
4291 Log(("Code at %RGv (phys %RGp) is in a ROM, MMIO or invalid page - refused\n", pInstrGC, GCPhys));
4292 return VERR_PATCHING_REFUSED;
4293 }
4294
4295 /* Initialize cache record for guest address translations. */
4296 bool fInserted;
4297 PATMP2GLOOKUPREC cacheRec;
4298 RT_ZERO(cacheRec);
4299
4300 pInstrHC = patmR3GCVirtToHCVirt(pVM, &cacheRec, pInstrGC);
4301 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
4302
4303 /* Allocate patch record. */
4304 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
4305 if (RT_FAILURE(rc))
4306 {
4307 Log(("Out of memory!!!!\n"));
4308 return VERR_NO_MEMORY;
4309 }
4310 pPatchRec->Core.Key = pInstrGC;
4311 pPatchRec->patch.uState = PATCH_REFUSED; /* default value */
4312 /* Insert patch record into the lookup tree. */
4313 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
4314 Assert(fInserted);
4315
4316 pPatchRec->patch.pPrivInstrGC = pInstrGC;
4317 pPatchRec->patch.flags = flags;
4318 pPatchRec->patch.uOpMode = (flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT;
4319 pPatchRec->patch.pTrampolinePatchesHead = NULL;
4320
4321 pPatchRec->patch.pInstrGCLowest = pInstrGC;
4322 pPatchRec->patch.pInstrGCHighest = pInstrGC;
4323
4324 if (!(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION | PATMFL_IDTHANDLER | PATMFL_SYSENTER | PATMFL_TRAMPOLINE)))
4325 {
4326 /*
4327 * Close proximity to an unusable patch is a possible hint that this patch would turn out to be dangerous too!
4328 */
4329 PPATMPATCHREC pPatchNear = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, (pInstrGC + SIZEOF_NEARJUMP32 - 1), false);
4330 if (pPatchNear)
4331 {
4332 if (pPatchNear->patch.uState == PATCH_UNUSABLE && pInstrGC < pPatchNear->patch.pPrivInstrGC && pInstrGC + SIZEOF_NEARJUMP32 > pPatchNear->patch.pPrivInstrGC)
4333 {
4334 Log(("Dangerous patch; would overwrite the unusable patch at %RRv\n", pPatchNear->patch.pPrivInstrGC));
4335
4336 pPatchRec->patch.uState = PATCH_UNUSABLE;
4337 /*
4338 * Leave the new patch record in place, marked unusable, to prevent us from checking it over and over again.
4339 */
4340 return VERR_PATCHING_REFUSED;
4341 }
4342 }
4343 }
4344
4345 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
4346 if (pPatchRec->patch.pTempInfo == 0)
4347 {
4348 Log(("Out of memory!!!!\n"));
4349 return VERR_NO_MEMORY;
4350 }
4351
4352 disret = patmR3DisInstrNoStrOpMode(pVM, &pPatchRec->patch, pInstrGC, NULL, PATMREAD_ORGCODE, &cpu, &cbInstr);
4353 if (disret == false)
4354 {
4355 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
4356 return VERR_PATCHING_REFUSED;
4357 }
4358
4359 AssertMsg(cbInstr <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", cbInstr));
4360 if (cbInstr > MAX_INSTR_SIZE)
4361 return VERR_PATCHING_REFUSED;
4362
4363 pPatchRec->patch.cbPrivInstr = cbInstr;
4364 pPatchRec->patch.opcode = cpu.pCurInstr->uOpcode;
4365
4366 /* Restricted hinting for now. */
4367 Assert(!(flags & PATMFL_INSTR_HINT) || cpu.pCurInstr->uOpcode == OP_CLI);
4368
4369 /* Initialize cache record patch pointer. */
4370 cacheRec.pPatch = &pPatchRec->patch;
4371
4372 /* Allocate statistics slot */
4373 if (pVM->patm.s.uCurrentPatchIdx < PATM_STAT_MAX_COUNTERS)
4374 {
4375 pPatchRec->patch.uPatchIdx = pVM->patm.s.uCurrentPatchIdx++;
4376 }
4377 else
4378 {
4379 Log(("WARNING: Patch index wrap around!!\n"));
4380 pPatchRec->patch.uPatchIdx = PATM_STAT_INDEX_DUMMY;
4381 }
4382
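    /* Dispatch on the patch type. Summarizing the cases below: trap handlers get
       a trampoline, PATMFL_DUPLICATE_FUNCTION recompiles the whole function,
       PATMFL_TRAMPOLINE jumps into an existing duplicate, function call
       replacement and the int3/MMIO cases rewrite the guest instruction in
       place, IDT and sysenter handlers go through patmIdtHandler, and everything
       else ends up as either a patch block (cli/pushf) or an int3 replacement. */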
4383 if (pPatchRec->patch.flags & PATMFL_TRAPHANDLER)
4384 {
4385 rc = patmInstallTrapTrampoline(pVM, pInstrGC, pPatchRec, &cacheRec);
4386 }
4387 else
4388 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION ))
4389 {
4390 rc = patmDuplicateFunction(pVM, pInstrGC, pPatchRec, &cacheRec);
4391 }
4392 else
4393 if (pPatchRec->patch.flags & PATMFL_TRAMPOLINE)
4394 {
4395 rc = patmCreateTrampoline(pVM, pInstrGC, pPatchRec);
4396 }
4397 else
4398 if (pPatchRec->patch.flags & PATMFL_REPLACE_FUNCTION_CALL)
4399 {
4400 rc = patmReplaceFunctionCall(pVM, &cpu, pInstrGC, &cacheRec);
4401 }
4402 else
4403 if (pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT)
4404 {
4405 rc = patmR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4406 }
4407 else
4408 if (pPatchRec->patch.flags & PATMFL_MMIO_ACCESS)
4409 {
4410 rc = patmPatchMMIOInstr(pVM, pInstrGC, &cpu, &cacheRec);
4411 }
4412 else
4413 if (pPatchRec->patch.flags & (PATMFL_IDTHANDLER|PATMFL_SYSENTER))
4414 {
4415 if (pPatchRec->patch.flags & PATMFL_SYSENTER)
4416 pPatchRec->patch.flags |= PATMFL_IDTHANDLER; /* we treat a sysenter handler as an IDT handler */
4417
4418 rc = patmIdtHandler(pVM, pInstrGC, cbInstr, pPatchRec, &cacheRec);
4419#ifdef VBOX_WITH_STATISTICS
4420 if ( rc == VINF_SUCCESS
4421 && (pPatchRec->patch.flags & PATMFL_SYSENTER))
4422 {
4423 pVM->patm.s.uSysEnterPatchIdx = pPatchRec->patch.uPatchIdx;
4424 }
4425#endif
4426 }
4427 else
4428 if (pPatchRec->patch.flags & PATMFL_GUEST_SPECIFIC)
4429 {
4430 switch (cpu.pCurInstr->uOpcode)
4431 {
4432 case OP_SYSENTER:
4433 case OP_PUSH:
4434 rc = patmR3InstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4435 if (rc == VINF_SUCCESS)
4436 {
4438 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4439 return rc;
4440 }
4441 break;
4442
4443 default:
4444 rc = VERR_NOT_IMPLEMENTED;
4445 break;
4446 }
4447 }
4448 else
4449 {
4450 switch (cpu.pCurInstr->uOpcode)
4451 {
4452 case OP_SYSENTER:
4453 rc = patmR3InstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4454 if (rc == VINF_SUCCESS)
4455 {
4456 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4457 return VINF_SUCCESS;
4458 }
4459 break;
4460
4461#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
4462 case OP_JO:
4463 case OP_JNO:
4464 case OP_JC:
4465 case OP_JNC:
4466 case OP_JE:
4467 case OP_JNE:
4468 case OP_JBE:
4469 case OP_JNBE:
4470 case OP_JS:
4471 case OP_JNS:
4472 case OP_JP:
4473 case OP_JNP:
4474 case OP_JL:
4475 case OP_JNL:
4476 case OP_JLE:
4477 case OP_JNLE:
4478 case OP_JECXZ:
4479 case OP_LOOP:
4480 case OP_LOOPNE:
4481 case OP_LOOPE:
4482 case OP_JMP:
4483 if (pPatchRec->patch.flags & PATMFL_JUMP_CONFLICT)
4484 {
4485 rc = patmPatchJump(pVM, pInstrGC, pInstrHC, &cpu, pPatchRec);
4486 break;
4487 }
4488 return VERR_NOT_IMPLEMENTED;
4489#endif
4490
4491 case OP_PUSHF:
4492 case OP_CLI:
4493 Log(("PATMR3InstallPatch %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4494 rc = patmR3PatchBlock(pVM, pInstrGC, pInstrHC, cpu.pCurInstr->uOpcode, cbInstr, pPatchRec);
4495 break;
4496
4497#ifndef VBOX_WITH_SAFE_STR
4498 case OP_STR:
4499#endif
4500 case OP_SGDT:
4501 case OP_SLDT:
4502 case OP_SIDT:
4503 case OP_CPUID:
4504 case OP_LSL:
4505 case OP_LAR:
4506 case OP_SMSW:
4507 case OP_VERW:
4508 case OP_VERR:
4509 case OP_IRET:
4510#ifdef VBOX_WITH_RAW_RING1
4511 case OP_MOV:
4512#endif
4513 rc = patmR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4514 break;
4515
4516 default:
4517 return VERR_NOT_IMPLEMENTED;
4518 }
4519 }
4520
4521 if (rc != VINF_SUCCESS)
4522 {
4523 if (pPatchRec && pPatchRec->patch.nrPatch2GuestRecs)
4524 {
4525 patmEmptyTreeU32(pVM, &pPatchRec->patch.Patch2GuestAddrTree);
4526 pPatchRec->patch.nrPatch2GuestRecs = 0;
4527 }
4528 pVM->patm.s.uCurrentPatchIdx--;
4529 }
4530 else
4531 {
4532 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
4533 AssertRCReturn(rc, rc);
4534
4535 /* Keep track of the upper and lower boundaries of patched instructions. */
4536 if (pPatchRec->patch.pInstrGCLowest < pVM->patm.s.pPatchedInstrGCLowest)
4537 pVM->patm.s.pPatchedInstrGCLowest = pPatchRec->patch.pInstrGCLowest;
4538 if (pPatchRec->patch.pInstrGCHighest > pVM->patm.s.pPatchedInstrGCHighest)
4539 pVM->patm.s.pPatchedInstrGCHighest = pPatchRec->patch.pInstrGCHighest;
4540
4541 Log(("Patch lowest %RRv highest %RRv\n", pPatchRec->patch.pInstrGCLowest, pPatchRec->patch.pInstrGCHighest));
4542 Log(("Global lowest %RRv highest %RRv\n", pVM->patm.s.pPatchedInstrGCLowest, pVM->patm.s.pPatchedInstrGCHighest));
4543
4544 STAM_COUNTER_ADD(&pVM->patm.s.StatInstalled, 1);
4545 STAM_COUNTER_ADD(&pVM->patm.s.StatPATMMemoryUsed, pPatchRec->patch.cbPatchBlockSize);
4546
4547 rc = VINF_SUCCESS;
4548
4549 /* Patch hints are not enabled by default; only when they are actually encountered. */
4550 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4551 {
4552 rc = PATMR3DisablePatch(pVM, pInstrGC);
4553 AssertRCReturn(rc, rc);
4554 }
4555
4556#ifdef VBOX_WITH_STATISTICS
4557 /* Register statistics counter */
4558 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
4559 {
4560 STAMR3RegisterCallback(pVM, &pPatchRec->patch, STAMVISIBILITY_NOT_GUI, STAMUNIT_GOOD_BAD, patmResetStat, patmPrintStat, "Patch statistics",
4561 "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4562#ifndef DEBUG_sandervl
4563 /* Full breakdown for the GUI. */
4564 STAMR3RegisterF(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx], STAMTYPE_RATIO_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_GOOD_BAD, PATMPatchType(pVM, &pPatchRec->patch),
4565 "/PATM/Stats/PatchBD/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4566 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchBlockSize,STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchBlockSize", pPatchRec->patch.pPrivInstrGC);
4567 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchJump, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchJump", pPatchRec->patch.pPrivInstrGC);
4568 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPrivInstr, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPrivInstr", pPatchRec->patch.pPrivInstrGC);
4569 STAMR3RegisterF(pVM, &pPatchRec->patch.cCodeWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cCodeWrites", pPatchRec->patch.pPrivInstrGC);
4570 STAMR3RegisterF(pVM, &pPatchRec->patch.cInvalidWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cInvalidWrites", pPatchRec->patch.pPrivInstrGC);
4571 STAMR3RegisterF(pVM, &pPatchRec->patch.cTraps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cTraps", pPatchRec->patch.pPrivInstrGC);
4572 STAMR3RegisterF(pVM, &pPatchRec->patch.flags, STAMTYPE_X32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/flags", pPatchRec->patch.pPrivInstrGC);
4573 STAMR3RegisterF(pVM, &pPatchRec->patch.nrJumpRecs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrJumpRecs", pPatchRec->patch.pPrivInstrGC);
4574 STAMR3RegisterF(pVM, &pPatchRec->patch.nrFixups, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrFixups", pPatchRec->patch.pPrivInstrGC);
4575 STAMR3RegisterF(pVM, &pPatchRec->patch.opcode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/opcode", pPatchRec->patch.pPrivInstrGC);
4576 STAMR3RegisterF(pVM, &pPatchRec->patch.uOldState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOldState", pPatchRec->patch.pPrivInstrGC);
4577 STAMR3RegisterF(pVM, &pPatchRec->patch.uOpMode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOpMode", pPatchRec->patch.pPrivInstrGC);
4578 /// @todo change the state to be a callback so we can get a state mnemonic instead.
4579 STAMR3RegisterF(pVM, &pPatchRec->patch.uState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uState", pPatchRec->patch.pPrivInstrGC);
4580#endif
4581 }
4582#endif
4583 }
4584 /* Free leftover lock if any. */
4585 if (cacheRec.Lock.pvMap)
4586 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4587 return rc;
4588}
4589
4590/**
4591 * Query instruction size
4592 *
4593 * @returns Instruction size in bytes; 0 on failure.
4594 * @param pVM Pointer to the VM.
4595 * @param pPatch Patch record
4596 * @param pInstrGC Instruction address
4597 */
4598static uint32_t patmGetInstrSize(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
4599{
4600 uint8_t *pInstrHC;
4601 PGMPAGEMAPLOCK Lock;
4602
4603 int rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pInstrGC, (const void **)&pInstrHC, &Lock);
4604 if (rc == VINF_SUCCESS)
4605 {
4606 DISCPUSTATE cpu;
4607 bool disret;
4608 uint32_t cbInstr;
4609
4610 disret = patmR3DisInstr(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE | PATMREAD_NOCHECK, &cpu, &cbInstr);
4611 PGMPhysReleasePageMappingLock(pVM, &Lock);
4612 if (disret)
4613 return cbInstr;
4614 }
4615 return 0;
4616}
4617
4618/**
4619 * Add patch to page record
4620 *
4621 * @returns VBox status code.
4622 * @param pVM Pointer to the VM.
4623 * @param pPage Page address
4624 * @param pPatch Patch record
4625 */
4626int patmAddPatchToPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4627{
4628 PPATMPATCHPAGE pPatchPage;
4629 int rc;
4630
4631 Log(("patmAddPatchToPage: insert patch %RHv to page %RRv\n", pPatch, pPage));
4632
4633 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4634 if (pPatchPage)
4635 {
4636 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4637 if (pPatchPage->cCount == pPatchPage->cMaxPatches)
4638 {
4639 uint32_t cMaxPatchesOld = pPatchPage->cMaxPatches;
4640 PPATCHINFO *papPatchOld = pPatchPage->papPatch;
4641
4642 pPatchPage->cMaxPatches += PATMPATCHPAGE_PREALLOC_INCREMENT;
4643 rc = MMHyperAlloc(pVM, sizeof(pPatchPage->papPatch[0]) * pPatchPage->cMaxPatches, 0, MM_TAG_PATM_PATCH,
4644 (void **)&pPatchPage->papPatch);
4645 if (RT_FAILURE(rc))
4646 {
4647 Log(("Out of memory!!!!\n"));
4648 return VERR_NO_MEMORY;
4649 }
4650 memcpy(pPatchPage->papPatch, papPatchOld, cMaxPatchesOld * sizeof(pPatchPage->papPatch[0]));
4651 MMHyperFree(pVM, papPatchOld);
4652 }
4653 pPatchPage->papPatch[pPatchPage->cCount] = pPatch;
4654 pPatchPage->cCount++;
4655 }
4656 else
4657 {
4658 bool fInserted;
4659
4660 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHPAGE), 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage);
4661 if (RT_FAILURE(rc))
4662 {
4663 Log(("Out of memory!!!!\n"));
4664 return VERR_NO_MEMORY;
4665 }
4666 pPatchPage->Core.Key = pPage;
4667 pPatchPage->cCount = 1;
4668 pPatchPage->cMaxPatches = PATMPATCHPAGE_PREALLOC_INCREMENT;
4669
4670 rc = MMHyperAlloc(pVM, sizeof(pPatchPage->papPatch[0]) * PATMPATCHPAGE_PREALLOC_INCREMENT, 0, MM_TAG_PATM_PATCH,
4671 (void **)&pPatchPage->papPatch);
4672 if (RT_FAILURE(rc))
4673 {
4674 Log(("Out of memory!!!!\n"));
4675 MMHyperFree(pVM, pPatchPage);
4676 return VERR_NO_MEMORY;
4677 }
4678 pPatchPage->papPatch[0] = pPatch;
4679
4680 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, &pPatchPage->Core);
4681 Assert(fInserted);
4682 pVM->patm.s.cPageRecords++;
4683
4684 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageInserted);
4685 }
4686 CSAMR3MonitorPage(pVM, pPage, CSAM_TAG_PATM);
4687
4688 /* Get the closest guest instruction (from below) */
4689 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4690 Assert(pGuestToPatchRec);
4691 if (pGuestToPatchRec)
4692 {
4693 LogFlow(("patmAddPatchToPage: lowest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pLowestAddrGC));
4694 if ( pPatchPage->pLowestAddrGC == 0
4695 || pPatchPage->pLowestAddrGC > (RTRCPTR)pGuestToPatchRec->Core.Key)
4696 {
4697 RTRCUINTPTR offset;
4698
4699 pPatchPage->pLowestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4700
4701 offset = pPatchPage->pLowestAddrGC & PAGE_OFFSET_MASK;
4702 /* If we're too close to the page boundary, then make sure an
4703 instruction from the previous page doesn't cross the
4704 boundary itself. */
4705 if (offset && offset < MAX_INSTR_SIZE)
4706 {
4707 /* Get the closest guest instruction (from above) */
4708 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage-1, false);
4709
4710 if (pGuestToPatchRec)
4711 {
4712 uint32_t size = patmGetInstrSize(pVM, pPatch, (RTRCPTR)pGuestToPatchRec->Core.Key);
4713 if ((RTRCUINTPTR)pGuestToPatchRec->Core.Key + size > pPage)
4714 {
4715 pPatchPage->pLowestAddrGC = pPage;
4716 LogFlow(("patmAddPatchToPage: new lowest %RRv\n", pPatchPage->pLowestAddrGC));
4717 }
4718 }
4719 }
4720 }
4721 }
4722
4723 /* Get the closest guest instruction (from above) */
4724 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage+PAGE_SIZE-1, false);
4725 Assert(pGuestToPatchRec);
4726 if (pGuestToPatchRec)
4727 {
4728 LogFlow(("patmAddPatchToPage: highest patch page address %RRv current highest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pHighestAddrGC));
4729 if ( pPatchPage->pHighestAddrGC == 0
4730 || pPatchPage->pHighestAddrGC <= (RTRCPTR)pGuestToPatchRec->Core.Key)
4731 {
4732 pPatchPage->pHighestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4733 /* Increase by instruction size. */
4734 uint32_t size = patmGetInstrSize(pVM, pPatch, pPatchPage->pHighestAddrGC);
4735//// Assert(size);
4736 pPatchPage->pHighestAddrGC += size;
4737 LogFlow(("patmAddPatchToPage: new highest %RRv\n", pPatchPage->pHighestAddrGC));
4738 }
4739 }
4740
4741 return VINF_SUCCESS;
4742}
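
/*
 * Editor's note (worked example, not part of the original source): assume the
 * page being inserted is 0x80010000 and the lowest patched instruction inside
 * it starts at 0x80010002 (offset 2, i.e. below MAX_INSTR_SIZE). If the
 * closest patched instruction below the page starts at 0x8000fffe and is 4
 * bytes long, its last byte lands at 0x80010001 inside this page, so
 * pLowestAddrGC is clamped down to the page start 0x80010000. The mirrored
 * logic above extends pHighestAddrGC past the last patched instruction by
 * that instruction's size.
 */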
4743
4744/**
4745 * Remove patch from page record
4746 *
4747 * @returns VBox status code.
4748 * @param pVM Pointer to the VM.
4749 * @param pPage Page address
4750 * @param pPatch Patch record
4751 */
4752int patmRemovePatchFromPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4753{
4754 PPATMPATCHPAGE pPatchPage;
4755 int rc;
4756
4757 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4758 Assert(pPatchPage);
4759
4760 if (!pPatchPage)
4761 return VERR_INVALID_PARAMETER;
4762
4763 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4764
4765 Log(("patmRemovePatchPage: remove patch %RHv from page %RRv\n", pPatch, pPage));
4766 if (pPatchPage->cCount > 1)
4767 {
4768 uint32_t i;
4769
4770 /* Used by multiple patches */
4771 for (i = 0; i < pPatchPage->cCount; i++)
4772 {
4773 if (pPatchPage->papPatch[i] == pPatch)
4774 {
4775 /* close the gap between the remaining pointers. */
4776 uint32_t cNew = --pPatchPage->cCount;
4777 if (i < cNew)
4778 pPatchPage->papPatch[i] = pPatchPage->papPatch[cNew];
4779 pPatchPage->papPatch[cNew] = NULL;
4780 return VINF_SUCCESS;
4781 }
4782 }
4783 AssertMsgFailed(("Unable to find patch %RHv in page %RRv\n", pPatch, pPage));
4784 }
4785 else
4786 {
4787 PPATMPATCHPAGE pPatchNode;
4788
4789 Log(("patmRemovePatchFromPage %RRv\n", pPage));
4790
4791 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageRemoved);
4792 pPatchNode = (PPATMPATCHPAGE)RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4793 Assert(pPatchNode && pPatchNode == pPatchPage);
4794
4795 Assert(pPatchPage->papPatch);
4796 rc = MMHyperFree(pVM, pPatchPage->papPatch);
4797 AssertRC(rc);
4798 rc = MMHyperFree(pVM, pPatchPage);
4799 AssertRC(rc);
4800 pVM->patm.s.cPageRecords--;
4801 }
4802 return VINF_SUCCESS;
4803}
4804
4805/**
4806 * Insert page records for all guest pages that contain instructions that were recompiled for this patch
4807 *
4808 * @returns VBox status code.
4809 * @param pVM Pointer to the VM.
4810 * @param pPatch Patch record
4811 */
4812int patmInsertPatchPages(PVM pVM, PPATCHINFO pPatch)
4813{
4814 int rc;
4815 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4816
4817 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4818 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4819 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4820
4821 /** @todo optimize better (large gaps between current and next used page) */
4822 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4823 {
4824 /* Get the closest guest instruction (from above) */
4825 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4826 if ( pGuestToPatchRec
4827 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage)
4828 )
4829 {
4830 /* Code in page really patched -> add record */
4831 rc = patmAddPatchToPage(pVM, pPage, pPatch);
4832 AssertRC(rc);
4833 }
4834 }
4835 pPatch->flags |= PATMFL_CODE_MONITORED;
4836 return VINF_SUCCESS;
4837}
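
/*
 * Editor's note (worked example, not part of the original source): with the
 * hypothetical values pInstrGCLowest = 0x80001ffa and pInstrGCHighest =
 * 0x80002010, masking with PAGE_BASE_GC_MASK yields pPatchPageStart =
 * 0x80001000 and pPatchPageEnd = 0x80002000, so the loop above adds patch
 * page records (and CSAM monitoring) for both 0x80001000 and 0x80002000.
 */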
4838
4839/**
4840 * Remove page records for all guest pages that contain instructions that were recompiled for this patch
4841 *
4842 * @returns VBox status code.
4843 * @param pVM Pointer to the VM.
4844 * @param pPatch Patch record
4845 */
4846static int patmRemovePatchPages(PVM pVM, PPATCHINFO pPatch)
4847{
4848 int rc;
4849 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4850
4851 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4852 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4853 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4854
4855 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4856 {
4857 /* Get the closest guest instruction (from above) */
4858 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4859 if ( pGuestToPatchRec
4860 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage) /** @todo bird: PAGE_ADDRESS is for the current context really. check out these. */
4861 )
4862 {
4863 /* Code in page really patched -> remove record */
4864 rc = patmRemovePatchFromPage(pVM, pPage, pPatch);
4865 AssertRC(rc);
4866 }
4867 }
4868 pPatch->flags &= ~PATMFL_CODE_MONITORED;
4869 return VINF_SUCCESS;
4870}
4871
4872/**
4873 * Notifies PATM about a (potential) write to code that has been patched.
4874 *
4875 * @returns VBox status code.
4876 * @param pVM Pointer to the VM.
4877 * @param GCPtr GC pointer to write address
4878 * @param cbWrite Number of bytes to write
4879 *
4880 */
4881VMMR3_INT_DECL(int) PATMR3PatchWrite(PVM pVM, RTRCPTR GCPtr, uint32_t cbWrite)
4882{
4883 RTRCUINTPTR pWritePageStart, pWritePageEnd, pPage;
4884
4885 Log(("PATMR3PatchWrite %RRv %x\n", GCPtr, cbWrite));
4886
4887 Assert(VM_IS_EMT(pVM));
4888 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
4889
4890 /* Quick boundary check */
4891 if ( GCPtr < pVM->patm.s.pPatchedInstrGCLowest
4892 || GCPtr > pVM->patm.s.pPatchedInstrGCHighest
4893 )
4894 return VINF_SUCCESS;
4895
4896 STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWrite, a);
4897
4898 pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
4899 pWritePageEnd = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;
4900
4901 for (pPage = pWritePageStart; pPage <= pWritePageEnd; pPage += PAGE_SIZE)
4902 {
4903loop_start:
4904 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4905 if (pPatchPage)
4906 {
4907 uint32_t i;
4908 bool fValidPatchWrite = false;
4909
4910 /* Quick check to see if the write is in the patched part of the page */
4911 if ( pPatchPage->pLowestAddrGC > (RTRCPTR)((RTRCUINTPTR)GCPtr + cbWrite - 1)
4912 || pPatchPage->pHighestAddrGC < GCPtr)
4913 {
4914 break;
4915 }
4916
4917 for (i=0;i<pPatchPage->cCount;i++)
4918 {
4919 if (pPatchPage->papPatch[i])
4920 {
4921 PPATCHINFO pPatch = pPatchPage->papPatch[i];
4922 RTRCPTR pPatchInstrGC;
4923 //unused: bool fForceBreak = false;
4924
4925 Assert(pPatchPage->papPatch[i]->flags & PATMFL_CODE_MONITORED);
4926 /** @todo inefficient and includes redundant checks for multiple pages. */
4927 for (uint32_t j=0; j<cbWrite; j++)
4928 {
4929 RTRCPTR pGuestPtrGC = (RTRCPTR)((RTRCUINTPTR)GCPtr + j);
4930
4931 if ( pPatch->cbPatchJump
4932 && pGuestPtrGC >= pPatch->pPrivInstrGC
4933 && pGuestPtrGC < pPatch->pPrivInstrGC + pPatch->cbPatchJump)
4934 {
4935 /* The guest is about to overwrite the 5 byte jump to patch code. Remove the patch. */
4936 Log(("PATMR3PatchWrite: overwriting jump to patch code -> remove patch.\n"));
4937 int rc = PATMR3RemovePatch(pVM, pPatch->pPrivInstrGC);
4938 if (rc == VINF_SUCCESS)
4939 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4940 goto loop_start;
4941
4942 continue;
4943 }
4944
4945 /* Find the closest instruction from below; the above quick check ensured that we are indeed in patched code */
4946 pPatchInstrGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4947 if (!pPatchInstrGC)
4948 {
4949 RTRCPTR pClosestInstrGC;
4950 uint32_t size;
4951
4952 pPatchInstrGC = patmGuestGCPtrToClosestPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4953 if (pPatchInstrGC)
4954 {
4955 pClosestInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchInstrGC);
4956 Assert(pClosestInstrGC <= pGuestPtrGC);
4957 size = patmGetInstrSize(pVM, pPatch, pClosestInstrGC);
4958 /* Check if this is not a write into a gap between two patches */
4959 if (pClosestInstrGC + size - 1 < pGuestPtrGC)
4960 pPatchInstrGC = 0;
4961 }
4962 }
4963 if (pPatchInstrGC)
4964 {
4965 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
4966
4967 fValidPatchWrite = true;
4968
4969 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
4970 Assert(pPatchToGuestRec);
4971 if (pPatchToGuestRec && !pPatchToGuestRec->fDirty)
4972 {
4973 Log(("PATMR3PatchWrite: Found patched instruction %RRv -> %RRv\n", pGuestPtrGC, pPatchInstrGC));
4974
4975 if (++pPatch->cCodeWrites > PATM_MAX_CODE_WRITES)
4976 {
4977 LogRel(("PATM: Disable block at %RRv - write %RRv-%RRv\n", pPatch->pPrivInstrGC, pGuestPtrGC, pGuestPtrGC+cbWrite));
4978
4979 patmR3MarkDirtyPatch(pVM, pPatch);
4980
4981 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4982 goto loop_start;
4983 }
4984 else
4985 {
4986 /* Replace the patch instruction with a breakpoint; when it's hit, then we'll attempt to recompile the instruction again. */
4987 uint8_t *pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pPatchInstrGC);
4988
4989 pPatchToGuestRec->u8DirtyOpcode = *pInstrHC;
4990 pPatchToGuestRec->fDirty = true;
4991
4992 *pInstrHC = 0xCC;
4993
4994 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirty);
4995 }
4996 }
4997 /* else already marked dirty */
4998 }
4999 }
5000 }
5001 } /* for each patch */
5002
5003 if (fValidPatchWrite == false)
5004 {
5005 /* Write to a part of the page that either:
5006 * - doesn't contain any code (shared code/data); rather unlikely
5007 * - old code page that's no longer in active use.
5008 */
5009invalid_write_loop_start:
5010 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
5011
5012 if (pPatchPage)
5013 {
5014 for (i=0;i<pPatchPage->cCount;i++)
5015 {
5016 PPATCHINFO pPatch = pPatchPage->papPatch[i];
5017
5018 if (pPatch->cInvalidWrites > PATM_MAX_INVALID_WRITES)
5019 {
5020 /* Note: possibly dangerous assumption that all future writes will be harmless. */
5021 if (pPatch->flags & PATMFL_IDTHANDLER)
5022 {
5023 LogRel(("PATM: Stop monitoring IDT handler pages at %RRv - invalid write %RRv-%RRv (this is not a fatal error)\n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
5024
5025 Assert(pPatch->flags & PATMFL_CODE_MONITORED);
5026 int rc = patmRemovePatchPages(pVM, pPatch);
5027 AssertRC(rc);
5028 }
5029 else
5030 {
5031 LogRel(("PATM: Disable block at %RRv - invalid write %RRv-%RRv \n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
5032 patmR3MarkDirtyPatch(pVM, pPatch);
5033 }
5034 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
5035 goto invalid_write_loop_start;
5036 }
5037 } /* for */
5038 }
5039 }
5040 }
5041 }
5042 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWrite, a);
5043 return VINF_SUCCESS;
5044
5045}
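
/*
 * Editor's sketch (not part of the original source): how a guest write
 * notification might hand a monitored write to PATM. The surrounding handler
 * and its variables (GCPtrWrite, cbWrite) are hypothetical; the quick range
 * check at the top of PATMR3PatchWrite keeps the call cheap when the write
 * does not touch patched code.
 *
 *     if (PATMIsEnabled(pVM))
 *     {
 *         int rc = PATMR3PatchWrite(pVM, GCPtrWrite, cbWrite);
 *         AssertRC(rc);
 *     }
 */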
5046
5047/**
5048 * Disable all patches in a flushed page
5049 *
5050 * @returns VBox status code
5051 * @param pVM Pointer to the VM.
5052 * @param addr GC address of the page to flush
5053 * @note Currently only called by CSAMR3FlushPage; optimization to avoid
5054 * having to double check if the physical address has changed
5055 */
5056VMMR3_INT_DECL(int) PATMR3FlushPage(PVM pVM, RTRCPTR addr)
5057{
5058 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5059
5060 addr &= PAGE_BASE_GC_MASK;
5061
5062 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, addr);
5063 if (pPatchPage)
5064 {
5065 int i;
5066
5067 /* From top to bottom as the array is modified by patmR3MarkDirtyPatch. */
5068 for (i=(int)pPatchPage->cCount-1;i>=0;i--)
5069 {
5070 if (pPatchPage->papPatch[i])
5071 {
5072 PPATCHINFO pPatch = pPatchPage->papPatch[i];
5073
5074 Log(("PATMR3FlushPage %RRv remove patch at %RRv\n", addr, pPatch->pPrivInstrGC));
5075 patmR3MarkDirtyPatch(pVM, pPatch);
5076 }
5077 }
5078 STAM_COUNTER_INC(&pVM->patm.s.StatFlushed);
5079 }
5080 return VINF_SUCCESS;
5081}
5082
5083/**
5084 * Checks if the instruction at the specified address has already been patched.
5085 *
5086 * @returns boolean, patched or not
5087 * @param pVM Pointer to the VM.
5088 * @param pInstrGC Guest context pointer to instruction
5089 */
5090VMMR3_INT_DECL(bool) PATMR3HasBeenPatched(PVM pVM, RTRCPTR pInstrGC)
5091{
5092 Assert(!HMIsEnabled(pVM));
5093 PPATMPATCHREC pPatchRec;
5094 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5095 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
5096 return true;
5097 return false;
5098}
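
/*
 * Editor's sketch (not part of the original source): a code-scanning caller
 * can use this predicate to skip instructions that already carry an enabled
 * patch. The control flow shown is hypothetical.
 *
 *     if (PATMR3HasBeenPatched(pVM, pInstrGC))
 *         return VINF_SUCCESS;    // already patched; nothing to do
 */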
5099
5100/**
5101 * Query the opcode of the original code that was overwritten by the 5-byte patch jump
5102 *
5103 * @returns VBox status code.
5104 * @param pVM Pointer to the VM.
5105 * @param pInstrGC GC address of instr
5106 * @param pByte opcode byte pointer (OUT)
5107 *
5108 */
5109VMMR3DECL(int) PATMR3QueryOpcode(PVM pVM, RTRCPTR pInstrGC, uint8_t *pByte)
5110{
5111 PPATMPATCHREC pPatchRec;
5112
5113 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
5114
5115 /* Shortcut. */
5116 if (!PATMIsEnabled(pVM))
5117 return VERR_PATCH_NOT_FOUND;
5118 Assert(!HMIsEnabled(pVM));
5119 if ( pInstrGC < pVM->patm.s.pPatchedInstrGCLowest
5120 || pInstrGC > pVM->patm.s.pPatchedInstrGCHighest)
5121 return VERR_PATCH_NOT_FOUND;
5122
5123 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5124 // if the patch is enabled and the pointer lies within 5 bytes of this priv instr ptr, then we've got a hit!
5125 if ( pPatchRec
5126 && pPatchRec->patch.uState == PATCH_ENABLED
5127 && pInstrGC >= pPatchRec->patch.pPrivInstrGC
5128 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5129 {
5130 RTRCPTR offset = pInstrGC - pPatchRec->patch.pPrivInstrGC;
5131 *pByte = pPatchRec->patch.aPrivInstr[offset];
5132
5133 if (pPatchRec->patch.cbPatchJump == 1)
5134 {
5135 Log(("PATMR3QueryOpcode: returning opcode %2X for instruction at %RRv\n", *pByte, pInstrGC));
5136 }
5137 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5138 return VINF_SUCCESS;
5139 }
5140 return VERR_PATCH_NOT_FOUND;
5141}
5142
5143/**
5144 * Read instruction bytes of the original code that was overwritten by the
5145 * 5-byte patch jump.
5146 *
5147 * @returns VINF_SUCCESS or VERR_PATCH_NOT_FOUND.
5148 * @param pVM Pointer to the VM.
5149 * @param GCPtrInstr GC address of instr
5150 * @param pbDst The output buffer.
5151 * @param cbToRead The maximum number of bytes to read.
5152 * @param pcbRead Where to return the actual number of bytes read.
5153 */
5154VMMR3_INT_DECL(int) PATMR3ReadOrgInstr(PVM pVM, RTGCPTR32 GCPtrInstr, uint8_t *pbDst, size_t cbToRead, size_t *pcbRead)
5155{
5156 /* Shortcut. */
5157 if (!PATMIsEnabled(pVM))
5158 return VERR_PATCH_NOT_FOUND;
5159 Assert(!HMIsEnabled(pVM));
5160 if ( GCPtrInstr < pVM->patm.s.pPatchedInstrGCLowest
5161 || GCPtrInstr > pVM->patm.s.pPatchedInstrGCHighest)
5162 return VERR_PATCH_NOT_FOUND;
5163
5164 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
5165
5166 /*
5167 * If the patch is enabled and the pointer lies within 5 bytes of this
5168 * priv instr ptr, then we've got a hit!
5169 */
5170 RTGCPTR32 off;
5171 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree,
5172 GCPtrInstr, false /*fAbove*/);
5173 if ( pPatchRec
5174 && pPatchRec->patch.uState == PATCH_ENABLED
5175 && (off = GCPtrInstr - pPatchRec->patch.pPrivInstrGC) < pPatchRec->patch.cbPatchJump)
5176 {
5177 uint8_t const *pbSrc = &pPatchRec->patch.aPrivInstr[off];
5178 uint32_t const cbMax = pPatchRec->patch.cbPatchJump - off;
5179 if (cbToRead > cbMax)
5180 cbToRead = cbMax;
5181 switch (cbToRead)
5182 {
5183 case 5: pbDst[4] = pbSrc[4];
5184 case 4: pbDst[3] = pbSrc[3];
5185 case 3: pbDst[2] = pbSrc[2];
5186 case 2: pbDst[1] = pbSrc[1];
5187 case 1: pbDst[0] = pbSrc[0];
5188 break;
5189 default:
5190 memcpy(pbDst, pbSrc, cbToRead);
5191 }
5192 *pcbRead = cbToRead;
5193
5194 if (pPatchRec->patch.cbPatchJump == 1)
5195 Log(("PATMR3ReadOrgInstr: returning opcode %.*Rhxs for instruction at %RX32\n", cbToRead, pbSrc, GCPtrInstr));
5196 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5197 return VINF_SUCCESS;
5198 }
5199
5200 return VERR_PATCH_NOT_FOUND;
5201}
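
/*
 * Editor's sketch (not part of the original source): reading guest code while
 * hiding the patch jump. A read routine (hypothetical buffer abBuf) would
 * first ask PATM for the original bytes and fall back to a plain guest memory
 * read when the address is not covered by a patch jump.
 *
 *     size_t cbRead = 0;
 *     int rc = PATMR3ReadOrgInstr(pVM, GCPtrInstr, abBuf, sizeof(abBuf), &cbRead);
 *     if (rc == VERR_PATCH_NOT_FOUND)
 *         rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), abBuf, GCPtrInstr, sizeof(abBuf));
 */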
5202
5203/**
5204 * Disable patch for privileged instruction at specified location
5205 *
5206 * @returns VBox status code.
5207 * @param pVM Pointer to the VM.
5208 * @param pInstrGC Guest context pointer to the privileged instruction
5209 *
5210 * @note returns failure if patching is not allowed or possible
5211 *
5212 */
5213VMMR3_INT_DECL(int) PATMR3DisablePatch(PVM pVM, RTRCPTR pInstrGC)
5214{
5215 PPATMPATCHREC pPatchRec;
5216 PPATCHINFO pPatch;
5217
5218 Log(("PATMR3DisablePatch: %RRv\n", pInstrGC));
5219 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5220 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5221 if (pPatchRec)
5222 {
5223 int rc = VINF_SUCCESS;
5224
5225 pPatch = &pPatchRec->patch;
5226
5227 /* Already disabled? */
5228 if (pPatch->uState == PATCH_DISABLED)
5229 return VINF_SUCCESS;
5230
5231 /* Clear the IDT entries for the patch we're disabling. */
5232 /* Note: very important as we clear IF in the patch itself */
5233 /** @todo this needs to be changed */
5234 if (pPatch->flags & PATMFL_IDTHANDLER)
5235 {
5236 uint32_t iGate;
5237
5238 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
5239 if (iGate != (uint32_t)~0)
5240 {
5241 TRPMR3SetGuestTrapHandler(pVM, iGate, TRPM_INVALID_HANDLER);
5242 if (++cIDTHandlersDisabled < 256)
5243 LogRel(("PATM: Disabling IDT %x patch handler %RRv\n", iGate, pInstrGC));
5244 }
5245 }
5246
5247 /* Mark the entry with a breakpoint in case somebody else calls it later on (cli patch used as a function, function, trampoline or idt patches) */
5248 if ( pPatch->pPatchBlockOffset
5249 && pPatch->uState == PATCH_ENABLED)
5250 {
5251 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5252 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5253 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5254 }
5255
5256 /* IDT or function patches haven't changed any guest code. */
5257 if (pPatch->flags & PATMFL_PATCHED_GUEST_CODE)
5258 {
5259 Assert(pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP);
5260 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK)));
5261
5262 if (pPatch->uState != PATCH_REFUSED)
5263 {
5264 uint8_t temp[16];
5265
5266 Assert(pPatch->cbPatchJump < sizeof(temp));
5267
5268 /* Let's first check if the guest code is still the same. */
5269 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5270 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5271 if (rc == VINF_SUCCESS)
5272 {
5273 RTRCINTPTR displ = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);
5274
5275 if ( temp[0] != 0xE9 /* jmp opcode */
5276 || *(RTRCINTPTR *)(&temp[1]) != displ
5277 )
5278 {
5279 Log(("PATMR3DisablePatch: Can't disable a patch who's guest code has changed!!\n"));
5280 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5281 /* Remove it completely */
5282 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5283 rc = PATMR3RemovePatch(pVM, pInstrGC);
5284 AssertRC(rc);
5285 return VWRN_PATCH_REMOVED;
5286 }
5287 patmRemoveJumpToPatch(pVM, pPatch);
5288 }
5289 else
5290 {
5291 Log(("PATMR3DisablePatch: unable to disable patch -> mark PATCH_DISABLE_PENDING\n"));
5292 pPatch->uState = PATCH_DISABLE_PENDING;
5293 }
5294 }
5295 else
5296 {
5297 AssertMsgFailed(("Patch was refused!\n"));
5298 return VERR_PATCH_ALREADY_DISABLED;
5299 }
5300 }
5301 else
5302 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5303 {
5304 uint8_t temp[16];
5305
5306 Assert(pPatch->cbPatchJump < sizeof(temp));
5307
5308 /* Let's first check if the guest code is still the same. */
5309 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5310 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5311 if (rc == VINF_SUCCESS)
5312 {
5313 if (temp[0] != 0xCC)
5314 {
5315 Log(("PATMR3DisablePatch: Can't disable a patch who's guest code has changed!!\n"));
5316 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5317 /* Remove it completely */
5318 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5319 rc = PATMR3RemovePatch(pVM, pInstrGC);
5320 AssertRC(rc);
5321 return VWRN_PATCH_REMOVED;
5322 }
5323 patmDeactivateInt3Patch(pVM, pPatch);
5324 }
5325 }
5326
5327 if (rc == VINF_SUCCESS)
5328 {
5329 /* Save old state and mark this one as disabled (so it can be enabled later on). */
5330 if (pPatch->uState == PATCH_DISABLE_PENDING)
5331 {
5332 /* Just to be safe, let's make sure this one can never be reused; the patch might be marked dirty already (int3 at start) */
5333 pPatch->uState = PATCH_UNUSABLE;
5334 }
5335 else
5336 if (pPatch->uState != PATCH_DIRTY)
5337 {
5338 pPatch->uOldState = pPatch->uState;
5339 pPatch->uState = PATCH_DISABLED;
5340 }
5341 STAM_COUNTER_ADD(&pVM->patm.s.StatDisabled, 1);
5342 }
5343
5344 Log(("PATMR3DisablePatch: disabled patch at %RRv\n", pInstrGC));
5345 return VINF_SUCCESS;
5346 }
5347 Log(("Patch not found!\n"));
5348 return VERR_PATCH_NOT_FOUND;
5349}
5350
5351/**
5352 * Permanently disable patch for privileged instruction at specified location
5353 *
5354 * @returns VBox status code.
5355 * @param pVM Pointer to the VM.
5356 * @param pInstrGC Guest context instruction pointer
5357 * @param pConflictAddr Guest context pointer which conflicts with specified patch
5358 * @param pConflictPatch Conflicting patch
5359 *
5360 */
5361static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pConflictPatch)
5362{
5363 NOREF(pConflictAddr);
5364#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
5365 PATCHINFO patch;
5366 DISCPUSTATE cpu;
5367 R3PTRTYPE(uint8_t *) pInstrHC;
5368 uint32_t cbInstr;
5369 bool disret;
5370 int rc;
5371
5372 RT_ZERO(patch);
5373 pInstrHC = patmR3GCVirtToHCVirt(pVM, &patch, pInstrGC);
5374 disret = patmR3DisInstr(pVM, &patch, pInstrGC, pInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
5375 /*
5376 * If it's a 5 byte relative jump, then we can work around the problem by replacing the 32 bits relative offset
5377 * with one that jumps right into the conflict patch.
5378 * Otherwise we must disable the conflicting patch to avoid serious problems.
5379 */
5380 if ( disret == true
5381 && (pConflictPatch->flags & PATMFL_CODE32)
5382 && (cpu.pCurInstr->uOpcode == OP_JMP || (cpu.pCurInstr->fOpType & DISOPTYPE_COND_CONTROLFLOW))
5383 && (cpu.Param1.fUse & DISUSE_IMMEDIATE32_REL))
5384 {
5385 /* Hint patches must be enabled first. */
5386 if (pConflictPatch->flags & PATMFL_INSTR_HINT)
5387 {
5388 Log(("Enabling HINTED patch %RRv\n", pConflictPatch->pPrivInstrGC));
5389 pConflictPatch->flags &= ~PATMFL_INSTR_HINT;
5390 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5391 Assert(rc == VINF_SUCCESS || rc == VERR_PATCH_NOT_FOUND);
5392 /* Enabling might fail if the patched code has changed in the meantime. */
5393 if (rc != VINF_SUCCESS)
5394 return rc;
5395 }
5396
5397 rc = PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_JUMP_CONFLICT);
5398 if (RT_SUCCESS(rc))
5399 {
5400 Log(("PATM -> CONFLICT: Installed JMP patch for patch conflict at %RRv\n", pInstrGC));
5401 STAM_COUNTER_INC(&pVM->patm.s.StatFixedConflicts);
5402 return VINF_SUCCESS;
5403 }
5404 }
5405#endif
5406
5407 if (pConflictPatch->opcode == OP_CLI)
5408 {
5409 /* Turn it into an int3 patch; our GC trap handler will call the generated code manually. */
5410 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> turn into int 3 patch!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5411 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5412 if (rc == VWRN_PATCH_REMOVED)
5413 return VINF_SUCCESS;
5414 if (RT_SUCCESS(rc))
5415 {
5416 pConflictPatch->flags &= ~(PATMFL_MUST_INSTALL_PATCHJMP|PATMFL_INSTR_HINT);
5417 pConflictPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
5418 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5419 if (rc == VERR_PATCH_NOT_FOUND)
5420 return VINF_SUCCESS; /* removed already */
5421
5422 AssertRC(rc);
5423 if (RT_SUCCESS(rc))
5424 {
5425 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
5426 return VINF_SUCCESS;
5427 }
5428 }
5429 /* else turned into unusable patch (see below) */
5430 }
5431 else
5432 {
5433 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> DISABLING it!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5434 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5435 if (rc == VWRN_PATCH_REMOVED)
5436 return VINF_SUCCESS;
5437 }
5438
5439 /* No need to monitor the code anymore. */
5440 if (pConflictPatch->flags & PATMFL_CODE_MONITORED)
5441 {
5442 int rc = patmRemovePatchPages(pVM, pConflictPatch);
5443 AssertRC(rc);
5444 }
5445 pConflictPatch->uState = PATCH_UNUSABLE;
5446 STAM_COUNTER_INC(&pVM->patm.s.StatUnusable);
5447 return VERR_PATCH_DISABLED;
5448}
5449
5450/**
5451 * Enable patch for privileged instruction at specified location
5452 *
5453 * @returns VBox status code.
5454 * @param pVM Pointer to the VM.
5455 * @param pInstrGC Guest context pointer to the privileged instruction
5456 *
5457 * @note returns failure if patching is not allowed or possible
5458 *
5459 */
5460VMMR3_INT_DECL(int) PATMR3EnablePatch(PVM pVM, RTRCPTR pInstrGC)
5461{
5462 PPATMPATCHREC pPatchRec;
5463 PPATCHINFO pPatch;
5464
5465 Log(("PATMR3EnablePatch %RRv\n", pInstrGC));
5466 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5467 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5468 if (pPatchRec)
5469 {
5470 int rc = VINF_SUCCESS;
5471
5472 pPatch = &pPatchRec->patch;
5473
5474 if (pPatch->uState == PATCH_DISABLED)
5475 {
5476 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
5477 {
5478 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
5479 uint8_t temp[16];
5480
5481 Assert(pPatch->cbPatchJump < sizeof(temp));
5482
5483 /* Let's first check if the guest code is still the same. */
5484 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5485 AssertRC(rc2);
5486 if (rc2 == VINF_SUCCESS)
5487 {
5488 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5489 {
5490 Log(("PATMR3EnablePatch: Can't enable a patch who's guest code has changed!!\n"));
5491 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5492 /* Remove it completely */
5493 rc = PATMR3RemovePatch(pVM, pInstrGC);
5494 AssertRC(rc);
5495 return VERR_PATCH_NOT_FOUND;
5496 }
5497
5498 PATMP2GLOOKUPREC cacheRec;
5499 RT_ZERO(cacheRec);
5500 cacheRec.pPatch = pPatch;
5501
5502 rc2 = patmGenJumpToPatch(pVM, pPatch, &cacheRec, false);
5503 /* Free leftover lock if any. */
5504 if (cacheRec.Lock.pvMap)
5505 {
5506 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
5507 cacheRec.Lock.pvMap = NULL;
5508 }
5509 AssertRC(rc2);
5510 if (RT_FAILURE(rc2))
5511 return rc2;
5512
5513#ifdef DEBUG
5514 {
5515 DISCPUSTATE cpu;
5516 char szOutput[256];
5517 uint32_t cbInstr;
5518 uint32_t i = 0;
5519 bool disret;
5520 while(i < pPatch->cbPatchJump)
5521 {
5522 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
5523 &cpu, &cbInstr, szOutput, sizeof(szOutput));
5524 Log(("Renewed patch instr: %s", szOutput));
5525 i += cbInstr;
5526 }
5527 }
5528#endif
5529 }
5530 }
5531 else
5532 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5533 {
5534 uint8_t temp[16];
5535
5536 Assert(pPatch->cbPatchJump < sizeof(temp));
5537
5538 /* Let's first check if the guest code is still the same. */
5539 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5540 AssertRC(rc2);
5541
5542 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5543 {
5544 Log(("PATMR3EnablePatch: Can't enable a patch who's guest code has changed!!\n"));
5545 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5546 rc = PATMR3RemovePatch(pVM, pInstrGC);
5547 AssertRC(rc);
5548 return VERR_PATCH_NOT_FOUND;
5549 }
5550
5551 rc2 = patmActivateInt3Patch(pVM, pPatch);
5552 if (RT_FAILURE(rc2))
5553 return rc2;
5554 }
5555
5556 pPatch->uState = pPatch->uOldState; //restore state
5557
5558 /* Restore the entry breakpoint with the original opcode (see PATMR3DisablePatch). */
5559 if (pPatch->pPatchBlockOffset)
5560 *PATCHCODE_PTR_HC(pPatch) = pPatch->bDirtyOpcode;
5561
5562 STAM_COUNTER_ADD(&pVM->patm.s.StatEnabled, 1);
5563 }
5564 else
5565 Log(("PATMR3EnablePatch: Unable to enable patch %RRv with state %d\n", pInstrGC, pPatch->uState));
5566
5567 return rc;
5568 }
5569 return VERR_PATCH_NOT_FOUND;
5570}
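
/*
 * Editor's sketch (not part of the original source): the disable/enable pair
 * as used elsewhere in PATM. Note that PATMR3DisablePatch can return
 * VWRN_PATCH_REMOVED when the guest has overwritten the patch jump, in which
 * case there is nothing left to re-enable.
 *
 *     int rc = PATMR3DisablePatch(pVM, pInstrGC);
 *     if (rc == VWRN_PATCH_REMOVED)
 *         return VINF_SUCCESS;                    // the patch is gone
 *     // ... do whatever required the patch to be off ...
 *     rc = PATMR3EnablePatch(pVM, pInstrGC);      // may fail if the guest code changed
 */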
5571
5572/**
5573 * Remove patch for privileged instruction at specified location
5574 *
5575 * @returns VBox status code.
5576 * @param pVM Pointer to the VM.
5577 * @param pPatchRec Patch record
5578 * @param fForceRemove Force removal even of patches that other code depends on
5579 */
5580int patmR3RemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove)
5581{
5582 PPATCHINFO pPatch;
5583
5584 pPatch = &pPatchRec->patch;
5585
5586 /* Strictly forbidden to remove such patches. There can be dependencies!! */
5587 if (!fForceRemove && (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED)))
5588 {
5589 Log(("PATMRemovePatch %RRv REFUSED!\n", pPatch->pPrivInstrGC));
5590 return VERR_ACCESS_DENIED;
5591 }
5592 Log(("PATMRemovePatch %RRv\n", pPatch->pPrivInstrGC));
5593
5594 /* Note: NEVER EVER REUSE PATCH MEMORY */
5595 /* Note: PATMR3DisablePatch puts a breakpoint (0xCC) at the entry of this patch */
5596
5597 if (pPatchRec->patch.pPatchBlockOffset)
5598 {
5599 PAVLOU32NODECORE pNode;
5600
5601 pNode = RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->patch.pPatchBlockOffset);
5602 Assert(pNode);
5603 }
5604
5605 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
5606 {
5607 int rc = patmRemovePatchPages(pVM, &pPatchRec->patch);
5608 AssertRC(rc);
5609 }
5610
5611#ifdef VBOX_WITH_STATISTICS
5612 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5613 {
5614 STAMR3Deregister(pVM, &pPatchRec->patch);
5615#ifndef DEBUG_sandervl
5616 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5617 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5618 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5619 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5620 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5621 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5622 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5623 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5624 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5625 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5626 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5627 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5628 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5629 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5630#endif
5631 }
5632#endif
5633
5634 /* Note: no need to free Guest2PatchAddrTree as those records share memory with Patch2GuestAddrTree records. */
5635 patmEmptyTreeU32(pVM, &pPatch->Patch2GuestAddrTree);
5636 pPatch->nrPatch2GuestRecs = 0;
5637 Assert(pPatch->Patch2GuestAddrTree == 0);
5638
5639 patmEmptyTree(pVM, &pPatch->FixupTree);
5640 pPatch->nrFixups = 0;
5641 Assert(pPatch->FixupTree == 0);
5642
5643 if (pPatchRec->patch.pTempInfo)
5644 MMR3HeapFree(pPatchRec->patch.pTempInfo);
5645
5646 /* Note: might fail, because it has already been removed (e.g. during reset). */
5647 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5648
5649 /* Free the patch record */
5650 MMHyperFree(pVM, pPatchRec);
5651 return VINF_SUCCESS;
5652}
5653
5654/**
5655 * RTAvlU32DoWithAll() worker.
5656 * Checks whether the current trampoline instruction is the jump to the target patch
5657 * and updates the displacement to jump to the new target.
5658 *
5659 * @returns VBox status code.
5660 * @retval VERR_ALREADY_EXISTS if the jump was found.
5661 * @param pNode The current patch to guest record to check.
5662 * @param pvUser The refresh state.
5663 */
5664static int patmR3PatchRefreshFindTrampolinePatch(PAVLU32NODECORE pNode, void *pvUser)
5665{
5666 PRECPATCHTOGUEST pPatch2GuestRec = (PRECPATCHTOGUEST)pNode;
5667 PPATMREFRESHPATCH pRefreshPatchState = (PPATMREFRESHPATCH)pvUser;
5668 PVM pVM = pRefreshPatchState->pVM;
5669
5670 uint8_t *pPatchInstr = (uint8_t *)(pVM->patm.s.pPatchMemHC + pPatch2GuestRec->Core.Key);
5671
5672 /*
5673 * Check if the patch instruction starts with a jump.
5674 * ASSUMES that there is no other patch to guest record that starts
5675 * with a jump.
5676 */
5677 if (*pPatchInstr == 0xE9)
5678 {
5679 /* Jump found, update the displacement. */
5680 RTRCPTR pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pRefreshPatchState->pPatchRec,
5681 pRefreshPatchState->pPatchTrampoline->pPrivInstrGC);
5682 int32_t displ = pPatchTargetGC - (pVM->patm.s.pPatchMemGC + pPatch2GuestRec->Core.Key + SIZEOF_NEARJUMP32);
5683
5684 LogFlow(("Updating trampoline patch new patch target %RRv, new displacment %d (old was %d)\n",
5685 pPatchTargetGC, displ, *(uint32_t *)&pPatchInstr[1]));
5686
5687 *(uint32_t *)&pPatchInstr[1] = displ;
5688 return VERR_ALREADY_EXISTS; /** @todo better return code */
5689 }
5690
5691 return VINF_SUCCESS;
5692}
5693
5694/**
5695 * Attempt to refresh the patch by recompiling its entire code block
5696 *
5697 * @returns VBox status code.
5698 * @param pVM Pointer to the VM.
5699 * @param pPatchRec Patch record
5700 */
5701int patmR3RefreshPatch(PVM pVM, PPATMPATCHREC pPatchRec)
5702{
5703 PPATCHINFO pPatch;
5704 int rc;
5705 RTRCPTR pInstrGC = pPatchRec->patch.pPrivInstrGC;
5706 PTRAMPREC pTrampolinePatchesHead = NULL;
5707
5708 Log(("patmR3RefreshPatch: attempt to refresh patch at %RRv\n", pInstrGC));
5709
5710 pPatch = &pPatchRec->patch;
5711 AssertReturn(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER), VERR_PATCHING_REFUSED);
5712 if (pPatch->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
5713 {
5714 if (!pPatch->pTrampolinePatchesHead)
5715 {
5716 /*
5717 * It is sometimes possible that there are trampoline patches to this patch
5718 * but they are not recorded (after a saved state load for example).
5719 * Refuse to refresh those patches.
5720 * In theory this can hurt performance if the patched code is modified by the
5721 * guest and executed often. In practice, however, states are usually saved after
5722 * the guest code was modified and the code is not changed again afterwards, so
5723 * this shouldn't be a big problem.
5724 */
5725 Log(("patmR3RefreshPatch: refused because external jumps to this patch exist but the jumps are not recorded\n"));
5726 return VERR_PATCHING_REFUSED;
5727 }
5728 Log(("patmR3RefreshPatch: external jumps to this patch exist, updating\n"));
5729 pTrampolinePatchesHead = pPatch->pTrampolinePatchesHead;
5730 }
5731
5732 /** Note: quite ugly to enable/disable/remove/insert old and new patches, but there's no easy way around it. */
5733
5734 rc = PATMR3DisablePatch(pVM, pInstrGC);
5735 AssertRC(rc);
5736
5737 /** Kick it out of the lookup tree to make sure PATMR3InstallPatch doesn't fail (hack alert) */
5738 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5739#ifdef VBOX_WITH_STATISTICS
5740 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5741 {
5742 STAMR3Deregister(pVM, &pPatchRec->patch);
5743#ifndef DEBUG_sandervl
5744 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5745 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5746 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5747 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5748 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5749 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5750 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5751 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5752 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5753 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5754 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5755 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5756 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5757 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5758#endif
5759 }
5760#endif
5761
5762 /** Note: We don't attempt to reuse patch memory here as it's quite common that the new code block requires more memory. */
5763
5764 /* Attempt to install a new patch. */
5765 rc = PATMR3InstallPatch(pVM, pInstrGC, pPatch->flags & (PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT));
5766 if (RT_SUCCESS(rc))
5767 {
5768 RTRCPTR pPatchTargetGC;
5769 PPATMPATCHREC pNewPatchRec;
5770
5771 /* Determine target address in new patch */
5772 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pInstrGC);
5773 Assert(pPatchTargetGC);
5774 if (!pPatchTargetGC)
5775 {
5776 rc = VERR_PATCHING_REFUSED;
5777 goto failure;
5778 }
5779
5780 /* Reset offset into patch memory to put the next code blocks right at the beginning. */
5781 pPatch->uCurPatchOffset = 0;
5782
5783 /* insert jump to new patch in old patch block */
5784 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC, false /* no lookup record */);
5785 if (RT_FAILURE(rc))
5786 goto failure;
5787
5788 pNewPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5789 Assert(pNewPatchRec); /* can't fail */
5790
5791 /* Remove old patch (only do that when everything is finished) */
5792 int rc2 = patmR3RemovePatch(pVM, pPatchRec, true /* force removal */);
5793 AssertRC(rc2);
5794
5795 /* Put the new patch back into the tree, because removing the old one kicked this one out. (hack alert) */
5796 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pNewPatchRec->Core);
5797 Assert(fInserted); NOREF(fInserted);
5798
5799 Log(("PATM: patmR3RefreshPatch: succeeded to refresh patch at %RRv \n", pInstrGC));
5800 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshSuccess);
5801
5802 /* Used by another patch, so don't remove it! */
5803 pNewPatchRec->patch.flags |= PATMFL_CODE_REFERENCED;
5804
5805 if (pTrampolinePatchesHead)
5806 {
5807 /* Update all trampoline patches to jump to the new patch. */
5808 PTRAMPREC pTrampRec = NULL;
5809 PATMREFRESHPATCH RefreshPatch;
5810
5811 RefreshPatch.pVM = pVM;
5812 RefreshPatch.pPatchRec = &pNewPatchRec->patch;
5813
5814 pTrampRec = pTrampolinePatchesHead;
5815
5816 while (pTrampRec)
5817 {
5818 PPATCHINFO pPatchTrampoline = &pTrampRec->pPatchTrampoline->patch;
5819
5820 RefreshPatch.pPatchTrampoline = pPatchTrampoline;
5821 /*
5822 * We have to find the right patch2guest record because there might be others
5823 * for statistics.
5824 */
5825 rc = RTAvlU32DoWithAll(&pPatchTrampoline->Patch2GuestAddrTree, true,
5826 patmR3PatchRefreshFindTrampolinePatch, &RefreshPatch);
5827 Assert(rc == VERR_ALREADY_EXISTS);
5828 rc = VINF_SUCCESS;
5829 pTrampRec = pTrampRec->pNext;
5830 }
5831 pNewPatchRec->patch.pTrampolinePatchesHead = pTrampolinePatchesHead;
5832 pNewPatchRec->patch.flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
5833 /* Clear the list of trampoline patches for the old patch (safety precaution). */
5834 pPatchRec->patch.pTrampolinePatchesHead = NULL;
5835 }
5836 }
5837
5838failure:
5839 if (RT_FAILURE(rc))
5840 {
5841 LogRel(("PATM: patmR3RefreshPatch: failed to refresh patch at %RRv. Reactiving old one. \n", pInstrGC));
5842
5843 /* Remove the new inactive patch */
5844 rc = PATMR3RemovePatch(pVM, pInstrGC);
5845 AssertRC(rc);
5846
5847 /* Put the old patch back into the tree (or else it won't be saved) (hack alert) */
5848 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
5849 Assert(fInserted); NOREF(fInserted);
5850
5851 /* Enable again in case the dirty instruction is near the end and there are safe code paths. */
5852 int rc2 = PATMR3EnablePatch(pVM, pInstrGC);
5853 AssertRC(rc2);
5854
5855 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshFailed);
5856 }
5857 return rc;
5858}
5859
5860/**
5861 * Find patch for privileged instruction at specified location
5862 *
5863 * @returns Patch structure pointer if found; else NULL
5864 * @param pVM Pointer to the VM.
5865 * @param pInstrGC Guest context pointer to an instruction that might lie within 5 bytes of an existing patch jump
5866 * @param fIncludeHints Include hinted patches or not
5867 *
5868 */
5869PPATCHINFO patmFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncludeHints)
5870{
5871 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5872 /* if the patch is enabled, the pointer is not identical to the privileged patch ptr and it lies within 5 bytes of this priv instr ptr, then we've got a hit! */
5873 if (pPatchRec)
5874 {
5875 if ( pPatchRec->patch.uState == PATCH_ENABLED
5876 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE)
5877 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5878 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5879 {
5880 Log(("Found active patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5881 return &pPatchRec->patch;
5882 }
5883 else
5884 if ( fIncludeHints
5885 && pPatchRec->patch.uState == PATCH_DISABLED
5886 && (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
5887 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5888 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5889 {
5890 Log(("Found HINT patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5891 return &pPatchRec->patch;
5892 }
5893 }
5894 return NULL;
5895}
5896
5897/**
5898 * Checks whether the GC address is inside a generated patch jump
5899 *
5900 * @returns true -> yes, false -> no
5901 * @param pVM Pointer to the VM.
5902 * @param pAddr Guest context address.
5903 * @param pPatchAddr Guest context patch address (if true).
5904 */
5905VMMR3_INT_DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatchAddr)
5906{
5907 RTRCPTR addr;
5908 PPATCHINFO pPatch;
5909
5910 Assert(!HMIsEnabled(pVM));
5911 if (PATMIsEnabled(pVM) == false)
5912 return false;
5913
5914 if (pPatchAddr == NULL)
5915 pPatchAddr = &addr;
5916
5917 *pPatchAddr = 0;
5918
5919 pPatch = patmFindActivePatchByEntrypoint(pVM, pAddr);
5920 if (pPatch)
5921 *pPatchAddr = pPatch->pPrivInstrGC;
5922
5923 return *pPatchAddr == 0 ? false : true;
5924}
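
/*
 * Editor's sketch (not part of the original source): hypothetical use from a
 * trap handler that wants to know whether a faulting EIP lies inside one of
 * the 5-byte patch jumps written into guest code.
 *
 *     RTGCPTR32 GCPtrPatch;
 *     if (PATMR3IsInsidePatchJump(pVM, pCtx->eip, &GCPtrPatch))
 *         Log(("eip %RRv is inside the jump for the patch at %RRv\n", pCtx->eip, GCPtrPatch));
 */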
5925
5926/**
5927 * Remove patch for privileged instruction at specified location
5928 *
5929 * @returns VBox status code.
5930 * @param pVM Pointer to the VM.
5931 * @param pInstrGC Guest context pointer to the privileged instruction
5932 *
5933 * @note returns failure if patching is not allowed or possible
5934 *
5935 */
5936VMMR3_INT_DECL(int) PATMR3RemovePatch(PVM pVM, RTRCPTR pInstrGC)
5937{
5938 PPATMPATCHREC pPatchRec;
5939
5940 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5941 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5942 if (pPatchRec)
5943 {
5944 int rc = PATMR3DisablePatch(pVM, pInstrGC);
5945 if (rc == VWRN_PATCH_REMOVED)
5946 return VINF_SUCCESS;
5947
5948 return patmR3RemovePatch(pVM, pPatchRec, false);
5949 }
5950 AssertFailed();
5951 return VERR_PATCH_NOT_FOUND;
5952}
5953
5954/**
5955 * Mark patch as dirty
5956 *
5957 * @returns VBox status code.
5958 * @param pVM Pointer to the VM.
5959 * @param pPatch Patch record
5960 *
5961 * @note returns failure if patching is not allowed or possible
5962 *
5963 */
5964static int patmR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch)
5965{
5966 if (pPatch->pPatchBlockOffset)
5967 {
5968 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5969 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5970 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5971 }
5972
5973 STAM_COUNTER_INC(&pVM->patm.s.StatDirty);
5974 /* Put back the replaced instruction. */
5975 int rc = PATMR3DisablePatch(pVM, pPatch->pPrivInstrGC);
5976 if (rc == VWRN_PATCH_REMOVED)
5977 return VINF_SUCCESS;
5978
5979 /* Note: we don't restore patch pages for patches that are not enabled! */
5980 /* Note: be careful when changing this behaviour!! */
5981
5982 /* The patch pages are no longer marked for self-modifying code detection */
5983 if (pPatch->flags & PATMFL_CODE_MONITORED)
5984 {
5985 rc = patmRemovePatchPages(pVM, pPatch);
5986 AssertRCReturn(rc, rc);
5987 }
5988 pPatch->uState = PATCH_DIRTY;
5989
5990 /* Paranoia; make sure this patch is not somewhere in the callchain, so prevent ret instructions from succeeding. */
5991 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
5992
5993 return VINF_SUCCESS;
5994}
5995
5996/**
5997 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5998 *
5999 * @returns Corresponding guest GC pointer, or 0 if not found.
6000 * @param pVM Pointer to the VM.
6001 * @param pPatch Patch block structure pointer
6002 * @param pPatchGC GC address in patch block
6003 */
6004RTRCPTR patmPatchGCPtr2GuestGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pPatchGC)
6005{
6006 Assert(pPatch->Patch2GuestAddrTree);
6007 /* Get the closest record from below. */
6008 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->Patch2GuestAddrTree, pPatchGC - pVM->patm.s.pPatchMemGC, false);
6009 if (pPatchToGuestRec)
6010 return pPatchToGuestRec->pOrgInstrGC;
6011
6012 return 0;
6013}
6014
6015/**
6016 * Converts Guest code GC ptr to Patch code GC ptr (if found)
6017 *
6018 * @returns corresponding GC pointer in patch block
6019 * @param pVM Pointer to the VM.
6020 * @param pPatch Current patch block pointer
6021 * @param pInstrGC Guest context pointer to privileged instruction
6022 *
6023 */
6024RTRCPTR patmGuestGCPtrToPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
6025{
6026 if (pPatch->Guest2PatchAddrTree)
6027 {
6028 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
6029 if (pGuestToPatchRec)
6030 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
6031 }
6032
6033 return 0;
6034}
6035
6036/**
6037 * Converts Guest code GC ptr to Patch code GC ptr (if found)
6038 *
6039 * @returns corresponding GC pointer in patch block
6040 * @param pVM Pointer to the VM.
6041 * @param pInstrGC Guest context pointer to privileged instruction
6042 */
6043static RTRCPTR patmR3GuestGCPtrToPatchGCPtrSimple(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC)
6044{
6045 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
6046 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && pInstrGC >= pPatchRec->patch.pPrivInstrGC)
6047 return patmGuestGCPtrToPatchGCPtr(pVM, &pPatchRec->patch, pInstrGC);
6048 return NIL_RTRCPTR;
6049}
6050
6051/**
6052 * Converts Guest code GC ptr to Patch code GC ptr (or nearest from below if no
6053 * identical match)
6054 *
6055 * @returns corresponding GC pointer in patch block
6056 * @param pVM Pointer to the VM.
6057 * @param pPatch Current patch block pointer
6058 * @param pInstrGC Guest context pointer to privileged instruction
6059 *
6060 */
6061RTRCPTR patmGuestGCPtrToClosestPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
6062{
6063 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pInstrGC, false);
6064 if (pGuestToPatchRec)
6065 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
6066 return NIL_RTRCPTR;
6067}
6068
6069/**
6070 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
6071 *
6072 * @returns original GC instruction pointer or 0 if not found
6073 * @param pVM Pointer to the VM.
6074 * @param pPatchGC GC address in patch block
6075 * @param pEnmState State of the translated address (out)
6076 *
6077 */
6078VMMR3_INT_DECL(RTRCPTR) PATMR3PatchToGCPtr(PVM pVM, RTRCPTR pPatchGC, PATMTRANSSTATE *pEnmState)
6079{
6080 PPATMPATCHREC pPatchRec;
6081 void *pvPatchCoreOffset;
6082 RTRCPTR pPrivInstrGC;
6083
6084 Assert(PATMIsPatchGCAddr(pVM, pPatchGC));
6085 Assert(!HMIsEnabled(pVM));
6086 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchGC - pVM->patm.s.pPatchMemGC, false);
6087 if (pvPatchCoreOffset == 0)
6088 {
6089 Log(("PATMR3PatchToGCPtr failed for %RRv offset %x\n", pPatchGC, pPatchGC - pVM->patm.s.pPatchMemGC));
6090 return 0;
6091 }
6092 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6093 pPrivInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, &pPatchRec->patch, pPatchGC);
6094 if (pEnmState)
6095 {
6096 AssertMsg(pPrivInstrGC && ( pPatchRec->patch.uState == PATCH_ENABLED
6097 || pPatchRec->patch.uState == PATCH_DIRTY
6098 || pPatchRec->patch.uState == PATCH_DISABLE_PENDING
6099 || pPatchRec->patch.uState == PATCH_UNUSABLE),
6100 ("pPrivInstrGC=%RRv uState=%d\n", pPrivInstrGC, pPatchRec->patch.uState));
6101
6102 if ( !pPrivInstrGC
6103 || pPatchRec->patch.uState == PATCH_UNUSABLE
6104 || pPatchRec->patch.uState == PATCH_REFUSED)
6105 {
6106 pPrivInstrGC = 0;
6107 *pEnmState = PATMTRANS_FAILED;
6108 }
6109 else
6110 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pPrivInstrGC)
6111 {
6112 *pEnmState = PATMTRANS_INHIBITIRQ;
6113 }
6114 else
6115 if ( pPatchRec->patch.uState == PATCH_ENABLED
6116 && !(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE))
6117 && pPrivInstrGC > pPatchRec->patch.pPrivInstrGC
6118 && pPrivInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
6119 {
6120 *pEnmState = PATMTRANS_OVERWRITTEN;
6121 }
6122 else
6123 if (patmFindActivePatchByEntrypoint(pVM, pPrivInstrGC))
6124 {
6125 *pEnmState = PATMTRANS_OVERWRITTEN;
6126 }
6127 else
6128 if (pPrivInstrGC == pPatchRec->patch.pPrivInstrGC)
6129 {
6130 *pEnmState = PATMTRANS_PATCHSTART;
6131 }
6132 else
6133 *pEnmState = PATMTRANS_SAFE;
6134 }
6135 return pPrivInstrGC;
6136}
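
/*
 * Editor's sketch (not part of the original source): translating a patch
 * memory address (for instance a faulting EIP inside the patch block) back to
 * the original guest address while honouring the translation state. The
 * caller shown here is hypothetical.
 *
 *     PATMTRANSSTATE enmState;
 *     RTRCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pEip, &enmState);
 *     if (pOrgInstrGC && enmState == PATMTRANS_SAFE)
 *         pCtx->eip = pOrgInstrGC;    // safe to resume in the original guest code
 */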
6137
6138/**
6139 * Returns the GC pointer of the patch for the specified GC address
6140 *
6141 * @returns GC pointer of the patch code, or NIL_RTRCPTR if no usable patch exists.
6142 * @param pVM Pointer to the VM.
6143 * @param pAddrGC Guest context address
6144 */
6145VMMR3_INT_DECL(RTRCPTR) PATMR3QueryPatchGCPtr(PVM pVM, RTRCPTR pAddrGC)
6146{
6147 PPATMPATCHREC pPatchRec;
6148
6149 Assert(!HMIsEnabled(pVM));
6150
6151 /* Find the patch record. */
6152 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pAddrGC);
6153 /** @todo we should only use patches that are enabled! always did this, but it's incorrect! */
6154 if (pPatchRec && (pPatchRec->patch.uState == PATCH_ENABLED || pPatchRec->patch.uState == PATCH_DIRTY))
6155 return PATCHCODE_PTR_GC(&pPatchRec->patch);
6156 return NIL_RTRCPTR;
6157}
6158
6159/**
6160 * Attempt to recover dirty instructions
6161 *
6162 * @returns VBox status code.
6163 * @param pVM Pointer to the VM.
6164 * @param pCtx Pointer to the guest CPU context.
6165 * @param pPatch Patch record.
6166 * @param pPatchToGuestRec Patch to guest address record.
6167 * @param pEip GC pointer of trapping instruction.
6168 */
6169static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch, PRECPATCHTOGUEST pPatchToGuestRec, RTRCPTR pEip)
6170{
6171 DISCPUSTATE CpuOld, CpuNew;
6172 uint8_t *pPatchInstrHC, *pCurPatchInstrHC;
6173 int rc;
6174 RTRCPTR pCurInstrGC, pCurPatchInstrGC;
6175 uint32_t cbDirty;
6176 PRECPATCHTOGUEST pRec;
6177 RTRCPTR const pOrgInstrGC = pPatchToGuestRec->pOrgInstrGC;
6178 PVMCPU pVCpu = VMMGetCpu0(pVM);
6179 Log(("patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv)\n", pEip, pOrgInstrGC));
6180
6181 pRec = pPatchToGuestRec;
6182 pCurInstrGC = pOrgInstrGC;
6183 pCurPatchInstrGC = pEip;
6184 cbDirty = 0;
6185 pPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6186
6187 /* Find all adjacent dirty instructions */
6188 while (true)
6189 {
6190 if (pRec->fJumpTarget)
6191 {
6192 LogRel(("PATM: patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv) ignored, because instruction in function was reused as target of jump\n", pEip, pOrgInstrGC));
6193 pRec->fDirty = false;
6194 return VERR_PATCHING_REFUSED;
6195 }
6196
6197 /* Restore original instruction opcode byte so we can check if the write was indeed safe. */
6198 pCurPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6199 *pCurPatchInstrHC = pRec->u8DirtyOpcode;
6200
6201 /* Only harmless instructions are acceptable. */
6202 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurPatchInstrGC, &CpuOld, 0);
6203 if ( RT_FAILURE(rc)
6204 || !(CpuOld.pCurInstr->fOpType & DISOPTYPE_HARMLESS))
6205 {
6206 if (RT_SUCCESS(rc))
6207 cbDirty += CpuOld.cbInstr;
6208 else
6209 if (!cbDirty)
6210 cbDirty = 1;
6211 break;
6212 }
6213
6214#ifdef DEBUG
6215 char szBuf[256];
6216 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6217 szBuf, sizeof(szBuf), NULL);
6218 Log(("DIRTY: %s\n", szBuf));
6219#endif
6220 /* Mark as clean; if we fail we'll let it always fault. */
6221 pRec->fDirty = false;
6222
6223 /* Remove old lookup record. */
6224 patmr3RemoveP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrGC);
6225 pPatchToGuestRec = NULL;
6226
6227 pCurPatchInstrGC += CpuOld.cbInstr;
6228 cbDirty += CpuOld.cbInstr;
6229
6230 /* Let's see if there's another dirty instruction right after. */
6231 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6232 if (!pRec || !pRec->fDirty)
6233 break; /* no more dirty instructions */
6234
6235 /* In case of complex instructions the next guest instruction could be quite far off. */
6236 pCurPatchInstrGC = pRec->Core.Key + pVM->patm.s.pPatchMemGC;
6237 }
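    /* When the loop exits via the "no more dirty instructions" path, cbDirty
     * covers the contiguous run of dirty patch instructions starting at pEip
     * (1:1 copies of the guest code at pOrgInstrGC); the pass below re-reads the
     * current guest instructions over that range and re-emits them. */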
6238
6239 if ( RT_SUCCESS(rc)
6240 && (CpuOld.pCurInstr->fOpType & DISOPTYPE_HARMLESS)
6241 )
6242 {
6243 uint32_t cbLeft;
6244
6245 pCurPatchInstrHC = pPatchInstrHC;
6246 pCurPatchInstrGC = pEip;
6247 cbLeft = cbDirty;
6248
6249 while (cbLeft && RT_SUCCESS(rc))
6250 {
6251 bool fValidInstr;
6252
6253 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurInstrGC, &CpuNew, 0);
6254
6255 fValidInstr = !!(CpuNew.pCurInstr->fOpType & DISOPTYPE_HARMLESS);
6256 if ( !fValidInstr
6257 && (CpuNew.pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW)
6258 )
6259 {
6260 RTRCPTR pTargetGC = PATMResolveBranch(&CpuNew, pCurInstrGC);
6261
6262 if ( pTargetGC >= pOrgInstrGC
6263 && pTargetGC <= pOrgInstrGC + cbDirty
6264 )
6265 {
6266 /* A relative jump to an instruction inside or to the end of the dirty block is acceptable. */
6267 fValidInstr = true;
6268 }
6269 }
6270
6271 /* If the instruction is completely harmless (which implies a 1:1 patch copy). */
6272 if ( rc == VINF_SUCCESS
6273 && CpuNew.cbInstr <= cbLeft /* must still fit */
6274 && fValidInstr
6275 )
6276 {
6277#ifdef DEBUG
6278 char szBuf[256];
6279 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6280 szBuf, sizeof(szBuf), NULL);
6281 Log(("NEW: %s\n", szBuf));
6282#endif
6283
6284 /* Copy the new instruction. */
6285 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pCurPatchInstrHC, pCurInstrGC, CpuNew.cbInstr);
6286 AssertRC(rc);
6287
6288 /* Add a new lookup record for the duplicated instruction. */
6289 patmR3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6290 }
6291 else
6292 {
6293#ifdef DEBUG
6294 char szBuf[256];
6295 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6296 szBuf, sizeof(szBuf), NULL);
6297 Log(("NEW: %s (FAILED)\n", szBuf));
6298#endif
6299 /* Restore the old lookup record for the duplicated instruction. */
6300 patmR3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6301
6302 /** @todo in theory we need to restore the lookup records for the remaining dirty instructions too! */
6303 rc = VERR_PATCHING_REFUSED;
6304 break;
6305 }
6306 pCurInstrGC += CpuNew.cbInstr;
6307 pCurPatchInstrHC += CpuNew.cbInstr;
6308 pCurPatchInstrGC += CpuNew.cbInstr;
6309 cbLeft -= CpuNew.cbInstr;
6310
6311 /* Check if we expanded a complex guest instruction into a patch stream (e.g. call) */
6312 if (!cbLeft)
6313 {
6314 /* If the next patch instruction doesn't correspond to the next guest instruction, then we have some extra room to fill. */
6315 if (RTAvlU32Get(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC) == NULL)
6316 {
6317 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6318 if (pRec)
6319 {
6320 unsigned cbFiller = pRec->Core.Key + pVM->patm.s.pPatchMemGC - pCurPatchInstrGC;
6321 uint8_t *pPatchFillHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6322
6323 Assert(!pRec->fDirty);
6324
6325 Log(("Room left in patched instruction stream (%d bytes)\n", cbFiller));
6326 if (cbFiller >= SIZEOF_NEARJUMP32)
6327 {
6328 pPatchFillHC[0] = 0xE9;
6329 *(uint32_t *)&pPatchFillHC[1] = cbFiller - SIZEOF_NEARJUMP32;
6330#ifdef DEBUG
6331 char szBuf[256];
6332 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC,
6333 DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6334 Log(("FILL: %s\n", szBuf));
6335#endif
6336 }
6337 else
6338 {
6339 for (unsigned i = 0; i < cbFiller; i++)
6340 {
6341 pPatchFillHC[i] = 0x90; /* NOP */
6342#ifdef DEBUG
6343 char szBuf[256];
6344 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC + i,
6345 DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6346 Log(("FILL: %s\n", szBuf));
6347#endif
6348 }
6349 }
6350 }
6351 }
6352 }
6353 }
6354 }
6355 else
6356 rc = VERR_PATCHING_REFUSED;
6357
6358 if (RT_SUCCESS(rc))
6359 {
6360 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyGood);
6361 }
6362 else
6363 {
6364 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyBad);
6365 Assert(cbDirty);
6366
6367 /* Mark the whole instruction stream with breakpoints. */
6368 if (cbDirty)
6369 memset(pPatchInstrHC, 0xCC, cbDirty);
6370
6371 if ( pVM->patm.s.fOutOfMemory == false
6372 && (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER)))
6373 {
6374 rc = patmR3RefreshPatch(pVM, pPatch);
6375 if (RT_FAILURE(rc))
6376 {
6377 LogRel(("PATM: Failed to refresh dirty patch at %RRv. Disabling it.\n", pPatch->patch.pPrivInstrGC));
6378 }
6379 /* Even if we succeed, we must go back to the original instruction as the patched one could be invalid. */
6380 rc = VERR_PATCHING_REFUSED;
6381 }
6382 }
6383 return rc;
6384}
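/* Illustrative sketch (hedged, not part of the original source): the filler logic
 * in patmR3HandleDirtyInstr above, restated in isolation. It assumes
 * SIZEOF_NEARJUMP32 is 5 (the 0xE9 opcode byte plus a 32-bit displacement) and
 * that the gap to fill immediately precedes the next valid patch instruction;
 * the helper name is hypothetical. */
#if 0
static void patmExampleEmitFiller(uint8_t *pbFill, unsigned cbFiller)
{
    if (cbFiller >= 5)
    {
        /* jmp rel32: the displacement is measured from the end of this 5-byte
         * instruction, so it only needs to skip the remaining filler bytes. */
        pbFill[0] = 0xE9;
        *(uint32_t *)&pbFill[1] = cbFiller - 5;
    }
    else
    {
        /* Not enough room for a near jump; pad with single-byte NOPs. */
        for (unsigned i = 0; i < cbFiller; i++)
            pbFill[i] = 0x90;
    }
}
#endif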
6385
6386/**
6387 * Handle trap inside patch code
6388 *
6389 * @returns VBox status code.
6390 * @param pVM Pointer to the VM.
6391 * @param pCtx Pointer to the guest CPU context.
6392 * @param pEip GC pointer of trapping instruction.
6393 * @param ppNewEip Where to store the GC pointer of the instruction to resume at.
6394 */
6395VMMR3_INT_DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *ppNewEip)
6396{
6397 PPATMPATCHREC pPatch = 0;
6398 void *pvPatchCoreOffset;
6399 RTRCUINTPTR offset;
6400 RTRCPTR pNewEip;
6401 int rc;
6402 PRECPATCHTOGUEST pPatchToGuestRec = 0;
6403 PVMCPU pVCpu = VMMGetCpu0(pVM);
6404
6405 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
6406 Assert(pVM->cCpus == 1);
6407
6408 pNewEip = 0;
6409 *ppNewEip = 0;
6410
6411 STAM_PROFILE_ADV_START(&pVM->patm.s.StatHandleTrap, a);
6412
6413 /* Find the patch record. */
6414 /* Note: there might not be a patch to guest translation record (global function) */
6415 offset = pEip - pVM->patm.s.pPatchMemGC;
6416 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
6417 if (pvPatchCoreOffset)
6418 {
6419 pPatch = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6420
6421 Assert(offset >= pPatch->patch.pPatchBlockOffset && offset < pPatch->patch.pPatchBlockOffset + pPatch->patch.cbPatchBlockSize);
6422
6423 if (pPatch->patch.uState == PATCH_DIRTY)
6424 {
6425 Log(("PATMR3HandleTrap: trap in dirty patch at %RRv\n", pEip));
6426 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6427 {
6428 /* Function duplication patches set fPIF to 1 on entry */
6429 pVM->patm.s.pGCStateHC->fPIF = 1;
6430 }
6431 }
6432 else
6433 if (pPatch->patch.uState == PATCH_DISABLED)
6434 {
6435 Log(("PATMR3HandleTrap: trap in disabled patch at %RRv\n", pEip));
6436 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6437 {
6438 /* Function duplication patches set fPIF to 1 on entry */
6439 pVM->patm.s.pGCStateHC->fPIF = 1;
6440 }
6441 }
6442 else
6443 if (pPatch->patch.uState == PATCH_DISABLE_PENDING)
6444 {
6445 RTRCPTR pPrivInstrGC = pPatch->patch.pPrivInstrGC;
6446
6447 Log(("PATMR3HandleTrap: disable operation is pending for patch at %RRv\n", pPatch->patch.pPrivInstrGC));
6448 rc = PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6449 AssertReleaseMsg(rc != VWRN_PATCH_REMOVED, ("PATMR3DisablePatch removed patch at %RRv\n", pPrivInstrGC));
6450 AssertMsg(pPatch->patch.uState == PATCH_DISABLED || pPatch->patch.uState == PATCH_UNUSABLE, ("Unexpected failure to disable patch state=%d rc=%Rrc\n", pPatch->patch.uState, rc));
6451 }
6452
6453 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, offset, false);
6454 AssertReleaseMsg(pPatchToGuestRec, ("PATMR3HandleTrap: Unable to find corresponding guest address for %RRv (offset %x)\n", pEip, offset));
6455
6456 pNewEip = pPatchToGuestRec->pOrgInstrGC;
6457 pPatch->patch.cTraps++;
6458 PATM_STAT_FAULT_INC(&pPatch->patch);
6459 }
6460 else
6461 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 0, ("PATMR3HandleTrap: Unable to find translation record for %RRv (PIF=0)\n", pEip));
6462
6463 /* Check if we were interrupted in PATM generated instruction code. */
6464 if (pVM->patm.s.pGCStateHC->fPIF == 0)
6465 {
6466 DISCPUSTATE Cpu;
6467 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pEip, &Cpu, "PIF Trap: ");
6468 AssertRC(rc);
6469
6470 if ( rc == VINF_SUCCESS
6471 && ( Cpu.pCurInstr->uOpcode == OP_PUSHF
6472 || Cpu.pCurInstr->uOpcode == OP_PUSH
6473 || Cpu.pCurInstr->uOpcode == OP_CALL)
6474 )
6475 {
6476 uint64_t fFlags;
6477
6478 STAM_COUNTER_INC(&pVM->patm.s.StatPushTrap);
6479
6480 if (Cpu.pCurInstr->uOpcode == OP_PUSH)
6481 {
6482 rc = PGMShwGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6483 if ( rc == VINF_SUCCESS
6484 && ((fFlags & (X86_PTE_P|X86_PTE_RW)) == (X86_PTE_P|X86_PTE_RW)) )
6485 {
6486 /* The stack address is fine, so the push argument is a pointer -> emulate this instruction */
6487
6488 /* Reset the PATM stack. */
6489 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6490
6491 pVM->patm.s.pGCStateHC->fPIF = 1;
6492
6493 Log(("Faulting push -> go back to the original instruction\n"));
6494
6495 /* continue at the original instruction */
6496 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6497 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6498 return VINF_SUCCESS;
6499 }
6500 }
6501
6502 /* Typical pushf (most patches)/push (call patch) trap because of a monitored page. */
6503 rc = PGMShwMakePageWritable(pVCpu, pCtx->esp, 0 /*fFlags*/);
6504 AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
6505 if (rc == VINF_SUCCESS)
6506 {
6507 /* The guest page *must* be present. */
6508 rc = PGMGstGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6509 if ( rc == VINF_SUCCESS
6510 && (fFlags & X86_PTE_P))
6511 {
6512 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6513 return VINF_PATCH_CONTINUE;
6514 }
6515 }
6516 }
6517 else
6518 if (pPatch->patch.pPrivInstrGC == pNewEip)
6519 {
6520 /* Invalidated patch or first instruction overwritten.
6521 * We can ignore the fPIF state in this case.
6522 */
6523 /* Reset the PATM stack. */
6524 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6525
6526 Log(("Call to invalidated patch -> go back to the original instruction\n"));
6527
6528 pVM->patm.s.pGCStateHC->fPIF = 1;
6529
6530 /* continue at the original instruction */
6531 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6532 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6533 return VINF_SUCCESS;
6534 }
6535
6536 char szBuf[256];
6537 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pEip, DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6538
6539 /* Very bad. We crashed in emitted code. Probably stack? */
6540 if (pPatch)
6541 {
6542 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6543 ("Crash in patch code %RRv (%RRv) esp=%RX32\nPatch state=%x flags=%RX64 fDirty=%d\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), pPatch->patch.uState, pPatch->patch.flags, pPatchToGuestRec->fDirty, szBuf));
6544 }
6545 else
6546 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6547 ("Crash in patch code %RRv (%RRv) esp=%RX32\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), szBuf));
6548 EMR3FatalError(pVCpu, VERR_INTERNAL_ERROR);
6549 }
6550
6551 /* From here on, we must have a valid patch to guest translation. */
6552 if (pvPatchCoreOffset == 0)
6553 {
6554 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6555 AssertMsgFailed(("PATMR3HandleTrap: patch not found at address %RRv!!\n", pEip));
6556 return VERR_PATCH_NOT_FOUND;
6557 }
6558
6559 /* Take care of dirty/changed instructions. */
6560 if (pPatchToGuestRec->fDirty)
6561 {
6562 Assert(pPatchToGuestRec->Core.Key == offset);
6563 Assert(pVM->patm.s.pGCStateHC->fPIF == 1);
6564
6565 rc = patmR3HandleDirtyInstr(pVM, pCtx, pPatch, pPatchToGuestRec, pEip);
6566 if (RT_SUCCESS(rc))
6567 {
6568 /* Retry the current instruction. */
6569 pNewEip = pEip;
6570 rc = VINF_PATCH_CONTINUE; /* Continue at current patch instruction. */
6571 }
6572 else
6573 {
6574 /* Reset the PATM stack. */
6575 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6576
6577 rc = VINF_SUCCESS; /* Continue at original instruction. */
6578 }
6579
6580 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6581 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6582 return rc;
6583 }
6584
6585#ifdef VBOX_STRICT
6586 if (pPatch->patch.flags & PATMFL_DUPLICATE_FUNCTION)
6587 {
6588 DISCPUSTATE cpu;
6589 bool disret;
6590 uint32_t cbInstr;
6591 PATMP2GLOOKUPREC cacheRec;
6592 RT_ZERO(cacheRec);
6593 cacheRec.pPatch = &pPatch->patch;
6594
6595 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
6596 &cpu, &cbInstr);
6597 if (cacheRec.Lock.pvMap)
6598 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6599
6600 if (disret && cpu.pCurInstr->uOpcode == OP_RETN)
6601 {
6602 RTRCPTR retaddr;
6603 PCPUMCTX pCtx2;
6604
6605 pCtx2 = CPUMQueryGuestCtxPtr(pVCpu);
6606
6607 rc = PGMPhysSimpleReadGCPtr(pVCpu, &retaddr, pCtx2->esp, sizeof(retaddr));
6608 AssertRC(rc);
6609
6610 Log(("Return failed at %RRv (%RRv)\n", pEip, pNewEip));
6611 Log(("Expected return address %RRv found address %RRv Psp=%x\n", pVM->patm.s.pGCStackHC[(pVM->patm.s.pGCStateHC->Psp+PATM_STACK_SIZE)/sizeof(RTRCPTR)], retaddr, pVM->patm.s.pGCStateHC->Psp));
6612 }
6613 }
6614#endif
6615
6616 /* Return the original address, corrected by subtracting the CS base address. */
6617 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6618
6619 /* Reset the PATM stack. */
6620 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6621
6622 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pNewEip)
6623 {
6624 /* Must be a faulting instruction after sti; currently only sysexit, hlt or iret */
6625 Log(("PATMR3HandleTrap %RRv -> inhibit irqs set!\n", pEip));
6626#ifdef VBOX_STRICT
6627 DISCPUSTATE cpu;
6628 bool disret;
6629 uint32_t cbInstr;
6630 PATMP2GLOOKUPREC cacheRec;
6631 RT_ZERO(cacheRec);
6632 cacheRec.pPatch = &pPatch->patch;
6633
6634 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_ORGCODE,
6635 &cpu, &cbInstr);
6636 if (cacheRec.Lock.pvMap)
6637 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6638
6639 if (disret && (cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_INT3))
6640 {
6641 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
6642 &cpu, &cbInstr);
6643 if (cacheRec.Lock.pvMap)
6644 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6645
6646 Assert(cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_IRET);
6647 }
6648#endif
6649 EMSetInhibitInterruptsPC(pVCpu, pNewEip);
6650 pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts = 0;
6651 }
6652
6653 Log2(("pPatchBlockGC %RRv - pEip %RRv corresponding GC address %RRv\n", PATCHCODE_PTR_GC(&pPatch->patch), pEip, pNewEip));
6654 DBGFR3_DISAS_INSTR_LOG(pVCpu, pCtx->cs.Sel, pNewEip, "PATCHRET: ");
6655 if (pNewEip >= pPatch->patch.pPrivInstrGC && pNewEip < pPatch->patch.pPrivInstrGC + pPatch->patch.cbPatchJump)
6656 {
6657 /* We can't jump back to code that we've overwritten with a 5 byte jump! */
6658 Log(("Disabling patch at location %RRv due to trap too close to the privileged instruction \n", pPatch->patch.pPrivInstrGC));
6659 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6660 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6661 return VERR_PATCH_DISABLED;
6662 }
6663
6664#ifdef PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
6665 /** @todo compare to nr of successful runs. add some aging algorithm and determine the best time to disable the patch */
6666 if (pPatch->patch.cTraps > MAX_PATCH_TRAPS)
6667 {
6668 Log(("Disabling patch at location %RRv due to too many traps inside patch code\n", pPatch->patch.pPrivInstrGC));
6669 //we are only wasting time, back out the patch
6670 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6671 pTrapRec->pNextPatchInstr = 0;
6672 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6673 return VERR_PATCH_DISABLED;
6674 }
6675#endif
6676
6677 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6678 return VINF_SUCCESS;
6679}
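/* Illustrative sketch (hedged, not part of the original source): the core
 * patch-address-to-guest-address translation performed by PATMR3HandleTrap above,
 * in isolation. The helper name is hypothetical; it uses the same patch-to-guest
 * AVL tree and best-fit (round down) lookup as the code above. */
#if 0
static RTRCPTR patmExamplePatchAddrToGuest(PVM pVM, PPATMPATCHREC pPatchRec, RTRCPTR pEip)
{
    /* Offset of the faulting address within the patch memory block. */
    RTRCUINTPTR offset = pEip - pVM->patm.s.pPatchMemGC;

    /* Best fit below or equal: the record describing the patch instruction at or
     * just before this offset. */
    PRECPATCHTOGUEST pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatchRec->patch.Patch2GuestAddrTree,
                                                                 offset, false /*fAbove*/);
    return pRec ? pRec->pOrgInstrGC : NIL_RTRCPTR;
}
#endif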
6680
6681
6682/**
6683 * Handle a page fault in a monitored page.
6684 *
6685 * @returns VBox status code.
6686 * @param pVM Pointer to the VM.
6687 */
6688VMMR3_INT_DECL(int) PATMR3HandleMonitoredPage(PVM pVM)
6689{
6690 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
6691
6692 RTRCPTR addr = pVM->patm.s.pvFaultMonitor;
6693 addr &= PAGE_BASE_GC_MASK;
6694
6695 int rc = PGMHandlerVirtualDeregister(pVM, addr);
6696 AssertRC(rc); NOREF(rc);
6697
6698 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, false);
6699 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) == PAGE_ADDRESS(addr))
6700 {
6701 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6702 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6703 rc = PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6704 if (rc == VWRN_PATCH_REMOVED)
6705 return VINF_SUCCESS;
6706
6707 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6708
6709 if (addr == pPatchRec->patch.pPrivInstrGC)
6710 addr++;
6711 }
6712
6713 for (;;)
6714 {
6715 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, true);
6716
6717 if (!pPatchRec || PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) != PAGE_ADDRESS(addr))
6718 break;
6719
6720 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
6721 {
6722 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6723 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6724 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6725 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6726 }
6727 addr = pPatchRec->patch.pPrivInstrGC + 1;
6728 }
6729
6730 pVM->patm.s.pvFaultMonitor = 0;
6731 return VINF_SUCCESS;
6732}
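/* Illustrative sketch (hedged, not part of the original source): the enumeration
 * pattern used by PATMR3HandleMonitoredPage above -- walk every patch record
 * whose privileged instruction lives in the given guest page by repeating the
 * AVL best-fit-above lookup and advancing past each hit. The helper name is
 * hypothetical. */
#if 0
static unsigned patmExampleCountPatchesInPage(PVM pVM, RTRCPTR GCPtrPage)
{
    unsigned cFound = 0;
    RTRCPTR  addr   = GCPtrPage & PAGE_BASE_GC_MASK;
    for (;;)
    {
        PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree,
                                                                     addr, true /*fAbove*/);
        if (!pPatchRec || PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) != PAGE_ADDRESS(addr))
            break;
        cFound++;
        addr = pPatchRec->patch.pPrivInstrGC + 1; /* continue after this patch */
    }
    return cFound;
}
#endif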
6733
6734
6735#ifdef VBOX_WITH_STATISTICS
6736
6737static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch)
6738{
6739 if (pPatch->flags & PATMFL_SYSENTER)
6740 {
6741 return "SYSENT";
6742 }
6743 else
6744 if (pPatch->flags & (PATMFL_TRAPHANDLER|PATMFL_INTHANDLER))
6745 {
6746 static char szTrap[16];
6747 uint32_t iGate;
6748
6749 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
6750 if (iGate < 256)
6751 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-%2X" : "TRAP-%2X", iGate);
6752 else
6753 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-??" : "TRAP-??");
6754 return szTrap;
6755 }
6756 else
6757 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
6758 return "DUPFUNC";
6759 else
6760 if (pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL)
6761 return "FUNCCALL";
6762 else
6763 if (pPatch->flags & PATMFL_TRAMPOLINE)
6764 return "TRAMP";
6765 else
6766 return patmGetInstructionString(pPatch->opcode, pPatch->flags);
6767}
6768
6769static const char *PATMPatchState(PVM pVM, PPATCHINFO pPatch)
6770{
6771 NOREF(pVM);
6772 switch(pPatch->uState)
6773 {
6774 case PATCH_ENABLED:
6775 return "ENA";
6776 case PATCH_DISABLED:
6777 return "DIS";
6778 case PATCH_DIRTY:
6779 return "DIR";
6780 case PATCH_UNUSABLE:
6781 return "UNU";
6782 case PATCH_REFUSED:
6783 return "REF";
6784 case PATCH_DISABLE_PENDING:
6785 return "DIP";
6786 default:
6787 AssertFailed();
6788 return " ";
6789 }
6790}
6791
6792/**
6793 * Resets the sample.
6794 * @param pVM Pointer to the VM.
6795 * @param pvSample The sample registered using STAMR3RegisterCallback.
6796 */
6797static void patmResetStat(PVM pVM, void *pvSample)
6798{
6799 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6800 Assert(pPatch);
6801
6802 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A = 0;
6803 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B = 0;
6804}
6805
6806/**
6807 * Prints the sample into the buffer.
6808 *
6809 * @param pVM Pointer to the VM.
6810 * @param pvSample The sample registered using STAMR3RegisterCallback.
6811 * @param pszBuf The buffer to print into.
6812 * @param cchBuf The size of the buffer.
6813 */
6814static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf)
6815{
6816 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6817 Assert(pPatch);
6818
6819 Assert(pPatch->uState != PATCH_REFUSED);
6820 Assert(!(pPatch->flags & (PATMFL_REPLACE_FUNCTION_CALL|PATMFL_MMIO_ACCESS)));
6821
6822 RTStrPrintf(pszBuf, cchBuf, "size %04x ->%3s %8s - %08d - %08d",
6823 pPatch->cbPatchBlockSize, PATMPatchState(pVM, pPatch), PATMPatchType(pVM, pPatch),
6824 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A, pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B);
6825}
6826
6827/**
6828 * Returns the GC address of the corresponding patch statistics counter
6829 *
6830 * @returns Stat address
6831 * @param pVM Pointer to the VM.
6832 * @param pPatch Patch structure
6833 */
6834RTRCPTR patmPatchQueryStatAddress(PVM pVM, PPATCHINFO pPatch)
6835{
6836 Assert(pPatch->uPatchIdx != PATM_STAT_INDEX_NONE);
6837 return pVM->patm.s.pStatsGC + sizeof(STAMRATIOU32) * pPatch->uPatchIdx + RT_OFFSETOF(STAMRATIOU32, u32A);
6838}
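/* Example (illustrative, assuming STAMRATIOU32 is a pair of uint32_t counters,
 * i.e. 8 bytes): for patch index 3 the returned address is
 * pStatsGC + 3 * 8 + 0 = pStatsGC + 24, which is the u32A member of the fourth
 * entry in the guest context statistics array. */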
6839
6840#endif /* VBOX_WITH_STATISTICS */
6841#ifdef VBOX_WITH_DEBUGGER
6842
6843/**
6844 * The '.patmoff' command.
6845 *
6846 * @returns VBox status code.
6847 * @param pCmd Pointer to the command descriptor (as registered).
6848 * @param pCmdHlp Pointer to command helper functions.
6849 * @param pUVM The user mode VM handle.
6850 * @param paArgs Pointer to (readonly) array of arguments.
6851 * @param cArgs Number of arguments in the array.
6852 */
6853static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
6854{
6855 /*
6856 * Validate input.
6857 */
6858 NOREF(cArgs); NOREF(paArgs);
6859 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
6860 PVM pVM = pUVM->pVM;
6861 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
6862
6863 if (HMIsEnabled(pVM))
6864 return DBGCCmdHlpPrintf(pCmdHlp, "PATM is permanently disabled by HM.\n");
6865
6866 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, DisableAllPatches, pVM);
6867 PATMR3AllowPatching(pVM->pUVM, false);
6868 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching disabled\n");
6869}
6870
6871/**
6872 * The '.patmon' command.
6873 *
6874 * @returns VBox status code.
6875 * @param pCmd Pointer to the command descriptor (as registered).
6876 * @param pCmdHlp Pointer to command helper functions.
6877 * @param pUVM The user mode VM handle.
6878 * @param paArgs Pointer to (readonly) array of arguments.
6879 * @param cArgs Number of arguments in the array.
6880 */
6881static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
6882{
6883 /*
6884 * Validate input.
6885 */
6886 NOREF(cArgs); NOREF(paArgs);
6887 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
6888 PVM pVM = pUVM->pVM;
6889 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
6890
6891 if (HMIsEnabled(pVM))
6892 return DBGCCmdHlpPrintf(pCmdHlp, "PATM is permanently disabled by HM.\n");
6893
6894 PATMR3AllowPatching(pVM->pUVM, true);
6895 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, EnableAllPatches, pVM);
6896 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching enabled\n");
6897}
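/* Usage note (illustrative): from the VM debugger console these two commands are
 * invoked without arguments --
 *     .patmoff    disables all existing patches and turns patching off
 *     .patmon     allows patching again and re-enables all patches
 * Both print a one-line confirmation, or a notice if HM has permanently disabled
 * PATM. */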
6898
6899#endif /* VBOX_WITH_DEBUGGER */