VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PATM.cpp@46175

Last change on this file since 46175 was 46159, checked in by vboxsync, 12 years ago

Patch manager support in the disassembler, making the 'u' command in the debugger always show the unpatched instructions and annotate those instructions which have patches associated with them (in any state).

1/* $Id: PATM.cpp 46159 2013-05-18 19:56:08Z vboxsync $ */
2/** @file
3 * PATM - Dynamic Guest OS Patching Manager
4 *
5 * @note Never ever reuse patch memory!!
6 */
7
8/*
9 * Copyright (C) 2006-2013 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_PATM
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/stam.h>
26#include <VBox/vmm/pgm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/cpumdis.h>
29#include <VBox/vmm/iom.h>
30#include <VBox/vmm/mm.h>
31#include <VBox/vmm/em.h>
32#include <VBox/vmm/hm.h>
33#include <VBox/vmm/ssm.h>
34#include <VBox/vmm/trpm.h>
35#include <VBox/vmm/cfgm.h>
36#include <VBox/param.h>
37#include <VBox/vmm/selm.h>
38#include <VBox/vmm/csam.h>
39#include <iprt/avl.h>
40#include "PATMInternal.h"
41#include "PATMPatch.h"
42#include <VBox/vmm/vm.h>
43#include <VBox/vmm/uvm.h>
44#include <VBox/dbg.h>
45#include <VBox/err.h>
46#include <VBox/log.h>
47#include <iprt/assert.h>
48#include <iprt/asm.h>
49#include <VBox/dis.h>
50#include <VBox/disopcode.h>
51#include "internal/pgm.h"
52
53#include <iprt/string.h>
54#include "PATMA.h"
55
56//#define PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
57//#define PATM_DISABLE_ALL
58
59/**
60 * Refresh trampoline patch state.
61 */
62typedef struct PATMREFRESHPATCH
63{
64 /** Pointer to the VM structure. */
65 PVM pVM;
66 /** The trampoline patch record. */
67 PPATCHINFO pPatchTrampoline;
68 /** The new patch we want to jump to. */
69 PPATCHINFO pPatchRec;
70} PATMREFRESHPATCH, *PPATMREFRESHPATCH;
71
72
73#define PATMREAD_RAWCODE 1 /* read code as-is */
74#define PATMREAD_ORGCODE 2 /* read original guest opcode bytes; not the patched bytes */
75#define PATMREAD_NOCHECK 4 /* don't check for patch conflicts */
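/* These read flags are passed to the disassembler via PATMDISASM::fReadFlags and consumed by
 * patmReadBytes() below; e.g. (PATMREAD_ORGCODE | PATMREAD_NOCHECK) would read the original,
 * unpatched bytes without asserting on patch-jump conflicts (illustrative combination). */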
76
77/*
78 * Private structure used during disassembly
79 */
80typedef struct
81{
82 PVM pVM;
83 PPATCHINFO pPatchInfo;
84 R3PTRTYPE(uint8_t *) pbInstrHC;
85 RTRCPTR pInstrGC;
86 uint32_t fReadFlags;
87} PATMDISASM, *PPATMDISASM;
88
89
90/*******************************************************************************
91* Internal Functions *
92*******************************************************************************/
93
94static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pPatch);
95static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
96static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
97
98#ifdef LOG_ENABLED // keep gcc quiet
99static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC);
100#endif
101#ifdef VBOX_WITH_STATISTICS
102static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch);
103static void patmResetStat(PVM pVM, void *pvSample);
104static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf);
105#endif
106
107#define patmPatchHCPtr2PatchGCPtr(pVM, pHC) (pVM->patm.s.pPatchMemGC + (pHC - pVM->patm.s.pPatchMemHC))
108#define patmPatchGCPtr2PatchHCPtr(pVM, pGC) (pVM->patm.s.pPatchMemHC + (pGC - pVM->patm.s.pPatchMemGC))
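/* Worked example (hypothetical addresses): if pPatchMemHC = 0x00007f2480000000 and
 * pPatchMemGC = 0xa0400000, a patch byte at HC address 0x00007f2480000010 maps to GC
 * address 0xa0400010 and vice versa; only the offset into the patch block is shared. */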
109
110static int patmReinit(PVM pVM);
111static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam);
112static RTRCPTR patmR3GuestGCPtrToPatchGCPtrSimple(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC);
113static int patmR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch);
114
115#ifdef VBOX_WITH_DEBUGGER
116static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM);
117static FNDBGCCMD patmr3CmdOn;
118static FNDBGCCMD patmr3CmdOff;
119
120/** Command descriptors. */
121static const DBGCCMD g_aCmds[] =
122{
123 /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, fFlags, pfnHandler, pszSyntax, pszDescription */
124 { "patmon", 0, 0, NULL, 0, 0, patmr3CmdOn, "", "Enable patching." },
125 { "patmoff", 0, 0, NULL, 0, 0, patmr3CmdOff, "", "Disable patching." },
126};
127#endif
128
129/* Don't want to break saved states, so put it here as a global variable. */
130static unsigned int cIDTHandlersDisabled = 0;
131
132/**
133 * Initializes the PATM.
134 *
135 * @returns VBox status code.
136 * @param pVM Pointer to the VM.
137 */
138VMMR3_INT_DECL(int) PATMR3Init(PVM pVM)
139{
140 int rc;
141
142 /*
143 * We only need a saved state dummy loader if HM is enabled.
144 */
145 if (HMIsEnabled(pVM))
146 {
147 pVM->fPATMEnabled = false;
148 return SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SSM_VERSION, 0,
149 NULL, NULL, NULL,
150 NULL, NULL, NULL,
151 NULL, patmR3LoadDummy, NULL);
152 }
153
154 /*
155 * Raw-mode.
156 */
157 Log(("PATMR3Init: Patch record size %d\n", sizeof(PATCHINFO)));
158
159 /* These values can't change as they are hardcoded in patch code (old saved states!) */
160 AssertCompile(VMCPU_FF_TIMER == RT_BIT_32(2));
161 AssertCompile(VM_FF_REQUEST == VMCPU_FF_REQUEST);
162 AssertCompile(VMCPU_FF_INTERRUPT_APIC == RT_BIT_32(0));
163 AssertCompile(VMCPU_FF_INTERRUPT_PIC == RT_BIT_32(1));
164
165 AssertReleaseMsg(PATMInterruptFlag == (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST),
166 ("Interrupt flags out of sync!! PATMInterruptFlag=%#x expected %#x. broken assembler?\n", PATMInterruptFlag, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST));
167
168 /* Allocate patch memory and GC patch state memory. */
169 pVM->patm.s.cbPatchMem = PATCH_MEMORY_SIZE;
170 /* Add another page in case the generated code is much larger than expected. */
171 /** @todo bad safety precaution */
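    /* Rough layout of the single allocation made below (derived from the pointer setup that follows):
     *   [patch memory: PATCH_MEMORY_SIZE] [slack page: PAGE_SIZE] [GC stack: PATM_STACK_TOTAL_SIZE]
     *   [GC state: PAGE_SIZE] [statistics: PATM_STAT_MEMSIZE]. */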
172 rc = MMR3HyperAllocOnceNoRel(pVM, PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE + PATM_STAT_MEMSIZE, PAGE_SIZE, MM_TAG_PATM, (void **)&pVM->patm.s.pPatchMemHC);
173 if (RT_FAILURE(rc))
174 {
175 Log(("MMHyperAlloc failed with %Rrc\n", rc));
176 return rc;
177 }
178 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
179
180 /* PATM stack page for call instruction execution. (2 parts: one for our private stack and one to store the original return address) */
181 pVM->patm.s.pGCStackHC = (RTRCPTR *)(pVM->patm.s.pPatchMemHC + PATCH_MEMORY_SIZE + PAGE_SIZE);
182 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
183
184 patmR3DbgInit(pVM);
185
186 /*
187 * Hypervisor memory for GC status data (read/write)
188 *
189 * Note1: This is non-critical data; if trashed by the guest, then it will only cause problems for itself
190 * Note2: This doesn't really belong here, but we need access to it for relocation purposes
191 *
192 */
193 Assert(sizeof(PATMGCSTATE) < PAGE_SIZE); /* Note: hardcoded dependencies on this exist. */
194 pVM->patm.s.pGCStateHC = (PPATMGCSTATE)((uint8_t *)pVM->patm.s.pGCStackHC + PATM_STACK_TOTAL_SIZE);
195 pVM->patm.s.pGCStateGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
196
197 /* Hypervisor memory for patch statistics */
198 pVM->patm.s.pStatsHC = (PSTAMRATIOU32)((uint8_t *)pVM->patm.s.pGCStateHC + PAGE_SIZE);
199 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
200
201 /* Memory for patch lookup trees. */
202 rc = MMHyperAlloc(pVM, sizeof(*pVM->patm.s.PatchLookupTreeHC), 0, MM_TAG_PATM, (void **)&pVM->patm.s.PatchLookupTreeHC);
203 AssertRCReturn(rc, rc);
204 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
205
206#ifdef RT_ARCH_AMD64 /* see patmReinit(). */
207 /* Check CFGM option. */
208 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "PATMEnabled", &pVM->fPATMEnabled);
209 if (RT_FAILURE(rc))
210# ifdef PATM_DISABLE_ALL
211 pVM->fPATMEnabled = false;
212# else
213 pVM->fPATMEnabled = true;
214# endif
215#endif
216
217 rc = patmReinit(pVM);
218 AssertRC(rc);
219 if (RT_FAILURE(rc))
220 return rc;
221
222 /*
223 * Register save and load state notifiers.
224 */
225 rc = SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SSM_VERSION, sizeof(pVM->patm.s) + PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE,
226 NULL, NULL, NULL,
227 NULL, patmR3Save, NULL,
228 NULL, patmR3Load, NULL);
229 AssertRCReturn(rc, rc);
230
231#ifdef VBOX_WITH_DEBUGGER
232 /*
233 * Debugger commands.
234 */
235 static bool s_fRegisteredCmds = false;
236 if (!s_fRegisteredCmds)
237 {
238 int rc2 = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
239 if (RT_SUCCESS(rc2))
240 s_fRegisteredCmds = true;
241 }
242#endif
243
244#ifdef VBOX_WITH_STATISTICS
245 STAM_REG(pVM, &pVM->patm.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/PATM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
246 STAM_REG(pVM, &pVM->patm.s.StatPATMMemoryUsed,STAMTYPE_COUNTER, "/PATM/MemoryUsed", STAMUNIT_OCCURENCES, "The amount of hypervisor heap used for patches.");
247 STAM_REG(pVM, &pVM->patm.s.StatDisabled, STAMTYPE_COUNTER, "/PATM/Patch/Disabled", STAMUNIT_OCCURENCES, "Number of times patches were disabled.");
248 STAM_REG(pVM, &pVM->patm.s.StatEnabled, STAMTYPE_COUNTER, "/PATM/Patch/Enabled", STAMUNIT_OCCURENCES, "Number of times patches were enabled.");
249 STAM_REG(pVM, &pVM->patm.s.StatDirty, STAMTYPE_COUNTER, "/PATM/Patch/Dirty", STAMUNIT_OCCURENCES, "Number of times patches were marked dirty.");
250 STAM_REG(pVM, &pVM->patm.s.StatUnusable, STAMTYPE_COUNTER, "/PATM/Patch/Unusable", STAMUNIT_OCCURENCES, "Number of unusable patches (conflicts).");
251 STAM_REG(pVM, &pVM->patm.s.StatInstalled, STAMTYPE_COUNTER, "/PATM/Patch/Installed", STAMUNIT_OCCURENCES, "Number of installed patches.");
252 STAM_REG(pVM, &pVM->patm.s.StatInt3Callable, STAMTYPE_COUNTER, "/PATM/Patch/Int3Callable", STAMUNIT_OCCURENCES, "Number of cli patches turned into int3 patches.");
253
254 STAM_REG(pVM, &pVM->patm.s.StatInt3BlockRun, STAMTYPE_COUNTER, "/PATM/Patch/Run/Int3", STAMUNIT_OCCURENCES, "Number of times an int3 block patch was executed.");
255 STAMR3RegisterF(pVM, &pVM->patm.s.pGCStateHC->uPatchCalls, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Patch/Run/Normal");
256
257 STAM_REG(pVM, &pVM->patm.s.StatInstalledFunctionPatches, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Function", STAMUNIT_OCCURENCES, "Number of installed function duplication patches.");
258 STAM_REG(pVM, &pVM->patm.s.StatInstalledTrampoline, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Trampoline", STAMUNIT_OCCURENCES, "Number of installed trampoline patches.");
259 STAM_REG(pVM, &pVM->patm.s.StatInstalledJump, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Jump", STAMUNIT_OCCURENCES, "Number of installed jump patches.");
260
261 STAM_REG(pVM, &pVM->patm.s.StatOverwritten, STAMTYPE_COUNTER, "/PATM/Patch/Overwritten", STAMUNIT_OCCURENCES, "Number of overwritten patches.");
262 STAM_REG(pVM, &pVM->patm.s.StatFixedConflicts,STAMTYPE_COUNTER, "/PATM/Patch/ConflictFixed", STAMUNIT_OCCURENCES, "Number of fixed conflicts.");
263 STAM_REG(pVM, &pVM->patm.s.StatFlushed, STAMTYPE_COUNTER, "/PATM/Patch/Flushed", STAMUNIT_OCCURENCES, "Number of flushes of pages with patch jumps.");
264 STAM_REG(pVM, &pVM->patm.s.StatMonitored, STAMTYPE_COUNTER, "/PATM/Patch/Monitored", STAMUNIT_OCCURENCES, "Number of patches in monitored patch pages.");
265 STAM_REG(pVM, &pVM->patm.s.StatPageBoundaryCrossed, STAMTYPE_COUNTER, "/PATM/Patch/BoundaryCross", STAMUNIT_OCCURENCES, "Number of refused patches due to patch jump crossing page boundary.");
266
267 STAM_REG(pVM, &pVM->patm.s.StatHandleTrap, STAMTYPE_PROFILE, "/PATM/HandleTrap", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3HandleTrap");
268 STAM_REG(pVM, &pVM->patm.s.StatPushTrap, STAMTYPE_COUNTER, "/PATM/HandleTrap/PushWP", STAMUNIT_OCCURENCES, "Number of traps due to monitored stack pages.");
269
270 STAM_REG(pVM, &pVM->patm.s.StatSwitchBack, STAMTYPE_COUNTER, "/PATM/SwitchBack", STAMUNIT_OCCURENCES, "Switch back to original guest code when IF=1 & executing PATM instructions");
271 STAM_REG(pVM, &pVM->patm.s.StatSwitchBackFail,STAMTYPE_COUNTER, "/PATM/SwitchBackFail", STAMUNIT_OCCURENCES, "Failed switch back to original guest code when IF=1 & executing PATM instructions");
272
273 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQFailed, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Failed", STAMUNIT_OCCURENCES, "Nr of failed PATMR3DuplicateFunctionRequest calls");
274 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQSuccess, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Success", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls");
275 STAM_REG(pVM, &pVM->patm.s.StatDuplicateUseExisting,STAMTYPE_COUNTER, "/PATM/Function/DupREQ/UseExist", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls when using an existing patch");
276
277 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupInsert, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Insert", STAMUNIT_OCCURENCES, "Nr of successful function address insertions");
278 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupReplace, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Replace", STAMUNIT_OCCURENCES, "Nr of successful function address replacements");
279 STAM_REG(pVM, &pVM->patm.s.StatU32FunctionMaxSlotsUsed, STAMTYPE_U32_RESET,"/PATM/Function/Lookup/MaxSlots", STAMUNIT_OCCURENCES, "Maximum nr of lookup slots used in all call patches");
280
281 STAM_REG(pVM, &pVM->patm.s.StatFunctionFound, STAMTYPE_COUNTER, "/PATM/Function/Found", STAMUNIT_OCCURENCES, "Nr of successful function patch lookups in GC");
282 STAM_REG(pVM, &pVM->patm.s.StatFunctionNotFound, STAMTYPE_COUNTER, "/PATM/Function/NotFound", STAMUNIT_OCCURENCES, "Nr of failed function patch lookups in GC");
283
284 STAM_REG(pVM, &pVM->patm.s.StatPatchWrite, STAMTYPE_PROFILE, "/PATM/Write/Handle", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3PatchWrite");
285 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteDetect, STAMTYPE_PROFILE, "/PATM/Write/Detect", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMIsWriteToPatchPage");
286 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpreted, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Success", STAMUNIT_OCCURENCES, "Nr of interpreted patch writes.");
287 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpretedFailed, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Failed", STAMUNIT_OCCURENCES, "Nr of failed interpreted patch writes.");
288
289 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshSuccess, STAMTYPE_COUNTER, "/PATM/Refresh/Success", STAMUNIT_OCCURENCES, "Successful patch refreshes");
290 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshFailed, STAMTYPE_COUNTER, "/PATM/Refresh/Failure", STAMUNIT_OCCURENCES, "Failed patch refreshes");
291
292 STAM_REG(pVM, &pVM->patm.s.StatPatchPageInserted, STAMTYPE_COUNTER, "/PATM/Page/Inserted", STAMUNIT_OCCURENCES, "Nr of inserted guest pages that were patched");
293 STAM_REG(pVM, &pVM->patm.s.StatPatchPageRemoved, STAMTYPE_COUNTER, "/PATM/Page/Removed", STAMUNIT_OCCURENCES, "Nr of removed guest pages that were patched");
294
295 STAM_REG(pVM, &pVM->patm.s.StatInstrDirty, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Detected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty.");
296 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyGood, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Corrected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and corrected later on.");
297 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyBad, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Failed", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and we were not able to correct them.");
298
299 STAM_REG(pVM, &pVM->patm.s.StatSysEnter, STAMTYPE_COUNTER, "/PATM/Emul/SysEnter", STAMUNIT_OCCURENCES, "Number of times sysenter was emulated.");
300 STAM_REG(pVM, &pVM->patm.s.StatSysExit, STAMTYPE_COUNTER, "/PATM/Emul/SysExit" , STAMUNIT_OCCURENCES, "Number of times sysexit was emulated.");
301 STAM_REG(pVM, &pVM->patm.s.StatEmulIret, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Success", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
302 STAM_REG(pVM, &pVM->patm.s.StatEmulIretFailed, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Failed", STAMUNIT_OCCURENCES, "Number of times iret emulation failed.");
303
304 STAM_REG(pVM, &pVM->patm.s.StatGenRet, STAMTYPE_COUNTER, "/PATM/Gen/Ret" , STAMUNIT_OCCURENCES, "Number of generated ret instructions.");
305 STAM_REG(pVM, &pVM->patm.s.StatGenRetReused, STAMTYPE_COUNTER, "/PATM/Gen/RetReused" , STAMUNIT_OCCURENCES, "Number of reused ret instructions.");
306 STAM_REG(pVM, &pVM->patm.s.StatGenCall, STAMTYPE_COUNTER, "/PATM/Gen/Call", STAMUNIT_OCCURENCES, "Number of generated call instructions.");
307 STAM_REG(pVM, &pVM->patm.s.StatGenJump, STAMTYPE_COUNTER, "/PATM/Gen/Jmp" , STAMUNIT_OCCURENCES, "Number of generated indirect jump instructions.");
308 STAM_REG(pVM, &pVM->patm.s.StatGenPopf, STAMTYPE_COUNTER, "/PATM/Gen/Popf" , STAMUNIT_OCCURENCES, "Number of generated popf instructions.");
309
310 STAM_REG(pVM, &pVM->patm.s.StatCheckPendingIRQ, STAMTYPE_COUNTER, "/PATM/GC/CheckIRQ" , STAMUNIT_OCCURENCES, "Number of traps that ask to check for pending irqs.");
311#endif /* VBOX_WITH_STATISTICS */
312
313 Log(("PATMCallRecord.size %d\n", PATMCallRecord.size));
314 Log(("PATMCallIndirectRecord.size %d\n", PATMCallIndirectRecord.size));
315 Log(("PATMRetRecord.size %d\n", PATMRetRecord.size));
316 Log(("PATMJumpIndirectRecord.size %d\n", PATMJumpIndirectRecord.size));
317 Log(("PATMPopf32Record.size %d\n", PATMPopf32Record.size));
318 Log(("PATMIretRecord.size %d\n", PATMIretRecord.size));
319 Log(("PATMStiRecord.size %d\n", PATMStiRecord.size));
320 Log(("PATMCheckIFRecord.size %d\n", PATMCheckIFRecord.size));
321
322 return rc;
323}
324
325/**
326 * Finalizes HMA page attributes.
327 *
328 * @returns VBox status code.
329 * @param pVM Pointer to the VM.
330 */
331VMMR3_INT_DECL(int) PATMR3InitFinalize(PVM pVM)
332{
333 if (HMIsEnabled(pVM))
334 return VINF_SUCCESS;
335
336 /* The GC state, stack and statistics must be read/write for the guest (supervisor only of course). */
337 int rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStateGC, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
338 if (RT_FAILURE(rc))
339 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
340
341 rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStackGC, PATM_STACK_TOTAL_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
342 if (RT_FAILURE(rc))
343 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
344
345 rc = PGMMapSetPage(pVM, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
346 if (RT_FAILURE(rc))
347 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
348
349 return rc;
350}
351
352/**
353 * (Re)initializes PATM
354 *
355 * @param pVM The VM.
356 */
357static int patmReinit(PVM pVM)
358{
359 int rc;
360
361 /*
362 * Assert alignment and sizes.
363 */
364 AssertRelease(!(RT_OFFSETOF(VM, patm.s) & 31));
365 AssertRelease(sizeof(pVM->patm.s) <= sizeof(pVM->patm.padding));
366
367 /*
368 * Setup any fixed pointers and offsets.
369 */
370 pVM->patm.s.offVM = RT_OFFSETOF(VM, patm);
371
372#ifndef RT_ARCH_AMD64 /* would be nice if this was changed everywhere. was driving me crazy on AMD64. */
373#ifndef PATM_DISABLE_ALL
374 pVM->fPATMEnabled = true;
375#endif
376#endif
377
378 Assert(pVM->patm.s.pGCStateHC);
379 memset(pVM->patm.s.pGCStateHC, 0, PAGE_SIZE);
380 AssertReleaseMsg(pVM->patm.s.pGCStateGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStateGC));
381
382 Log(("Patch memory allocated at %p - %RRv\n", pVM->patm.s.pPatchMemHC, pVM->patm.s.pPatchMemGC));
383 pVM->patm.s.pGCStateHC->uVMFlags = X86_EFL_IF;
384
385 Assert(pVM->patm.s.pGCStackHC);
386 memset(pVM->patm.s.pGCStackHC, 0, PAGE_SIZE);
387 AssertReleaseMsg(pVM->patm.s.pGCStackGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStackGC));
388 pVM->patm.s.pGCStateHC->Psp = PATM_STACK_SIZE;
389 pVM->patm.s.pGCStateHC->fPIF = 1; /* PATM Interrupt Flag */
390
391 Assert(pVM->patm.s.pStatsHC);
392 memset(pVM->patm.s.pStatsHC, 0, PATM_STAT_MEMSIZE);
393 AssertReleaseMsg(pVM->patm.s.pStatsGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pStatsGC));
394
395 Assert(pVM->patm.s.pPatchMemHC);
396 Assert(pVM->patm.s.pPatchMemGC == MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC));
397 memset(pVM->patm.s.pPatchMemHC, 0, PATCH_MEMORY_SIZE);
398 AssertReleaseMsg(pVM->patm.s.pPatchMemGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pPatchMemHC));
399
400 /* Needed for future patching of sldt/sgdt/sidt/str etc. */
401 pVM->patm.s.pCPUMCtxGC = VM_RC_ADDR(pVM, CPUMQueryGuestCtxPtr(VMMGetCpu(pVM)));
402
403 Assert(pVM->patm.s.PatchLookupTreeHC);
404 Assert(pVM->patm.s.PatchLookupTreeGC == MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC));
405
406 /*
407 * (Re)Initialize PATM structure
408 */
409 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
410 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr);
411 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
412 pVM->patm.s.offPatchMem = 16; /* don't start with zero here */
413 pVM->patm.s.uCurrentPatchIdx = 1; /* Index zero is a dummy */
414 pVM->patm.s.pvFaultMonitor = 0;
415 pVM->patm.s.deltaReloc = 0;
416
417 /* Lowest and highest patched instruction */
418 pVM->patm.s.pPatchedInstrGCLowest = ~0;
419 pVM->patm.s.pPatchedInstrGCHighest = 0;
420
421 pVM->patm.s.PatchLookupTreeHC->PatchTree = 0;
422 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
423 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
424
425 pVM->patm.s.pfnSysEnterPatchGC = 0;
426 pVM->patm.s.pfnSysEnterGC = 0;
427
428 pVM->patm.s.fOutOfMemory = false;
429
430 pVM->patm.s.pfnHelperCallGC = 0;
431 patmR3DbgReset(pVM);
432
433 /* Generate all global functions to be used by future patches. */
434 /* We generate a fake patch in order to use the existing code for relocation. */
435 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pVM->patm.s.pGlobalPatchRec);
436 if (RT_FAILURE(rc))
437 {
438 Log(("Out of memory!!!!\n"));
439 return VERR_NO_MEMORY;
440 }
441 pVM->patm.s.pGlobalPatchRec->patch.flags = PATMFL_GLOBAL_FUNCTIONS;
442 pVM->patm.s.pGlobalPatchRec->patch.uState = PATCH_ENABLED;
443 pVM->patm.s.pGlobalPatchRec->patch.pPatchBlockOffset = pVM->patm.s.offPatchMem;
444
445 rc = patmPatchGenGlobalFunctions(pVM, &pVM->patm.s.pGlobalPatchRec->patch);
446 AssertRC(rc);
447
448 /* Update free pointer in patch memory. */
449 pVM->patm.s.offPatchMem += pVM->patm.s.pGlobalPatchRec->patch.uCurPatchOffset;
450 /* Round to next 8 byte boundary. */
451 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
452
453
454 return rc;
455}
456
457
458/**
459 * Applies relocations to data and code managed by this
460 * component. This function will be called at init and
461 * whenever the VMM needs to relocate itself inside the GC.
462 *
463 * The PATM will update the addresses used by the switcher.
464 *
465 * @param pVM The VM.
466 */
467VMMR3_INT_DECL(void) PATMR3Relocate(PVM pVM)
468{
469 if (HMIsEnabled(pVM))
470 return;
471
472 RTRCPTR GCPtrNew = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
473 RTRCINTPTR delta = GCPtrNew - pVM->patm.s.pGCStateGC;
474
475 Log(("PATMR3Relocate from %RRv to %RRv - delta %08X\n", pVM->patm.s.pGCStateGC, GCPtrNew, delta));
476 if (delta)
477 {
478 PCPUMCTX pCtx;
479
480 /* Update CPUMCTX guest context pointer. */
481 pVM->patm.s.pCPUMCtxGC += delta;
482
483 pVM->patm.s.deltaReloc = delta;
484
485 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, RelocatePatches, (void *)pVM);
486
487 pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));
488
489 /* If we are running patch code right now, then also adjust EIP. */
490 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
491 pCtx->eip += delta;
492
493 pVM->patm.s.pGCStateGC = GCPtrNew;
494 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
495
496 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
497
498 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
499
500 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
501
502 if (pVM->patm.s.pfnSysEnterPatchGC)
503 pVM->patm.s.pfnSysEnterPatchGC += delta;
504
505 /* Deal with the global patch functions. */
506 pVM->patm.s.pfnHelperCallGC += delta;
507 pVM->patm.s.pfnHelperRetGC += delta;
508 pVM->patm.s.pfnHelperIretGC += delta;
509 pVM->patm.s.pfnHelperJumpGC += delta;
510
511 RelocatePatches(&pVM->patm.s.pGlobalPatchRec->Core, (void *)pVM);
512 }
513}
514
515
516/**
517 * Terminates the PATM.
518 *
519 * Termination means cleaning up and freeing all resources;
520 * the VM itself is at this point powered off or suspended.
521 *
522 * @returns VBox status code.
523 * @param pVM Pointer to the VM.
524 */
525VMMR3_INT_DECL(int) PATMR3Term(PVM pVM)
526{
527 if (HMIsEnabled(pVM))
528 return VINF_SUCCESS;
529
530 patmR3DbgTerm(pVM);
531
532 /* Memory was all allocated from the two MM heaps and requires no freeing. */
533 return VINF_SUCCESS;
534}
535
536
537/**
538 * PATM reset callback.
539 *
540 * @returns VBox status code.
541 * @param pVM The VM which is reset.
542 */
543VMMR3_INT_DECL(int) PATMR3Reset(PVM pVM)
544{
545 Log(("PATMR3Reset\n"));
546 if (HMIsEnabled(pVM))
547 return VINF_SUCCESS;
548
549 /* Free all patches. */
550 for (;;)
551 {
552 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32RemoveBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, 0, true);
553 if (pPatchRec)
554 patmR3RemovePatch(pVM, pPatchRec, true);
555 else
556 break;
557 }
558 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
559 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
560 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
561 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
562
563 int rc = patmReinit(pVM);
564 if (RT_SUCCESS(rc))
565 rc = PATMR3InitFinalize(pVM); /* paranoia */
566
567 return rc;
568}
569
570/**
571 * @callback_method_impl{FNDISREADBYTES}
572 */
573static DECLCALLBACK(int) patmReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
574{
575 PATMDISASM *pDisInfo = (PATMDISASM *)pDis->pvUser;
576
577/** @todo change this to read more! */
578 /*
579 * Trap/interrupt handlers typically call common code on entry, which might already have patches inserted.
580 * As we currently don't support calling patch code from patch code, we let the disassembler read the original opcode bytes instead.
581 */
582 /** @todo could change in the future! */
583 if (pDisInfo->fReadFlags & PATMREAD_ORGCODE)
584 {
585 size_t cbRead = cbMaxRead;
586 RTUINTPTR uSrcAddr = pDis->uInstrAddr + offInstr;
587 int rc = PATMR3ReadOrgInstr(pDisInfo->pVM, pDis->uInstrAddr + offInstr, &pDis->abInstr[offInstr], cbRead, &cbRead);
588 if (RT_SUCCESS(rc))
589 {
590 if (cbRead >= cbMinRead)
591 {
592 pDis->cbCachedInstr = offInstr + (uint8_t)cbRead;
593 return VINF_SUCCESS;
594 }
595
596 cbMinRead -= (uint8_t)cbRead;
597 cbMaxRead -= (uint8_t)cbRead;
598 offInstr += (uint8_t)cbRead;
599 uSrcAddr += cbRead;
600 }
601
602#ifdef VBOX_STRICT
603 if ( !(pDisInfo->pPatchInfo->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER))
604 && !(pDisInfo->fReadFlags & PATMREAD_NOCHECK))
605 {
606 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pDis->uInstrAddr + offInstr, NULL) == false);
607 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pDis->uInstrAddr + offInstr + cbMinRead-1, NULL) == false);
608 }
609#endif
610 }
611
612 int rc = VINF_SUCCESS;
613 RTGCPTR32 uSrcAddr = (RTGCPTR32)pDis->uInstrAddr + offInstr;
614 if ( !pDisInfo->pbInstrHC
615 || ( PAGE_ADDRESS(pDisInfo->pInstrGC) != PAGE_ADDRESS(uSrcAddr + cbMinRead - 1)
616 && !PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr)))
617 {
618 Assert(!PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr));
619 rc = PGMPhysSimpleReadGCPtr(&pDisInfo->pVM->aCpus[0], &pDis->abInstr[offInstr], uSrcAddr, cbMinRead);
620 offInstr += cbMinRead;
621 }
622 else
623 {
624 /*
625 * pbInstrHC is the base address; adjust according to the GC pointer.
626 *
627 * Try to read the maximum number of bytes here. Since the disassembler only
628 * ever uses these bytes for the current instruction, it doesn't matter
629 * much if we accidentally read the start of the next instruction even
630 * if it happens to be a patch jump or int3.
631 */
632 uint8_t const *pbInstrHC = pDisInfo->pbInstrHC; AssertPtr(pbInstrHC);
633 pbInstrHC += uSrcAddr - pDisInfo->pInstrGC;
634
635 size_t cbMaxRead1 = PAGE_SIZE - (uSrcAddr & PAGE_OFFSET_MASK);
636 size_t cbMaxRead2 = PAGE_SIZE - ((uintptr_t)pbInstrHC & PAGE_OFFSET_MASK);
637 size_t cbToRead = RT_MIN(cbMaxRead1, RT_MAX(cbMaxRead2, cbMinRead));
638 if (cbToRead > cbMaxRead)
639 cbToRead = cbMaxRead;
640
641 memcpy(&pDis->abInstr[offInstr], pbInstrHC, cbToRead);
642 offInstr += (uint8_t)cbToRead;
643 }
644
645 pDis->cbCachedInstr = offInstr;
646 return rc;
647}
648
649
650DECLINLINE(bool) patmR3DisInstrToStr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
651 PDISCPUSTATE pCpu, uint32_t *pcbInstr, char *pszOutput, size_t cbOutput)
652{
653 PATMDISASM disinfo;
654 disinfo.pVM = pVM;
655 disinfo.pPatchInfo = pPatch;
656 disinfo.pbInstrHC = pbInstrHC;
657 disinfo.pInstrGC = InstrGCPtr32;
658 disinfo.fReadFlags = fReadFlags;
659 return RT_SUCCESS(DISInstrToStrWithReader(InstrGCPtr32,
660 (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
661 patmReadBytes, &disinfo,
662 pCpu, pcbInstr, pszOutput, cbOutput));
663}
664
665
666DECLINLINE(bool) patmR3DisInstr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
667 PDISCPUSTATE pCpu, uint32_t *pcbInstr)
668{
669 PATMDISASM disinfo;
670 disinfo.pVM = pVM;
671 disinfo.pPatchInfo = pPatch;
672 disinfo.pbInstrHC = pbInstrHC;
673 disinfo.pInstrGC = InstrGCPtr32;
674 disinfo.fReadFlags = fReadFlags;
675 return RT_SUCCESS(DISInstrWithReader(InstrGCPtr32,
676 (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
677 patmReadBytes, &disinfo,
678 pCpu, pcbInstr));
679}
680
681
682DECLINLINE(bool) patmR3DisInstrNoStrOpMode(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC,
683 uint32_t fReadFlags,
684 PDISCPUSTATE pCpu, uint32_t *pcbInstr)
685{
686 PATMDISASM disinfo;
687 disinfo.pVM = pVM;
688 disinfo.pPatchInfo = pPatch;
689 disinfo.pbInstrHC = pbInstrHC;
690 disinfo.pInstrGC = InstrGCPtr32;
691 disinfo.fReadFlags = fReadFlags;
692 return RT_SUCCESS(DISInstrWithReader(InstrGCPtr32, pPatch->uOpMode, patmReadBytes, &disinfo,
693 pCpu, pcbInstr));
694}
695
696#ifdef LOG_ENABLED
697# define PATM_LOG_ORG_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) \
698 PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, PATMREAD_ORGCODE, a_szComment, " patch:")
699# define PATM_LOG_RAW_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) \
700 PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, PATMREAD_RAWCODE, a_szComment, " patch:")
701
702# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) \
703 do { \
704 if (LogIsEnabled()) \
705 patmLogRawPatchInstr(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2); \
706 } while (0)
707
708static void patmLogRawPatchInstr(PVM pVM, PPATCHINFO pPatch, uint32_t fFlags,
709 const char *pszComment1, const char *pszComment2)
710{
711 DISCPUSTATE DisState;
712 char szOutput[128];
713 szOutput[0] = '\0';
714 patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC, NULL, fFlags,
715 &DisState, NULL, szOutput, sizeof(szOutput));
716 Log(("%s%s %s", pszComment1, pszComment2, szOutput));
717}
718
719#else
720# define PATM_LOG_ORG_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) do { } while (0)
721# define PATM_LOG_RAW_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) do { } while (0)
722# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) do { } while (0)
723#endif
724
725
726/**
727 * Callback function for RTAvloU32DoWithAll
728 *
729 * Updates all fixups in the patches
730 *
731 * @returns VBox status code.
732 * @param pNode Current node
733 * @param pParam Pointer to the VM.
734 */
735static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam)
736{
737 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
738 PVM pVM = (PVM)pParam;
739 RTRCINTPTR delta;
740 int rc;
741
742 /* Nothing to do if the patch is not active. */
743 if (pPatch->patch.uState == PATCH_REFUSED)
744 return 0;
745
746 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
747 PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Org patch jump:", "");
748
749 Log(("Nr of fixups %d\n", pPatch->patch.nrFixups));
750 delta = (RTRCINTPTR)pVM->patm.s.deltaReloc;
751
752 /*
753 * Apply fixups
754 */
755 PRELOCREC pRec = 0;
756 AVLPVKEY key = 0;
757
758 while (true)
759 {
760 /* Get the record that's closest from above */
761 pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true);
762 if (pRec == 0)
763 break;
764
765 key = (AVLPVKEY)(pRec->pRelocPos + 1); /* search for the next record during the next round. */
766
767 switch (pRec->uType)
768 {
769 case FIXUP_ABSOLUTE:
770 Log(("Absolute fixup at %RRv %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
771 if ( !pRec->pSource
772 || PATMIsPatchGCAddr(pVM, pRec->pSource))
773 {
774 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
775 }
776 else
777 {
778 uint8_t curInstr[15];
779 uint8_t oldInstr[15];
780 Assert(pRec->pSource && pPatch->patch.cbPrivInstr <= 15);
781
782 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
783
784 memcpy(oldInstr, pPatch->patch.aPrivInstr, pPatch->patch.cbPrivInstr);
785 *(RTRCPTR *)&oldInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
786
787 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), curInstr, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPrivInstr);
788 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
789
790 pRec->pDest = (RTRCPTR)((RTRCUINTPTR)pRec->pDest + delta);
791
792 if ( rc == VERR_PAGE_NOT_PRESENT
793 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
794 {
795 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
796
797 Log(("PATM: Patch page not present -> check later!\n"));
798 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
799 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
800 }
801 else
802 if (memcmp(curInstr, oldInstr, pPatch->patch.cbPrivInstr))
803 {
804 Log(("PATM: Patch was overwritten -> disabling patch!!\n"));
805 /*
806 * Disable patch; this is not a good solution
807 */
808 /** @todo hopefully it was completely overwritten (if the read was successful)!!!! */
809 pPatch->patch.uState = PATCH_DISABLED;
810 }
811 else
812 if (RT_SUCCESS(rc))
813 {
814 *(RTRCPTR *)&curInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
815 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pRec->pSource, curInstr, pPatch->patch.cbPrivInstr);
816 AssertRC(rc);
817 }
818 }
819 break;
820
821 case FIXUP_REL_JMPTOPATCH:
822 {
823 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
824
825 if ( pPatch->patch.uState == PATCH_ENABLED
826 && (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE))
827 {
828 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
829 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
830 RTRCPTR pJumpOffGC;
831 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
832 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
833
834#if 0 /** @todo '*(int32_t*)pRec->pRelocPos' crashes on restore of an XP VM here. pRelocPos=0x8000dbe2180a (bird) */
835 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
836#else
837 Log(("Relative fixup (g2p) ???????? -> %08X at %08X (source=%08x, target=%08x)\n", displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
838#endif
839
840 Assert(pRec->pSource - pPatch->patch.cbPatchJump == pPatch->patch.pPrivInstrGC);
841#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
842 if (pPatch->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
843 {
844 Assert(pPatch->patch.flags & PATMFL_JUMP_CONFLICT);
845
846 pJumpOffGC = pPatch->patch.pPrivInstrGC + 2; //two byte opcode
847 oldJump[0] = pPatch->patch.aPrivInstr[0];
848 oldJump[1] = pPatch->patch.aPrivInstr[1];
849 *(RTRCUINTPTR *)&oldJump[2] = displOld;
850 }
851 else
852#endif
853 if (pPatch->patch.cbPatchJump == SIZEOF_NEARJUMP32)
854 {
855 pJumpOffGC = pPatch->patch.pPrivInstrGC + 1; //one byte opcode
856 oldJump[0] = 0xE9;
857 *(RTRCUINTPTR *)&oldJump[1] = displOld;
858 }
859 else
860 {
861 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->patch.cbPatchJump));
862 continue; //this should never happen!!
863 }
864 Assert(pPatch->patch.cbPatchJump <= sizeof(temp));
865
866 /*
867 * Read old patch jump and compare it to the one we previously installed
868 */
869 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPatchJump);
870 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
871
872 if ( rc == VERR_PAGE_NOT_PRESENT
873 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
874 {
875 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
876
877 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
878 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
879 }
880 else
881 if (memcmp(temp, oldJump, pPatch->patch.cbPatchJump))
882 {
883 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
884 /*
885 * Disable patch; this is not a good solution
886 */
887 /** @todo hopefully it was completely overwritten (if the read was successful)!!!! */
888 pPatch->patch.uState = PATCH_DISABLED;
889 }
890 else
891 if (RT_SUCCESS(rc))
892 {
893 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
894 AssertRC(rc);
895 }
896 else
897 AssertMsgFailed(("Unexpected error %d from PGMPhysSimpleReadGCPtr\n", rc));
898 }
899 else
900 Log(("Skip the guest jump to patch code for this disabled patch %RGv - %08X\n", pPatch->patch.pPrivInstrGC, pRec->pRelocPos));
901
902 pRec->pDest = pTarget;
903 break;
904 }
905
906 case FIXUP_REL_JMPTOGUEST:
907 {
908 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
909 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
910
911 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
912 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
913 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
914 pRec->pSource = pSource;
915 break;
916 }
917
918 default:
919 AssertMsg(0, ("Invalid fixup type!!\n"));
920 return VERR_INVALID_PARAMETER;
921 }
922 }
923
924 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
925 PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Rel patch jump:", "");
926 return 0;
927}
928
929/**
930 * \#PF Handler callback for virtual access handler ranges.
931 *
932 * Important to realize that a physical page in a range can have aliases, and
933 * for ALL and WRITE handlers these will also trigger.
934 *
935 * @returns VINF_SUCCESS if the handler has carried out the operation.
936 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
937 * @param pVM Pointer to the VM.
938 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
939 * @param pvPtr The HC mapping of that address.
940 * @param pvBuf What the guest is reading/writing.
941 * @param cbBuf How much it's reading/writing.
942 * @param enmAccessType The access type.
943 * @param pvUser User argument.
944 */
945DECLCALLBACK(int) patmVirtPageHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
946 PGMACCESSTYPE enmAccessType, void *pvUser)
947{
948 Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
949 NOREF(pvPtr); NOREF(pvBuf); NOREF(cbBuf); NOREF(pvUser);
950
951 /** @todo could be the wrong virtual address (alias) */
952 pVM->patm.s.pvFaultMonitor = GCPtr;
953 PATMR3HandleMonitoredPage(pVM);
954 return VINF_PGM_HANDLER_DO_DEFAULT;
955}
956
957#ifdef VBOX_WITH_DEBUGGER
958
959/**
960 * Callback function for RTAvloU32DoWithAll
961 *
962 * Enables the patch that's being enumerated
963 *
964 * @returns 0 (continue enumeration).
965 * @param pNode Current node
966 * @param pVM Pointer to the VM.
967 */
968static DECLCALLBACK(int) EnableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
969{
970 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
971
972 PATMR3EnablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
973 return 0;
974}
975
976
977/**
978 * Callback function for RTAvloU32DoWithAll
979 *
980 * Disables the patch that's being enumerated
981 *
982 * @returns 0 (continue enumeration).
983 * @param pNode Current node
984 * @param pVM Pointer to the VM.
985 */
986static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
987{
988 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
989
990 PATMR3DisablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
991 return 0;
992}
993
994#endif /* VBOX_WITH_DEBUGGER */
995#ifdef UNUSED_FUNCTIONS
996
997/**
998 * Returns the host context pointer and size of the patch memory block
999 *
1000 * @returns Host context pointer.
1001 * @param pVM Pointer to the VM.
1002 * @param pcb Size of the patch memory block
1003 * @internal
1004 */
1005VMMR3_INT_DECL(void *) PATMR3QueryPatchMemHC(PVM pVM, uint32_t *pcb)
1006{
1007 AssertReturn(!HMIsEnabled(pVM), NULL);
1008 if (pcb)
1009 *pcb = pVM->patm.s.cbPatchMem;
1010 return pVM->patm.s.pPatchMemHC;
1011}
1012
1013
1014/**
1015 * Returns the guest context pointer and size of the patch memory block
1016 *
1017 * @returns Guest context pointer.
1018 * @param pVM Pointer to the VM.
1019 * @param pcb Size of the patch memory block
1020 */
1021VMMR3_INT_DECL(RTRCPTR) PATMR3QueryPatchMemGC(PVM pVM, uint32_t *pcb)
1022{
1023 AssertReturn(!HMIsEnabled(pVM), NIL_RTRCPTR);
1024 if (pcb)
1025 *pcb = pVM->patm.s.cbPatchMem;
1026 return pVM->patm.s.pPatchMemGC;
1027}
1028
1029#endif /* UNUSED_FUNCTIONS */
1030
1031/**
1032 * Returns the host context pointer of the GC context structure
1033 *
1034 * @returns Host context pointer to the GC state structure.
1035 * @param pVM Pointer to the VM.
1036 */
1037VMMR3_INT_DECL(PPATMGCSTATE) PATMR3QueryGCStateHC(PVM pVM)
1038{
1039 AssertReturn(!HMIsEnabled(pVM), NULL);
1040 return pVM->patm.s.pGCStateHC;
1041}
1042
1043
1044#ifdef UNUSED_FUNCTION
1045/**
1046 * Checks whether the HC address is part of our patch region
1047 *
1048 * @returns true/false.
1049 * @param pVM Pointer to the VM.
1050 * @param pAddrHC Host context ring-3 address to check.
1051 */
1052VMMR3_INT_DECL(bool) PATMR3IsPatchHCAddr(PVM pVM, void *pAddrHC)
1053{
1054 return (uintptr_t)pAddrHC >= (uintptr_t)pVM->patm.s.pPatchMemHC
1055 && (uintptr_t)pAddrHC < (uintptr_t)pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem;
1056}
1057#endif
1058
1059
1060/**
1061 * Allows or disallows patching of privileged instructions executed by the guest OS.
1062 *
1063 * @returns VBox status code.
1064 * @param pUVM The user mode VM handle.
1065 * @param fAllowPatching Allow/disallow patching
1066 */
1067VMMR3DECL(int) PATMR3AllowPatching(PUVM pUVM, bool fAllowPatching)
1068{
1069 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1070 PVM pVM = pUVM->pVM;
1071 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1072
1073 if (!HMIsEnabled(pVM))
1074 pVM->fPATMEnabled = fAllowPatching;
1075 else
1076 Assert(!pVM->fPATMEnabled);
1077 return VINF_SUCCESS;
1078}
1079
1080
1081/**
1082 * Checks if the patch manager is enabled or not.
1083 *
1084 * @returns true if enabled, false if not (or if invalid handle).
1085 * @param pUVM The user mode VM handle.
1086 */
1087VMMR3DECL(bool) PATMR3IsEnabled(PUVM pUVM)
1088{
1089 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
1090 PVM pVM = pUVM->pVM;
1091 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
1092 return PATMIsEnabled(pVM);
1093}
1094
1095
1096/**
1097 * Convert a GC patch block pointer to a HC patch pointer
1098 *
1099 * @returns HC pointer or NULL if it's not a GC patch pointer
1100 * @param pVM Pointer to the VM.
1101 * @param pAddrGC GC pointer
1102 */
1103VMMR3_INT_DECL(void *) PATMR3GCPtrToHCPtr(PVM pVM, RTRCPTR pAddrGC)
1104{
1105 AssertReturn(!HMIsEnabled(pVM), NULL);
1106 if (pVM->patm.s.pPatchMemGC <= pAddrGC && pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem > pAddrGC)
1107 return pVM->patm.s.pPatchMemHC + (pAddrGC - pVM->patm.s.pPatchMemGC);
1108 return NULL;
1109}
1110
1111
1112/**
1113 * Convert guest context address to host context pointer
1114 *
1115 * @returns Host context pointer or NULL in case of an error.
1116 * @param pVM Pointer to the VM.
1117 * @param pCacheRec Address conversion cache record
1118 * @param pGCPtr Guest context pointer
1119 *
1122 */
1123R3PTRTYPE(uint8_t *) patmR3GCVirtToHCVirt(PVM pVM, PPATMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr)
1124{
1125 int rc;
1126 R3PTRTYPE(uint8_t *) pHCPtr;
1127 uint32_t offset;
1128
1129 if (PATMIsPatchGCAddr(pVM, pGCPtr))
1130 {
1131 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1132 Assert(pPatch);
1133 return PATCHCODE_PTR_HC(pPatch) + (pGCPtr - PATCHCODE_PTR_GC(pPatch));
1134 }
1135
1136 offset = pGCPtr & PAGE_OFFSET_MASK;
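    /* Fast path: reuse the cached ring-3 mapping if the request hits the same guest page as the previous lookup. */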
1137 if (pCacheRec->pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
1138 return pCacheRec->pPageLocStartHC + offset;
1139
1140 /* Release previous lock if any. */
1141 if (pCacheRec->Lock.pvMap)
1142 {
1143 PGMPhysReleasePageMappingLock(pVM, &pCacheRec->Lock);
1144 pCacheRec->Lock.pvMap = NULL;
1145 }
1146
1147 rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pGCPtr, (const void **)&pHCPtr, &pCacheRec->Lock);
1148 if (rc != VINF_SUCCESS)
1149 {
1150 AssertMsg(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("PGMPhysGCPtr2CCPtrReadOnly failed for %08X\n", pGCPtr));
1151 return NULL;
1152 }
1153 pCacheRec->pPageLocStartHC = (R3PTRTYPE(uint8_t*))((RTHCUINTPTR)pHCPtr & PAGE_BASE_HC_MASK);
1154 pCacheRec->pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
1155 return pHCPtr;
1156}
1157
1158
1159/**
1160 * Calculates and fills in all branch targets
1161 *
1162 * @returns VBox status code.
1163 * @param pVM Pointer to the VM.
1164 * @param pPatch Current patch block pointer
1165 *
1166 */
1167static int patmr3SetBranchTargets(PVM pVM, PPATCHINFO pPatch)
1168{
1169 int32_t displ;
1170
1171 PJUMPREC pRec = 0;
1172 unsigned nrJumpRecs = 0;
1173
1174 /*
1175 * Set all branch targets inside the patch block.
1176 * We remove all jump records as they are no longer needed afterwards.
1177 */
1178 while (true)
1179 {
1180 RCPTRTYPE(uint8_t *) pInstrGC;
1181 RCPTRTYPE(uint8_t *) pBranchTargetGC = 0;
1182
1183 pRec = (PJUMPREC)RTAvlPVRemoveBestFit(&pPatch->JumpTree, 0, true);
1184 if (pRec == 0)
1185 break;
1186
1187 nrJumpRecs++;
1188
1189 /* HC in patch block to GC in patch block. */
1190 pInstrGC = patmPatchHCPtr2PatchGCPtr(pVM, pRec->pJumpHC);
1191
1192 if (pRec->opcode == OP_CALL)
1193 {
1194 /* Special case: call function replacement patch from this patch block.
1195 */
1196 PPATMPATCHREC pFunctionRec = patmQueryFunctionPatch(pVM, pRec->pTargetGC);
1197 if (!pFunctionRec)
1198 {
1199 int rc;
1200
1201 if (PATMR3HasBeenPatched(pVM, pRec->pTargetGC) == false)
1202 rc = PATMR3InstallPatch(pVM, pRec->pTargetGC, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
1203 else
1204 rc = VERR_PATCHING_REFUSED; /* exists as a normal patch; can't use it */
1205
1206 if (RT_FAILURE(rc))
1207 {
1208 uint8_t *pPatchHC;
1209 RTRCPTR pPatchGC;
1210 RTRCPTR pOrgInstrGC;
1211
1212 pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pInstrGC, 0);
1213 Assert(pOrgInstrGC);
1214
1215 /* Failure for some reason -> mark exit point with int 3. */
1216 Log(("Failed to install function replacement patch (at %x) for reason %Rrc\n", pOrgInstrGC, rc));
1217
1218 pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pOrgInstrGC);
1219 Assert(pPatchGC);
1220
1221 pPatchHC = pVM->patm.s.pPatchMemHC + (pPatchGC - pVM->patm.s.pPatchMemGC);
1222
1223 /* Set a breakpoint at the very beginning of the recompiled instruction */
1224 *pPatchHC = 0xCC;
1225
1226 continue;
1227 }
1228 }
1229 else
1230 {
1231 Log(("Patch block %RRv called as function\n", pFunctionRec->patch.pPrivInstrGC));
1232 pFunctionRec->patch.flags |= PATMFL_CODE_REFERENCED;
1233 }
1234
1235 pBranchTargetGC = PATMR3QueryPatchGCPtr(pVM, pRec->pTargetGC);
1236 }
1237 else
1238 pBranchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pRec->pTargetGC);
1239
1240 if (pBranchTargetGC == 0)
1241 {
1242 AssertMsgFailed(("patmr3SetBranchTargets: patmGuestGCPtrToPatchGCPtr failed for %08X\n", pRec->pTargetGC));
1243 return VERR_PATCHING_REFUSED;
1244 }
1245 /* Our jumps *always* have a dword displacement (to make things easier). */
1246 Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
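        /* The 32-bit displacement is relative to the byte following the displacement field itself,
         * i.e. displ = target - (instruction GC address + offset of the field + 4). */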
1247 displ = pBranchTargetGC - (pInstrGC + pRec->offDispl + sizeof(RTRCPTR));
1248 *(RTRCPTR *)(pRec->pJumpHC + pRec->offDispl) = displ;
1249 Log(("Set branch target %d to %08X : %08x - (%08x + %d + %d)\n", nrJumpRecs, displ, pBranchTargetGC, pInstrGC, pRec->offDispl, sizeof(RTRCPTR)));
1250 }
1251 Assert(nrJumpRecs == pPatch->nrJumpRecs);
1252 Assert(pPatch->JumpTree == 0);
1253 return VINF_SUCCESS;
1254}
1255
1256/**
1257 * Add an illegal instruction record
1258 *
1259 * @param pVM Pointer to the VM.
1260 * @param pPatch Patch structure ptr
1261 * @param pInstrGC Guest context pointer to privileged instruction
1262 *
1263 */
1264static void patmAddIllegalInstrRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1265{
1266 PAVLPVNODECORE pRec;
1267
1268 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1269 Assert(pRec);
1270 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
1271
1272 bool ret = RTAvlPVInsert(&pPatch->pTempInfo->IllegalInstrTree, pRec);
1273 Assert(ret); NOREF(ret);
1274 pPatch->pTempInfo->nrIllegalInstr++;
1275}
1276
1277static bool patmIsIllegalInstr(PPATCHINFO pPatch, RTRCPTR pInstrGC)
1278{
1279 PAVLPVNODECORE pRec;
1280
1281 pRec = RTAvlPVGet(&pPatch->pTempInfo->IllegalInstrTree, (AVLPVKEY)(uintptr_t)pInstrGC);
1282 if (pRec)
1283 return true;
1284 else
1285 return false;
1286}
1287
1288/**
1289 * Add a patch to guest lookup record
1290 *
1291 * @param pVM Pointer to the VM.
1292 * @param pPatch Patch structure ptr
1293 * @param pPatchInstrHC Guest context pointer to patch block
1294 * @param pInstrGC Guest context pointer to privileged instruction
1295 * @param enmType Lookup type
1296 * @param fDirty Dirty flag
1297 *
1298 * @note Be extremely careful with this function. Make absolutely sure the guest
1299 * address is correct! (to avoid executing instructions twice!)
1300 */
1301void patmR3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC, PATM_LOOKUP_TYPE enmType, bool fDirty)
1302{
1303 bool ret;
1304 PRECPATCHTOGUEST pPatchToGuestRec;
1305 PRECGUESTTOPATCH pGuestToPatchRec;
1306 uint32_t PatchOffset = pPatchInstrHC - pVM->patm.s.pPatchMemHC; /* Offset in memory reserved for PATM. */
1307
1308 LogFlowFunc(("pVM=%#p pPatch=%#p pPatchInstrHC=%#p pInstrGC=%#x enmType=%d fDirty=%RTbool\n",
1309 pVM, pPatch, pPatchInstrHC, pInstrGC, enmType, fDirty));
1310
1311 if (enmType == PATM_LOOKUP_PATCH2GUEST)
1312 {
1313 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1314 if (pPatchToGuestRec && pPatchToGuestRec->Core.Key == PatchOffset)
1315 return; /* already there */
1316
1317 Assert(!pPatchToGuestRec);
1318 }
1319#ifdef VBOX_STRICT
1320 else
1321 {
1322 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1323 Assert(!pPatchToGuestRec);
1324 }
1325#endif
1326
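    /* Note: the patch-to-guest and guest-to-patch records are co-allocated in one block so that
     * patmr3RemoveP2GLookupRecord() can locate the second record via (pPatchToGuestRec + 1). */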
1327 pPatchToGuestRec = (PRECPATCHTOGUEST)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(RECPATCHTOGUEST) + sizeof(RECGUESTTOPATCH));
1328 Assert(pPatchToGuestRec);
1329 pPatchToGuestRec->Core.Key = PatchOffset;
1330 pPatchToGuestRec->pOrgInstrGC = pInstrGC;
1331 pPatchToGuestRec->enmType = enmType;
1332 pPatchToGuestRec->fDirty = fDirty;
1333
1334 ret = RTAvlU32Insert(&pPatch->Patch2GuestAddrTree, &pPatchToGuestRec->Core);
1335 Assert(ret);
1336
1337 /* GC to patch address */
1338 if (enmType == PATM_LOOKUP_BOTHDIR)
1339 {
1340 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
1341 if (!pGuestToPatchRec)
1342 {
1343 pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1344 pGuestToPatchRec->Core.Key = pInstrGC;
1345 pGuestToPatchRec->PatchOffset = PatchOffset;
1346
1347 ret = RTAvlU32Insert(&pPatch->Guest2PatchAddrTree, &pGuestToPatchRec->Core);
1348 Assert(ret);
1349 }
1350 }
1351
1352 pPatch->nrPatch2GuestRecs++;
1353}
1354
1355
1356/**
1357 * Removes a patch to guest lookup record
1358 *
1359 * @param pVM Pointer to the VM.
1360 * @param pPatch Patch structure ptr
1361 * @param pPatchInstrGC Guest context pointer to patch block
1362 */
1363void patmr3RemoveP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pPatchInstrGC)
1364{
1365 PAVLU32NODECORE pNode;
1366 PAVLU32NODECORE pNode2;
1367 PRECPATCHTOGUEST pPatchToGuestRec;
1368 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
1369
1370 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1371 Assert(pPatchToGuestRec);
1372 if (pPatchToGuestRec)
1373 {
1374 if (pPatchToGuestRec->enmType == PATM_LOOKUP_BOTHDIR)
1375 {
1376 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1377
1378 Assert(pGuestToPatchRec->Core.Key);
1379 pNode2 = RTAvlU32Remove(&pPatch->Guest2PatchAddrTree, pGuestToPatchRec->Core.Key);
1380 Assert(pNode2);
1381 }
1382 pNode = RTAvlU32Remove(&pPatch->Patch2GuestAddrTree, pPatchToGuestRec->Core.Key);
1383 Assert(pNode);
1384
1385 MMR3HeapFree(pPatchToGuestRec);
1386 pPatch->nrPatch2GuestRecs--;
1387 }
1388}
1389
1390
1391/**
1392 * RTAvlPVDestroy callback.
1393 */
1394static DECLCALLBACK(int) patmEmptyTreePVCallback(PAVLPVNODECORE pNode, void *)
1395{
1396 MMR3HeapFree(pNode);
1397 return 0;
1398}
1399
1400/**
1401 * Empty the specified tree (PV tree, MMR3 heap)
1402 *
1403 * @param pVM Pointer to the VM.
1404 * @param ppTree Tree to empty
1405 */
1406static void patmEmptyTree(PVM pVM, PAVLPVNODECORE *ppTree)
1407{
1408 NOREF(pVM);
1409 RTAvlPVDestroy(ppTree, patmEmptyTreePVCallback, NULL);
1410}
1411
1412
1413/**
1414 * RTAvlU32Destroy callback.
1415 */
1416static DECLCALLBACK(int) patmEmptyTreeU32Callback(PAVLU32NODECORE pNode, void *)
1417{
1418 MMR3HeapFree(pNode);
1419 return 0;
1420}
1421
1422/**
1423 * Empty the specified tree (U32 tree, MMR3 heap)
1424 *
1425 * @param pVM Pointer to the VM.
1426 * @param ppTree Tree to empty
1427 */
1428static void patmEmptyTreeU32(PVM pVM, PPAVLU32NODECORE ppTree)
1429{
1430 NOREF(pVM);
1431 RTAvlU32Destroy(ppTree, patmEmptyTreeU32Callback, NULL);
1432}
1433
1434
1435/**
1436 * Analyses the instructions following the cli for compliance with our heuristics for cli & pushf
1437 *
1438 * @returns VBox status code.
1439 * @param pVM Pointer to the VM.
1440 * @param pCpu CPU disassembly state
1441 * @param pInstrGC Guest context pointer to privileged instruction
1442 * @param pCurInstrGC Guest context pointer to the current instruction
1443 * @param pCacheRec Cache record ptr
1444 *
1445 */
1446static int patmAnalyseBlockCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1447{
1448 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1449 bool fIllegalInstr = false;
1450
1451 /*
1452 * Preliminary heuristics:
1453 * - no call instructions without a fixed displacement between cli and sti/popf
1454 * - no jumps in the instructions following cli (4+ bytes; enough for the replacement jump (5 bytes))
1455 * - no nested pushf/cli
1456 * - sti/popf should be the (eventual) target of all branches
1457 * - no near or far returns; no int xx, no into
1458 *
1459 * Note: Later on we can impose less strict guidelines if the need arises
1460 */
1461
1462 /* Bail out if the patch gets too big. */
1463 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1464 {
1465 Log(("Code block too big (%x) for patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1466 fIllegalInstr = true;
1467 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1468 }
1469 else
1470 {
1471 /* No unconditional jumps or calls without fixed displacements. */
1472 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1473 && (pCpu->pCurInstr->uOpcode == OP_JMP || pCpu->pCurInstr->uOpcode == OP_CALL)
1474 )
1475 {
1476 Assert(pCpu->Param1.cb <= 4 || pCpu->Param1.cb == 6);
1477 if ( pCpu->Param1.cb == 6 /* far call/jmp */
1478 || (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1479 || (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1480 )
1481 {
1482 fIllegalInstr = true;
1483 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1484 }
1485 }
1486
1487 /* An unconditional (short) jump right after a cli is a potential problem; we will overwrite whichever function comes afterwards */
1488 if (pPatch->opcode == OP_CLI && pCpu->pCurInstr->uOpcode == OP_JMP)
1489 {
1490 if ( pCurInstrGC > pPatch->pPrivInstrGC
1491 && pCurInstrGC + pCpu->cbInstr < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1492 {
1493 Log(("Dangerous unconditional jump ends in our generated patch jump!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1494 /* We turn this one into an int 3 callable patch. */
1495 pPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
1496 }
1497 }
1498 else
1499 /* no nested pushfs just yet; nested cli is allowed for cli patches though. */
1500 if (pPatch->opcode == OP_PUSHF)
1501 {
1502 if (pCurInstrGC != pInstrGC && pCpu->pCurInstr->uOpcode == OP_PUSHF)
1503 {
1504 fIllegalInstr = true;
1505 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1506 }
1507 }
1508
1509 /* no far returns */
1510 if (pCpu->pCurInstr->uOpcode == OP_RETF)
1511 {
1512 pPatch->pTempInfo->nrRetInstr++;
1513 fIllegalInstr = true;
1514 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1515 }
1516 else if ( pCpu->pCurInstr->uOpcode == OP_INT3
1517 || pCpu->pCurInstr->uOpcode == OP_INT
1518 || pCpu->pCurInstr->uOpcode == OP_INTO)
1519 {
1520 /* No int xx or into either. */
1521 fIllegalInstr = true;
1522 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1523 }
1524 }
1525
1526 pPatch->cbPatchBlockSize += pCpu->cbInstr;
1527
1528 /* Illegal instruction -> end of analysis phase for this code block */
1529 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1530 return VINF_SUCCESS;
1531
1532 /* Check for exit points. */
1533 switch (pCpu->pCurInstr->uOpcode)
1534 {
1535 case OP_SYSEXIT:
1536 return VINF_SUCCESS; /* duplicate it; will fault or be emulated in GC. */
1537
1538 case OP_SYSENTER:
1539 case OP_ILLUD2:
1540 /* This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further. */
1541 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1542 return VINF_SUCCESS;
1543
1544 case OP_STI:
1545 case OP_POPF:
1546 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION)));
1547 /* If our exit point lies within the generated patch jump, then we have to refuse!! */
1548 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1549 {
1550 Log(("Exit point within patch jump itself!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1551 return VERR_PATCHING_REFUSED;
1552 }
1553 if (pPatch->opcode == OP_PUSHF)
1554 {
1555 if (pCpu->pCurInstr->uOpcode == OP_POPF)
1556 {
1557 if (pPatch->cbPatchBlockSize >= SIZEOF_NEARJUMP32)
1558 return VINF_SUCCESS;
1559
1560 /* Or else we need to duplicate more instructions, because we can't jump back yet! */
1561 Log(("WARNING: End of block reached, but we need to duplicate some extra instructions to avoid a conflict with the patch jump\n"));
1562 pPatch->flags |= PATMFL_CHECK_SIZE;
1563 }
1564 break; /* sti doesn't mark the end of a pushf block; only popf does. */
1565 }
1566 /* else: fall through. */
1567 case OP_RETN: /* exit point for function replacement */
1568 return VINF_SUCCESS;
1569
1570 case OP_IRET:
1571 return VINF_SUCCESS; /* exit point */
1572
1573 case OP_CPUID:
1574 case OP_CALL:
1575 case OP_JMP:
1576 break;
1577
1578#ifdef VBOX_WITH_SAFE_STR /** @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table */
1579 case OP_STR:
1580 break;
1581#endif
1582
1583 default:
1584 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
1585 {
1586 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1587 return VINF_SUCCESS; /* exit point */
1588 }
1589 break;
1590 }
1591
1592 /* If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump. */
1593 if ((pPatch->flags & PATMFL_CHECK_SIZE) && pPatch->cbPatchBlockSize > SIZEOF_NEARJUMP32 && !(pCpu->pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW))
1594 {
1595 /* The end marker for this kind of patch is any instruction at a location outside our patch jump. */
1596 Log(("End of block at %RRv size %d\n", pCurInstrGC, pCpu->cbInstr));
1597 return VINF_SUCCESS;
1598 }
1599
1600 return VWRN_CONTINUE_ANALYSIS;
1601}
1602
1603/**
1604 * Analyses the instructions inside a function for compliance
1605 *
1606 * @returns VBox status code.
1607 * @param pVM Pointer to the VM.
1608 * @param pCpu CPU disassembly state
1609 * @param pInstrGC Guest context pointer to privileged instruction
1610 * @param pCurInstrGC Guest context pointer to the current instruction
1611 * @param pCacheRec Cache record ptr
1612 *
1613 */
1614static int patmAnalyseFunctionCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1615{
1616 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1617 bool fIllegalInstr = false;
1618 NOREF(pInstrGC);
1619
1620 // Preliminary heuristics:
1621 // - no call instructions
1622 // - ret ends a block
1623
1624 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
1625
1626 // bail out if the patch gets too big
1627 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1628 {
1629 Log(("Code block too big (%x) for function patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1630 fIllegalInstr = true;
1631 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1632 }
1633 else
1634 {
1635 // no unconditional jumps or calls without fixed displacements
1636 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1637 && (pCpu->pCurInstr->uOpcode == OP_JMP || pCpu->pCurInstr->uOpcode == OP_CALL)
1638 )
1639 {
1640 Assert(pCpu->Param1.cb <= 4 || pCpu->Param1.cb == 6);
1641 if ( pCpu->Param1.cb == 6 /* far call/jmp */
1642 || (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1643 || (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1644 )
1645 {
1646 fIllegalInstr = true;
1647 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1648 }
1649 }
1650 else /* no far returns */
1651 if (pCpu->pCurInstr->uOpcode == OP_RETF)
1652 {
1653 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1654 fIllegalInstr = true;
1655 }
1656 else /* no int xx or into either */
1657 if (pCpu->pCurInstr->uOpcode == OP_INT3 || pCpu->pCurInstr->uOpcode == OP_INT || pCpu->pCurInstr->uOpcode == OP_INTO)
1658 {
1659 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1660 fIllegalInstr = true;
1661 }
1662
1663 #if 0
1664 ///@todo we can handle certain in/out and privileged instructions in the guest context
1665 if (pCpu->pCurInstr->fOpType & DISOPTYPE_PRIVILEGED && pCpu->pCurInstr->uOpcode != OP_STI)
1666 {
1667 Log(("Illegal instructions for function patch!!\n"));
1668 return VERR_PATCHING_REFUSED;
1669 }
1670 #endif
1671 }
1672
1673 pPatch->cbPatchBlockSize += pCpu->cbInstr;
1674
1675 /* Illegal instruction -> end of analysis phase for this code block */
1676 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1677 {
1678 return VINF_SUCCESS;
1679 }
1680
1681 // Check for exit points
1682 switch (pCpu->pCurInstr->uOpcode)
1683 {
1684 case OP_ILLUD2:
1685 // This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further
1686 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1687 return VINF_SUCCESS;
1688
1689 case OP_IRET:
1690 case OP_SYSEXIT: /* will fault or be emulated in GC */
1691 case OP_RETN:
1692 return VINF_SUCCESS;
1693
1694#ifdef VBOX_WITH_SAFE_STR /** @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table */
1695 case OP_STR:
1696 break;
1697#endif
1698
1699 case OP_POPF:
1700 case OP_STI:
1701 return VWRN_CONTINUE_ANALYSIS;
1702 default:
1703 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
1704 {
1705 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1706 return VINF_SUCCESS; /* exit point */
1707 }
1708 return VWRN_CONTINUE_ANALYSIS;
1709 }
1710
1711 return VWRN_CONTINUE_ANALYSIS;
1712}
1713
1714/**
1715 * Recompiles the instructions in a code block
1716 *
1717 * @returns VBox status code.
1718 * @param pVM Pointer to the VM.
1719 * @param pCpu CPU disassembly state
1720 * @param pInstrGC Guest context pointer to privileged instruction
1721 * @param pCurInstrGC Guest context pointer to the current instruction
1722 * @param pCacheRec Cache record ptr
1723 *
1724 */
1725static int patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1726{
1727 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1728 int rc = VINF_SUCCESS;
1729 bool fInhibitIRQInstr = false; /* did the instruction cause PATMFL_INHIBITIRQS to be set? */
1730
1731 LogFlow(("patmRecompileCallback %RRv %RRv\n", pInstrGC, pCurInstrGC));
1732
1733 if ( patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pCurInstrGC) != 0
1734 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT)) /* do not do this when the next instruction *must* be executed! */
1735 {
1736 /*
1737 * Been there, done that; so insert a jump (we don't want to duplicate code).
1738 * No need to record this instruction as it's glue code that never crashes (it had better not!)
1739 */
1740 Log(("patmRecompileCallback: jump to code we've recompiled before %RRv!\n", pCurInstrGC));
1741 return patmPatchGenRelJump(pVM, pPatch, pCurInstrGC, OP_JMP, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1742 }
1743
1744 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1745 {
1746 rc = patmAnalyseFunctionCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1747 }
1748 else
1749 rc = patmAnalyseBlockCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1750
1751 if (RT_FAILURE(rc))
1752 return rc;
1753
1754 /* Note: Never do a direct return unless a failure is encountered! */
1755
1756 /* Clear recompilation of next instruction flag; we are doing that right here. */
1757 if (pPatch->flags & PATMFL_RECOMPILE_NEXT)
1758 pPatch->flags &= ~PATMFL_RECOMPILE_NEXT;
1759
1760 /* Add lookup record for patch to guest address translation */
1761 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
1762
1763 /* Update lowest and highest instruction address for this patch */
1764 if (pCurInstrGC < pPatch->pInstrGCLowest)
1765 pPatch->pInstrGCLowest = pCurInstrGC;
1766 else
1767 if (pCurInstrGC > pPatch->pInstrGCHighest)
1768 pPatch->pInstrGCHighest = pCurInstrGC + pCpu->cbInstr;
1769
1770 /* Illegal instruction -> end of recompile phase for this code block. */
1771 if (patmIsIllegalInstr(pPatch, pCurInstrGC))
1772 {
1773 Log(("Illegal instruction at %RRv -> mark with int 3\n", pCurInstrGC));
1774 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1775 goto end;
1776 }
1777
1778 /* For our first attempt, we'll handle only simple relative jumps (immediate offset coded in instruction).
1779 * Indirect calls are handled below.
1780 */
1781 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1782 && (pCpu->pCurInstr->uOpcode != OP_CALL || (pPatch->flags & PATMFL_SUPPORT_CALLS))
1783 && (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) == OP_PARM_J))
1784 {
1785 RCPTRTYPE(uint8_t *) pTargetGC = PATMResolveBranch(pCpu, pCurInstrGC);
1786 if (pTargetGC == 0)
1787 {
1788 Log(("We don't support far jumps here!! (%08X)\n", pCpu->Param1.fUse));
1789 return VERR_PATCHING_REFUSED;
1790 }
1791
1792 if (pCpu->pCurInstr->uOpcode == OP_CALL)
1793 {
1794 Assert(!PATMIsPatchGCAddr(pVM, pTargetGC));
1795 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, pTargetGC, false);
1796 if (RT_FAILURE(rc))
1797 goto end;
1798 }
1799 else
1800 rc = patmPatchGenRelJump(pVM, pPatch, pTargetGC, pCpu->pCurInstr->uOpcode, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1801
1802 if (RT_SUCCESS(rc))
1803 rc = VWRN_CONTINUE_RECOMPILE;
1804
1805 goto end;
1806 }
1807
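    /* Instruction-specific recompilation: instructions PATM must intercept (cli, sti, popf, pushf, iret, mov/pop ss, etc.)
     * get replacement code generated for them; anything harmless falls through to plain duplication. */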
1808 switch (pCpu->pCurInstr->uOpcode)
1809 {
1810 case OP_CLI:
1811 {
1812 /* If a cli is found while duplicating instructions for another patch, then it's of vital importance to continue
1813 * until we've found the proper exit point(s).
1814 */
1815 if ( pCurInstrGC != pInstrGC
1816 && !(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1817 )
1818 {
1819 Log(("cli instruction found in other instruction patch block; force it to continue & find an exit point\n"));
1820 pPatch->flags &= ~(PATMFL_CHECK_SIZE | PATMFL_SINGLE_INSTRUCTION);
1821 }
1822 /* Set by irq inhibition; no longer valid now. */
1823 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1824
1825 rc = patmPatchGenCli(pVM, pPatch);
1826 if (RT_SUCCESS(rc))
1827 rc = VWRN_CONTINUE_RECOMPILE;
1828 break;
1829 }
1830
1831 case OP_MOV:
1832 if (pCpu->pCurInstr->fOpType & DISOPTYPE_POTENTIALLY_DANGEROUS)
1833 {
1834 /* mov ss, src? */
1835 if ( (pCpu->Param1.fUse & DISUSE_REG_SEG)
1836 && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS))
1837 {
1838 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1839 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1840 /** @todo this could cause a fault (ring 0 selector being loaded in ring 1) */
1841 }
1842#if 0 /* necessary for Haiku */
1843 else
1844 if ( (pCpu->Param2.fUse & DISUSE_REG_SEG)
1845 && (pCpu->Param2.Base.idxSegReg == USE_REG_SS)
1846 && (pCpu->Param1.fUse & (DISUSE_REG_GEN32|DISUSE_REG_GEN16))) /** @todo memory operand must in theory be handled too */
1847 {
1848 /* mov GPR, ss */
1849 rc = patmPatchGenMovFromSS(pVM, pPatch, pCpu, pCurInstrGC);
1850 if (RT_SUCCESS(rc))
1851 rc = VWRN_CONTINUE_RECOMPILE;
1852 break;
1853 }
1854#endif
1855 }
1856 goto duplicate_instr;
1857
1858 case OP_POP:
1859 /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS)) */
1860 if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_SS)
1861 {
1862 Assert(pCpu->pCurInstr->fOpType & DISOPTYPE_INHIBIT_IRQS);
1863
1864 Log(("Force recompilation of next instruction for OP_POP at %RRv\n", pCurInstrGC));
1865 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1866 }
1867 goto duplicate_instr;
1868
1869 case OP_STI:
1870 {
1871 RTRCPTR pNextInstrGC = 0; /* by default no inhibit irq */
1872
1873 /* In a sequence of instructions that inhibit irqs, only the first one actually inhibits irqs. */
1874 if (!(pPatch->flags & PATMFL_INHIBIT_IRQS))
1875 {
1876 pPatch->flags |= PATMFL_INHIBIT_IRQS | PATMFL_GENERATE_JUMPTOGUEST;
1877 fInhibitIRQInstr = true;
1878 pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
1879 Log(("Inhibit irqs for instruction OP_STI at %RRv\n", pCurInstrGC));
1880 }
1881 rc = patmPatchGenSti(pVM, pPatch, pCurInstrGC, pNextInstrGC);
1882
1883 if (RT_SUCCESS(rc))
1884 {
1885 DISCPUSTATE cpu = *pCpu;
1886 unsigned cbInstr;
1887 int disret;
1888 RCPTRTYPE(uint8_t *) pReturnInstrGC;
1889
1890 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1891
1892 pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
1893 { /* Force pNextInstrHC out of scope after using it */
1894 uint8_t *pNextInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
1895 if (pNextInstrHC == NULL)
1896 {
1897 AssertFailed();
1898 return VERR_PATCHING_REFUSED;
1899 }
1900
1901 // Disassemble the next instruction
1902 disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
1903 }
1904 if (disret == false)
1905 {
1906 AssertMsgFailed(("STI: Disassembly failed (probably page not present) -> return to caller\n"));
1907 return VERR_PATCHING_REFUSED;
1908 }
1909 pReturnInstrGC = pNextInstrGC + cbInstr;
1910
1911 if ( (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1912 || pReturnInstrGC <= pInstrGC
1913 || pReturnInstrGC - pInstrGC >= SIZEOF_NEARJUMP32
1914 )
1915 {
1916 /* Not an exit point for function duplication patches */
1917 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
1918 && RT_SUCCESS(rc))
1919 {
1920 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST; /* Don't generate a jump back */
1921 rc = VWRN_CONTINUE_RECOMPILE;
1922 }
1923 else
1924 rc = VINF_SUCCESS; //exit point
1925 }
1926 else {
1927 Log(("PATM: sti occurred too soon; refusing patch!\n"));
1928 rc = VERR_PATCHING_REFUSED; //not allowed!!
1929 }
1930 }
1931 break;
1932 }
1933
1934 case OP_POPF:
1935 {
1936 bool fGenerateJmpBack = (pCurInstrGC + pCpu->cbInstr - pInstrGC >= SIZEOF_NEARJUMP32);
1937
1938 /* Not an exit point for IDT handler or function replacement patches */
1939 /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
1940 if (pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_DUPLICATE_FUNCTION))
1941 fGenerateJmpBack = false;
1942
1943 rc = patmPatchGenPopf(pVM, pPatch, pCurInstrGC + pCpu->cbInstr, !!(pCpu->fPrefix & DISPREFIX_OPSIZE), fGenerateJmpBack);
1944 if (RT_SUCCESS(rc))
1945 {
1946 if (fGenerateJmpBack == false)
1947 {
1948 /* Not an exit point for IDT handler or function replacement patches */
1949 rc = VWRN_CONTINUE_RECOMPILE;
1950 }
1951 else
1952 {
1953 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1954 rc = VINF_SUCCESS; /* exit point! */
1955 }
1956 }
1957 break;
1958 }
1959
1960 case OP_PUSHF:
1961 rc = patmPatchGenPushf(pVM, pPatch, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1962 if (RT_SUCCESS(rc))
1963 rc = VWRN_CONTINUE_RECOMPILE;
1964 break;
1965
1966 case OP_PUSH:
1967 /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS)) */
1968 if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_CS)
1969 {
1970 rc = patmPatchGenPushCS(pVM, pPatch);
1971 if (RT_SUCCESS(rc))
1972 rc = VWRN_CONTINUE_RECOMPILE;
1973 break;
1974 }
1975 goto duplicate_instr;
1976
1977 case OP_IRET:
1978 Log(("IRET at %RRv\n", pCurInstrGC));
1979 rc = patmPatchGenIret(pVM, pPatch, pCurInstrGC, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1980 if (RT_SUCCESS(rc))
1981 {
1982 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1983 rc = VINF_SUCCESS; /* exit point by definition */
1984 }
1985 break;
1986
1987 case OP_ILLUD2:
1988 /* This appears to be some kind of kernel panic in Linux 2.4; no point to continue */
1989 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1990 if (RT_SUCCESS(rc))
1991 rc = VINF_SUCCESS; /* exit point by definition */
1992 Log(("Illegal opcode (0xf 0xb)\n"));
1993 break;
1994
1995 case OP_CPUID:
1996 rc = patmPatchGenCpuid(pVM, pPatch, pCurInstrGC);
1997 if (RT_SUCCESS(rc))
1998 rc = VWRN_CONTINUE_RECOMPILE;
1999 break;
2000
2001 case OP_STR:
2002#ifdef VBOX_WITH_SAFE_STR /* @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table and move OP_STR into #ifndef */
2003 /* Now safe because our shadow TR entry is identical to the guest's. */
2004 goto duplicate_instr;
2005#endif
2006 case OP_SLDT:
2007 rc = patmPatchGenSldtStr(pVM, pPatch, pCpu, pCurInstrGC);
2008 if (RT_SUCCESS(rc))
2009 rc = VWRN_CONTINUE_RECOMPILE;
2010 break;
2011
2012 case OP_SGDT:
2013 case OP_SIDT:
2014 rc = patmPatchGenSxDT(pVM, pPatch, pCpu, pCurInstrGC);
2015 if (RT_SUCCESS(rc))
2016 rc = VWRN_CONTINUE_RECOMPILE;
2017 break;
2018
2019 case OP_RETN:
2020 /* retn is an exit point for function patches */
2021 rc = patmPatchGenRet(pVM, pPatch, pCpu, pCurInstrGC);
2022 if (RT_SUCCESS(rc))
2023 rc = VINF_SUCCESS; /* exit point by definition */
2024 break;
2025
2026 case OP_SYSEXIT:
2027 /* Duplicate it, so it can be emulated in GC (or fault). */
2028 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
2029 if (RT_SUCCESS(rc))
2030 rc = VINF_SUCCESS; /* exit point by definition */
2031 break;
2032
2033 case OP_CALL:
2034 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
2035 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
2036 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
2037 */
2038 Assert(pCpu->Param1.cb == 4 || pCpu->Param1.cb == 6);
2039 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->Param1.cb == 4 /* no far calls! */)
2040 {
2041 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, (RTRCPTR)0xDEADBEEF, true);
2042 if (RT_SUCCESS(rc))
2043 {
2044 rc = VWRN_CONTINUE_RECOMPILE;
2045 }
2046 break;
2047 }
2048 goto gen_illegal_instr;
2049
2050 case OP_JMP:
2051 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
2052 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
2053 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
2054 */
2055 Assert(pCpu->Param1.cb == 4 || pCpu->Param1.cb == 6);
2056 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->Param1.cb == 4 /* no far jumps! */)
2057 {
2058 rc = patmPatchGenJump(pVM, pPatch, pCpu, pCurInstrGC);
2059 if (RT_SUCCESS(rc))
2060 rc = VINF_SUCCESS; /* end of branch */
2061 break;
2062 }
2063 goto gen_illegal_instr;
2064
2065 case OP_INT3:
2066 case OP_INT:
2067 case OP_INTO:
2068 goto gen_illegal_instr;
2069
2070 case OP_MOV_DR:
2071 /* Note: currently we let DRx writes cause a trap d; our trap handler will decide to interpret it or not. */
2072 if (pCpu->pCurInstr->fParam2 == OP_PARM_Dd)
2073 {
2074 rc = patmPatchGenMovDebug(pVM, pPatch, pCpu);
2075 if (RT_SUCCESS(rc))
2076 rc = VWRN_CONTINUE_RECOMPILE;
2077 break;
2078 }
2079 goto duplicate_instr;
2080
2081 case OP_MOV_CR:
2082 /* Note: currently we let CRx writes cause a trap d; our trap handler will decide to interpret it or not. */
2083 if (pCpu->pCurInstr->fParam2 == OP_PARM_Cd)
2084 {
2085 rc = patmPatchGenMovControl(pVM, pPatch, pCpu);
2086 if (RT_SUCCESS(rc))
2087 rc = VWRN_CONTINUE_RECOMPILE;
2088 break;
2089 }
2090 goto duplicate_instr;
2091
2092 default:
2093 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_CONTROLFLOW | DISOPTYPE_PRIVILEGED_NOTRAP))
2094 {
2095gen_illegal_instr:
2096 rc = patmPatchGenIllegalInstr(pVM, pPatch);
2097 if (RT_SUCCESS(rc))
2098 rc = VINF_SUCCESS; /* exit point by definition */
2099 }
2100 else
2101 {
2102duplicate_instr:
2103 Log(("patmPatchGenDuplicate\n"));
2104 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
2105 if (RT_SUCCESS(rc))
2106 rc = VWRN_CONTINUE_RECOMPILE;
2107 }
2108 break;
2109 }
2110
2111end:
2112
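    /* If a previous instruction set the IRQ inhibit flag and the current one didn't, either generate a jump back
     * to the guest right after it or emit code that clears the inhibit flag again. */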
2113 if ( !fInhibitIRQInstr
2114 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2115 {
2116 int rc2;
2117 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
2118
2119 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2120 Log(("Clear inhibit IRQ flag at %RRv\n", pCurInstrGC));
2121 if (pPatch->flags & PATMFL_GENERATE_JUMPTOGUEST)
2122 {
2123 Log(("patmRecompileCallback: generate jump back to guest (%RRv) after fused instruction\n", pNextInstrGC));
2124
2125 rc2 = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2126 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
2127 rc = VINF_SUCCESS; /* end of the line */
2128 }
2129 else
2130 {
2131 rc2 = patmPatchGenClearInhibitIRQ(pVM, pPatch, pNextInstrGC);
2132 }
2133 if (RT_FAILURE(rc2))
2134 rc = rc2;
2135 }
2136
2137 if (RT_SUCCESS(rc))
2138 {
2139 // If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
2140 if ( (pPatch->flags & PATMFL_CHECK_SIZE)
2141 && pCurInstrGC + pCpu->cbInstr - pInstrGC >= SIZEOF_NEARJUMP32
2142 && !(pCpu->pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW)
2143 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT) /* do not do this when the next instruction *must* be executed! */
2144 )
2145 {
2146 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
2147
2148 // The end marker for this kind of patch is any instruction at a location outside our patch jump
2149 Log(("patmRecompileCallback: end found for single instruction patch at %RRv cbInstr %d\n", pNextInstrGC, pCpu->cbInstr));
2150
2151 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC);
2152 AssertRC(rc);
2153 }
2154 }
2155 return rc;
2156}
2157
2158
2159#ifdef LOG_ENABLED
2160
2161/**
2162 * Add a disasm jump record (temporary record to prevent duplicate analysis)
2163 *
2164 * @param pVM Pointer to the VM.
2165 * @param pPatch Patch structure ptr
2166 * @param pInstrGC Guest context pointer to privileged instruction
2167 *
2168 */
2169static void patmPatchAddDisasmJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
2170{
2171 PAVLPVNODECORE pRec;
2172
2173 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
2174 Assert(pRec);
2175 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
2176
2177 int ret = RTAvlPVInsert(&pPatch->pTempInfo->DisasmJumpTree, pRec);
2178 Assert(ret);
2179}
2180
2181/**
2182 * Checks if jump target has been analysed before.
2183 *
2184 * @returns true if the jump target has been analysed before, false otherwise.
2185 * @param pPatch Patch struct
2186 * @param pInstrGC Jump target
2187 *
2188 */
2189static bool patmIsKnownDisasmJump(PPATCHINFO pPatch, RTRCPTR pInstrGC)
2190{
2191 PAVLPVNODECORE pRec;
2192
2193 pRec = RTAvlPVGet(&pPatch->pTempInfo->DisasmJumpTree, (AVLPVKEY)(uintptr_t)pInstrGC);
2194 if (pRec)
2195 return true;
2196 return false;
2197}
2198
2199/**
2200 * For proper disassembly of the final patch block
2201 *
2202 * @returns VBox status code.
2203 * @param pVM Pointer to the VM.
2204 * @param pCpu CPU disassembly state
2205 * @param pInstrGC Guest context pointer to privileged instruction
2206 * @param pCurInstrGC Guest context pointer to the current instruction
2207 * @param pCacheRec Cache record ptr
2208 *
2209 */
2210int patmr3DisasmCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
2211{
2212 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2213 NOREF(pInstrGC);
2214
2215 if (pCpu->pCurInstr->uOpcode == OP_INT3)
2216 {
2217 /* Could be an int3 inserted in a call patch. Check to be sure */
2218 DISCPUSTATE cpu;
2219 RTRCPTR pOrgJumpGC;
2220
2221 pOrgJumpGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2222
2223 { /* Force pOrgJumpHC out of scope after using it */
2224 uint8_t *pOrgJumpHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pOrgJumpGC);
2225
2226 bool disret = patmR3DisInstr(pVM, pPatch, pOrgJumpGC, pOrgJumpHC, PATMREAD_ORGCODE, &cpu, NULL);
2227 if (!disret || cpu.pCurInstr->uOpcode != OP_CALL || cpu.Param1.cb != 4 /* only near calls */)
2228 return VINF_SUCCESS;
2229 }
2230 return VWRN_CONTINUE_ANALYSIS;
2231 }
2232
2233 if ( pCpu->pCurInstr->uOpcode == OP_ILLUD2
2234 && PATMIsPatchGCAddr(pVM, pCurInstrGC))
2235 {
2236 /* the indirect call patch contains an 0xF/0xB illegal instr to call for assistance; check for this and continue */
2237 return VWRN_CONTINUE_ANALYSIS;
2238 }
2239
2240 if ( (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
2241 || pCpu->pCurInstr->uOpcode == OP_INT
2242 || pCpu->pCurInstr->uOpcode == OP_IRET
2243 || pCpu->pCurInstr->uOpcode == OP_RETN
2244 || pCpu->pCurInstr->uOpcode == OP_RETF
2245 )
2246 {
2247 return VINF_SUCCESS;
2248 }
2249
2250 if (pCpu->pCurInstr->uOpcode == OP_ILLUD2)
2251 return VINF_SUCCESS;
2252
2253 return VWRN_CONTINUE_ANALYSIS;
2254}
2255
2256
2257/**
2258 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2259 *
2260 * @returns VBox status code.
2261 * @param pVM Pointer to the VM.
2262 * @param pInstrGC Guest context pointer to the initial privileged instruction
2263 * @param pCurInstrGC Guest context pointer to the current instruction
2264 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2265 * @param pCacheRec Cache record ptr
2266 *
2267 */
2268int patmr3DisasmCode(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2269{
2270 DISCPUSTATE cpu;
2271 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2272 int rc = VWRN_CONTINUE_ANALYSIS;
2273 uint32_t cbInstr, delta;
2274 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2275 bool disret;
2276 char szOutput[256];
2277
2278 Assert(pCurInstrHC != PATCHCODE_PTR_HC(pPatch) || pPatch->pTempInfo->DisasmJumpTree == 0);
2279
2280 /* We need this to determine branch targets (and for disassembling). */
2281 delta = pVM->patm.s.pPatchMemGC - (uintptr_t)pVM->patm.s.pPatchMemHC;
2282
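    /* Walk the code one instruction at a time, logging each disassembled line; the callback decides when analysis is complete. */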
2283 while (rc == VWRN_CONTINUE_ANALYSIS)
2284 {
2285 pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2286 if (pCurInstrHC == NULL)
2287 {
2288 rc = VERR_PATCHING_REFUSED;
2289 goto end;
2290 }
2291
2292 disret = patmR3DisInstrToStr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_RAWCODE,
2293 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2294 if (PATMIsPatchGCAddr(pVM, pCurInstrGC))
2295 {
2296 RTRCPTR pOrgInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2297
2298 if (pOrgInstrGC != pPatch->pTempInfo->pLastDisasmInstrGC)
2299 Log(("DIS %RRv<-%s", pOrgInstrGC, szOutput));
2300 else
2301 Log(("DIS %s", szOutput));
2302
2303 pPatch->pTempInfo->pLastDisasmInstrGC = pOrgInstrGC;
2304 if (patmIsIllegalInstr(pPatch, pOrgInstrGC))
2305 {
2306 rc = VINF_SUCCESS;
2307 goto end;
2308 }
2309 }
2310 else
2311 Log(("DIS: %s", szOutput));
2312
2313 if (disret == false)
2314 {
2315 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2316 rc = VINF_SUCCESS;
2317 goto end;
2318 }
2319
2320 rc = pfnPATMR3Disasm(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2321 if (rc != VWRN_CONTINUE_ANALYSIS) {
2322 break; //done!
2323 }
2324
2325 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction) */
2326 if ( (cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2327 && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J)
2328 && cpu.pCurInstr->uOpcode != OP_CALL /* complete functions are replaced; don't bother here. */
2329 )
2330 {
2331 RTRCPTR pTargetGC = PATMResolveBranch(&cpu, pCurInstrGC);
2332 RTRCPTR pOrgTargetGC;
2333
2334 if (pTargetGC == 0)
2335 {
2336 Log(("We don't support far jumps here!! (%08X)\n", cpu.Param1.fUse));
2337 rc = VERR_PATCHING_REFUSED;
2338 break;
2339 }
2340
2341 if (!PATMIsPatchGCAddr(pVM, pTargetGC))
2342 {
2343 //jump back to guest code
2344 rc = VINF_SUCCESS;
2345 goto end;
2346 }
2347 pOrgTargetGC = PATMR3PatchToGCPtr(pVM, pTargetGC, 0);
2348
2349 if (patmIsCommonIDTHandlerPatch(pVM, pOrgTargetGC))
2350 {
2351 rc = VINF_SUCCESS;
2352 goto end;
2353 }
2354
2355 if (patmIsKnownDisasmJump(pPatch, pTargetGC) == false)
2356 {
2357 /* New jump, let's check it. */
2358 patmPatchAddDisasmJump(pVM, pPatch, pTargetGC);
2359
2360 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2361 rc = patmr3DisasmCode(pVM, pInstrGC, pTargetGC, pfnPATMR3Disasm, pCacheRec);
2362 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2363
2364 if (rc != VINF_SUCCESS) {
2365 break; //done!
2366 }
2367 }
2368 if (cpu.pCurInstr->uOpcode == OP_JMP)
2369 {
2370 /* Unconditional jump; return to caller. */
2371 rc = VINF_SUCCESS;
2372 goto end;
2373 }
2374
2375 rc = VWRN_CONTINUE_ANALYSIS;
2376 }
2377 pCurInstrGC += cbInstr;
2378 }
2379end:
2380 return rc;
2381}
2382
2383/**
2384 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2385 *
2386 * @returns VBox status code.
2387 * @param pVM Pointer to the VM.
2388 * @param pInstrGC Guest context pointer to the initial privileged instruction
2389 * @param pCurInstrGC Guest context pointer to the current instruction
2390 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2391 * @param pCacheRec Cache record ptr
2392 *
2393 */
2394int patmr3DisasmCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2395{
2396 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2397
2398 int rc = patmr3DisasmCode(pVM, pInstrGC, pCurInstrGC, pfnPATMR3Disasm, pCacheRec);
2399 /* Free all disasm jump records. */
2400 patmEmptyTree(pVM, &pPatch->pTempInfo->DisasmJumpTree);
2401 return rc;
2402}
2403
2404#endif /* LOG_ENABLED */
2405
2406/**
2407 * Detects if the specified address falls within a 5-byte jump generated for an active patch.
2408 * If so, this patch is permanently disabled.
2409 *
2410 * @param pVM Pointer to the VM.
2411 * @param pInstrGC Guest context pointer to instruction
2412 * @param pConflictGC Guest context pointer to check
2413 *
2414 * @note also checks for patch hints to make sure they can never be enabled if a conflict is present.
2415 *
2416 */
2417VMMR3_INT_DECL(int) PATMR3DetectConflict(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictGC)
2418{
2419 AssertReturn(!HMIsEnabled(pVM), VERR_PATCH_NO_CONFLICT);
2420 PPATCHINFO pTargetPatch = patmFindActivePatchByEntrypoint(pVM, pConflictGC, true /* include patch hints */);
2421 if (pTargetPatch)
2422 return patmDisableUnusablePatch(pVM, pInstrGC, pConflictGC, pTargetPatch);
2423 return VERR_PATCH_NO_CONFLICT;
2424}
2425
2426/**
2427 * Recompiles the code stream until the callback function detects a failure or decides everything is acceptable
2428 *
2429 * @returns VBox status code.
2430 * @param pVM Pointer to the VM.
2431 * @param pInstrGC Guest context pointer to privileged instruction
2432 * @param pCurInstrGC Guest context pointer to the current instruction
2433 * @param pfnPATMR3Recompile Callback for testing the disassembled instruction
2434 * @param pCacheRec Cache record ptr
2435 *
2436 */
2437static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Recompile, PPATMP2GLOOKUPREC pCacheRec)
2438{
2439 DISCPUSTATE cpu;
2440 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2441 int rc = VWRN_CONTINUE_ANALYSIS;
2442 uint32_t cbInstr;
2443 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2444 bool disret;
2445#ifdef LOG_ENABLED
2446 char szOutput[256];
2447#endif
2448
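    /* Process the guest code stream one instruction at a time; the recompile callback decides whether to continue,
     * stop at an exit point, or refuse the patch. */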
2449 while (rc == VWRN_CONTINUE_RECOMPILE)
2450 {
2451 pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2452 if (pCurInstrHC == NULL)
2453 {
2454 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2455 goto end;
2456 }
2457#ifdef LOG_ENABLED
2458 disret = patmR3DisInstrToStr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE,
2459 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2460 Log(("Recompile: %s", szOutput));
2461#else
2462 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
2463#endif
2464 if (disret == false)
2465 {
2466 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2467
2468 /* Add lookup record for patch to guest address translation */
2469 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
2470 patmPatchGenIllegalInstr(pVM, pPatch);
2471 rc = VINF_SUCCESS; /* Note: don't fail here; we might refuse an important patch!! */
2472 goto end;
2473 }
2474
2475 rc = pfnPATMR3Recompile(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2476 if (rc != VWRN_CONTINUE_RECOMPILE)
2477 {
2478 /* If irqs are inhibited because of the current instruction, then we must make sure the next one is executed! */
2479 if ( rc == VINF_SUCCESS
2480 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2481 {
2482 DISCPUSTATE cpunext;
2483 uint32_t opsizenext;
2484 uint8_t *pNextInstrHC;
2485 RTRCPTR pNextInstrGC = pCurInstrGC + cbInstr;
2486
2487 Log(("patmRecompileCodeStream: irqs inhibited by instruction %RRv\n", pNextInstrGC));
2488
2489 /* Certain instructions (e.g. sti) force the next instruction to be executed before any interrupts can occur.
2490 * Recompile the next instruction as well
2491 */
2492 pNextInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
2493 if (pNextInstrHC == NULL)
2494 {
2495 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2496 goto end;
2497 }
2498 disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpunext, &opsizenext);
2499 if (disret == false)
2500 {
2501 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2502 goto end;
2503 }
2504 switch(cpunext.pCurInstr->uOpcode)
2505 {
2506 case OP_IRET: /* inhibit cleared in generated code */
2507 case OP_SYSEXIT: /* faults; inhibit should be cleared in HC handling */
2508 case OP_HLT:
2509 break; /* recompile these */
2510
2511 default:
2512 if (cpunext.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2513 {
2514 Log(("Unexpected control flow instruction after inhibit irq instruction\n"));
2515
2516 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2517 AssertRC(rc);
2518 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2519 goto end; /** @todo should be ok to ignore instruction fusing in this case */
2520 }
2521 break;
2522 }
2523
2524 /* Note: after a cli we must continue to a proper exit point */
2525 if (cpunext.pCurInstr->uOpcode != OP_CLI)
2526 {
2527 rc = pfnPATMR3Recompile(pVM, &cpunext, pInstrGC, pNextInstrGC, pCacheRec);
2528 if (RT_SUCCESS(rc))
2529 {
2530 rc = VINF_SUCCESS;
2531 goto end;
2532 }
2533 break;
2534 }
2535 else
2536 rc = VWRN_CONTINUE_RECOMPILE;
2537 }
2538 else
2539 break; /* done! */
2540 }
2541
2542 /** @todo continue with the instructions following the jump and then recompile the jump target code */
2543
2544
2545 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction). */
2546 if ( (cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2547 && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J)
2548 && cpu.pCurInstr->uOpcode != OP_CALL /* complete functions are replaced; don't bother here. */
2549 )
2550 {
2551 RCPTRTYPE(uint8_t *) addr = PATMResolveBranch(&cpu, pCurInstrGC);
2552 if (addr == 0)
2553 {
2554 Log(("We don't support far jumps here!! (%08X)\n", cpu.Param1.fUse));
2555 rc = VERR_PATCHING_REFUSED;
2556 break;
2557 }
2558
2559 Log(("Jump encountered target %RRv\n", addr));
2560
2561 /* We don't check if the branch target lies in a valid page as we've already done that in the analysis phase. */
2562 if (!(cpu.pCurInstr->fOpType & DISOPTYPE_UNCOND_CONTROLFLOW))
2563 {
2564 Log(("patmRecompileCodeStream continue passed conditional jump\n"));
2565 /* First we need to finish this linear code stream until the next exit point. */
2566 rc = patmRecompileCodeStream(pVM, pInstrGC, pCurInstrGC+cbInstr, pfnPATMR3Recompile, pCacheRec);
2567 if (RT_FAILURE(rc))
2568 {
2569 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2570 break; //fatal error
2571 }
2572 }
2573
2574 if (patmGuestGCPtrToPatchGCPtr(pVM, pPatch, addr) == 0)
2575 {
2576 /* New code; let's recompile it. */
2577 Log(("patmRecompileCodeStream continue with jump\n"));
2578
2579 /*
2580 * If we are jumping to an existing patch (or within 5 bytes of the entrypoint), then we must temporarily disable
2581 * this patch so we can continue our analysis
2582 *
2583 * We rely on CSAM to detect and resolve conflicts
2584 */
2585 PPATCHINFO pTargetPatch = patmFindActivePatchByEntrypoint(pVM, addr);
2586 if(pTargetPatch)
2587 {
2588 Log(("Found active patch at target %RRv (%RRv) -> temporarily disabling it!!\n", addr, pTargetPatch->pPrivInstrGC));
2589 PATMR3DisablePatch(pVM, pTargetPatch->pPrivInstrGC);
2590 }
2591
2592 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2593 rc = patmRecompileCodeStream(pVM, pInstrGC, addr, pfnPATMR3Recompile, pCacheRec);
2594 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2595
2596 if(pTargetPatch)
2597 PATMR3EnablePatch(pVM, pTargetPatch->pPrivInstrGC);
2598
2599 if (RT_FAILURE(rc))
2600 {
2601 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2602 break; //done!
2603 }
2604 }
2605 /* Always return to caller here; we're done! */
2606 rc = VINF_SUCCESS;
2607 goto end;
2608 }
2609 else
2610 if (cpu.pCurInstr->fOpType & DISOPTYPE_UNCOND_CONTROLFLOW)
2611 {
2612 rc = VINF_SUCCESS;
2613 goto end;
2614 }
2615 pCurInstrGC += cbInstr;
2616 }
2617end:
2618 Assert(!(pPatch->flags & PATMFL_RECOMPILE_NEXT));
2619 return rc;
2620}
2621
2622
2623/**
2624 * Generate the jump from guest to patch code
2625 *
2626 * @returns VBox status code.
2627 * @param pVM Pointer to the VM.
2628 * @param pPatch Patch record
2629 * @param pCacheRec Guest translation lookup cache record
 * @param fAddFixup Whether to add a fixup record for the jump displacement
2630 */
2631static int patmGenJumpToPatch(PVM pVM, PPATCHINFO pPatch, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2632{
2633 uint8_t temp[8];
2634 uint8_t *pPB;
2635 int rc;
2636
2637 Assert(pPatch->cbPatchJump <= sizeof(temp));
2638 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
2639
2640 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2641 Assert(pPB);
2642
2643#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
2644 if (pPatch->flags & PATMFL_JUMP_CONFLICT)
2645 {
2646 Assert(pPatch->pPatchJumpDestGC);
2647
2648 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
2649 {
2650 // jmp [PatchCode]
2651 if (fAddFixup)
2652 {
2653 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2654 {
2655 Log(("Relocation failed for the jump in the guest code!!\n"));
2656 return VERR_PATCHING_REFUSED;
2657 }
2658 }
2659
2660 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2661 *(uint32_t *)&temp[1] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); // 32-bit relative displacement to the jump destination
2662 }
2663 else
2664 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
2665 {
2666 // jmp [PatchCode]
2667 if (fAddFixup)
2668 {
2669 if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2670 {
2671 Log(("Relocation failed for the jump in the guest code!!\n"));
2672 return VERR_PATCHING_REFUSED;
2673 }
2674 }
2675
2676 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2677 temp[1] = pPatch->aPrivInstr[1]; //jump opcode copied from original instruction
2678 *(uint32_t *)&temp[2] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); // 32-bit relative displacement to the jump destination
2679 }
2680 else
2681 {
2682 Assert(0);
2683 return VERR_PATCHING_REFUSED;
2684 }
2685 }
2686 else
2687#endif
2688 {
2689 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2690
2691 // jmp [PatchCode]
2692 if (fAddFixup)
2693 {
2694 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
2695 {
2696 Log(("Relocation failed for the jump in the guest code!!\n"));
2697 return VERR_PATCHING_REFUSED;
2698 }
2699 }
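        /* Assemble the 5-byte near relative jump (0xE9 + rel32); the displacement is measured from the byte following the jump. */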
2700 temp[0] = 0xE9; //jmp
2701 *(uint32_t *)&temp[1] = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); // 32-bit relative displacement to the patch code
2702 }
2703 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2704 AssertRC(rc);
2705
2706 if (rc == VINF_SUCCESS)
2707 pPatch->flags |= PATMFL_PATCHED_GUEST_CODE;
2708
2709 return rc;
2710}
2711
2712/**
2713 * Remove the jump from guest to patch code
2714 *
2715 * @returns VBox status code.
2716 * @param pVM Pointer to the VM.
2717 * @param pPatch Patch record
2718 */
2719static int patmRemoveJumpToPatch(PVM pVM, PPATCHINFO pPatch)
2720{
2721#ifdef DEBUG
2722 DISCPUSTATE cpu;
2723 char szOutput[256];
2724 uint32_t cbInstr, i = 0;
2725 bool disret;
2726
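    /* Log what currently lives at the patch location (i.e. the installed jump) before restoring the original bytes. */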
2727 while (i < pPatch->cbPrivInstr)
2728 {
2729 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
2730 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2731 if (disret == false)
2732 break;
2733
2734 Log(("Org patch jump: %s", szOutput));
2735 Assert(cbInstr);
2736 i += cbInstr;
2737 }
2738#endif
2739
2740 /* Restore original code (privileged instruction + following instructions that were overwritten because of the 5/6 byte jmp). */
2741 int rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, pPatch->cbPatchJump);
2742#ifdef DEBUG
2743 if (rc == VINF_SUCCESS)
2744 {
2745 i = 0;
2746 while (i < pPatch->cbPrivInstr)
2747 {
2748 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
2749 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2750 if (disret == false)
2751 break;
2752
2753 Log(("Org instr: %s", szOutput));
2754 Assert(cbInstr);
2755 i += cbInstr;
2756 }
2757 }
2758#endif
2759 pPatch->flags &= ~PATMFL_PATCHED_GUEST_CODE;
2760 return rc;
2761}
2762
2763/**
2764 * Generate the call from guest to patch code
2765 *
2766 * @returns VBox status code.
2767 * @param pVM Pointer to the VM.
2768 * @param pPatch Patch record
2769 * @param pTargetGC Guest context address of the call/jump target (patch code)
2770 * @param pCacheRec Guest translation cache record
 * @param fAddFixup Whether to add a fixup record for the jump displacement
2771 */
2772static int patmGenCallToPatch(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTargetGC, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2773{
2774 uint8_t temp[8];
2775 uint8_t *pPB;
2776 int rc;
2777
2778 Assert(pPatch->cbPatchJump <= sizeof(temp));
2779
2780 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2781 Assert(pPB);
2782
2783 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2784
2785 // jmp [PatchCode]
2786 if (fAddFixup)
2787 {
2788 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
2789 {
2790 Log(("Relocation failed for the jump in the guest code!!\n"));
2791 return VERR_PATCHING_REFUSED;
2792 }
2793 }
2794
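    /* Keep the original call/jmp opcode byte and recompute the 32-bit displacement so it now targets the patch code. */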
2795 Assert(pPatch->aPrivInstr[0] == 0xE8 || pPatch->aPrivInstr[0] == 0xE9); /* call or jmp */
2796 temp[0] = pPatch->aPrivInstr[0];
2797 *(uint32_t *)&temp[1] = (uint32_t)pTargetGC - ((uint32_t)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); // 32-bit relative displacement to the target
2798
2799 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2800 AssertRC(rc);
2801
2802 return rc;
2803}
2804
2805
2806/**
2807 * Patch cli/sti pushf/popf instruction block at specified location
2808 *
2809 * @returns VBox status code.
2810 * @param pVM Pointer to the VM.
2811 * @param pInstrGC Guest context pointer to privileged instruction
2812 * @param pInstrHC Host context pointer to privileged instruction
2813 * @param uOpcode Instruction opcode
2814 * @param uOpSize Size of starting instruction
2815 * @param pPatchRec Patch record
2816 *
2817 * @note returns failure if patching is not allowed or possible
2818 *
2819 */
2820static int patmR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2821 uint32_t uOpcode, uint32_t uOpSize, PPATMPATCHREC pPatchRec)
2822{
2823 PPATCHINFO pPatch = &pPatchRec->patch;
2824 int rc = VERR_PATCHING_REFUSED;
2825 uint32_t orgOffsetPatchMem = ~0;
2826 RTRCPTR pInstrStart;
2827 bool fInserted;
2828 NOREF(pInstrHC); NOREF(uOpSize);
2829
2830 /* Save original offset (in case of failures later on) */
2831 /** @todo use the hypervisor heap (that has quite a few consequences for save/restore though) */
2832 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2833
2834 Assert(!(pPatch->flags & (PATMFL_GUEST_SPECIFIC|PATMFL_USER_MODE|PATMFL_TRAPHANDLER)));
2835 switch (uOpcode)
2836 {
2837 case OP_MOV:
2838 break;
2839
2840 case OP_CLI:
2841 case OP_PUSHF:
2842 /* We can 'call' a cli or pushf patch. It will either return to the original guest code when IF is set again, or fault. */
2843 /* Note: special precautions are taken when disabling and enabling such patches. */
2844 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
2845 break;
2846
2847 default:
2848 if (!(pPatch->flags & PATMFL_IDTHANDLER))
2849 {
2850 AssertMsg(0, ("patmR3PatchBlock: Invalid opcode %x\n", uOpcode));
2851 return VERR_INVALID_PARAMETER;
2852 }
2853 }
2854
2855 if (!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)))
2856 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
2857
2858 /* If we're going to insert a patch jump, then the jump itself is not allowed to cross a page boundary. */
2859 if ( (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2860 && PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + SIZEOF_NEARJUMP32)
2861 )
2862 {
2863 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
2864 Log(("Patch jump would cross page boundary -> refuse!!\n"));
2865 rc = VERR_PATCHING_REFUSED;
2866 goto failure;
2867 }
2868
2869 pPatch->nrPatch2GuestRecs = 0;
2870 pInstrStart = pInstrGC;
2871
2872#ifdef PATM_ENABLE_CALL
2873 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
2874#endif
2875
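    /* The new patch block starts at the current free offset in patch memory; code generation appends from patch offset zero. */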
2876 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2877 pPatch->uCurPatchOffset = 0;
2878
2879 if ((pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER)) == PATMFL_IDTHANDLER)
2880 {
2881 Assert(pPatch->flags & PATMFL_INTHANDLER);
2882
2883 /* Install fake cli patch (to clear the virtual IF and check int xx parameters) */
2884 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2885 if (RT_FAILURE(rc))
2886 goto failure;
2887 }
2888
2889 /***************************************************************************************************************************/
2890 /* Note: We can't insert *any* code before a sysenter handler; some Linux guests have an invalid stack at this point!!!!! */
2891 /***************************************************************************************************************************/
2892#ifdef VBOX_WITH_STATISTICS
2893 if (!(pPatch->flags & PATMFL_SYSENTER))
2894 {
2895 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2896 if (RT_FAILURE(rc))
2897 goto failure;
2898 }
2899#endif
2900
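    /* Lookup cache record handed to the recompiler for guest address translation; it carries the patch pointer
     * and any page mapping lock (released below). */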
2901 PATMP2GLOOKUPREC cacheRec;
2902 RT_ZERO(cacheRec);
2903 cacheRec.pPatch = pPatch;
2904
2905 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, &cacheRec);
2906 /* Free leftover lock if any. */
2907 if (cacheRec.Lock.pvMap)
2908 {
2909 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2910 cacheRec.Lock.pvMap = NULL;
2911 }
2912 if (rc != VINF_SUCCESS)
2913 {
2914 Log(("patmR3PatchBlock: patmRecompileCodeStream failed with %d\n", rc));
2915 goto failure;
2916 }
2917
2918 /* Calculated during analysis. */
2919 if (pPatch->cbPatchBlockSize < SIZEOF_NEARJUMP32)
2920 {
2921 /* Most likely cause: we encountered an illegal instruction very early on. */
2922 /** @todo could turn it into an int3 callable patch. */
2923 Log(("patmR3PatchBlock: patch block too small -> refuse\n"));
2924 rc = VERR_PATCHING_REFUSED;
2925 goto failure;
2926 }
2927
2928 /* size of patch block */
2929 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2930
2931
2932 /* Update free pointer in patch memory. */
2933 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2934 /* Round to next 8 byte boundary. */
2935 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2936
2937 /*
2938 * Insert into patch to guest lookup tree
2939 */
2940 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2941 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2942 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2943 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
2944 if (!fInserted)
2945 {
2946 rc = VERR_PATCHING_REFUSED;
2947 goto failure;
2948 }
2949
2950 /* Note that patmr3SetBranchTargets can install additional patches!! */
2951 rc = patmr3SetBranchTargets(pVM, pPatch);
2952 if (rc != VINF_SUCCESS)
2953 {
2954 Log(("patmR3PatchBlock: patmr3SetBranchTargets failed with %d\n", rc));
2955 goto failure;
2956 }
2957
2958#ifdef LOG_ENABLED
2959 Log(("Patch code ----------------------------------------------------------\n"));
2960 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, &cacheRec);
2961 /* Free leftover lock if any. */
2962 if (cacheRec.Lock.pvMap)
2963 {
2964 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2965 cacheRec.Lock.pvMap = NULL;
2966 }
2967 Log(("Patch code ends -----------------------------------------------------\n"));
2968#endif
2969
2970 /* make a copy of the guest code bytes that will be overwritten */
2971 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
2972
2973 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
2974 AssertRC(rc);
2975
2976 if (pPatch->flags & PATMFL_INT3_REPLACEMENT_BLOCK)
2977 {
2978 /*uint8_t bASMInt3 = 0xCC; - unused */
2979
2980 Log(("patmR3PatchBlock %RRv -> int 3 callable patch.\n", pPatch->pPrivInstrGC));
2981 /* Replace first opcode byte with 'int 3'. */
2982 rc = patmActivateInt3Patch(pVM, pPatch);
2983 if (RT_FAILURE(rc))
2984 goto failure;
2985
2986 /* normal patch can be turned into an int3 patch -> clear patch jump installation flag. */
2987 pPatch->flags &= ~PATMFL_MUST_INSTALL_PATCHJMP;
2988
2989 pPatch->flags &= ~PATMFL_INSTR_HINT;
2990 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
2991 }
2992 else
2993 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2994 {
2995 Assert(!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)));
2996 /* now insert a jump in the guest code */
2997 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
2998 AssertRC(rc);
2999 if (RT_FAILURE(rc))
3000 goto failure;
3001
3002 }
3003
3004 patmR3DbgAddPatch(pVM, pPatchRec);
3005
3006 PATM_LOG_RAW_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
3007
3008 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3009 pPatch->pTempInfo->nrIllegalInstr = 0;
3010
3011 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
3012
3013 pPatch->uState = PATCH_ENABLED;
3014 return VINF_SUCCESS;
3015
3016failure:
3017 if (pPatchRec->CoreOffset.Key)
3018 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3019
3020 patmEmptyTree(pVM, &pPatch->FixupTree);
3021 pPatch->nrFixups = 0;
3022
3023 patmEmptyTree(pVM, &pPatch->JumpTree);
3024 pPatch->nrJumpRecs = 0;
3025
3026 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3027 pPatch->pTempInfo->nrIllegalInstr = 0;
3028
3029 /* Turn this cli patch into a dummy. */
3030 pPatch->uState = PATCH_REFUSED;
3031 pPatch->pPatchBlockOffset = 0;
3032
3033 // Give back the patch memory we no longer need
3034 Assert(orgOffsetPatchMem != (uint32_t)~0);
3035 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3036
3037 return rc;
3038}
3039
3040/**
3041 * Patch IDT handler
3042 *
3043 * @returns VBox status code.
3044 * @param pVM Pointer to the VM.
3045 * @param pInstrGC Guest context pointer to privileged instruction
3046 * @param uOpSize Size of starting instruction
3047 * @param pPatchRec Patch record
3048 * @param pCacheRec Cache record ptr
3049 *
3050 * @note returns failure if patching is not allowed or possible
3051 *
3052 */
3053static int patmIdtHandler(PVM pVM, RTRCPTR pInstrGC, uint32_t uOpSize, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3054{
3055 PPATCHINFO pPatch = &pPatchRec->patch;
3056 bool disret;
3057 DISCPUSTATE cpuPush, cpuJmp;
3058 uint32_t cbInstr;
3059 RTRCPTR pCurInstrGC = pInstrGC;
3060 uint8_t *pCurInstrHC, *pInstrHC;
3061 uint32_t orgOffsetPatchMem = ~0;
3062
3063 pInstrHC = pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
3064 AssertReturn(pCurInstrHC, VERR_PAGE_NOT_PRESENT);
3065
3066 /*
3067 * In Linux it's often the case that many interrupt handlers push a predefined value onto the stack
3068 * and then jump to a common entrypoint. In order not to waste a lot of memory, we will check for this
3069 * condition here and only patch the common entrypoint once.
3070 */
3071 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuPush, &cbInstr);
3072 Assert(disret);
3073 if (disret && cpuPush.pCurInstr->uOpcode == OP_PUSH)
3074 {
3075 RTRCPTR pJmpInstrGC;
3076 int rc;
3077 pCurInstrGC += cbInstr;
3078
3079 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuJmp, &cbInstr);
3080 if ( disret
3081 && cpuJmp.pCurInstr->uOpcode == OP_JMP
3082 && (pJmpInstrGC = PATMResolveBranch(&cpuJmp, pCurInstrGC))
3083 )
3084 {
3085 bool fInserted;
3086 PPATMPATCHREC pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
3087 if (pJmpPatch == 0)
3088 {
3089 /* Patch it first! */
3090 rc = PATMR3InstallPatch(pVM, pJmpInstrGC, pPatch->flags | PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT);
3091 if (rc != VINF_SUCCESS)
3092 goto failure;
3093 pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
3094 Assert(pJmpPatch);
3095 }
3096 if (pJmpPatch->patch.uState != PATCH_ENABLED)
3097 goto failure;
3098
3099 /* save original offset (in case of failures later on) */
3100 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3101
3102 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3103 pPatch->uCurPatchOffset = 0;
3104 pPatch->nrPatch2GuestRecs = 0;
3105
3106#ifdef VBOX_WITH_STATISTICS
3107 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3108 if (RT_FAILURE(rc))
3109 goto failure;
3110#endif
3111
3112 /* Install fake cli patch (to clear the virtual IF) */
3113 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
3114 if (RT_FAILURE(rc))
3115 goto failure;
3116
3117 /* Add lookup record for patch to guest address translation (for the push) */
3118 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pInstrGC, PATM_LOOKUP_BOTHDIR);
3119
3120 /* Duplicate push. */
3121 rc = patmPatchGenDuplicate(pVM, pPatch, &cpuPush, pInstrGC);
3122 if (RT_FAILURE(rc))
3123 goto failure;
3124
3125 /* Generate jump to common entrypoint. */
3126 rc = patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, PATCHCODE_PTR_GC(&pJmpPatch->patch));
3127 if (RT_FAILURE(rc))
3128 goto failure;
3129
3130 /* size of patch block */
3131 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3132
3133 /* Update free pointer in patch memory. */
3134 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3135 /* Round to next 8 byte boundary */
3136 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3137
3138 /* There's no jump from guest to patch code. */
3139 pPatch->cbPatchJump = 0;
3140
3141
3142#ifdef LOG_ENABLED
3143 Log(("Patch code ----------------------------------------------------------\n"));
3144 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3145 Log(("Patch code ends -----------------------------------------------------\n"));
3146#endif
3147 Log(("Successfully installed IDT handler patch at %RRv\n", pInstrGC));
3148
3149 /*
3150 * Insert into patch to guest lookup tree
3151 */
3152 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3153 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3154 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3155 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3156 patmR3DbgAddPatch(pVM, pPatchRec);
3157
3158 pPatch->uState = PATCH_ENABLED;
3159
3160 return VINF_SUCCESS;
3161 }
3162 }
3163failure:
3164 /* Give back the patch memory we no longer need */
3165 if (orgOffsetPatchMem != (uint32_t)~0)
3166 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3167
3168 return patmR3PatchBlock(pVM, pInstrGC, pInstrHC, OP_CLI, uOpSize, pPatchRec);
3169}
3170
3171/**
3172 * Install a trampoline to call a guest trap handler directly
3173 *
3174 * @returns VBox status code.
3175 * @param pVM Pointer to the VM.
3176 * @param pInstrGC Guest context pointer to privileged instruction
3177 * @param pPatchRec Patch record
3178 * @param pCacheRec Cache record ptr
3179 *
3180 */
3181static int patmInstallTrapTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3182{
3183 PPATCHINFO pPatch = &pPatchRec->patch;
3184 int rc = VERR_PATCHING_REFUSED;
3185 uint32_t orgOffsetPatchMem = ~0;
3186 bool fInserted;
3187
3188 // save original offset (in case of failures later on)
3189 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3190
3191 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3192 pPatch->uCurPatchOffset = 0;
3193 pPatch->nrPatch2GuestRecs = 0;
3194
3195#ifdef VBOX_WITH_STATISTICS
3196 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3197 if (RT_FAILURE(rc))
3198 goto failure;
3199#endif
3200
3201 rc = patmPatchGenTrapEntry(pVM, pPatch, pInstrGC);
3202 if (RT_FAILURE(rc))
3203 goto failure;
3204
3205 /* size of patch block */
3206 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3207
3208 /* Update free pointer in patch memory. */
3209 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3210 /* Round to next 8 byte boundary */
3211 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3212
3213 /* There's no jump from guest to patch code. */
3214 pPatch->cbPatchJump = 0;
3215
3216#ifdef LOG_ENABLED
3217 Log(("Patch code ----------------------------------------------------------\n"));
3218 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3219 Log(("Patch code ends -----------------------------------------------------\n"));
3220#endif
3221 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "TRAP handler");
3222 Log(("Successfully installed Trap Trampoline patch at %RRv\n", pInstrGC));
3223
3224 /*
3225 * Insert into patch to guest lookup tree
3226 */
3227 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3228 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3229 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3230 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3231 patmR3DbgAddPatch(pVM, pPatchRec);
3232
3233 pPatch->uState = PATCH_ENABLED;
3234 return VINF_SUCCESS;
3235
3236failure:
3237 AssertMsgFailed(("Failed to install trap handler trampoline!!\n"));
3238
3239 /* Turn this cli patch into a dummy. */
3240 pPatch->uState = PATCH_REFUSED;
3241 pPatch->pPatchBlockOffset = 0;
3242
3243 /* Give back the patch memory we no longer need */
3244 Assert(orgOffsetPatchMem != (uint32_t)~0);
3245 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3246
3247 return rc;
3248}
3249
3250
3251#ifdef LOG_ENABLED
3252/**
3253 * Check if the instruction is patched as a common idt handler
3254 *
3255 * @returns true or false
3256 * @param pVM Pointer to the VM.
3257 * @param pInstrGC Guest context pointer to the instruction
3258 *
3259 */
3260static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC)
3261{
3262 PPATMPATCHREC pRec;
3263
3264 pRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
3265 if (pRec && pRec->patch.flags & PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT)
3266 return true;
3267 return false;
3268}
3269 #endif /* LOG_ENABLED */
3270
3271
3272/**
3273 * Duplicates a complete function
3274 *
3275 * @returns VBox status code.
3276 * @param pVM Pointer to the VM.
3277 * @param pInstrGC Guest context pointer to privileged instruction
3278 * @param pPatchRec Patch record
3279 * @param pCacheRec Cache record ptr
3280 *
3281 */
3282static int patmDuplicateFunction(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3283{
3284 PPATCHINFO pPatch = &pPatchRec->patch;
3285 int rc = VERR_PATCHING_REFUSED;
3286 uint32_t orgOffsetPatchMem = ~0;
3287 bool fInserted;
3288
3289 Log(("patmDuplicateFunction %RRv\n", pInstrGC));
3290 /* Save original offset (in case of failures later on). */
3291 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3292
3293 /* We will not go on indefinitely with call instruction handling. */
3294 if (pVM->patm.s.ulCallDepth > PATM_MAX_CALL_DEPTH)
3295 {
3296 Log(("patmDuplicateFunction: maximum callback depth reached!!\n"));
3297 return VERR_PATCHING_REFUSED;
3298 }
3299
3300 pVM->patm.s.ulCallDepth++;
3301
3302#ifdef PATM_ENABLE_CALL
3303 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
3304#endif
3305
3306 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
3307
3308 pPatch->nrPatch2GuestRecs = 0;
3309 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3310 pPatch->uCurPatchOffset = 0;
3311
3312 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3313 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3314 if (RT_FAILURE(rc))
3315 goto failure;
3316
3317#ifdef VBOX_WITH_STATISTICS
3318 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3319 if (RT_FAILURE(rc))
3320 goto failure;
3321#endif
3322
3323 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pCacheRec);
3324 if (rc != VINF_SUCCESS)
3325 {
3326 Log(("PATMR3PatchCli: patmRecompileCodeStream failed with %d\n", rc));
3327 goto failure;
3328 }
3329
3330 //size of patch block
3331 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3332
3333 //update free pointer in patch memory
3334 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3335 /* Round to next 8 byte boundary. */
3336 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3337
3338 pPatch->uState = PATCH_ENABLED;
3339
3340 /*
3341 * Insert into patch to guest lookup tree
3342 */
3343 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3344 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3345 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3346 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3347 if (!fInserted)
3348 {
3349 rc = VERR_PATCHING_REFUSED;
3350 goto failure;
3351 }
3352
3353 /* Note that patmr3SetBranchTargets can install additional patches!! */
3354 rc = patmr3SetBranchTargets(pVM, pPatch);
3355 if (rc != VINF_SUCCESS)
3356 {
3357 Log(("PATMR3PatchCli: patmr3SetBranchTargets failed with %d\n", rc));
3358 goto failure;
3359 }
3360
3361 patmR3DbgAddPatch(pVM, pPatchRec);
3362
3363#ifdef LOG_ENABLED
3364 Log(("Patch code ----------------------------------------------------------\n"));
3365 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3366 Log(("Patch code ends -----------------------------------------------------\n"));
3367#endif
3368
3369 Log(("Successfully installed function duplication patch at %RRv\n", pInstrGC));
3370
3371 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3372 pPatch->pTempInfo->nrIllegalInstr = 0;
3373
3374 pVM->patm.s.ulCallDepth--;
3375 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledFunctionPatches);
3376 return VINF_SUCCESS;
3377
3378failure:
3379 if (pPatchRec->CoreOffset.Key)
3380 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3381
3382 patmEmptyTree(pVM, &pPatch->FixupTree);
3383 pPatch->nrFixups = 0;
3384
3385 patmEmptyTree(pVM, &pPatch->JumpTree);
3386 pPatch->nrJumpRecs = 0;
3387
3388 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3389 pPatch->pTempInfo->nrIllegalInstr = 0;
3390
3391 /* Turn this cli patch into a dummy. */
3392 pPatch->uState = PATCH_REFUSED;
3393 pPatch->pPatchBlockOffset = 0;
3394
3395 // Give back the patch memory we no longer need
3396 Assert(orgOffsetPatchMem != (uint32_t)~0);
3397 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3398
3399 pVM->patm.s.ulCallDepth--;
3400 Log(("patmDupicateFunction %RRv failed!!\n", pInstrGC));
3401 return rc;
3402}
3403
3404/**
3405 * Creates trampoline code to jump inside an existing patch
3406 *
3407 * @returns VBox status code.
3408 * @param pVM Pointer to the VM.
3409 * @param pInstrGC Guest context pointer to privileged instruction
3410 * @param pPatchRec Patch record
3411 *
3412 */
3413static int patmCreateTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3414{
3415 PPATCHINFO pPatch = &pPatchRec->patch;
3416 RTRCPTR pPage, pPatchTargetGC = 0;
3417 uint32_t orgOffsetPatchMem = ~0;
3418 int rc = VERR_PATCHING_REFUSED;
3419 PPATCHINFO pPatchToJmp = NULL; /**< Patch the trampoline jumps to. */
3420 PTRAMPREC pTrampRec = NULL; /**< Trampoline record used to find the patch. */
3421 bool fInserted = false;
3422
3423 Log(("patmCreateTrampoline %RRv\n", pInstrGC));
3424 /* Save original offset (in case of failures later on). */
3425 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3426
3427 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3428 /** @todo we already checked this before */
3429 pPage = pInstrGC & PAGE_BASE_GC_MASK;
3430
3431 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3432 if (pPatchPage)
3433 {
3434 uint32_t i;
3435
3436 for (i=0;i<pPatchPage->cCount;i++)
3437 {
3438 if (pPatchPage->papPatch[i])
3439 {
3440 pPatchToJmp = pPatchPage->papPatch[i];
3441
3442 if ( (pPatchToJmp->flags & PATMFL_DUPLICATE_FUNCTION)
3443 && pPatchToJmp->uState == PATCH_ENABLED)
3444 {
3445 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatchToJmp, pInstrGC);
3446 if (pPatchTargetGC)
3447 {
3448 uint32_t offsetPatch = pPatchTargetGC - pVM->patm.s.pPatchMemGC;
3449 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatchToJmp->Patch2GuestAddrTree, offsetPatch, false);
3450 Assert(pPatchToGuestRec);
3451
3452 pPatchToGuestRec->fJumpTarget = true;
3453 Assert(pPatchTargetGC != pPatchToJmp->pPrivInstrGC);
3454 Log(("patmCreateTrampoline: generating jump to code inside patch at %RRv (patch target %RRv)\n", pPatchToJmp->pPrivInstrGC, pPatchTargetGC));
3455 break;
3456 }
3457 }
3458 }
3459 }
3460 }
3461 AssertReturn(pPatchPage && pPatchTargetGC && pPatchToJmp, VERR_PATCHING_REFUSED);
3462
3463 /*
3464 * Only record the trampoline patch if this is the first patch to the target
3465 * or we recorded other patches already.
3466 * The goal is to refuse refreshing function duplicates if the guest
3467 * modifies code after a saved state was loaded because it is not possible
3468 * to save the relation between trampoline and target without changing the
3469 * saved state version.
3470 */
3471 if ( !(pPatchToJmp->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
3472 || pPatchToJmp->pTrampolinePatchesHead)
3473 {
3474 pPatchToJmp->flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
3475 pTrampRec = (PTRAMPREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pTrampRec));
3476 if (!pTrampRec)
3477 return VERR_NO_MEMORY; /* or better return VERR_PATCHING_REFUSED to let the VM continue? */
3478
3479 pTrampRec->pPatchTrampoline = pPatchRec;
3480 }
3481
3482 pPatch->nrPatch2GuestRecs = 0;
3483 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3484 pPatch->uCurPatchOffset = 0;
3485
3486 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3487 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3488 if (RT_FAILURE(rc))
3489 goto failure;
3490
3491#ifdef VBOX_WITH_STATISTICS
3492 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3493 if (RT_FAILURE(rc))
3494 goto failure;
3495#endif
3496
3497 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC);
3498 if (RT_FAILURE(rc))
3499 goto failure;
3500
3501 /*
3502 * Insert into patch to guest lookup tree
3503 */
3504 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3505 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3506 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3507 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3508 if (!fInserted)
3509 {
3510 rc = VERR_PATCHING_REFUSED;
3511 goto failure;
3512 }
3513 patmR3DbgAddPatch(pVM, pPatchRec);
3514
3515 /* size of patch block */
3516 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3517
3518 /* Update free pointer in patch memory. */
3519 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3520 /* Round to next 8 byte boundary */
3521 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3522
3523 /* There's no jump from guest to patch code. */
3524 pPatch->cbPatchJump = 0;
3525
3526 /* Enable the patch. */
3527 pPatch->uState = PATCH_ENABLED;
3528 /* We allow this patch to be called as a function. */
3529 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
3530
3531 if (pTrampRec)
3532 {
3533 pTrampRec->pNext = pPatchToJmp->pTrampolinePatchesHead;
3534 pPatchToJmp->pTrampolinePatchesHead = pTrampRec;
3535 }
3536 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledTrampoline);
3537 return VINF_SUCCESS;
3538
3539failure:
3540 if (pPatchRec->CoreOffset.Key)
3541 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3542
3543 patmEmptyTree(pVM, &pPatch->FixupTree);
3544 pPatch->nrFixups = 0;
3545
3546 patmEmptyTree(pVM, &pPatch->JumpTree);
3547 pPatch->nrJumpRecs = 0;
3548
3549 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3550 pPatch->pTempInfo->nrIllegalInstr = 0;
3551
3552 /* Turn this cli patch into a dummy. */
3553 pPatch->uState = PATCH_REFUSED;
3554 pPatch->pPatchBlockOffset = 0;
3555
3556 // Give back the patch memory we no longer need
3557 Assert(orgOffsetPatchMem != (uint32_t)~0);
3558 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3559
3560 if (pTrampRec)
3561 MMR3HeapFree(pTrampRec);
3562
3563 return rc;
3564}
3565
3566
3567/**
3568 * Patch branch target function for call/jump at specified location.
3569 * (in response to a VINF_PATM_DUPLICATE_FUNCTION GC exit reason)
3570 *
3571 * @returns VBox status code.
3572 * @param pVM Pointer to the VM.
3573 * @param pCtx Pointer to the guest CPU context.
3574 *
3575 */
3576VMMR3_INT_DECL(int) PATMR3DuplicateFunctionRequest(PVM pVM, PCPUMCTX pCtx)
3577{
3578 RTRCPTR pBranchTarget, pPage;
3579 int rc;
3580 RTRCPTR pPatchTargetGC = 0;
3581 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
3582
3583 pBranchTarget = pCtx->edx;
3584 pBranchTarget = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pBranchTarget);
3585
3586 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3587 pPage = pBranchTarget & PAGE_BASE_GC_MASK;
3588
3589 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3590 if (pPatchPage)
3591 {
3592 uint32_t i;
3593
3594 for (i=0;i<pPatchPage->cCount;i++)
3595 {
3596 if (pPatchPage->papPatch[i])
3597 {
3598 PPATCHINFO pPatch = pPatchPage->papPatch[i];
3599
3600 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
3601 && pPatch->uState == PATCH_ENABLED)
3602 {
3603 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pBranchTarget);
3604 if (pPatchTargetGC)
3605 {
3606 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateUseExisting);
3607 break;
3608 }
3609 }
3610 }
3611 }
3612 }
3613
3614 if (pPatchTargetGC)
3615 {
3616 /* Create a trampoline that also sets PATM_INTERRUPTFLAG. */
3617 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_TRAMPOLINE);
3618 }
3619 else
3620 {
3621 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
3622 }
3623
3624 if (rc == VINF_SUCCESS)
3625 {
3626 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pBranchTarget);
3627 Assert(pPatchTargetGC);
3628 }
3629
3630 if (pPatchTargetGC)
3631 {
3632 pCtx->eax = pPatchTargetGC;
3633 pCtx->eax = pCtx->eax - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC; /* make it relative */
3634 }
3635 else
3636 {
3637 /* We add a dummy entry into the lookup cache so we won't get bombarded with the same requests over and over again. */
3638 pCtx->eax = 0;
3639 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQFailed);
3640 }
3641 Assert(PATMIsPatchGCAddr(pVM, pCtx->edi));
3642 rc = patmAddBranchToLookupCache(pVM, pCtx->edi, pBranchTarget, pCtx->eax);
3643 AssertRC(rc);
3644
3645 pCtx->eip += PATM_ILLEGAL_INSTR_SIZE;
3646 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQSuccess);
3647 return VINF_SUCCESS;
3648}
3649
3650/**
3651 * Replaces a function call by a call to an existing function duplicate (or jmp -> jmp)
3652 *
3653 * @returns VBox status code.
3654 * @param pVM Pointer to the VM.
3655 * @param pCpu Disassembly CPU structure ptr
3656 * @param pInstrGC Guest context pointer to privileged instruction
3657 * @param pCacheRec Cache record ptr
3658 *
3659 */
3660static int patmReplaceFunctionCall(PVM pVM, DISCPUSTATE *pCpu, RTRCPTR pInstrGC, PPATMP2GLOOKUPREC pCacheRec)
3661{
3662 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3663 int rc = VERR_PATCHING_REFUSED;
3664 DISCPUSTATE cpu;
3665 RTRCPTR pTargetGC;
3666 PPATMPATCHREC pPatchFunction;
3667 uint32_t cbInstr;
3668 bool disret;
3669
3670 Assert(pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL);
3671 Assert((pCpu->pCurInstr->uOpcode == OP_CALL || pCpu->pCurInstr->uOpcode == OP_JMP) && pCpu->cbInstr == SIZEOF_NEARJUMP32);
3672
3673 if ((pCpu->pCurInstr->uOpcode != OP_CALL && pCpu->pCurInstr->uOpcode != OP_JMP) || pCpu->cbInstr != SIZEOF_NEARJUMP32)
3674 {
3675 rc = VERR_PATCHING_REFUSED;
3676 goto failure;
3677 }
3678
3679 pTargetGC = PATMResolveBranch(pCpu, pInstrGC);
3680 if (pTargetGC == 0)
3681 {
3682 Log(("We don't support far jumps here!! (%08X)\n", pCpu->Param1.fUse));
3683 rc = VERR_PATCHING_REFUSED;
3684 goto failure;
3685 }
3686
3687 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3688 if (pPatchFunction == NULL)
3689 {
3690 for(;;)
3691 {
3692 /* It could be an indirect call (call -> jmp dest).
3693 * Note that it's dangerous to assume the jump will never change...
3694 */
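            /*
             * Hypothetical example of the indirection handled here (names are illustrative only):
             *     call  ImportThunk
             *     ...
             *     ImportThunk:  jmp   RealFunction
             * We follow the jmp once so the call can be redirected to the duplicate of RealFunction.
             */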
3695 uint8_t *pTmpInstrHC;
3696
3697 pTmpInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pTargetGC);
3698 Assert(pTmpInstrHC);
3699 if (pTmpInstrHC == 0)
3700 break;
3701
3702 disret = patmR3DisInstr(pVM, pPatch, pTargetGC, pTmpInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
3703 if (disret == false || cpu.pCurInstr->uOpcode != OP_JMP)
3704 break;
3705
3706 pTargetGC = PATMResolveBranch(&cpu, pTargetGC);
3707 if (pTargetGC == 0)
3708 {
3709 break;
3710 }
3711
3712 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3713 break;
3714 }
3715 if (pPatchFunction == 0)
3716 {
3717 AssertMsgFailed(("Unable to find duplicate function %RRv\n", pTargetGC));
3718 rc = VERR_PATCHING_REFUSED;
3719 goto failure;
3720 }
3721 }
3722
3723 // make a copy of the guest code bytes that will be overwritten
3724 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
3725
3726 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3727 AssertRC(rc);
3728
3729 /* Now replace the original call in the guest code */
3730 rc = patmGenCallToPatch(pVM, pPatch, PATCHCODE_PTR_GC(&pPatchFunction->patch), pCacheRec, true);
3731 AssertRC(rc);
3732 if (RT_FAILURE(rc))
3733 goto failure;
3734
3735 /* Lowest and highest address for write monitoring. */
3736 pPatch->pInstrGCLowest = pInstrGC;
3737 pPatch->pInstrGCHighest = pInstrGC + pCpu->cbInstr;
3738 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "Call");
3739
3740 Log(("Successfully installed function replacement patch at %RRv\n", pInstrGC));
3741
3742 pPatch->uState = PATCH_ENABLED;
3743 return VINF_SUCCESS;
3744
3745failure:
3746 /* Turn this patch into a dummy. */
3747 pPatch->uState = PATCH_REFUSED;
3748
3749 return rc;
3750}
3751
3752/**
3753 * Replace the address in an MMIO instruction with the cached version.
3754 *
3755 * @returns VBox status code.
3756 * @param pVM Pointer to the VM.
3757 * @param pInstrGC Guest context pointer to privileged instruction
3758 * @param pCpu Disassembly CPU structure ptr
3759 * @param pCacheRec Cache record ptr
3760 *
3761 * @note returns failure if patching is not allowed or possible
3762 *
3763 */
3764static int patmPatchMMIOInstr(PVM pVM, RTRCPTR pInstrGC, DISCPUSTATE *pCpu, PPATMP2GLOOKUPREC pCacheRec)
3765{
3766 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3767 uint8_t *pPB;
3768 int rc = VERR_PATCHING_REFUSED;
3769
3770 Assert(pVM->patm.s.mmio.pCachedData);
3771 if (!pVM->patm.s.mmio.pCachedData)
3772 goto failure;
3773
3774 if (pCpu->Param2.fUse != DISUSE_DISPLACEMENT32)
3775 goto failure;
3776
3777 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
3778 if (pPB == 0)
3779 goto failure;
3780
3781 /* Add relocation record for cached data access. */
3782 if (patmPatchAddReloc32(pVM, pPatch, &pPB[pCpu->cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE, pPatch->pPrivInstrGC,
3783 pVM->patm.s.mmio.pCachedData) != VINF_SUCCESS)
3784 {
3785 Log(("Relocation failed for cached mmio address!!\n"));
3786 return VERR_PATCHING_REFUSED;
3787 }
3788 PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "MMIO patch old instruction:", "");
3789
3790 /* Save original instruction. */
3791 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3792 AssertRC(rc);
3793
3794 pPatch->cbPatchJump = pPatch->cbPrivInstr; /* bit of a misnomer in this case; size of replacement instruction. */
3795
3796 /* Replace address with that of the cached item. */
3797 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pInstrGC + pCpu->cbInstr - sizeof(RTRCPTR),
3798 &pVM->patm.s.mmio.pCachedData, sizeof(RTRCPTR));
3799 AssertRC(rc);
3800 if (RT_FAILURE(rc))
3801 {
3802 goto failure;
3803 }
3804
3805 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "MMIO");
3806 pVM->patm.s.mmio.pCachedData = 0;
3807 pVM->patm.s.mmio.GCPhys = 0;
3808 pPatch->uState = PATCH_ENABLED;
3809 return VINF_SUCCESS;
3810
3811failure:
3812 /* Turn this patch into a dummy. */
3813 pPatch->uState = PATCH_REFUSED;
3814
3815 return rc;
3816}
3817
3818
3819/**
3820 * Replace the address in an MMIO instruction with the cached version. (instruction is part of an existing patch)
3821 *
3822 * @returns VBox status code.
3823 * @param pVM Pointer to the VM.
3824 * @param pInstrGC Guest context pointer to privileged instruction
3825 * @param pPatch Patch record
3826 *
3827 * @note returns failure if patching is not allowed or possible
3828 *
3829 */
3830static int patmPatchPATMMMIOInstr(PVM pVM, RTRCPTR pInstrGC, PPATCHINFO pPatch)
3831{
3832 DISCPUSTATE cpu;
3833 uint32_t cbInstr;
3834 bool disret;
3835 uint8_t *pInstrHC;
3836
3837 AssertReturn(pVM->patm.s.mmio.pCachedData, VERR_INVALID_PARAMETER);
3838
3839 /* Convert GC to HC address. */
3840 pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pInstrGC);
3841 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
3842
3843 /* Disassemble mmio instruction. */
3844 disret = patmR3DisInstrNoStrOpMode(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE,
3845 &cpu, &cbInstr);
3846 if (disret == false)
3847 {
3848 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
3849 return VERR_PATCHING_REFUSED;
3850 }
3851
3852 AssertMsg(cbInstr <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", cbInstr));
3853 if (cbInstr > MAX_INSTR_SIZE)
3854 return VERR_PATCHING_REFUSED;
3855 if (cpu.Param2.fUse != DISUSE_DISPLACEMENT32)
3856 return VERR_PATCHING_REFUSED;
3857
3858 /* Add relocation record for cached data access. */
3859 if (patmPatchAddReloc32(pVM, pPatch, &pInstrHC[cpu.cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE) != VINF_SUCCESS)
3860 {
3861 Log(("Relocation failed for cached mmio address!!\n"));
3862 return VERR_PATCHING_REFUSED;
3863 }
3864 /* Replace address with that of the cached item. */
3865 *(RTRCPTR *)&pInstrHC[cpu.cbInstr - sizeof(RTRCPTR)] = pVM->patm.s.mmio.pCachedData;
3866
3867 /* Lowest and highest address for write monitoring. */
3868 pPatch->pInstrGCLowest = pInstrGC;
3869 pPatch->pInstrGCHighest = pInstrGC + cpu.cbInstr;
3870
3871 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "MMIO");
3872 pVM->patm.s.mmio.pCachedData = 0;
3873 pVM->patm.s.mmio.GCPhys = 0;
3874 return VINF_SUCCESS;
3875}
3876
3877/**
3878 * Activates an int3 patch
3879 *
3880 * @returns VBox status code.
3881 * @param pVM Pointer to the VM.
3882 * @param pPatch Patch record
3883 */
3884static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3885{
3886 uint8_t bASMInt3 = 0xCC;
3887 int rc;
3888
3889 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3890 Assert(pPatch->uState != PATCH_ENABLED);
3891
3892 /* Replace first opcode byte with 'int 3'. */
3893 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, &bASMInt3, sizeof(bASMInt3));
3894 AssertRC(rc);
3895
3896 pPatch->cbPatchJump = sizeof(bASMInt3);
3897
3898 return rc;
3899}
3900
3901/**
3902 * Deactivates an int3 patch
3903 *
3904 * @returns VBox status code.
3905 * @param pVM Pointer to the VM.
3906 * @param pPatch Patch record
3907 */
3908static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3909{
3910 uint8_t ASMInt3 = 0xCC;
3911 int rc;
3912
3913 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3914 Assert(pPatch->uState == PATCH_ENABLED || pPatch->uState == PATCH_DIRTY);
3915
3916 /* Restore first opcode byte. */
3917 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, sizeof(ASMInt3));
3918 AssertRC(rc);
3919 return rc;
3920}
3921
3922/**
3923 * Replace an instruction with a breakpoint (0xCC), that is handled dynamically
3924 * in the raw-mode context.
3925 *
3926 * @returns VBox status code.
3927 * @param pVM Pointer to the VM.
3928 * @param pInstrGC Guest context pointer to privileged instruction
3929 * @param pInstrHC Host context pointer to privileged instruction
3930 * @param pCpu Disassembly CPU structure ptr
3931 * @param pPatch Patch record
3932 *
3933 * @note returns failure if patching is not allowed or possible
3934 *
3935 */
3936int patmR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATCHINFO pPatch)
3937{
3938 uint8_t bASMInt3 = 0xCC;
3939 int rc;
3940
3941 /* Note: Do not use patch memory here! It might be called during patch installation too. */
3942 PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "patmR3PatchInstrInt3:", "");
3943
3944 /* Save the original instruction. */
3945 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3946 AssertRC(rc);
3947 pPatch->cbPatchJump = sizeof(bASMInt3); /* bit of a misnomer in this case; size of replacement instruction. */
3948
3949 pPatch->flags |= PATMFL_INT3_REPLACEMENT;
3950
3951 /* Replace first opcode byte with 'int 3'. */
3952 rc = patmActivateInt3Patch(pVM, pPatch);
3953 if (RT_FAILURE(rc))
3954 goto failure;
3955
3956 /* Lowest and highest address for write monitoring. */
3957 pPatch->pInstrGCLowest = pInstrGC;
3958 pPatch->pInstrGCHighest = pInstrGC + pCpu->cbInstr;
3959
3960 pPatch->uState = PATCH_ENABLED;
3961 return VINF_SUCCESS;
3962
3963failure:
3964 /* Turn this patch into a dummy. */
3965 return VERR_PATCHING_REFUSED;
3966}
3967
3968#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
3969/**
3970 * Patch a jump instruction at specified location
3971 *
3972 * @returns VBox status code.
3973 * @param pVM Pointer to the VM.
3974 * @param pInstrGC Guest context pointer to privileged instruction
3975 * @param pInstrHC Host context pointer to privileged instruction
3976 * @param pCpu Disassembly CPU structure ptr
3977 * @param pPatchRec Patch record
3978 *
3979 * @note returns failure if patching is not allowed or possible
3980 *
3981 */
3982int patmPatchJump(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATMPATCHREC pPatchRec)
3983{
3984 PPATCHINFO pPatch = &pPatchRec->patch;
3985 int rc = VERR_PATCHING_REFUSED;
3986
3987 pPatch->pPatchBlockOffset = 0; /* doesn't use patch memory */
3988 pPatch->uCurPatchOffset = 0;
3989 pPatch->cbPatchBlockSize = 0;
3990 pPatch->flags |= PATMFL_SINGLE_INSTRUCTION;
3991
3992 /*
3993 * Instruction replacements such as these should never be interrupted. I've added code to EM.cpp to
3994 * make sure this never happens. (unless a trap is triggered (intentionally or not))
3995 */
3996 switch (pCpu->pCurInstr->uOpcode)
3997 {
3998 case OP_JO:
3999 case OP_JNO:
4000 case OP_JC:
4001 case OP_JNC:
4002 case OP_JE:
4003 case OP_JNE:
4004 case OP_JBE:
4005 case OP_JNBE:
4006 case OP_JS:
4007 case OP_JNS:
4008 case OP_JP:
4009 case OP_JNP:
4010 case OP_JL:
4011 case OP_JNL:
4012 case OP_JLE:
4013 case OP_JNLE:
4014 case OP_JMP:
4015 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
4016 Assert(pCpu->Param1.fUse & DISUSE_IMMEDIATE32_REL);
4017 if (!(pCpu->Param1.fUse & DISUSE_IMMEDIATE32_REL))
4018 goto failure;
4019
4020 Assert(pCpu->cbInstr == SIZEOF_NEARJUMP32 || pCpu->cbInstr == SIZEOF_NEAR_COND_JUMP32);
4021 if (pCpu->cbInstr != SIZEOF_NEARJUMP32 && pCpu->cbInstr != SIZEOF_NEAR_COND_JUMP32)
4022 goto failure;
4023
4024 if (PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + pCpu->cbInstr))
4025 {
4026 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
4027 AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
4028 rc = VERR_PATCHING_REFUSED;
4029 goto failure;
4030 }
4031
4032 break;
4033
4034 default:
4035 goto failure;
4036 }
4037
4038 // make a copy of the guest code bytes that will be overwritten
4039 Assert(pCpu->cbInstr <= sizeof(pPatch->aPrivInstr));
4040 Assert(pCpu->cbInstr >= SIZEOF_NEARJUMP32);
4041 pPatch->cbPatchJump = pCpu->cbInstr;
4042
4043 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
4044 AssertRC(rc);
4045
4046 /* Now insert a jump in the guest code. */
4047 /*
4048 * A conflict jump patch needs to be treated differently; we'll just replace the relative jump address with one that
4049 * references the target instruction in the conflict patch.
4050 */
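    /*
     * Illustrative example (addresses assumed): a 'jne' at guest address G targeting T, where T lies
     * inside code already covered by the conflicting patch, is rewritten so its displacement lands on
     * the patch-memory instruction that corresponds to T instead of on the overwritten guest bytes.
     */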
4051 RTRCPTR pJmpDest = patmR3GuestGCPtrToPatchGCPtrSimple(pVM, pInstrGC + pCpu->cbInstr + (int32_t)pCpu->Param1.uValue);
4052
4053 AssertMsg(pJmpDest, ("patmR3GuestGCPtrToPatchGCPtrSimple failed for %RRv\n", pInstrGC + pCpu->cbInstr + (int32_t)pCpu->Param1.uValue));
4054 pPatch->pPatchJumpDestGC = pJmpDest;
4055
4056 PATMP2GLOOKUPREC cacheRec;
4057 RT_ZERO(cacheRec);
4058 cacheRec.pPatch = pPatch;
4059
4060 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
4061 /* Free leftover lock if any. */
4062 if (cacheRec.Lock.pvMap)
4063 {
4064 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4065 cacheRec.Lock.pvMap = NULL;
4066 }
4067 AssertRC(rc);
4068 if (RT_FAILURE(rc))
4069 goto failure;
4070
4071 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
4072
4073 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
4074 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
4075
4076 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledJump);
4077
4078 /* Lowest and highest address for write monitoring. */
4079 pPatch->pInstrGCLowest = pInstrGC;
4080 pPatch->pInstrGCHighest = pInstrGC + pPatch->cbPatchJump;
4081
4082 pPatch->uState = PATCH_ENABLED;
4083 return VINF_SUCCESS;
4084
4085failure:
4086 /* Turn this cli patch into a dummy. */
4087 pPatch->uState = PATCH_REFUSED;
4088
4089 return rc;
4090}
4091#endif /* PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES */
4092
4093
4094/**
4095 * Gives hint to PATM about supervisor guest instructions
4096 *
4097 * @returns VBox status code.
4098 * @param pVM Pointer to the VM.
4099 * @param pInstrGC Guest context pointer to privileged instruction
4100 * @param flags Patch flags
4101 */
4102VMMR3_INT_DECL(int) PATMR3AddHint(PVM pVM, RTRCPTR pInstrGC, uint32_t flags)
4103{
4104 Assert(pInstrGC);
4105 Assert(flags == PATMFL_CODE32);
4106
4107 Log(("PATMR3AddHint %RRv\n", pInstrGC));
4108 return PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_INSTR_HINT);
4109}
4110
4111/**
4112 * Patch privileged instruction at specified location
4113 *
4114 * @returns VBox status code.
4115 * @param pVM Pointer to the VM.
4116 * @param pInstrGC Guest context pointer to privileged instruction (0:32 flat address)
4117 * @param flags Patch flags
4118 *
4119 * @note returns failure if patching is not allowed or possible
4120 */
4121VMMR3_INT_DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
4122{
4123 DISCPUSTATE cpu;
4124 R3PTRTYPE(uint8_t *) pInstrHC;
4125 uint32_t cbInstr;
4126 PPATMPATCHREC pPatchRec;
4127 PCPUMCTX pCtx = 0;
4128 bool disret;
4129 int rc;
4130 PVMCPU pVCpu = VMMGetCpu0(pVM);
4131 LogFlow(("PATMR3InstallPatch: %08x (%#llx)\n", pInstrGC, flags));
4132
4133 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
4134
4135 if ( !pVM
4136 || pInstrGC == 0
4137 || (flags & ~(PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_SYSENTER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_REPLACE_FUNCTION_CALL|PATMFL_GUEST_SPECIFIC|PATMFL_INT3_REPLACEMENT|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_MMIO_ACCESS|PATMFL_TRAMPOLINE|PATMFL_INSTR_HINT|PATMFL_JUMP_CONFLICT)))
4138 {
4139 AssertFailed();
4140 return VERR_INVALID_PARAMETER;
4141 }
4142
4143 if (PATMIsEnabled(pVM) == false)
4144 return VERR_PATCHING_REFUSED;
4145
4146 /* Test for patch conflict only with patches that actually change guest code. */
4147 if (!(flags & (PATMFL_GUEST_SPECIFIC|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAMPOLINE)))
4148 {
4149 PPATCHINFO pConflictPatch = patmFindActivePatchByEntrypoint(pVM, pInstrGC);
4150 AssertReleaseMsg(pConflictPatch == 0, ("Unable to patch overwritten instruction at %RRv (%RRv)\n", pInstrGC, pConflictPatch->pPrivInstrGC));
4151 if (pConflictPatch != 0)
4152 return VERR_PATCHING_REFUSED;
4153 }
4154
4155 if (!(flags & PATMFL_CODE32))
4156 {
4157 /** @todo Only 32 bits code right now */
4158 AssertMsgFailed(("PATMR3InstallPatch: We don't support 16 bits code at this moment!!\n"));
4159 return VERR_NOT_IMPLEMENTED;
4160 }
4161
4162 /* We ran out of patch memory; don't bother anymore. */
4163 if (pVM->patm.s.fOutOfMemory == true)
4164 return VERR_PATCHING_REFUSED;
4165
4166#if 1 /* DONT COMMIT ENABLED! */
4167 /* Blacklisted NT4SP1 areas - debugging why we sometimes crash early on. */
4168 if ( 0
4169 //|| (pInstrGC - 0x80010000U) < 0x10000U // NT4SP1 HAL
4170 //|| (pInstrGC - 0x80010000U) < 0x5000U // NT4SP1 HAL
4171 //|| (pInstrGC - 0x80013000U) < 0x2000U // NT4SP1 HAL
4172 //|| (pInstrGC - 0x80014000U) < 0x1000U // NT4SP1 HAL
4173 //|| (pInstrGC - 0x80014000U) < 0x800U // NT4SP1 HAL
4174 //|| (pInstrGC - 0x80014400U) < 0x400U // NT4SP1 HAL
4175 //|| (pInstrGC - 0x80014400U) < 0x200U // NT4SP1 HAL
4176 //|| (pInstrGC - 0x80014400U) < 0x100U // NT4SP1 HAL
4177 //|| (pInstrGC - 0x80014500U) < 0x100U // NT4SP1 HAL - negative
4178 //|| (pInstrGC - 0x80014400U) < 0x80U // NT4SP1 HAL
4179 //|| (pInstrGC - 0x80014400U) < 0x80U // NT4SP1 HAL
4180 //|| (pInstrGC - 0x80014440U) < 0x40U // NT4SP1 HAL
4181 //|| (pInstrGC - 0x80014440U) < 0x20U // NT4SP1 HAL
4182 || pInstrGC == 0x80014447 /* KfLowerIrql */
4183 || 0)
4184 {
4185 Log(("PATMR3InstallPatch: %08x is blacklisted\n", pInstrGC));
4186 return VERR_PATCHING_REFUSED;
4187 }
4188#endif
4189
4190 /* Make sure the code selector is wide open; otherwise refuse. */
4191 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
4192 if (CPUMGetGuestCPL(pVCpu) == 0)
4193 {
4194 RTRCPTR pInstrGCFlat = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pInstrGC);
4195 if (pInstrGCFlat != pInstrGC)
4196 {
4197 Log(("PATMR3InstallPatch: code selector not wide open: %04x:%RRv != %RRv eflags=%08x\n", pCtx->cs.Sel, pInstrGCFlat, pInstrGC, pCtx->eflags.u32));
4198 return VERR_PATCHING_REFUSED;
4199 }
4200 }
4201
4202 /* Note: the OpenBSD specific check will break if we allow additional patches to be installed (int 3). */
4203 if (!(flags & PATMFL_GUEST_SPECIFIC))
4204 {
4205 /* New code. Make sure CSAM has a go at it first. */
4206 CSAMR3CheckCode(pVM, pInstrGC);
4207 }
4208
4209 /* Note: obsolete */
4210 if ( PATMIsPatchGCAddr(pVM, pInstrGC)
4211 && (flags & PATMFL_MMIO_ACCESS))
4212 {
4213 RTRCUINTPTR offset;
4214 void *pvPatchCoreOffset;
4215
4216 /* Find the patch record. */
4217 offset = pInstrGC - pVM->patm.s.pPatchMemGC;
4218 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
4219 if (pvPatchCoreOffset == NULL)
4220 {
4221 AssertMsgFailed(("PATMR3InstallPatch: patch not found at address %RRv!!\n", pInstrGC));
4222 return VERR_PATCH_NOT_FOUND; //fatal error
4223 }
4224 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
4225
4226 return patmPatchPATMMMIOInstr(pVM, pInstrGC, &pPatchRec->patch);
4227 }
4228
4229 AssertReturn(!PATMIsPatchGCAddr(pVM, pInstrGC), VERR_PATCHING_REFUSED);
4230
4231 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4232 if (pPatchRec)
4233 {
4234 Assert(!(flags & PATMFL_TRAMPOLINE));
4235
4236 /* Hints about existing patches are ignored. */
4237 if (flags & PATMFL_INSTR_HINT)
4238 return VERR_PATCHING_REFUSED;
4239
4240 if (pPatchRec->patch.uState == PATCH_DISABLE_PENDING)
4241 {
4242 Log(("PATMR3InstallPatch: disable operation is pending for patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
4243 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
4244 Assert(pPatchRec->patch.uState == PATCH_DISABLED);
4245 }
4246
4247 if (pPatchRec->patch.uState == PATCH_DISABLED)
4248 {
4249 /* A patch, for which we previously received a hint, will be enabled and turned into a normal patch. */
4250 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4251 {
4252 Log(("Enabling HINTED patch %RRv\n", pInstrGC));
4253 pPatchRec->patch.flags &= ~PATMFL_INSTR_HINT;
4254 }
4255 else
4256 Log(("Enabling patch %RRv again\n", pInstrGC));
4257
4258 /** @todo we shouldn't disable and enable patches too often (it's relatively cheap, but pointless if it always happens) */
4259 rc = PATMR3EnablePatch(pVM, pInstrGC);
4260 if (RT_SUCCESS(rc))
4261 return VWRN_PATCH_ENABLED;
4262
4263 return rc;
4264 }
4265 if ( pPatchRec->patch.uState == PATCH_ENABLED
4266 || pPatchRec->patch.uState == PATCH_DIRTY)
4267 {
4268 /*
4269 * The patch might have been overwritten.
4270 */
4271 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
4272 if (pPatchRec->patch.uState != PATCH_REFUSED && pPatchRec->patch.uState != PATCH_UNUSABLE)
4273 {
4274 /* Patch must have been overwritten; remove it and pretend nothing happened. */
4275 Log(("Patch an existing patched instruction?!? (%RRv)\n", pInstrGC));
4276 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_MMIO_ACCESS|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
4277 {
4278 if (flags & PATMFL_IDTHANDLER)
4279 pPatchRec->patch.flags |= (flags & (PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER|PATMFL_INTHANDLER)); /* update the type */
4280
4281 return VERR_PATM_ALREADY_PATCHED; /* already done once */
4282 }
4283 }
4284 rc = PATMR3RemovePatch(pVM, pInstrGC);
4285 if (RT_FAILURE(rc))
4286 return VERR_PATCHING_REFUSED;
4287 }
4288 else
4289 {
4290 AssertMsg(pPatchRec->patch.uState == PATCH_REFUSED || pPatchRec->patch.uState == PATCH_UNUSABLE, ("Patch an existing patched instruction?!? (%RRv, state=%d)\n", pInstrGC, pPatchRec->patch.uState));
4291 /* already tried it once! */
4292 return VERR_PATCHING_REFUSED;
4293 }
4294 }
4295
4296 RTGCPHYS GCPhys;
4297 rc = PGMGstGetPage(pVCpu, pInstrGC, NULL, &GCPhys);
4298 if (rc != VINF_SUCCESS)
4299 {
4300 Log(("PGMGstGetPage failed with %Rrc\n", rc));
4301 return rc;
4302 }
4303 /* Disallow patching instructions inside ROM code; complete function duplication is allowed though. */
4304 if ( !(flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAMPOLINE))
4305 && !PGMPhysIsGCPhysNormal(pVM, GCPhys))
4306 {
4307 Log(("Code at %RGv (phys %RGp) is in a ROM, MMIO or invalid page - refused\n", pInstrGC, GCPhys));
4308 return VERR_PATCHING_REFUSED;
4309 }
4310
4311 /* Initialize cache record for guest address translations. */
4312 bool fInserted;
4313 PATMP2GLOOKUPREC cacheRec;
4314 RT_ZERO(cacheRec);
4315
4316 pInstrHC = patmR3GCVirtToHCVirt(pVM, &cacheRec, pInstrGC);
4317 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
4318
4319 /* Allocate patch record. */
4320 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
4321 if (RT_FAILURE(rc))
4322 {
4323 Log(("Out of memory!!!!\n"));
4324 return VERR_NO_MEMORY;
4325 }
4326 pPatchRec->Core.Key = pInstrGC;
4327 pPatchRec->patch.uState = PATCH_REFUSED; /* default value */
4328 /* Insert patch record into the lookup tree. */
4329 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
4330 Assert(fInserted);
4331
4332 pPatchRec->patch.pPrivInstrGC = pInstrGC;
4333 pPatchRec->patch.flags = flags;
4334 pPatchRec->patch.uOpMode = (flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT;
4335 pPatchRec->patch.pTrampolinePatchesHead = NULL;
4336
4337 pPatchRec->patch.pInstrGCLowest = pInstrGC;
4338 pPatchRec->patch.pInstrGCHighest = pInstrGC;
4339
4340 if (!(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION | PATMFL_IDTHANDLER | PATMFL_SYSENTER | PATMFL_TRAMPOLINE)))
4341 {
4342 /*
4343 * Close proximity to an unusable patch is a possible hint that this patch would turn out to be dangerous too!
4344 */
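        /*
         * Example (illustrative only): a 5 byte patch jump written at pInstrGC would clobber the
         * first bytes of an unusable patch starting at pInstrGC + 3, so this patch is refused too.
         */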
4345 PPATMPATCHREC pPatchNear = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, (pInstrGC + SIZEOF_NEARJUMP32 - 1), false);
4346 if (pPatchNear)
4347 {
4348 if (pPatchNear->patch.uState == PATCH_UNUSABLE && pInstrGC < pPatchNear->patch.pPrivInstrGC && pInstrGC + SIZEOF_NEARJUMP32 > pPatchNear->patch.pPrivInstrGC)
4349 {
4350 Log(("Dangerous patch; would overwrite the unusable patch at %RRv\n", pPatchNear->patch.pPrivInstrGC));
4351
4352 pPatchRec->patch.uState = PATCH_UNUSABLE;
4353 /*
4354 * Leave the new patch in the tree, marked unusable, to prevent us from checking it over and over again.
4355 */
4356 return VERR_PATCHING_REFUSED;
4357 }
4358 }
4359 }
4360
4361 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
4362 if (pPatchRec->patch.pTempInfo == 0)
4363 {
4364 Log(("Out of memory!!!!\n"));
4365 return VERR_NO_MEMORY;
4366 }
4367
4368 disret = patmR3DisInstrNoStrOpMode(pVM, &pPatchRec->patch, pInstrGC, NULL, PATMREAD_ORGCODE, &cpu, &cbInstr);
4369 if (disret == false)
4370 {
4371 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
4372 return VERR_PATCHING_REFUSED;
4373 }
4374
4375 AssertMsg(cbInstr <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", cbInstr));
4376 if (cbInstr > MAX_INSTR_SIZE)
4377 return VERR_PATCHING_REFUSED;
4378
4379 pPatchRec->patch.cbPrivInstr = cbInstr;
4380 pPatchRec->patch.opcode = cpu.pCurInstr->uOpcode;
4381
4382 /* Restricted hinting for now. */
4383 Assert(!(flags & PATMFL_INSTR_HINT) || cpu.pCurInstr->uOpcode == OP_CLI);
4384
4385 /* Initialize cache record patch pointer. */
4386 cacheRec.pPatch = &pPatchRec->patch;
4387
4388 /* Allocate statistics slot */
4389 if (pVM->patm.s.uCurrentPatchIdx < PATM_STAT_MAX_COUNTERS)
4390 {
4391 pPatchRec->patch.uPatchIdx = pVM->patm.s.uCurrentPatchIdx++;
4392 }
4393 else
4394 {
4395 Log(("WARNING: Patch index wrap around!!\n"));
4396 pPatchRec->patch.uPatchIdx = PATM_STAT_INDEX_DUMMY;
4397 }
4398
4399 if (pPatchRec->patch.flags & PATMFL_TRAPHANDLER)
4400 {
4401 rc = patmInstallTrapTrampoline(pVM, pInstrGC, pPatchRec, &cacheRec);
4402 }
4403 else
4404 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION ))
4405 {
4406 rc = patmDuplicateFunction(pVM, pInstrGC, pPatchRec, &cacheRec);
4407 }
4408 else
4409 if (pPatchRec->patch.flags & PATMFL_TRAMPOLINE)
4410 {
4411 rc = patmCreateTrampoline(pVM, pInstrGC, pPatchRec);
4412 }
4413 else
4414 if (pPatchRec->patch.flags & PATMFL_REPLACE_FUNCTION_CALL)
4415 {
4416 rc = patmReplaceFunctionCall(pVM, &cpu, pInstrGC, &cacheRec);
4417 }
4418 else
4419 if (pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT)
4420 {
4421 rc = patmR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4422 }
4423 else
4424 if (pPatchRec->patch.flags & PATMFL_MMIO_ACCESS)
4425 {
4426 rc = patmPatchMMIOInstr(pVM, pInstrGC, &cpu, &cacheRec);
4427 }
4428 else
4429 if (pPatchRec->patch.flags & (PATMFL_IDTHANDLER|PATMFL_SYSENTER))
4430 {
4431 if (pPatchRec->patch.flags & PATMFL_SYSENTER)
4432 pPatchRec->patch.flags |= PATMFL_IDTHANDLER; /* we treat a sysenter handler as an IDT handler */
4433
4434 rc = patmIdtHandler(pVM, pInstrGC, cbInstr, pPatchRec, &cacheRec);
4435#ifdef VBOX_WITH_STATISTICS
4436 if ( rc == VINF_SUCCESS
4437 && (pPatchRec->patch.flags & PATMFL_SYSENTER))
4438 {
4439 pVM->patm.s.uSysEnterPatchIdx = pPatchRec->patch.uPatchIdx;
4440 }
4441#endif
4442 }
4443 else
4444 if (pPatchRec->patch.flags & PATMFL_GUEST_SPECIFIC)
4445 {
4446 switch (cpu.pCurInstr->uOpcode)
4447 {
4448 case OP_SYSENTER:
4449 case OP_PUSH:
4450 rc = patmR3InstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4451 if (rc == VINF_SUCCESS)
4452 {
4453 if (rc == VINF_SUCCESS)
4454 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4455 return rc;
4456 }
4457 break;
4458
4459 default:
4460 rc = VERR_NOT_IMPLEMENTED;
4461 break;
4462 }
4463 }
4464 else
4465 {
4466 switch (cpu.pCurInstr->uOpcode)
4467 {
4468 case OP_SYSENTER:
4469 rc = patmR3InstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4470 if (rc == VINF_SUCCESS)
4471 {
4472 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4473 return VINF_SUCCESS;
4474 }
4475 break;
4476
4477#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
4478 case OP_JO:
4479 case OP_JNO:
4480 case OP_JC:
4481 case OP_JNC:
4482 case OP_JE:
4483 case OP_JNE:
4484 case OP_JBE:
4485 case OP_JNBE:
4486 case OP_JS:
4487 case OP_JNS:
4488 case OP_JP:
4489 case OP_JNP:
4490 case OP_JL:
4491 case OP_JNL:
4492 case OP_JLE:
4493 case OP_JNLE:
4494 case OP_JECXZ:
4495 case OP_LOOP:
4496 case OP_LOOPNE:
4497 case OP_LOOPE:
4498 case OP_JMP:
4499 if (pPatchRec->patch.flags & PATMFL_JUMP_CONFLICT)
4500 {
4501 rc = patmPatchJump(pVM, pInstrGC, pInstrHC, &cpu, pPatchRec);
4502 break;
4503 }
4504 return VERR_NOT_IMPLEMENTED;
4505#endif
4506
4507 case OP_PUSHF:
4508 case OP_CLI:
4509 Log(("PATMR3InstallPatch %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4510 rc = patmR3PatchBlock(pVM, pInstrGC, pInstrHC, cpu.pCurInstr->uOpcode, cbInstr, pPatchRec);
4511 break;
4512
4513#ifndef VBOX_WITH_SAFE_STR
4514 case OP_STR:
4515#endif
4516 case OP_SGDT:
4517 case OP_SLDT:
4518 case OP_SIDT:
4519 case OP_CPUID:
4520 case OP_LSL:
4521 case OP_LAR:
4522 case OP_SMSW:
4523 case OP_VERW:
4524 case OP_VERR:
4525 case OP_IRET:
4526#ifdef VBOX_WITH_RAW_RING1
4527 case OP_MOV:
4528#endif
4529 rc = patmR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4530 break;
4531
4532 default:
4533 return VERR_NOT_IMPLEMENTED;
4534 }
4535 }
4536
4537 if (rc != VINF_SUCCESS)
4538 {
4539 if (pPatchRec && pPatchRec->patch.nrPatch2GuestRecs)
4540 {
4541 patmEmptyTreeU32(pVM, &pPatchRec->patch.Patch2GuestAddrTree);
4542 pPatchRec->patch.nrPatch2GuestRecs = 0;
4543 }
4544 pVM->patm.s.uCurrentPatchIdx--;
4545 }
4546 else
4547 {
4548 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
4549 AssertRCReturn(rc, rc);
4550
4551 /* Keep track upper and lower boundaries of patched instructions */
4552 if (pPatchRec->patch.pInstrGCLowest < pVM->patm.s.pPatchedInstrGCLowest)
4553 pVM->patm.s.pPatchedInstrGCLowest = pPatchRec->patch.pInstrGCLowest;
4554 if (pPatchRec->patch.pInstrGCHighest > pVM->patm.s.pPatchedInstrGCHighest)
4555 pVM->patm.s.pPatchedInstrGCHighest = pPatchRec->patch.pInstrGCHighest;
4556
4557 Log(("Patch lowest %RRv highest %RRv\n", pPatchRec->patch.pInstrGCLowest, pPatchRec->patch.pInstrGCHighest));
4558 Log(("Global lowest %RRv highest %RRv\n", pVM->patm.s.pPatchedInstrGCLowest, pVM->patm.s.pPatchedInstrGCHighest));
4559
4560 STAM_COUNTER_ADD(&pVM->patm.s.StatInstalled, 1);
4561 STAM_COUNTER_ADD(&pVM->patm.s.StatPATMMemoryUsed, pPatchRec->patch.cbPatchBlockSize);
4562
4563 rc = VINF_SUCCESS;
4564
4565 /* Patch hints are not enabled by default. Only when they are actually encountered. */
4566 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4567 {
4568 rc = PATMR3DisablePatch(pVM, pInstrGC);
4569 AssertRCReturn(rc, rc);
4570 }
4571
4572#ifdef VBOX_WITH_STATISTICS
4573 /* Register statistics counter */
4574 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
4575 {
4576 STAMR3RegisterCallback(pVM, &pPatchRec->patch, STAMVISIBILITY_NOT_GUI, STAMUNIT_GOOD_BAD, patmResetStat, patmPrintStat, "Patch statistics",
4577 "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4578#ifndef DEBUG_sandervl
4579 /* Full breakdown for the GUI. */
4580 STAMR3RegisterF(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx], STAMTYPE_RATIO_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_GOOD_BAD, PATMPatchType(pVM, &pPatchRec->patch),
4581 "/PATM/Stats/PatchBD/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4582 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchBlockSize,STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchBlockSize", pPatchRec->patch.pPrivInstrGC);
4583 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchJump, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchJump", pPatchRec->patch.pPrivInstrGC);
4584 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPrivInstr, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPrivInstr", pPatchRec->patch.pPrivInstrGC);
4585 STAMR3RegisterF(pVM, &pPatchRec->patch.cCodeWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cCodeWrites", pPatchRec->patch.pPrivInstrGC);
4586 STAMR3RegisterF(pVM, &pPatchRec->patch.cInvalidWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cInvalidWrites", pPatchRec->patch.pPrivInstrGC);
4587 STAMR3RegisterF(pVM, &pPatchRec->patch.cTraps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cTraps", pPatchRec->patch.pPrivInstrGC);
4588 STAMR3RegisterF(pVM, &pPatchRec->patch.flags, STAMTYPE_X32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/flags", pPatchRec->patch.pPrivInstrGC);
4589 STAMR3RegisterF(pVM, &pPatchRec->patch.nrJumpRecs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrJumpRecs", pPatchRec->patch.pPrivInstrGC);
4590 STAMR3RegisterF(pVM, &pPatchRec->patch.nrFixups, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrFixups", pPatchRec->patch.pPrivInstrGC);
4591 STAMR3RegisterF(pVM, &pPatchRec->patch.opcode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/opcode", pPatchRec->patch.pPrivInstrGC);
4592 STAMR3RegisterF(pVM, &pPatchRec->patch.uOldState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOldState", pPatchRec->patch.pPrivInstrGC);
4593 STAMR3RegisterF(pVM, &pPatchRec->patch.uOpMode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOpMode", pPatchRec->patch.pPrivInstrGC);
4594 /// @todo change the state to be a callback so we can get a state mnemonic instead.
4595 STAMR3RegisterF(pVM, &pPatchRec->patch.uState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uState", pPatchRec->patch.pPrivInstrGC);
4596#endif
4597 }
4598#endif
4599
4600 /* Add debug symbol. */
4601 patmR3DbgAddPatch(pVM, pPatchRec);
4602 }
4603 /* Free leftover lock if any. */
4604 if (cacheRec.Lock.pvMap)
4605 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4606 return rc;
4607}
4608
4609/**
4610 * Query instruction size
4611 *
4612 * @returns VBox status code.
4613 * @param pVM Pointer to the VM.
4614 * @param pPatch Patch record
4615 * @param pInstrGC Instruction address
4616 */
4617static uint32_t patmGetInstrSize(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
4618{
4619 uint8_t *pInstrHC;
4620 PGMPAGEMAPLOCK Lock;
4621
4622 int rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pInstrGC, (const void **)&pInstrHC, &Lock);
4623 if (rc == VINF_SUCCESS)
4624 {
4625 DISCPUSTATE cpu;
4626 bool disret;
4627 uint32_t cbInstr;
4628
4629 disret = patmR3DisInstr(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE | PATMREAD_NOCHECK, &cpu, &cbInstr);
4630 PGMPhysReleasePageMappingLock(pVM, &Lock);
4631 if (disret)
4632 return cbInstr;
4633 }
4634 return 0;
4635}
4636
4637/**
4638 * Add patch to page record
4639 *
4640 * @returns VBox status code.
4641 * @param pVM Pointer to the VM.
4642 * @param pPage Page address
4643 * @param pPatch Patch record
4644 */
4645int patmAddPatchToPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4646{
4647 PPATMPATCHPAGE pPatchPage;
4648 int rc;
4649
4650 Log(("patmAddPatchToPage: insert patch %RHv to page %RRv\n", pPatch, pPage));
4651
4652 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4653 if (pPatchPage)
4654 {
4655 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4656 if (pPatchPage->cCount == pPatchPage->cMaxPatches)
4657 {
4658 uint32_t cMaxPatchesOld = pPatchPage->cMaxPatches;
4659 PPATCHINFO *papPatchOld = pPatchPage->papPatch;
4660
4661 pPatchPage->cMaxPatches += PATMPATCHPAGE_PREALLOC_INCREMENT;
4662 rc = MMHyperAlloc(pVM, sizeof(pPatchPage->papPatch[0]) * pPatchPage->cMaxPatches, 0, MM_TAG_PATM_PATCH,
4663 (void **)&pPatchPage->papPatch);
4664 if (RT_FAILURE(rc))
4665 {
4666 Log(("Out of memory!!!!\n"));
4667 return VERR_NO_MEMORY;
4668 }
4669 memcpy(pPatchPage->papPatch, papPatchOld, cMaxPatchesOld * sizeof(pPatchPage->papPatch[0]));
4670 MMHyperFree(pVM, papPatchOld);
4671 }
4672 pPatchPage->papPatch[pPatchPage->cCount] = pPatch;
4673 pPatchPage->cCount++;
4674 }
4675 else
4676 {
4677 bool fInserted;
4678
4679 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHPAGE), 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage);
4680 if (RT_FAILURE(rc))
4681 {
4682 Log(("Out of memory!!!!\n"));
4683 return VERR_NO_MEMORY;
4684 }
4685 pPatchPage->Core.Key = pPage;
4686 pPatchPage->cCount = 1;
4687 pPatchPage->cMaxPatches = PATMPATCHPAGE_PREALLOC_INCREMENT;
4688
4689 rc = MMHyperAlloc(pVM, sizeof(pPatchPage->papPatch[0]) * PATMPATCHPAGE_PREALLOC_INCREMENT, 0, MM_TAG_PATM_PATCH,
4690 (void **)&pPatchPage->papPatch);
4691 if (RT_FAILURE(rc))
4692 {
4693 Log(("Out of memory!!!!\n"));
4694 MMHyperFree(pVM, pPatchPage);
4695 return VERR_NO_MEMORY;
4696 }
4697 pPatchPage->papPatch[0] = pPatch;
4698
4699 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, &pPatchPage->Core);
4700 Assert(fInserted);
4701 pVM->patm.s.cPageRecords++;
4702
4703 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageInserted);
4704 }
4705 CSAMR3MonitorPage(pVM, pPage, CSAM_TAG_PATM);
4706
4707 /* Get the closest guest instruction (from below) */
4708 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4709 Assert(pGuestToPatchRec);
4710 if (pGuestToPatchRec)
4711 {
4712 LogFlow(("patmAddPatchToPage: lowest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pLowestAddrGC));
4713 if ( pPatchPage->pLowestAddrGC == 0
4714 || pPatchPage->pLowestAddrGC > (RTRCPTR)pGuestToPatchRec->Core.Key)
4715 {
4716 RTRCUINTPTR offset;
4717
4718 pPatchPage->pLowestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4719
4720 offset = pPatchPage->pLowestAddrGC & PAGE_OFFSET_MASK;
4721 /* If we're too close to the page boundary, then make sure an
4722 instruction from the previous page doesn't cross the
4723 boundary itself. */
4724 if (offset && offset < MAX_INSTR_SIZE)
4725 {
4726 /* Get the closest guest instruction (from above) */
4727 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage-1, false);
4728
4729 if (pGuestToPatchRec)
4730 {
4731 uint32_t size = patmGetInstrSize(pVM, pPatch, (RTRCPTR)pGuestToPatchRec->Core.Key);
4732 if ((RTRCUINTPTR)pGuestToPatchRec->Core.Key + size > pPage)
4733 {
4734 pPatchPage->pLowestAddrGC = pPage;
4735 LogFlow(("patmAddPatchToPage: new lowest %RRv\n", pPatchPage->pLowestAddrGC));
4736 }
4737 }
4738 }
4739 }
4740 }
4741
4742 /* Get the closest guest instruction (from above) */
4743 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage+PAGE_SIZE-1, false);
4744 Assert(pGuestToPatchRec);
4745 if (pGuestToPatchRec)
4746 {
4747 LogFlow(("patmAddPatchToPage: highest patch page address %RRv current highest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pHighestAddrGC));
4748 if ( pPatchPage->pHighestAddrGC == 0
4749 || pPatchPage->pHighestAddrGC <= (RTRCPTR)pGuestToPatchRec->Core.Key)
4750 {
4751 pPatchPage->pHighestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4752 /* Increase by instruction size. */
4753 uint32_t size = patmGetInstrSize(pVM, pPatch, pPatchPage->pHighestAddrGC);
4754//// Assert(size);
4755 pPatchPage->pHighestAddrGC += size;
4756 LogFlow(("patmAddPatchToPage: new highest %RRv\n", pPatchPage->pHighestAddrGC));
4757 }
4758 }
4759
4760 return VINF_SUCCESS;
4761}
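
/*
 * Illustrative sketch (not part of the original source): the
 * pLowestAddrGC/pHighestAddrGC pair maintained above bounds the patched code
 * within the page, so PATMR3PatchWrite below can reject unrelated writes with
 * a single range check.  The helper below is hypothetical and only spells out
 * that check.
 *
 * @code
 *     static bool patmIsWriteInPatchedRange(PPATMPATCHPAGE pPatchPage, RTRCPTR GCPtr, uint32_t cbWrite)
 *     {
 *         return    pPatchPage->pLowestAddrGC  <= (RTRCPTR)((RTRCUINTPTR)GCPtr + cbWrite - 1)
 *                && pPatchPage->pHighestAddrGC >= GCPtr;
 *     }
 * @endcode
 */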
4762
4763/**
4764 * Remove patch from page record
4765 *
4766 * @returns VBox status code.
4767 * @param pVM Pointer to the VM.
4768 * @param pPage Page address
4769 * @param pPatch Patch record
4770 */
4771int patmRemovePatchFromPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4772{
4773 PPATMPATCHPAGE pPatchPage;
4774 int rc;
4775
4776 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4777 Assert(pPatchPage);
4778
4779 if (!pPatchPage)
4780 return VERR_INVALID_PARAMETER;
4781
4782 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4783
4784    Log(("patmRemovePatchFromPage: remove patch %RHv from page %RRv\n", pPatch, pPage));
4785 if (pPatchPage->cCount > 1)
4786 {
4787 uint32_t i;
4788
4789 /* Used by multiple patches */
4790 for (i = 0; i < pPatchPage->cCount; i++)
4791 {
4792 if (pPatchPage->papPatch[i] == pPatch)
4793 {
4794 /* close the gap between the remaining pointers. */
4795 uint32_t cNew = --pPatchPage->cCount;
4796 if (i < cNew)
4797 pPatchPage->papPatch[i] = pPatchPage->papPatch[cNew];
4798 pPatchPage->papPatch[cNew] = NULL;
4799 return VINF_SUCCESS;
4800 }
4801 }
4802 AssertMsgFailed(("Unable to find patch %RHv in page %RRv\n", pPatch, pPage));
4803 }
4804 else
4805 {
4806 PPATMPATCHPAGE pPatchNode;
4807
4808 Log(("patmRemovePatchFromPage %RRv\n", pPage));
4809
4810 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageRemoved);
4811 pPatchNode = (PPATMPATCHPAGE)RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4812 Assert(pPatchNode && pPatchNode == pPatchPage);
4813
4814 Assert(pPatchPage->papPatch);
4815 rc = MMHyperFree(pVM, pPatchPage->papPatch);
4816 AssertRC(rc);
4817 rc = MMHyperFree(pVM, pPatchPage);
4818 AssertRC(rc);
4819 pVM->patm.s.cPageRecords--;
4820 }
4821 return VINF_SUCCESS;
4822}
4823
4824/**
4825 * Insert page records for all guest pages that contain instructions that were recompiled for this patch
4826 *
4827 * @returns VBox status code.
4828 * @param pVM Pointer to the VM.
4829 * @param pPatch Patch record
4830 */
4831int patmInsertPatchPages(PVM pVM, PPATCHINFO pPatch)
4832{
4833 int rc;
4834 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4835
4836 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4837 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4838 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4839
4840 /** @todo optimize better (large gaps between current and next used page) */
4841 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4842 {
4843 /* Get the closest guest instruction (from above) */
4844 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4845 if ( pGuestToPatchRec
4846 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage)
4847 )
4848 {
4849 /* Code in page really patched -> add record */
4850 rc = patmAddPatchToPage(pVM, pPage, pPatch);
4851 AssertRC(rc);
4852 }
4853 }
4854 pPatch->flags |= PATMFL_CODE_MONITORED;
4855 return VINF_SUCCESS;
4856}
4857
4858/**
4859 * Remove page records for all guest pages that contain instructions that were recompiled for this patch
4860 *
4861 * @returns VBox status code.
4862 * @param pVM Pointer to the VM.
4863 * @param pPatch Patch record
4864 */
4865static int patmRemovePatchPages(PVM pVM, PPATCHINFO pPatch)
4866{
4867 int rc;
4868 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4869
4870    /* Remove the page records of pages containing patched instructions from the lookup tree used for detecting self-modifying code. */
4871 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4872 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4873
4874 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4875 {
4876 /* Get the closest guest instruction (from above) */
4877 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4878 if ( pGuestToPatchRec
4879 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage) /** @todo bird: PAGE_ADDRESS is for the current context really. check out these. */
4880 )
4881 {
4882 /* Code in page really patched -> remove record */
4883 rc = patmRemovePatchFromPage(pVM, pPage, pPatch);
4884 AssertRC(rc);
4885 }
4886 }
4887 pPatch->flags &= ~PATMFL_CODE_MONITORED;
4888 return VINF_SUCCESS;
4889}
4890
4891/**
4892 * Notifies PATM about a (potential) write to code that has been patched.
4893 *
4894 * @returns VBox status code.
4895 * @param pVM Pointer to the VM.
4896 * @param GCPtr GC pointer to write address
4897 * @param cbWrite       Number of bytes to write
4898 *
4899 */
4900VMMR3_INT_DECL(int) PATMR3PatchWrite(PVM pVM, RTRCPTR GCPtr, uint32_t cbWrite)
4901{
4902 RTRCUINTPTR pWritePageStart, pWritePageEnd, pPage;
4903
4904 Log(("PATMR3PatchWrite %RRv %x\n", GCPtr, cbWrite));
4905
4906 Assert(VM_IS_EMT(pVM));
4907 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
4908
4909 /* Quick boundary check */
4910 if ( GCPtr < pVM->patm.s.pPatchedInstrGCLowest
4911 || GCPtr > pVM->patm.s.pPatchedInstrGCHighest
4912 )
4913 return VINF_SUCCESS;
4914
4915 STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWrite, a);
4916
4917 pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
4918 pWritePageEnd = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;
4919
4920 for (pPage = pWritePageStart; pPage <= pWritePageEnd; pPage += PAGE_SIZE)
4921 {
4922loop_start:
4923 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4924 if (pPatchPage)
4925 {
4926 uint32_t i;
4927 bool fValidPatchWrite = false;
4928
4929 /* Quick check to see if the write is in the patched part of the page */
4930 if ( pPatchPage->pLowestAddrGC > (RTRCPTR)((RTRCUINTPTR)GCPtr + cbWrite - 1)
4931 || pPatchPage->pHighestAddrGC < GCPtr)
4932 {
4933 break;
4934 }
4935
4936 for (i=0;i<pPatchPage->cCount;i++)
4937 {
4938 if (pPatchPage->papPatch[i])
4939 {
4940 PPATCHINFO pPatch = pPatchPage->papPatch[i];
4941 RTRCPTR pPatchInstrGC;
4942 //unused: bool fForceBreak = false;
4943
4944 Assert(pPatchPage->papPatch[i]->flags & PATMFL_CODE_MONITORED);
4945 /** @todo inefficient and includes redundant checks for multiple pages. */
4946 for (uint32_t j=0; j<cbWrite; j++)
4947 {
4948 RTRCPTR pGuestPtrGC = (RTRCPTR)((RTRCUINTPTR)GCPtr + j);
4949
4950 if ( pPatch->cbPatchJump
4951 && pGuestPtrGC >= pPatch->pPrivInstrGC
4952 && pGuestPtrGC < pPatch->pPrivInstrGC + pPatch->cbPatchJump)
4953 {
4954 /* The guest is about to overwrite the 5 byte jump to patch code. Remove the patch. */
4955 Log(("PATMR3PatchWrite: overwriting jump to patch code -> remove patch.\n"));
4956 int rc = PATMR3RemovePatch(pVM, pPatch->pPrivInstrGC);
4957 if (rc == VINF_SUCCESS)
4958 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4959 goto loop_start;
4960
4961 continue;
4962 }
4963
4964 /* Find the closest instruction from below; the above quick check ensured that we are indeed in patched code */
4965 pPatchInstrGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4966 if (!pPatchInstrGC)
4967 {
4968 RTRCPTR pClosestInstrGC;
4969 uint32_t size;
4970
4971 pPatchInstrGC = patmGuestGCPtrToClosestPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4972 if (pPatchInstrGC)
4973 {
4974 pClosestInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchInstrGC);
4975 Assert(pClosestInstrGC <= pGuestPtrGC);
4976 size = patmGetInstrSize(pVM, pPatch, pClosestInstrGC);
4977 /* Check if this is not a write into a gap between two patches */
4978 if (pClosestInstrGC + size - 1 < pGuestPtrGC)
4979 pPatchInstrGC = 0;
4980 }
4981 }
4982 if (pPatchInstrGC)
4983 {
4984 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
4985
4986 fValidPatchWrite = true;
4987
4988 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
4989 Assert(pPatchToGuestRec);
4990 if (pPatchToGuestRec && !pPatchToGuestRec->fDirty)
4991 {
4992 Log(("PATMR3PatchWrite: Found patched instruction %RRv -> %RRv\n", pGuestPtrGC, pPatchInstrGC));
4993
4994 if (++pPatch->cCodeWrites > PATM_MAX_CODE_WRITES)
4995 {
4996 LogRel(("PATM: Disable block at %RRv - write %RRv-%RRv\n", pPatch->pPrivInstrGC, pGuestPtrGC, pGuestPtrGC+cbWrite));
4997
4998 patmR3MarkDirtyPatch(pVM, pPatch);
4999
5000 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
5001 goto loop_start;
5002 }
5003 else
5004 {
5005 /* Replace the patch instruction with a breakpoint; when it's hit, then we'll attempt to recompile the instruction again. */
5006 uint8_t *pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pPatchInstrGC);
5007
5008 pPatchToGuestRec->u8DirtyOpcode = *pInstrHC;
5009 pPatchToGuestRec->fDirty = true;
5010
5011 *pInstrHC = 0xCC;
5012
5013 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirty);
5014 }
5015 }
5016 /* else already marked dirty */
5017 }
5018 }
5019 }
5020 } /* for each patch */
5021
5022 if (fValidPatchWrite == false)
5023 {
5024 /* Write to a part of the page that either:
5025 * - doesn't contain any code (shared code/data); rather unlikely
5026 * - old code page that's no longer in active use.
5027 */
5028invalid_write_loop_start:
5029 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
5030
5031 if (pPatchPage)
5032 {
5033 for (i=0;i<pPatchPage->cCount;i++)
5034 {
5035 PPATCHINFO pPatch = pPatchPage->papPatch[i];
5036
5037 if (pPatch->cInvalidWrites > PATM_MAX_INVALID_WRITES)
5038 {
5039 /* Note: possibly dangerous assumption that all future writes will be harmless. */
5040 if (pPatch->flags & PATMFL_IDTHANDLER)
5041 {
5042 LogRel(("PATM: Stop monitoring IDT handler pages at %RRv - invalid write %RRv-%RRv (this is not a fatal error)\n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
5043
5044 Assert(pPatch->flags & PATMFL_CODE_MONITORED);
5045 int rc = patmRemovePatchPages(pVM, pPatch);
5046 AssertRC(rc);
5047 }
5048 else
5049 {
5050 LogRel(("PATM: Disable block at %RRv - invalid write %RRv-%RRv \n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
5051 patmR3MarkDirtyPatch(pVM, pPatch);
5052 }
5053 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
5054 goto invalid_write_loop_start;
5055 }
5056 } /* for */
5057 }
5058 }
5059 }
5060 }
5061 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWrite, a);
5062 return VINF_SUCCESS;
5063
5064}
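
/*
 * Illustrative sketch (not part of the original source): a write monitor that
 * detects a guest write into monitored code would simply forward the range to
 * PATM; GCPtrWrite and cbWrite are hypothetical names for the write address
 * and size.
 *
 * @code
 *     int rc = PATMR3PatchWrite(pVM, GCPtrWrite, cbWrite);
 *     AssertRC(rc);
 * @endcode
 */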
5065
5066/**
5067 * Disable all patches in a flushed page
5068 *
5069 * @returns VBox status code
5070 * @param pVM Pointer to the VM.
5071 * @param addr GC address of the page to flush
5072 * @note Currently only called by CSAMR3FlushPage; optimization to avoid
5073 * having to double check if the physical address has changed
5074 */
5075VMMR3_INT_DECL(int) PATMR3FlushPage(PVM pVM, RTRCPTR addr)
5076{
5077 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5078
5079 addr &= PAGE_BASE_GC_MASK;
5080
5081 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, addr);
5082 if (pPatchPage)
5083 {
5084 int i;
5085
5086 /* From top to bottom as the array is modified by PATMR3MarkDirtyPatch. */
5087 for (i=(int)pPatchPage->cCount-1;i>=0;i--)
5088 {
5089 if (pPatchPage->papPatch[i])
5090 {
5091 PPATCHINFO pPatch = pPatchPage->papPatch[i];
5092
5093 Log(("PATMR3FlushPage %RRv remove patch at %RRv\n", addr, pPatch->pPrivInstrGC));
5094 patmR3MarkDirtyPatch(pVM, pPatch);
5095 }
5096 }
5097 STAM_COUNTER_INC(&pVM->patm.s.StatFlushed);
5098 }
5099 return VINF_SUCCESS;
5100}
5101
5102/**
5103 * Checks whether the instruction at the specified address has already been patched.
5104 *
5105 * @returns boolean, patched or not
5106 * @param pVM Pointer to the VM.
5107 * @param pInstrGC Guest context pointer to instruction
5108 */
5109VMMR3_INT_DECL(bool) PATMR3HasBeenPatched(PVM pVM, RTRCPTR pInstrGC)
5110{
5111 Assert(!HMIsEnabled(pVM));
5112 PPATMPATCHREC pPatchRec;
5113 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5114 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
5115 return true;
5116 return false;
5117}
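
/*
 * Illustrative sketch (not part of the original source): callers can use this
 * as a cheap pre-check before attempting to install a new patch at pInstrGC.
 *
 * @code
 *     if (PATMR3HasBeenPatched(pVM, pInstrGC))
 *         Log(("%RRv already has an enabled patch\n", pInstrGC));
 * @endcode
 */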
5118
5119/**
5120 * Query the opcode of the original code that was overwritten by the 5-byte patch jump.
5121 *
5122 * @returns VBox status code.
5123 * @param pVM Pointer to the VM.
5124 * @param pInstrGC GC address of instr
5125 * @param pByte opcode byte pointer (OUT)
5126 *
5127 */
5128VMMR3DECL(int) PATMR3QueryOpcode(PVM pVM, RTRCPTR pInstrGC, uint8_t *pByte)
5129{
5130 PPATMPATCHREC pPatchRec;
5131
5132 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
5133
5134 /* Shortcut. */
5135 if (!PATMIsEnabled(pVM))
5136 return VERR_PATCH_NOT_FOUND;
5137 Assert(!HMIsEnabled(pVM));
5138 if ( pInstrGC < pVM->patm.s.pPatchedInstrGCLowest
5139 || pInstrGC > pVM->patm.s.pPatchedInstrGCHighest)
5140 return VERR_PATCH_NOT_FOUND;
5141
5142 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5143 // if the patch is enabled and the pointer lies within 5 bytes of this priv instr ptr, then we've got a hit!
5144 if ( pPatchRec
5145 && pPatchRec->patch.uState == PATCH_ENABLED
5146 && pInstrGC >= pPatchRec->patch.pPrivInstrGC
5147 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5148 {
5149 RTRCPTR offset = pInstrGC - pPatchRec->patch.pPrivInstrGC;
5150 *pByte = pPatchRec->patch.aPrivInstr[offset];
5151
5152 if (pPatchRec->patch.cbPatchJump == 1)
5153 {
5154 Log(("PATMR3QueryOpcode: returning opcode %2X for instruction at %RRv\n", *pByte, pInstrGC));
5155 }
5156 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5157 return VINF_SUCCESS;
5158 }
5159 return VERR_PATCH_NOT_FOUND;
5160}
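
/*
 * Illustrative sketch (not part of the original source): fetching the original
 * opcode byte that the 5-byte patch jump replaced, e.g. for single-byte
 * inspection; pInstrGC is assumed to point at a patched location.
 *
 * @code
 *     uint8_t bOpcode;
 *     if (RT_SUCCESS(PATMR3QueryOpcode(pVM, pInstrGC, &bOpcode)))
 *         Log(("Original opcode byte at %RRv: %02X\n", pInstrGC, bOpcode));
 * @endcode
 */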
5161
5162/**
5163 * Read instruction bytes of the original code that was overwritten by the
5164 * 5-byte patch jump.
5165 *
5166 * @returns VINF_SUCCESS or VERR_PATCH_NOT_FOUND.
5167 * @param pVM Pointer to the VM.
5168 * @param GCPtrInstr GC address of instr
5169 * @param pbDst The output buffer.
5170 * @param cbToRead The maximum number bytes to read.
5171 * @param pcbRead       Where to return the actual number of bytes read.
5172 */
5173VMMR3_INT_DECL(int) PATMR3ReadOrgInstr(PVM pVM, RTGCPTR32 GCPtrInstr, uint8_t *pbDst, size_t cbToRead, size_t *pcbRead)
5174{
5175 /* Shortcut. */
5176 if (!PATMIsEnabled(pVM))
5177 return VERR_PATCH_NOT_FOUND;
5178 Assert(!HMIsEnabled(pVM));
5179 if ( GCPtrInstr < pVM->patm.s.pPatchedInstrGCLowest
5180 || GCPtrInstr > pVM->patm.s.pPatchedInstrGCHighest)
5181 return VERR_PATCH_NOT_FOUND;
5182
5183 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
5184
5185 /*
5186 * If the patch is enabled and the pointer lies within 5 bytes of this
5187 * priv instr ptr, then we've got a hit!
5188 */
5189 RTGCPTR32 off;
5190 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree,
5191 GCPtrInstr, false /*fAbove*/);
5192 if ( pPatchRec
5193 && pPatchRec->patch.uState == PATCH_ENABLED
5194 && (off = GCPtrInstr - pPatchRec->patch.pPrivInstrGC) < pPatchRec->patch.cbPatchJump)
5195 {
5196 uint8_t const *pbSrc = &pPatchRec->patch.aPrivInstr[off];
5197 uint32_t const cbMax = pPatchRec->patch.cbPatchJump - off;
5198 if (cbToRead > cbMax)
5199 cbToRead = cbMax;
5200 switch (cbToRead)
5201 {
5202 case 5: pbDst[4] = pbSrc[4];
5203 case 4: pbDst[3] = pbSrc[3];
5204 case 3: pbDst[2] = pbSrc[2];
5205 case 2: pbDst[1] = pbSrc[1];
5206 case 1: pbDst[0] = pbSrc[0];
5207 break;
5208 default:
5209 memcpy(pbDst, pbSrc, cbToRead);
5210 }
5211 *pcbRead = cbToRead;
5212
5213 if (pPatchRec->patch.cbPatchJump == 1)
5214 Log(("PATMR3ReadOrgInstr: returning opcode %.*Rhxs for instruction at %RX32\n", cbToRead, pbSrc, GCPtrInstr));
5215 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5216 return VINF_SUCCESS;
5217 }
5218
5219 return VERR_PATCH_NOT_FOUND;
5220}
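
/*
 * Illustrative sketch (not part of the original source): a disassembler read
 * routine could first ask PATM for the original bytes and fall back to a
 * normal guest read when the address is not covered by a patch jump.  Only a
 * rough outline; error handling is omitted.
 *
 * @code
 *     uint8_t abInstr[16];
 *     size_t  cbRead = 0;
 *     int rc = PATMR3ReadOrgInstr(pVM, GCPtrInstr, abInstr, sizeof(abInstr), &cbRead);
 *     if (rc == VERR_PATCH_NOT_FOUND)
 *     {
 *         rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), abInstr, GCPtrInstr, sizeof(abInstr));
 *         cbRead = RT_SUCCESS(rc) ? sizeof(abInstr) : 0;
 *     }
 * @endcode
 */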
5221
5222/**
5223 * Disable patch for privileged instruction at specified location
5224 *
5225 * @returns VBox status code.
5226 * @param pVM Pointer to the VM.
5227 * @param pInstrGC      Guest context pointer to the privileged instruction
5228 *
5229 * @note Returns failure if patching is not allowed or not possible.
5230 *
5231 */
5232VMMR3_INT_DECL(int) PATMR3DisablePatch(PVM pVM, RTRCPTR pInstrGC)
5233{
5234 PPATMPATCHREC pPatchRec;
5235 PPATCHINFO pPatch;
5236
5237 Log(("PATMR3DisablePatch: %RRv\n", pInstrGC));
5238 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5239 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5240 if (pPatchRec)
5241 {
5242 int rc = VINF_SUCCESS;
5243
5244 pPatch = &pPatchRec->patch;
5245
5246 /* Already disabled? */
5247 if (pPatch->uState == PATCH_DISABLED)
5248 return VINF_SUCCESS;
5249
5250 /* Clear the IDT entries for the patch we're disabling. */
5251 /* Note: very important as we clear IF in the patch itself */
5252 /** @todo this needs to be changed */
5253 if (pPatch->flags & PATMFL_IDTHANDLER)
5254 {
5255 uint32_t iGate;
5256
5257 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
5258 if (iGate != (uint32_t)~0)
5259 {
5260 TRPMR3SetGuestTrapHandler(pVM, iGate, TRPM_INVALID_HANDLER);
5261 if (++cIDTHandlersDisabled < 256)
5262 LogRel(("PATM: Disabling IDT %x patch handler %RRv\n", iGate, pInstrGC));
5263 }
5264 }
5265
5266 /* Mark the entry with a breakpoint in case somebody else calls it later on (cli patch used as a function, function, trampoline or idt patches) */
5267 if ( pPatch->pPatchBlockOffset
5268 && pPatch->uState == PATCH_ENABLED)
5269 {
5270 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5271 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5272 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5273 }
5274
5275 /* IDT or function patches haven't changed any guest code. */
5276 if (pPatch->flags & PATMFL_PATCHED_GUEST_CODE)
5277 {
5278 Assert(pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP);
5279 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK)));
5280
5281 if (pPatch->uState != PATCH_REFUSED)
5282 {
5283 uint8_t temp[16];
5284
5285 Assert(pPatch->cbPatchJump < sizeof(temp));
5286
5287 /* Let's first check if the guest code is still the same. */
5288 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5289 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5290 if (rc == VINF_SUCCESS)
5291 {
5292 RTRCINTPTR displ = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);
5293
5294 if ( temp[0] != 0xE9 /* jmp opcode */
5295 || *(RTRCINTPTR *)(&temp[1]) != displ
5296 )
5297 {
5298                    Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5299 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5300 /* Remove it completely */
5301 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5302 rc = PATMR3RemovePatch(pVM, pInstrGC);
5303 AssertRC(rc);
5304 return VWRN_PATCH_REMOVED;
5305 }
5306 patmRemoveJumpToPatch(pVM, pPatch);
5307 }
5308 else
5309 {
5310 Log(("PATMR3DisablePatch: unable to disable patch -> mark PATCH_DISABLE_PENDING\n"));
5311 pPatch->uState = PATCH_DISABLE_PENDING;
5312 }
5313 }
5314 else
5315 {
5316 AssertMsgFailed(("Patch was refused!\n"));
5317 return VERR_PATCH_ALREADY_DISABLED;
5318 }
5319 }
5320 else
5321 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5322 {
5323 uint8_t temp[16];
5324
5325 Assert(pPatch->cbPatchJump < sizeof(temp));
5326
5327 /* Let's first check if the guest code is still the same. */
5328 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5329 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5330 if (rc == VINF_SUCCESS)
5331 {
5332 if (temp[0] != 0xCC)
5333 {
5334                    Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5335 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5336 /* Remove it completely */
5337 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5338 rc = PATMR3RemovePatch(pVM, pInstrGC);
5339 AssertRC(rc);
5340 return VWRN_PATCH_REMOVED;
5341 }
5342 patmDeactivateInt3Patch(pVM, pPatch);
5343 }
5344 }
5345
5346 if (rc == VINF_SUCCESS)
5347 {
5348 /* Save old state and mark this one as disabled (so it can be enabled later on). */
5349 if (pPatch->uState == PATCH_DISABLE_PENDING)
5350 {
5351 /* Just to be safe, let's make sure this one can never be reused; the patch might be marked dirty already (int3 at start) */
5352 pPatch->uState = PATCH_UNUSABLE;
5353 }
5354 else
5355 if (pPatch->uState != PATCH_DIRTY)
5356 {
5357 pPatch->uOldState = pPatch->uState;
5358 pPatch->uState = PATCH_DISABLED;
5359 }
5360 STAM_COUNTER_ADD(&pVM->patm.s.StatDisabled, 1);
5361 }
5362
5363 Log(("PATMR3DisablePatch: disabled patch at %RRv\n", pInstrGC));
5364 return VINF_SUCCESS;
5365 }
5366 Log(("Patch not found!\n"));
5367 return VERR_PATCH_NOT_FOUND;
5368}
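
/*
 * Illustrative sketch (not part of the original source): callers in this file
 * treat VWRN_PATCH_REMOVED as success, because it means the guest had already
 * overwritten the patch jump and the patch was removed while disabling it.
 *
 * @code
 *     int rc = PATMR3DisablePatch(pVM, pInstrGC);
 *     if (rc == VWRN_PATCH_REMOVED)
 *         Log(("Patch at %RRv was already overwritten and has been removed\n", pInstrGC));
 *     else
 *         AssertRC(rc);
 * @endcode
 */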
5369
5370/**
5371 * Permanently disable patch for privileged instruction at specified location
5372 *
5373 * @returns VBox status code.
5374 * @param pVM Pointer to the VM.
5375 * @param pInstrGC       Guest context instruction pointer
5376 * @param pConflictAddr Guest context pointer which conflicts with specified patch
5377 * @param pConflictPatch Conflicting patch
5378 *
5379 */
5380static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pConflictPatch)
5381{
5382 NOREF(pConflictAddr);
5383#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
5384 PATCHINFO patch;
5385 DISCPUSTATE cpu;
5386 R3PTRTYPE(uint8_t *) pInstrHC;
5387 uint32_t cbInstr;
5388 bool disret;
5389 int rc;
5390
5391 RT_ZERO(patch);
5392 pInstrHC = patmR3GCVirtToHCVirt(pVM, &patch, pInstrGC);
5393 disret = patmR3DisInstr(pVM, &patch, pInstrGC, pInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
5394 /*
5395 * If it's a 5 byte relative jump, then we can work around the problem by replacing the 32 bits relative offset
5396 * with one that jumps right into the conflict patch.
5397 * Otherwise we must disable the conflicting patch to avoid serious problems.
5398 */
5399 if ( disret == true
5400 && (pConflictPatch->flags & PATMFL_CODE32)
5401 && (cpu.pCurInstr->uOpcode == OP_JMP || (cpu.pCurInstr->fOpType & DISOPTYPE_COND_CONTROLFLOW))
5402 && (cpu.Param1.fUse & DISUSE_IMMEDIATE32_REL))
5403 {
5404 /* Hint patches must be enabled first. */
5405 if (pConflictPatch->flags & PATMFL_INSTR_HINT)
5406 {
5407 Log(("Enabling HINTED patch %RRv\n", pConflictPatch->pPrivInstrGC));
5408 pConflictPatch->flags &= ~PATMFL_INSTR_HINT;
5409 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5410 Assert(rc == VINF_SUCCESS || rc == VERR_PATCH_NOT_FOUND);
5411 /* Enabling might fail if the patched code has changed in the meantime. */
5412 if (rc != VINF_SUCCESS)
5413 return rc;
5414 }
5415
5416 rc = PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_JUMP_CONFLICT);
5417 if (RT_SUCCESS(rc))
5418 {
5419 Log(("PATM -> CONFLICT: Installed JMP patch for patch conflict at %RRv\n", pInstrGC));
5420 STAM_COUNTER_INC(&pVM->patm.s.StatFixedConflicts);
5421 return VINF_SUCCESS;
5422 }
5423 }
5424#endif
5425
5426 if (pConflictPatch->opcode == OP_CLI)
5427 {
5428 /* Turn it into an int3 patch; our GC trap handler will call the generated code manually. */
5429 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> turn into int 3 patch!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5430 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5431 if (rc == VWRN_PATCH_REMOVED)
5432 return VINF_SUCCESS;
5433 if (RT_SUCCESS(rc))
5434 {
5435 pConflictPatch->flags &= ~(PATMFL_MUST_INSTALL_PATCHJMP|PATMFL_INSTR_HINT);
5436 pConflictPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
5437 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5438 if (rc == VERR_PATCH_NOT_FOUND)
5439 return VINF_SUCCESS; /* removed already */
5440
5441 AssertRC(rc);
5442 if (RT_SUCCESS(rc))
5443 {
5444 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
5445 return VINF_SUCCESS;
5446 }
5447 }
5448 /* else turned into unusable patch (see below) */
5449 }
5450 else
5451 {
5452 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> DISABLING it!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5453 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5454 if (rc == VWRN_PATCH_REMOVED)
5455 return VINF_SUCCESS;
5456 }
5457
5458 /* No need to monitor the code anymore. */
5459 if (pConflictPatch->flags & PATMFL_CODE_MONITORED)
5460 {
5461 int rc = patmRemovePatchPages(pVM, pConflictPatch);
5462 AssertRC(rc);
5463 }
5464 pConflictPatch->uState = PATCH_UNUSABLE;
5465 STAM_COUNTER_INC(&pVM->patm.s.StatUnusable);
5466 return VERR_PATCH_DISABLED;
5467}
5468
5469/**
5470 * Enable patch for privileged instruction at specified location
5471 *
5472 * @returns VBox status code.
5473 * @param pVM Pointer to the VM.
5474 * @param pInstrGC      Guest context pointer to the privileged instruction
5475 *
5476 * @note Returns failure if patching is not allowed or not possible.
5477 *
5478 */
5479VMMR3_INT_DECL(int) PATMR3EnablePatch(PVM pVM, RTRCPTR pInstrGC)
5480{
5481 PPATMPATCHREC pPatchRec;
5482 PPATCHINFO pPatch;
5483
5484 Log(("PATMR3EnablePatch %RRv\n", pInstrGC));
5485 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5486 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5487 if (pPatchRec)
5488 {
5489 int rc = VINF_SUCCESS;
5490
5491 pPatch = &pPatchRec->patch;
5492
5493 if (pPatch->uState == PATCH_DISABLED)
5494 {
5495 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
5496 {
5497 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
5498 uint8_t temp[16];
5499
5500 Assert(pPatch->cbPatchJump < sizeof(temp));
5501
5502 /* Let's first check if the guest code is still the same. */
5503 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5504 AssertRC(rc2);
5505 if (rc2 == VINF_SUCCESS)
5506 {
5507 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5508 {
5509                        Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5510 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5511 /* Remove it completely */
5512 rc = PATMR3RemovePatch(pVM, pInstrGC);
5513 AssertRC(rc);
5514 return VERR_PATCH_NOT_FOUND;
5515 }
5516
5517 PATMP2GLOOKUPREC cacheRec;
5518 RT_ZERO(cacheRec);
5519 cacheRec.pPatch = pPatch;
5520
5521 rc2 = patmGenJumpToPatch(pVM, pPatch, &cacheRec, false);
5522 /* Free leftover lock if any. */
5523 if (cacheRec.Lock.pvMap)
5524 {
5525 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
5526 cacheRec.Lock.pvMap = NULL;
5527 }
5528 AssertRC(rc2);
5529 if (RT_FAILURE(rc2))
5530 return rc2;
5531
5532#ifdef DEBUG
5533 {
5534 DISCPUSTATE cpu;
5535 char szOutput[256];
5536 uint32_t cbInstr;
5537 uint32_t i = 0;
5538 bool disret;
5539 while(i < pPatch->cbPatchJump)
5540 {
5541 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
5542 &cpu, &cbInstr, szOutput, sizeof(szOutput));
5543 Log(("Renewed patch instr: %s", szOutput));
5544 i += cbInstr;
5545 }
5546 }
5547#endif
5548 }
5549 }
5550 else
5551 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5552 {
5553 uint8_t temp[16];
5554
5555 Assert(pPatch->cbPatchJump < sizeof(temp));
5556
5557 /* Let's first check if the guest code is still the same. */
5558 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5559 AssertRC(rc2);
5560
5561 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5562 {
5563                    Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5564 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5565 rc = PATMR3RemovePatch(pVM, pInstrGC);
5566 AssertRC(rc);
5567 return VERR_PATCH_NOT_FOUND;
5568 }
5569
5570 rc2 = patmActivateInt3Patch(pVM, pPatch);
5571 if (RT_FAILURE(rc2))
5572 return rc2;
5573 }
5574
5575 pPatch->uState = pPatch->uOldState; //restore state
5576
5577 /* Restore the entry breakpoint with the original opcode (see PATMR3DisablePatch). */
5578 if (pPatch->pPatchBlockOffset)
5579 *PATCHCODE_PTR_HC(pPatch) = pPatch->bDirtyOpcode;
5580
5581 STAM_COUNTER_ADD(&pVM->patm.s.StatEnabled, 1);
5582 }
5583 else
5584 Log(("PATMR3EnablePatch: Unable to enable patch %RRv with state %d\n", pInstrGC, pPatch->uState));
5585
5586 return rc;
5587 }
5588 return VERR_PATCH_NOT_FOUND;
5589}
5590
5591/**
5592 * Remove patch for privileged instruction at specified location
5593 *
5594 * @returns VBox status code.
5595 * @param pVM Pointer to the VM.
5596 * @param pPatchRec Patch record
5597 * @param fForceRemove Remove *all* patches
5598 */
5599int patmR3RemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove)
5600{
5601 PPATCHINFO pPatch;
5602
5603 pPatch = &pPatchRec->patch;
5604
5605 /* Strictly forbidden to remove such patches. There can be dependencies!! */
5606 if (!fForceRemove && (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED)))
5607 {
5608 Log(("PATMRemovePatch %RRv REFUSED!\n", pPatch->pPrivInstrGC));
5609 return VERR_ACCESS_DENIED;
5610 }
5611 Log(("PATMRemovePatch %RRv\n", pPatch->pPrivInstrGC));
5612
5613 /* Note: NEVER EVER REUSE PATCH MEMORY */
5614 /* Note: PATMR3DisablePatch puts a breakpoint (0xCC) at the entry of this patch */
5615
5616 if (pPatchRec->patch.pPatchBlockOffset)
5617 {
5618 PAVLOU32NODECORE pNode;
5619
5620 pNode = RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->patch.pPatchBlockOffset);
5621 Assert(pNode);
5622 }
5623
5624 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
5625 {
5626 int rc = patmRemovePatchPages(pVM, &pPatchRec->patch);
5627 AssertRC(rc);
5628 }
5629
5630#ifdef VBOX_WITH_STATISTICS
5631 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5632 {
5633 STAMR3Deregister(pVM, &pPatchRec->patch);
5634#ifndef DEBUG_sandervl
5635 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5636 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5637 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5638 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5639 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5640 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5641 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5642 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5643 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5644 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5645 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5646 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5647 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5648 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5649#endif
5650 }
5651#endif
5652
5653 /* Note: no need to free Guest2PatchAddrTree as those records share memory with Patch2GuestAddrTree records. */
5654 patmEmptyTreeU32(pVM, &pPatch->Patch2GuestAddrTree);
5655 pPatch->nrPatch2GuestRecs = 0;
5656 Assert(pPatch->Patch2GuestAddrTree == 0);
5657
5658 patmEmptyTree(pVM, &pPatch->FixupTree);
5659 pPatch->nrFixups = 0;
5660 Assert(pPatch->FixupTree == 0);
5661
5662 if (pPatchRec->patch.pTempInfo)
5663 MMR3HeapFree(pPatchRec->patch.pTempInfo);
5664
5665 /* Note: might fail, because it has already been removed (e.g. during reset). */
5666 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5667
5668 /* Free the patch record */
5669 MMHyperFree(pVM, pPatchRec);
5670 return VINF_SUCCESS;
5671}
5672
5673/**
5674 * RTAvlU32DoWithAll() worker.
5675 * Checks whether the current trampoline instruction is the jump to the target patch
5676 * and updates the displacement to jump to the new target.
5677 *
5678 * @returns VBox status code.
5679 * @retval VERR_ALREADY_EXISTS if the jump was found.
5680 * @param pNode The current patch to guest record to check.
5681 * @param pvUser The refresh state.
5682 */
5683static int patmR3PatchRefreshFindTrampolinePatch(PAVLU32NODECORE pNode, void *pvUser)
5684{
5685 PRECPATCHTOGUEST pPatch2GuestRec = (PRECPATCHTOGUEST)pNode;
5686 PPATMREFRESHPATCH pRefreshPatchState = (PPATMREFRESHPATCH)pvUser;
5687 PVM pVM = pRefreshPatchState->pVM;
5688
5689 uint8_t *pPatchInstr = (uint8_t *)(pVM->patm.s.pPatchMemHC + pPatch2GuestRec->Core.Key);
5690
5691 /*
5692 * Check if the patch instruction starts with a jump.
5693 * ASSUMES that there is no other patch to guest record that starts
5694 * with a jump.
5695 */
5696 if (*pPatchInstr == 0xE9)
5697 {
5698 /* Jump found, update the displacement. */
5699 RTRCPTR pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pRefreshPatchState->pPatchRec,
5700 pRefreshPatchState->pPatchTrampoline->pPrivInstrGC);
5701 int32_t displ = pPatchTargetGC - (pVM->patm.s.pPatchMemGC + pPatch2GuestRec->Core.Key + SIZEOF_NEARJUMP32);
5702
5703        LogFlow(("Updating trampoline patch: new patch target %RRv, new displacement %d (old was %d)\n",
5704 pPatchTargetGC, displ, *(uint32_t *)&pPatchInstr[1]));
5705
5706 *(uint32_t *)&pPatchInstr[1] = displ;
5707 return VERR_ALREADY_EXISTS; /** @todo better return code */
5708 }
5709
5710 return VINF_SUCCESS;
5711}
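
/*
 * Illustrative sketch (not part of the original source): the displacement
 * written above follows the usual x86 near-jump encoding, i.e. for a 5 byte
 * 0xE9 rel32 jump located at GCPtrJump and targeting GCPtrTarget:
 *
 * @code
 *     int32_t displ = GCPtrTarget - (GCPtrJump + SIZEOF_NEARJUMP32);  // rel32 is relative to the next instruction
 *     *(int32_t *)&pbJumpInstrHC[1] = displ;                          // rewrite the 4 bytes after the 0xE9 opcode
 * @endcode
 *
 * GCPtrJump, GCPtrTarget and pbJumpInstrHC are hypothetical names for the jump
 * address, the jump target and the host mapping of the jump instruction.
 */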
5712
5713/**
5714 * Attempt to refresh the patch by recompiling its entire code block
5715 *
5716 * @returns VBox status code.
5717 * @param pVM Pointer to the VM.
5718 * @param pPatchRec Patch record
5719 */
5720int patmR3RefreshPatch(PVM pVM, PPATMPATCHREC pPatchRec)
5721{
5722 PPATCHINFO pPatch;
5723 int rc;
5724 RTRCPTR pInstrGC = pPatchRec->patch.pPrivInstrGC;
5725 PTRAMPREC pTrampolinePatchesHead = NULL;
5726
5727 Log(("patmR3RefreshPatch: attempt to refresh patch at %RRv\n", pInstrGC));
5728
5729 pPatch = &pPatchRec->patch;
5730 AssertReturn(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER), VERR_PATCHING_REFUSED);
5731 if (pPatch->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
5732 {
5733 if (!pPatch->pTrampolinePatchesHead)
5734 {
5735 /*
5736 * It is sometimes possible that there are trampoline patches to this patch
5737 * but they are not recorded (after a saved state load for example).
5738 * Refuse to refresh those patches.
5739             * This can hurt performance in theory if the patched code is modified by the guest
5740             * and is executed often. However, most of the time a state is saved after the guest
5741             * code was modified and the code is not changed again afterwards, so this shouldn't
5742             * be a big problem.
5743 */
5744 Log(("patmR3RefreshPatch: refused because external jumps to this patch exist but the jumps are not recorded\n"));
5745 return VERR_PATCHING_REFUSED;
5746 }
5747 Log(("patmR3RefreshPatch: external jumps to this patch exist, updating\n"));
5748 pTrampolinePatchesHead = pPatch->pTrampolinePatchesHead;
5749 }
5750
5751 /** Note: quite ugly to enable/disable/remove/insert old and new patches, but there's no easy way around it. */
5752
5753 rc = PATMR3DisablePatch(pVM, pInstrGC);
5754 AssertRC(rc);
5755
5756 /** Kick it out of the lookup tree to make sure PATMR3InstallPatch doesn't fail (hack alert) */
5757 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5758#ifdef VBOX_WITH_STATISTICS
5759 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5760 {
5761 STAMR3Deregister(pVM, &pPatchRec->patch);
5762#ifndef DEBUG_sandervl
5763 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5764 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5765 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5766 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5767 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5768 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5769 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5770 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5771 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5772 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5773 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5774 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5775 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5776 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5777#endif
5778 }
5779#endif
5780
5781 /** Note: We don't attempt to reuse patch memory here as it's quite common that the new code block requires more memory. */
5782
5783 /* Attempt to install a new patch. */
5784 rc = PATMR3InstallPatch(pVM, pInstrGC, pPatch->flags & (PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT));
5785 if (RT_SUCCESS(rc))
5786 {
5787 RTRCPTR pPatchTargetGC;
5788 PPATMPATCHREC pNewPatchRec;
5789
5790 /* Determine target address in new patch */
5791 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pInstrGC);
5792 Assert(pPatchTargetGC);
5793 if (!pPatchTargetGC)
5794 {
5795 rc = VERR_PATCHING_REFUSED;
5796 goto failure;
5797 }
5798
5799 /* Reset offset into patch memory to put the next code blocks right at the beginning. */
5800 pPatch->uCurPatchOffset = 0;
5801
5802 /* insert jump to new patch in old patch block */
5803 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC, false /* no lookup record */);
5804 if (RT_FAILURE(rc))
5805 goto failure;
5806
5807 pNewPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5808 Assert(pNewPatchRec); /* can't fail */
5809
5810 /* Remove old patch (only do that when everything is finished) */
5811 int rc2 = patmR3RemovePatch(pVM, pPatchRec, true /* force removal */);
5812 AssertRC(rc2);
5813
5814 /* Put the new patch back into the tree, because removing the old one kicked this one out. (hack alert) */
5815 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pNewPatchRec->Core);
5816 Assert(fInserted); NOREF(fInserted);
5817
5818        Log(("PATM: patmR3RefreshPatch: successfully refreshed patch at %RRv\n", pInstrGC));
5819 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshSuccess);
5820
5821 /* Used by another patch, so don't remove it! */
5822 pNewPatchRec->patch.flags |= PATMFL_CODE_REFERENCED;
5823
5824 if (pTrampolinePatchesHead)
5825 {
5826 /* Update all trampoline patches to jump to the new patch. */
5827 PTRAMPREC pTrampRec = NULL;
5828 PATMREFRESHPATCH RefreshPatch;
5829
5830 RefreshPatch.pVM = pVM;
5831 RefreshPatch.pPatchRec = &pNewPatchRec->patch;
5832
5833 pTrampRec = pTrampolinePatchesHead;
5834
5835 while (pTrampRec)
5836 {
5837 PPATCHINFO pPatchTrampoline = &pTrampRec->pPatchTrampoline->patch;
5838
5839 RefreshPatch.pPatchTrampoline = pPatchTrampoline;
5840 /*
5841 * We have to find the right patch2guest record because there might be others
5842 * for statistics.
5843 */
5844 rc = RTAvlU32DoWithAll(&pPatchTrampoline->Patch2GuestAddrTree, true,
5845 patmR3PatchRefreshFindTrampolinePatch, &RefreshPatch);
5846 Assert(rc == VERR_ALREADY_EXISTS);
5847 rc = VINF_SUCCESS;
5848 pTrampRec = pTrampRec->pNext;
5849 }
5850 pNewPatchRec->patch.pTrampolinePatchesHead = pTrampolinePatchesHead;
5851 pNewPatchRec->patch.flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
5852 /* Clear the list of trampoline patches for the old patch (safety precaution). */
5853 pPatchRec->patch.pTrampolinePatchesHead = NULL;
5854 }
5855 }
5856
5857failure:
5858 if (RT_FAILURE(rc))
5859 {
5860        LogRel(("PATM: patmR3RefreshPatch: failed to refresh patch at %RRv. Reactivating the old one.\n", pInstrGC));
5861
5862 /* Remove the new inactive patch */
5863 rc = PATMR3RemovePatch(pVM, pInstrGC);
5864 AssertRC(rc);
5865
5866 /* Put the old patch back into the tree (or else it won't be saved) (hack alert) */
5867 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
5868 Assert(fInserted); NOREF(fInserted);
5869
5870 /* Enable again in case the dirty instruction is near the end and there are safe code paths. */
5871 int rc2 = PATMR3EnablePatch(pVM, pInstrGC);
5872 AssertRC(rc2);
5873
5874 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshFailed);
5875 }
5876 return rc;
5877}
5878
5879/**
5880 * Find patch for privileged instruction at specified location
5881 *
5882 * @returns Patch structure pointer if found; else NULL
5883 * @param pVM Pointer to the VM.
5884 * @param pInstrGC      Guest context pointer to an instruction that might lie within 5 bytes of an existing patch jump
5885 * @param fIncludeHints Include hinted patches or not
5886 *
5887 */
5888PPATCHINFO patmFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncludeHints)
5889{
5890 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5891 /* if the patch is enabled, the pointer is not identical to the privileged patch ptr and it lies within 5 bytes of this priv instr ptr, then we've got a hit! */
5892 if (pPatchRec)
5893 {
5894 if ( pPatchRec->patch.uState == PATCH_ENABLED
5895 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE)
5896 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5897 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5898 {
5899 Log(("Found active patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5900 return &pPatchRec->patch;
5901 }
5902 else
5903 if ( fIncludeHints
5904 && pPatchRec->patch.uState == PATCH_DISABLED
5905 && (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
5906 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5907 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5908 {
5909 Log(("Found HINT patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5910 return &pPatchRec->patch;
5911 }
5912 }
5913 return NULL;
5914}
5915
5916/**
5917 * Checks whether the GC address is inside a generated patch jump
5918 *
5919 * @returns true -> yes, false -> no
5920 * @param pVM Pointer to the VM.
5921 * @param pAddr Guest context address.
5922 * @param pPatchAddr    Where to return the guest context patch address (only set when true is returned).
5923 */
5924VMMR3_INT_DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatchAddr)
5925{
5926 RTRCPTR addr;
5927 PPATCHINFO pPatch;
5928
5929 Assert(!HMIsEnabled(pVM));
5930 if (PATMIsEnabled(pVM) == false)
5931 return false;
5932
5933 if (pPatchAddr == NULL)
5934 pPatchAddr = &addr;
5935
5936 *pPatchAddr = 0;
5937
5938 pPatch = patmFindActivePatchByEntrypoint(pVM, pAddr);
5939 if (pPatch)
5940 *pPatchAddr = pPatch->pPrivInstrGC;
5941
5942 return *pPatchAddr == 0 ? false : true;
5943}
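
/*
 * Illustrative sketch (not part of the original source): resolving a guest
 * address that may lie in the middle of a generated patch jump; GCPtrFault is
 * a hypothetical name for the address being looked up.
 *
 * @code
 *     RTGCPTR32 GCPtrPatch;
 *     if (PATMR3IsInsidePatchJump(pVM, GCPtrFault, &GCPtrPatch))
 *         Log(("%RRv lies inside the patch jump starting at %RRv\n", GCPtrFault, GCPtrPatch));
 * @endcode
 */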
5944
5945/**
5946 * Remove patch for privileged instruction at specified location
5947 *
5948 * @returns VBox status code.
5949 * @param pVM Pointer to the VM.
5950 * @param pInstrGC      Guest context pointer to the privileged instruction
5951 *
5952 * @note Returns failure if patching is not allowed or not possible.
5953 *
5954 */
5955VMMR3_INT_DECL(int) PATMR3RemovePatch(PVM pVM, RTRCPTR pInstrGC)
5956{
5957 PPATMPATCHREC pPatchRec;
5958
5959 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5960 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5961 if (pPatchRec)
5962 {
5963 int rc = PATMR3DisablePatch(pVM, pInstrGC);
5964 if (rc == VWRN_PATCH_REMOVED)
5965 return VINF_SUCCESS;
5966
5967 return patmR3RemovePatch(pVM, pPatchRec, false);
5968 }
5969 AssertFailed();
5970 return VERR_PATCH_NOT_FOUND;
5971}
5972
5973/**
5974 * Mark patch as dirty
5975 *
5976 * @returns VBox status code.
5977 * @param pVM Pointer to the VM.
5978 * @param pPatch Patch record
5979 *
5980 * @note Returns failure if patching is not allowed or not possible.
5981 *
5982 */
5983static int patmR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch)
5984{
5985 if (pPatch->pPatchBlockOffset)
5986 {
5987 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5988 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5989 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5990 }
5991
5992 STAM_COUNTER_INC(&pVM->patm.s.StatDirty);
5993 /* Put back the replaced instruction. */
5994 int rc = PATMR3DisablePatch(pVM, pPatch->pPrivInstrGC);
5995 if (rc == VWRN_PATCH_REMOVED)
5996 return VINF_SUCCESS;
5997
5998 /* Note: we don't restore patch pages for patches that are not enabled! */
5999 /* Note: be careful when changing this behaviour!! */
6000
6001 /* The patch pages are no longer marked for self-modifying code detection */
6002 if (pPatch->flags & PATMFL_CODE_MONITORED)
6003 {
6004 rc = patmRemovePatchPages(pVM, pPatch);
6005 AssertRCReturn(rc, rc);
6006 }
6007 pPatch->uState = PATCH_DIRTY;
6008
6009    /* Paranoia: this patch might be somewhere in the call chain, so reset the stack to prevent patched ret instructions from succeeding. */
6010 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6011
6012 return VINF_SUCCESS;
6013}
6014
6015/**
6016 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
6017 *
6018 * @returns Corresponding guest context instruction pointer, or 0 if not found.
6019 * @param pVM Pointer to the VM.
6020 * @param pPatch Patch block structure pointer
6021 * @param pPatchGC GC address in patch block
6022 */
6023RTRCPTR patmPatchGCPtr2GuestGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pPatchGC)
6024{
6025 Assert(pPatch->Patch2GuestAddrTree);
6026 /* Get the closest record from below. */
6027 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->Patch2GuestAddrTree, pPatchGC - pVM->patm.s.pPatchMemGC, false);
6028 if (pPatchToGuestRec)
6029 return pPatchToGuestRec->pOrgInstrGC;
6030
6031 return 0;
6032}
6033
6034/**
6035 * Converts Guest code GC ptr to Patch code GC ptr (if found)
6036 *
6037 * @returns corresponding GC pointer in patch block
6038 * @param pVM Pointer to the VM.
6039 * @param pPatch Current patch block pointer
6040 * @param pInstrGC Guest context pointer to privileged instruction
6041 *
6042 */
6043RTRCPTR patmGuestGCPtrToPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
6044{
6045 if (pPatch->Guest2PatchAddrTree)
6046 {
6047 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
6048 if (pGuestToPatchRec)
6049 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
6050 }
6051
6052 return 0;
6053}
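
/*
 * Illustrative sketch (not part of the original source): for an instruction
 * that was recorded in both lookup trees, the guest-to-patch and
 * patch-to-guest translations above are inverses of each other.
 *
 * @code
 *     RTRCPTR pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pInstrGC);
 *     if (pPatchGC)
 *         Assert(patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchGC) == pInstrGC);
 * @endcode
 */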
6054
6055/**
6056 * Converts Guest code GC ptr to Patch code GC ptr (if found)
6057 *
6058 * @returns corresponding GC pointer in patch block
6059 * @param pVM Pointer to the VM.
6060 * @param pInstrGC Guest context pointer to privileged instruction
6061 */
6062static RTRCPTR patmR3GuestGCPtrToPatchGCPtrSimple(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC)
6063{
6064 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
6065 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && pInstrGC >= pPatchRec->patch.pPrivInstrGC)
6066 return patmGuestGCPtrToPatchGCPtr(pVM, &pPatchRec->patch, pInstrGC);
6067 return NIL_RTRCPTR;
6068}
6069
6070/**
6071 * Converts Guest code GC ptr to Patch code GC ptr (or nearest from below if no
6072 * identical match)
6073 *
6074 * @returns corresponding GC pointer in patch block
6075 * @param pVM Pointer to the VM.
6076 * @param pPatch Current patch block pointer
6077 * @param pInstrGC Guest context pointer to privileged instruction
6078 *
6079 */
6080RTRCPTR patmGuestGCPtrToClosestPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
6081{
6082 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pInstrGC, false);
6083 if (pGuestToPatchRec)
6084 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
6085 return NIL_RTRCPTR;
6086}
6087
6088/**
6089 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
6090 *
6091 * @returns original GC instruction pointer or 0 if not found
6092 * @param pVM Pointer to the VM.
6093 * @param pPatchGC GC address in patch block
6094 * @param pEnmState State of the translated address (out)
6095 *
6096 */
6097VMMR3_INT_DECL(RTRCPTR) PATMR3PatchToGCPtr(PVM pVM, RTRCPTR pPatchGC, PATMTRANSSTATE *pEnmState)
6098{
6099 PPATMPATCHREC pPatchRec;
6100 void *pvPatchCoreOffset;
6101 RTRCPTR pPrivInstrGC;
6102
6103 Assert(PATMIsPatchGCAddr(pVM, pPatchGC));
6104 Assert(!HMIsEnabled(pVM));
6105 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchGC - pVM->patm.s.pPatchMemGC, false);
6106 if (pvPatchCoreOffset == 0)
6107 {
6108 Log(("PATMR3PatchToGCPtr failed for %RRv offset %x\n", pPatchGC, pPatchGC - pVM->patm.s.pPatchMemGC));
6109 return 0;
6110 }
6111 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6112 pPrivInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, &pPatchRec->patch, pPatchGC);
6113 if (pEnmState)
6114 {
6115 AssertMsg(pPrivInstrGC && ( pPatchRec->patch.uState == PATCH_ENABLED
6116 || pPatchRec->patch.uState == PATCH_DIRTY
6117 || pPatchRec->patch.uState == PATCH_DISABLE_PENDING
6118 || pPatchRec->patch.uState == PATCH_UNUSABLE),
6119 ("pPrivInstrGC=%RRv uState=%d\n", pPrivInstrGC, pPatchRec->patch.uState));
6120
6121 if ( !pPrivInstrGC
6122 || pPatchRec->patch.uState == PATCH_UNUSABLE
6123 || pPatchRec->patch.uState == PATCH_REFUSED)
6124 {
6125 pPrivInstrGC = 0;
6126 *pEnmState = PATMTRANS_FAILED;
6127 }
6128 else
6129 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pPrivInstrGC)
6130 {
6131 *pEnmState = PATMTRANS_INHIBITIRQ;
6132 }
6133 else
6134 if ( pPatchRec->patch.uState == PATCH_ENABLED
6135 && !(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE))
6136 && pPrivInstrGC > pPatchRec->patch.pPrivInstrGC
6137 && pPrivInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
6138 {
6139 *pEnmState = PATMTRANS_OVERWRITTEN;
6140 }
6141 else
6142 if (patmFindActivePatchByEntrypoint(pVM, pPrivInstrGC))
6143 {
6144 *pEnmState = PATMTRANS_OVERWRITTEN;
6145 }
6146 else
6147 if (pPrivInstrGC == pPatchRec->patch.pPrivInstrGC)
6148 {
6149 *pEnmState = PATMTRANS_PATCHSTART;
6150 }
6151 else
6152 *pEnmState = PATMTRANS_SAFE;
6153 }
6154 return pPrivInstrGC;
6155}
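
/*
 * Illustrative sketch (not part of the original source): translating an
 * address inside the patch memory back to the guest and only trusting the
 * result when the translation state says it is safe; pPatchGC is assumed to
 * be a valid patch code address.
 *
 * @code
 *     PATMTRANSSTATE enmState;
 *     RTRCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pPatchGC, &enmState);
 *     if (pOrgInstrGC && enmState == PATMTRANS_SAFE)
 *         Log(("Patch address %RRv maps to guest address %RRv\n", pPatchGC, pOrgInstrGC));
 * @endcode
 */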
6156
6157/**
6158 * Returns the GC pointer of the patch for the specified GC address
6159 *
6160 * @returns GC pointer of the patch code, or NIL_RTRCPTR if no enabled or dirty patch exists for the address.
6161 * @param pVM Pointer to the VM.
6162 * @param pAddrGC Guest context address
6163 */
6164VMMR3_INT_DECL(RTRCPTR) PATMR3QueryPatchGCPtr(PVM pVM, RTRCPTR pAddrGC)
6165{
6166 PPATMPATCHREC pPatchRec;
6167
6168 Assert(!HMIsEnabled(pVM));
6169
6170 /* Find the patch record. */
6171 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pAddrGC);
6172 /** @todo we should only use patches that are enabled! always did this, but it's incorrect! */
6173 if (pPatchRec && (pPatchRec->patch.uState == PATCH_ENABLED || pPatchRec->patch.uState == PATCH_DIRTY))
6174 return PATCHCODE_PTR_GC(&pPatchRec->patch);
6175 return NIL_RTRCPTR;
6176}
6177
6178/**
6179 * Attempt to recover dirty instructions
6180 *
6181 * @returns VBox status code.
6182 * @param pVM Pointer to the VM.
6183 * @param pCtx Pointer to the guest CPU context.
6184 * @param pPatch Patch record.
6185 * @param pPatchToGuestRec Patch to guest address record.
6186 * @param pEip GC pointer of trapping instruction.
6187 */
6188static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch, PRECPATCHTOGUEST pPatchToGuestRec, RTRCPTR pEip)
6189{
6190 DISCPUSTATE CpuOld, CpuNew;
6191 uint8_t *pPatchInstrHC, *pCurPatchInstrHC;
6192 int rc;
6193 RTRCPTR pCurInstrGC, pCurPatchInstrGC;
6194 uint32_t cbDirty;
6195 PRECPATCHTOGUEST pRec;
6196 RTRCPTR const pOrgInstrGC = pPatchToGuestRec->pOrgInstrGC;
6197 PVMCPU pVCpu = VMMGetCpu0(pVM);
6198 Log(("patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv)\n", pEip, pOrgInstrGC));
6199
6200 pRec = pPatchToGuestRec;
6201 pCurInstrGC = pOrgInstrGC;
6202 pCurPatchInstrGC = pEip;
6203 cbDirty = 0;
6204 pPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6205
6206 /* Find all adjacent dirty instructions */
6207 while (true)
6208 {
6209 if (pRec->fJumpTarget)
6210 {
6211 LogRel(("PATM: patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv) ignored, because instruction in function was reused as target of jump\n", pEip, pOrgInstrGC));
6212 pRec->fDirty = false;
6213 return VERR_PATCHING_REFUSED;
6214 }
6215
6216 /* Restore original instruction opcode byte so we can check if the write was indeed safe. */
6217 pCurPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6218 *pCurPatchInstrHC = pRec->u8DirtyOpcode;
6219
6220 /* Only harmless instructions are acceptable. */
6221 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurPatchInstrGC, &CpuOld, 0);
6222 if ( RT_FAILURE(rc)
6223 || !(CpuOld.pCurInstr->fOpType & DISOPTYPE_HARMLESS))
6224 {
6225 if (RT_SUCCESS(rc))
6226 cbDirty += CpuOld.cbInstr;
6227 else
6228 if (!cbDirty)
6229 cbDirty = 1;
6230 break;
6231 }
6232
6233#ifdef DEBUG
6234 char szBuf[256];
6235 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6236 szBuf, sizeof(szBuf), NULL);
6237 Log(("DIRTY: %s\n", szBuf));
6238#endif
6239 /* Mark as clean; if we fail we'll let it always fault. */
6240 pRec->fDirty = false;
6241
6242 /* Remove old lookup record. */
6243 patmr3RemoveP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrGC);
6244 pPatchToGuestRec = NULL;
6245
6246 pCurPatchInstrGC += CpuOld.cbInstr;
6247 cbDirty += CpuOld.cbInstr;
6248
6249 /* Let's see if there's another dirty instruction right after. */
6250 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6251 if (!pRec || !pRec->fDirty)
6252 break; /* no more dirty instructions */
6253
6254 /* In case of complex instructions the next guest instruction could be quite far off. */
6255 pCurPatchInstrGC = pRec->Core.Key + pVM->patm.s.pPatchMemGC;
6256 }
6257
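/* Recovery is only attempted if the scan above ended cleanly, i.e. every dirty
 * instruction disassembled successfully and was harmless. */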
6258 if ( RT_SUCCESS(rc)
6259 && (CpuOld.pCurInstr->fOpType & DISOPTYPE_HARMLESS)
6260 )
6261 {
6262 uint32_t cbLeft;
6263
6264 pCurPatchInstrHC = pPatchInstrHC;
6265 pCurPatchInstrGC = pEip;
6266 cbLeft = cbDirty;
6267
6268 while (cbLeft && RT_SUCCESS(rc))
6269 {
6270 bool fValidInstr;
6271
6272 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurInstrGC, &CpuNew, 0);
6273
6274 fValidInstr = !!(CpuNew.pCurInstr->fOpType & DISOPTYPE_HARMLESS);
6275 if ( !fValidInstr
6276 && (CpuNew.pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW)
6277 )
6278 {
6279 RTRCPTR pTargetGC = PATMResolveBranch(&CpuNew, pCurInstrGC);
6280
6281 if ( pTargetGC >= pOrgInstrGC
6282 && pTargetGC <= pOrgInstrGC + cbDirty
6283 )
6284 {
6285 /* A relative jump to an instruction inside or to the end of the dirty block is acceptable. */
6286 fValidInstr = true;
6287 }
6288 }
6289
6290 /* If the instruction is completely harmless (which implies a 1:1 patch copy). */
6291 if ( rc == VINF_SUCCESS
6292 && CpuNew.cbInstr <= cbLeft /* must still fit */
6293 && fValidInstr
6294 )
6295 {
6296#ifdef DEBUG
6297 char szBuf[256];
6298 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6299 szBuf, sizeof(szBuf), NULL);
6300 Log(("NEW: %s\n", szBuf));
6301#endif
6302
6303 /* Copy the new instruction. */
6304 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pCurPatchInstrHC, pCurInstrGC, CpuNew.cbInstr);
6305 AssertRC(rc);
6306
6307 /* Add a new lookup record for the duplicated instruction. */
6308 patmR3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6309 }
6310 else
6311 {
6312#ifdef DEBUG
6313 char szBuf[256];
6314 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6315 szBuf, sizeof(szBuf), NULL);
6316 Log(("NEW: %s (FAILED)\n", szBuf));
6317#endif
6318 /* Restore the old lookup record for the duplicated instruction. */
6319 patmR3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6320
6321 /** @todo in theory we need to restore the lookup records for the remaining dirty instructions too! */
6322 rc = VERR_PATCHING_REFUSED;
6323 break;
6324 }
6325 pCurInstrGC += CpuNew.cbInstr;
6326 pCurPatchInstrHC += CpuNew.cbInstr;
6327 pCurPatchInstrGC += CpuNew.cbInstr;
6328 cbLeft -= CpuNew.cbInstr;
6329
6330 /* Check if we expanded a complex guest instruction into a patch stream (e.g. call) */
6331 if (!cbLeft)
6332 {
6333 /* If the next patch instruction doesn't correspond to the next guest instruction, then we have some extra room to fill. */
6334 if (RTAvlU32Get(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC) == NULL)
6335 {
6336 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6337 if (pRec)
6338 {
6339 unsigned cbFiller = pRec->Core.Key + pVM->patm.s.pPatchMemGC - pCurPatchInstrGC;
6340 uint8_t *pPatchFillHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6341
6342 Assert(!pRec->fDirty);
6343
6344 Log(("Room left in patched instruction stream (%d bytes)\n", cbFiller));
6345 if (cbFiller >= SIZEOF_NEARJUMP32)
6346 {
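/* Enough room for a near jump (E9 rel32) over the remaining filler bytes. */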
6347 pPatchFillHC[0] = 0xE9;
6348 *(uint32_t *)&pPatchFillHC[1] = cbFiller - SIZEOF_NEARJUMP32;
6349#ifdef DEBUG
6350 char szBuf[256];
6351 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC,
6352 DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6353 Log(("FILL: %s\n", szBuf));
6354#endif
6355 }
6356 else
6357 {
6358 for (unsigned i = 0; i < cbFiller; i++)
6359 {
6360 pPatchFillHC[i] = 0x90; /* NOP */
6361#ifdef DEBUG
6362 char szBuf[256];
6363 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC + i,
6364 DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6365 Log(("FILL: %s\n", szBuf));
6366#endif
6367 }
6368 }
6369 }
6370 }
6371 }
6372 }
6373 }
6374 else
6375 rc = VERR_PATCHING_REFUSED;
6376
6377 if (RT_SUCCESS(rc))
6378 {
6379 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyGood);
6380 }
6381 else
6382 {
6383 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyBad);
6384 Assert(cbDirty);
6385
6386 /* Mark the whole instruction stream with breakpoints. */
6387 if (cbDirty)
6388 memset(pPatchInstrHC, 0xCC, cbDirty);
6389
6390 if ( pVM->patm.s.fOutOfMemory == false
6391 && (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER)))
6392 {
6393 rc = patmR3RefreshPatch(pVM, pPatch);
6394 if (RT_FAILURE(rc))
6395 {
6396 LogRel(("PATM: Failed to refresh dirty patch at %RRv. Disabling it.\n", pPatch->patch.pPrivInstrGC));
6397 }
6398 /* Even if we succeed, we must go back to the original instruction as the patched one could be invalid. */
6399 rc = VERR_PATCHING_REFUSED;
6400 }
6401 }
6402 return rc;
6403}
6404
6405/**
6406 * Handle trap inside patch code
6407 *
6408 * @returns VBox status code.
6409 * @param pVM Pointer to the VM.
6410 * @param pCtx Pointer to the guest CPU context.
6411 * @param pEip GC pointer of trapping instruction.
6412 * @param ppNewEip GC pointer to new instruction.
6413 */
6414VMMR3_INT_DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *ppNewEip)
6415{
6416 PPATMPATCHREC pPatch = 0;
6417 void *pvPatchCoreOffset;
6418 RTRCUINTPTR offset;
6419 RTRCPTR pNewEip;
6420 int rc;
6421 PRECPATCHTOGUEST pPatchToGuestRec = 0;
6422 PVMCPU pVCpu = VMMGetCpu0(pVM);
6423
6424 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
6425 Assert(pVM->cCpus == 1);
6426
6427 pNewEip = 0;
6428 *ppNewEip = 0;
6429
6430 STAM_PROFILE_ADV_START(&pVM->patm.s.StatHandleTrap, a);
6431
6432 /* Find the patch record. */
6433 /* Note: there might not be a patch to guest translation record (global function) */
6434 offset = pEip - pVM->patm.s.pPatchMemGC;
6435 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
6436 if (pvPatchCoreOffset)
6437 {
6438 pPatch = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6439
6440 Assert(offset >= pPatch->patch.pPatchBlockOffset && offset < pPatch->patch.pPatchBlockOffset + pPatch->patch.cbPatchBlockSize);
6441
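/* Traps in dirty or disabled patches are expected; make sure fPIF is set again for
 * function duplicates and complete any pending disable request before translating
 * the faulting address back to guest code. */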
6442 if (pPatch->patch.uState == PATCH_DIRTY)
6443 {
6444 Log(("PATMR3HandleTrap: trap in dirty patch at %RRv\n", pEip));
6445 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6446 {
6447 /* Function duplication patches set fPIF to 1 on entry */
6448 pVM->patm.s.pGCStateHC->fPIF = 1;
6449 }
6450 }
6451 else
6452 if (pPatch->patch.uState == PATCH_DISABLED)
6453 {
6454 Log(("PATMR3HandleTrap: trap in disabled patch at %RRv\n", pEip));
6455 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6456 {
6457 /* Function duplication patches set fPIF to 1 on entry */
6458 pVM->patm.s.pGCStateHC->fPIF = 1;
6459 }
6460 }
6461 else
6462 if (pPatch->patch.uState == PATCH_DISABLE_PENDING)
6463 {
6464 RTRCPTR pPrivInstrGC = pPatch->patch.pPrivInstrGC;
6465
6466 Log(("PATMR3HandleTrap: disable operation is pending for patch at %RRv\n", pPatch->patch.pPrivInstrGC));
6467 rc = PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6468 AssertReleaseMsg(rc != VWRN_PATCH_REMOVED, ("PATMR3DisablePatch removed patch at %RRv\n", pPrivInstrGC));
6469 AssertMsg(pPatch->patch.uState == PATCH_DISABLED || pPatch->patch.uState == PATCH_UNUSABLE, ("Unexpected failure to disable patch state=%d rc=%Rrc\n", pPatch->patch.uState, rc));
6470 }
6471
6472 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, offset, false);
6473 AssertReleaseMsg(pPatchToGuestRec, ("PATMR3HandleTrap: Unable to find corresponding guest address for %RRv (offset %x)\n", pEip, offset));
6474
6475 pNewEip = pPatchToGuestRec->pOrgInstrGC;
6476 pPatch->patch.cTraps++;
6477 PATM_STAT_FAULT_INC(&pPatch->patch);
6478 }
6479 else
6480 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 0, ("PATMR3HandleTrap: Unable to find translation record for %RRv (PIF=0)\n", pEip));
6481
6482 /* Check if we were interrupted in PATM generated instruction code. */
6483 if (pVM->patm.s.pGCStateHC->fPIF == 0)
6484 {
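/* fPIF is cleared while PATM-generated code (not a 1:1 copy of guest code) runs, so a
 * trap here is fatal unless it is a benign stack fault during a pushf/push/call or a
 * call into an invalidated patch. */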
6485 DISCPUSTATE Cpu;
6486 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pEip, &Cpu, "PIF Trap: ");
6487 AssertRC(rc);
6488
6489 if ( rc == VINF_SUCCESS
6490 && ( Cpu.pCurInstr->uOpcode == OP_PUSHF
6491 || Cpu.pCurInstr->uOpcode == OP_PUSH
6492 || Cpu.pCurInstr->uOpcode == OP_CALL)
6493 )
6494 {
6495 uint64_t fFlags;
6496
6497 STAM_COUNTER_INC(&pVM->patm.s.StatPushTrap);
6498
6499 if (Cpu.pCurInstr->uOpcode == OP_PUSH)
6500 {
6501 rc = PGMShwGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6502 if ( rc == VINF_SUCCESS
6503 && ((fFlags & (X86_PTE_P|X86_PTE_RW)) == (X86_PTE_P|X86_PTE_RW)) )
6504 {
6505 /* The stack address is fine, so the push argument is a pointer -> emulate this instruction */
6506
6507 /* Reset the PATM stack. */
6508 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6509
6510 pVM->patm.s.pGCStateHC->fPIF = 1;
6511
6512 Log(("Faulting push -> go back to the original instruction\n"));
6513
6514 /* continue at the original instruction */
6515 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6516 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6517 return VINF_SUCCESS;
6518 }
6519 }
6520
6521 /* Typical pushf (most patches)/push (call patch) trap because of a monitored page. */
6522 rc = PGMShwMakePageWritable(pVCpu, pCtx->esp, 0 /*fFlags*/);
6523 AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
6524 if (rc == VINF_SUCCESS)
6525 {
6526 /* The guest page *must* be present. */
6527 rc = PGMGstGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6528 if ( rc == VINF_SUCCESS
6529 && (fFlags & X86_PTE_P))
6530 {
6531 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6532 return VINF_PATCH_CONTINUE;
6533 }
6534 }
6535 }
6536 else
6537 if (pPatch && pPatch->patch.pPrivInstrGC == pNewEip)
6538 {
6539 /* Invalidated patch or first instruction overwritten.
6540 * We can ignore the fPIF state in this case.
6541 */
6542 /* Reset the PATM stack. */
6543 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6544
6545 Log(("Call to invalidated patch -> go back to the original instruction\n"));
6546
6547 pVM->patm.s.pGCStateHC->fPIF = 1;
6548
6549 /* continue at the original instruction */
6550 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6551 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6552 return VINF_SUCCESS;
6553 }
6554
6555 char szBuf[256];
6556 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pEip, DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6557
6558 /* Very bad. We crashed in emitted code. Probably stack? */
6559 if (pPatch)
6560 {
6561 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6562 ("Crash in patch code %RRv (%RRv) esp=%RX32\nPatch state=%x flags=%RX64 fDirty=%d\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), pPatch->patch.uState, pPatch->patch.flags, pPatchToGuestRec->fDirty, szBuf));
6563 }
6564 else
6565 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6566 ("Crash in patch code %RRv (%RRv) esp=%RX32\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), szBuf));
6567 EMR3FatalError(pVCpu, VERR_INTERNAL_ERROR);
6568 }
6569
6570 /* From here on, we must have a valid patch to guest translation. */
6571 if (pvPatchCoreOffset == 0)
6572 {
6573 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6574 AssertMsgFailed(("PATMR3HandleTrap: patch not found at address %RRv!!\n", pEip));
6575 return VERR_PATCH_NOT_FOUND;
6576 }
6577
6578 /* Take care of dirty/changed instructions. */
6579 if (pPatchToGuestRec->fDirty)
6580 {
6581 Assert(pPatchToGuestRec->Core.Key == offset);
6582 Assert(pVM->patm.s.pGCStateHC->fPIF == 1);
6583
6584 rc = patmR3HandleDirtyInstr(pVM, pCtx, pPatch, pPatchToGuestRec, pEip);
6585 if (RT_SUCCESS(rc))
6586 {
6587 /* Retry the current instruction. */
6588 pNewEip = pEip;
6589 rc = VINF_PATCH_CONTINUE; /* Continue at current patch instruction. */
6590 }
6591 else
6592 {
6593 /* Reset the PATM stack. */
6594 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6595
6596 rc = VINF_SUCCESS; /* Continue at original instruction. */
6597 }
6598
6599 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6600 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6601 return rc;
6602 }
6603
6604#ifdef VBOX_STRICT
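/* Sanity check for duplicated functions: if the guest instruction corresponding to the
 * fault is a near return, log the return address found on the guest stack against the
 * one recorded on the PATM stack. */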
6605 if (pPatch->patch.flags & PATMFL_DUPLICATE_FUNCTION)
6606 {
6607 DISCPUSTATE cpu;
6608 bool disret;
6609 uint32_t cbInstr;
6610 PATMP2GLOOKUPREC cacheRec;
6611 RT_ZERO(cacheRec);
6612 cacheRec.pPatch = &pPatch->patch;
6613
6614 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
6615 &cpu, &cbInstr);
6616 if (cacheRec.Lock.pvMap)
6617 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6618
6619 if (disret && cpu.pCurInstr->uOpcode == OP_RETN)
6620 {
6621 RTRCPTR retaddr;
6622 PCPUMCTX pCtx2;
6623
6624 pCtx2 = CPUMQueryGuestCtxPtr(pVCpu);
6625
6626 rc = PGMPhysSimpleReadGCPtr(pVCpu, &retaddr, pCtx2->esp, sizeof(retaddr));
6627 AssertRC(rc);
6628
6629 Log(("Return failed at %RRv (%RRv)\n", pEip, pNewEip));
6630 Log(("Expected return address %RRv found address %RRv Psp=%x\n", pVM->patm.s.pGCStackHC[(pVM->patm.s.pGCStateHC->Psp+PATM_STACK_SIZE)/sizeof(RTRCPTR)], retaddr, pVM->patm.s.pGCStateHC->Psp));
6631 }
6632 }
6633#endif
6634
6635 /* Return original address, correct by subtracting the CS base address. */
6636 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6637
6638 /* Reset the PATM stack. */
6639 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6640
6641 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pNewEip)
6642 {
6643 /* Must be a faulting instruction after sti; currently only sysexit, hlt or iret */
6644 Log(("PATMR3HandleTrap %RRv -> inhibit irqs set!\n", pEip));
6645#ifdef VBOX_STRICT
6646 DISCPUSTATE cpu;
6647 bool disret;
6648 uint32_t cbInstr;
6649 PATMP2GLOOKUPREC cacheRec;
6650 RT_ZERO(cacheRec);
6651 cacheRec.pPatch = &pPatch->patch;
6652
6653 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_ORGCODE,
6654 &cpu, &cbInstr);
6655 if (cacheRec.Lock.pvMap)
6656 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6657
6658 if (disret && (cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_INT3))
6659 {
6660 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
6661 &cpu, &cbInstr);
6662 if (cacheRec.Lock.pvMap)
6663 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6664
6665 Assert(cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_IRET);
6666 }
6667#endif
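/* Hand the interrupt inhibition over to EM for the original EIP and clear the PATM-side marker. */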
6668 EMSetInhibitInterruptsPC(pVCpu, pNewEip);
6669 pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts = 0;
6670 }
6671
6672 Log2(("pPatchBlockGC %RRv - pEip %RRv corresponding GC address %RRv\n", PATCHCODE_PTR_GC(&pPatch->patch), pEip, pNewEip));
6673 DBGFR3_DISAS_INSTR_LOG(pVCpu, pCtx->cs.Sel, pNewEip, "PATCHRET: ");
6674 if (pNewEip >= pPatch->patch.pPrivInstrGC && pNewEip < pPatch->patch.pPrivInstrGC + pPatch->patch.cbPatchJump)
6675 {
6676 /* We can't jump back to code that we've overwritten with a 5 byte jump! */
6677 Log(("Disabling patch at location %RRv due to trap too close to the privileged instruction \n", pPatch->patch.pPrivInstrGC));
6678 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6679 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6680 return VERR_PATCH_DISABLED;
6681 }
6682
6683#ifdef PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
6684 /** @todo compare to nr of successful runs. add some aging algorithm and determine the best time to disable the patch */
6685 if (pPatch->patch.cTraps > MAX_PATCH_TRAPS)
6686 {
6687 Log(("Disabling patch at location %RRv due to too many traps inside patch code\n", pPatch->patch.pPrivInstrGC));
6688 //we are only wasting time, back out the patch
6689 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6690 /* pTrapRec->pNextPatchInstr = 0; - stale: pTrapRec is not defined in this function. */
6691 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6692 return VERR_PATCH_DISABLED;
6693 }
6694#endif
6695
6696 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6697 return VINF_SUCCESS;
6698}
6699
6700
6701/**
6702 * Handle page-fault in monitored page
6703 *
6704 * @returns VBox status code.
6705 * @param pVM Pointer to the VM.
6706 */
6707VMMR3_INT_DECL(int) PATMR3HandleMonitoredPage(PVM pVM)
6708{
6709 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
6710
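/* A write hit the monitored page: drop the virtual handler and refresh every enabled
 * patch whose privileged instruction lives on that page by disabling and re-enabling it. */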
6711 RTRCPTR addr = pVM->patm.s.pvFaultMonitor;
6712 addr &= PAGE_BASE_GC_MASK;
6713
6714 int rc = PGMHandlerVirtualDeregister(pVM, addr);
6715 AssertRC(rc); NOREF(rc);
6716
6717 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, false);
6718 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) == PAGE_ADDRESS(addr))
6719 {
6720 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6721 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6722 rc = PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6723 if (rc == VWRN_PATCH_REMOVED)
6724 return VINF_SUCCESS;
6725
6726 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6727
6728 if (addr == pPatchRec->patch.pPrivInstrGC)
6729 addr++;
6730 }
6731
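/* Walk any further patches on the same page and refresh those as well. */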
6732 for(;;)
6733 {
6734 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, true);
6735
6736 if (!pPatchRec || PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) != PAGE_ADDRESS(addr))
6737 break;
6738
6739 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
6740 {
6741 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6742 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6743 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6744 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6745 }
6746 addr = pPatchRec->patch.pPrivInstrGC + 1;
6747 }
6748
6749 pVM->patm.s.pvFaultMonitor = 0;
6750 return VINF_SUCCESS;
6751}
6752
6753
6754#ifdef VBOX_WITH_STATISTICS
6755
6756static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch)
6757{
6758 if (pPatch->flags & PATMFL_SYSENTER)
6759 {
6760 return "SYSENT";
6761 }
6762 else
6763 if (pPatch->flags & (PATMFL_TRAPHANDLER|PATMFL_INTHANDLER))
6764 {
6765 static char szTrap[16];
6766 uint32_t iGate;
6767
6768 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
6769 if (iGate < 256)
6770 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-%2X" : "TRAP-%2X", iGate);
6771 else
6772 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-??" : "TRAP-??");
6773 return szTrap;
6774 }
6775 else
6776 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
6777 return "DUPFUNC";
6778 else
6779 if (pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL)
6780 return "FUNCCALL";
6781 else
6782 if (pPatch->flags & PATMFL_TRAMPOLINE)
6783 return "TRAMP";
6784 else
6785 return patmGetInstructionString(pPatch->opcode, pPatch->flags);
6786}
6787
6788static const char *PATMPatchState(PVM pVM, PPATCHINFO pPatch)
6789{
6790 NOREF(pVM);
6791 switch(pPatch->uState)
6792 {
6793 case PATCH_ENABLED:
6794 return "ENA";
6795 case PATCH_DISABLED:
6796 return "DIS";
6797 case PATCH_DIRTY:
6798 return "DIR";
6799 case PATCH_UNUSABLE:
6800 return "UNU";
6801 case PATCH_REFUSED:
6802 return "REF";
6803 case PATCH_DISABLE_PENDING:
6804 return "DIP";
6805 default:
6806 AssertFailed();
6807 return " ";
6808 }
6809}
6810
6811/**
6812 * Resets the sample.
6813 * @param pVM Pointer to the VM.
6814 * @param pvSample The sample registered using STAMR3RegisterCallback.
6815 */
6816static void patmResetStat(PVM pVM, void *pvSample)
6817{
6818 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6819 Assert(pPatch);
6820
6821 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A = 0;
6822 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B = 0;
6823}
6824
6825/**
6826 * Prints the sample into the buffer.
6827 *
6828 * @param pVM Pointer to the VM.
6829 * @param pvSample The sample registered using STAMR3RegisterCallback.
6830 * @param pszBuf The buffer to print into.
6831 * @param cchBuf The size of the buffer.
6832 */
6833static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf)
6834{
6835 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6836 Assert(pPatch);
6837
6838 Assert(pPatch->uState != PATCH_REFUSED);
6839 Assert(!(pPatch->flags & (PATMFL_REPLACE_FUNCTION_CALL|PATMFL_MMIO_ACCESS)));
6840
6841 RTStrPrintf(pszBuf, cchBuf, "size %04x ->%3s %8s - %08d - %08d",
6842 pPatch->cbPatchBlockSize, PATMPatchState(pVM, pPatch), PATMPatchType(pVM, pPatch),
6843 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A, pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B);
6844}
6845
6846/**
6847 * Returns the GC address of the corresponding patch statistics counter
6848 *
6849 * @returns Stat address
6850 * @param pVM Pointer to the VM.
6851 * @param pPatch Patch structure
6852 */
6853RTRCPTR patmPatchQueryStatAddress(PVM pVM, PPATCHINFO pPatch)
6854{
6855 Assert(pPatch->uPatchIdx != PATM_STAT_INDEX_NONE);
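/* Each patch owns one STAMRATIOU32 entry in the GC statistics array; return the GC
 * address of its u32A field. */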
6856 return pVM->patm.s.pStatsGC + sizeof(STAMRATIOU32) * pPatch->uPatchIdx + RT_OFFSETOF(STAMRATIOU32, u32A);
6857}
6858
6859#endif /* VBOX_WITH_STATISTICS */
6860#ifdef VBOX_WITH_DEBUGGER
6861
6862/**
6863 * The '.patmoff' command.
6864 *
6865 * @returns VBox status code.
6866 * @param pCmd Pointer to the command descriptor (as registered).
6867 * @param pCmdHlp Pointer to command helper functions.
6868 * @param pUVM The user mode VM handle.
6869 * @param paArgs Pointer to (readonly) array of arguments.
6870 * @param cArgs Number of arguments in the array.
6871 */
6872static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
6873{
6874 /*
6875 * Validate input.
6876 */
6877 NOREF(cArgs); NOREF(paArgs);
6878 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
6879 PVM pVM = pUVM->pVM;
6880 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
6881
6882 if (HMIsEnabled(pVM))
6883 return DBGCCmdHlpPrintf(pCmdHlp, "PATM is permanently disabled by HM.\n");
6884
6885 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, DisableAllPatches, pVM);
6886 PATMR3AllowPatching(pVM->pUVM, false);
6887 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching disabled\n");
6888}
6889
6890/**
6891 * The '.patmon' command.
6892 *
6893 * @returns VBox status code.
6894 * @param pCmd Pointer to the command descriptor (as registered).
6895 * @param pCmdHlp Pointer to command helper functions.
6896 * @param pUVM The user mode VM handle.
6897 * @param paArgs Pointer to (readonly) array of arguments.
6898 * @param cArgs Number of arguments in the array.
6899 */
6900static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
6901{
6902 /*
6903 * Validate input.
6904 */
6905 NOREF(cArgs); NOREF(paArgs);
6906 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
6907 PVM pVM = pUVM->pVM;
6908 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
6909
6910 if (HMIsEnabled(pVM))
6911 return DBGCCmdHlpPrintf(pCmdHlp, "PATM is permanently disabled by HM.\n");
6912
6913 PATMR3AllowPatching(pVM->pUVM, true);
6914 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, EnableAllPatches, pVM);
6915 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching enabled\n");
6916}
6917
6918#endif /* VBOX_WITH_DEBUGGER */