VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PATM.cpp @ 55228

Last change on this file since 55228 was 54764, checked in by vboxsync, 10 years ago

Added an infix 'ASMFIX' to the PATMA.h fixup types used in the patch template code in PATMA.asm.

1/* $Id: PATM.cpp 54764 2015-03-15 03:25:11Z vboxsync $ */
2/** @file
3 * PATM - Dynamic Guest OS Patching Manager
4 *
5 * @note Never ever reuse patch memory!!
6 */
7
8/*
9 * Copyright (C) 2006-2014 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_PATM
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/stam.h>
26#include <VBox/vmm/pdmapi.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/cpum.h>
29#include <VBox/vmm/cpumdis.h>
30#include <VBox/vmm/iom.h>
31#include <VBox/vmm/mm.h>
32#include <VBox/vmm/em.h>
33#include <VBox/vmm/hm.h>
34#include <VBox/vmm/ssm.h>
35#include <VBox/vmm/trpm.h>
36#include <VBox/vmm/cfgm.h>
37#include <VBox/param.h>
38#include <VBox/vmm/selm.h>
39#include <VBox/vmm/csam.h>
40#include <iprt/avl.h>
41#include "PATMInternal.h"
42#include "PATMPatch.h"
43#include <VBox/vmm/vm.h>
44#include <VBox/vmm/uvm.h>
45#include <VBox/dbg.h>
46#include <VBox/err.h>
47#include <VBox/log.h>
48#include <iprt/assert.h>
49#include <iprt/asm.h>
50#include <VBox/dis.h>
51#include <VBox/disopcode.h>
52#include "internal/pgm.h"
53
54#include <iprt/string.h>
55#include "PATMA.h"
56
57//#define PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
58//#define PATM_DISABLE_ALL
59
60/**
61 * Refresh trampoline patch state.
62 */
63typedef struct PATMREFRESHPATCH
64{
65 /** Pointer to the VM structure. */
66 PVM pVM;
67 /** The trampoline patch record. */
68 PPATCHINFO pPatchTrampoline;
69 /** The new patch we want to jump to. */
70 PPATCHINFO pPatchRec;
71} PATMREFRESHPATCH, *PPATMREFRESHPATCH;
72
73
74#define PATMREAD_RAWCODE 1 /* read code as-is */
75#define PATMREAD_ORGCODE 2 /* read original guest opcode bytes; not the patched bytes */
76#define PATMREAD_NOCHECK 4 /* don't check for patch conflicts */
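/* Editor's note (illustrative, not part of the original source): these flags are
 * passed to the disassembly helpers further down. A hypothetical call such as
 *
 *     patmR3DisInstr(pVM, pPatch, pCurInstrGC, NULL,
 *                    PATMREAD_ORGCODE | PATMREAD_NOCHECK, &CpuState, &cbInstr);
 *
 * would disassemble the original (unpatched) guest bytes while suppressing the
 * strict-build check for overlapping patch jumps inside patmReadBytes(). */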
77
78/*
79 * Private structure used during disassembly
80 */
81typedef struct
82{
83 PVM pVM;
84 PPATCHINFO pPatchInfo;
85 R3PTRTYPE(uint8_t *) pbInstrHC;
86 RTRCPTR pInstrGC;
87 uint32_t fReadFlags;
88} PATMDISASM, *PPATMDISASM;
89
90
91/*******************************************************************************
92* Internal Functions *
93*******************************************************************************/
94
95static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pPatch);
96static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
97static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
98
99#ifdef LOG_ENABLED // keep gcc quiet
100static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC);
101#endif
102#ifdef VBOX_WITH_STATISTICS
103static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch);
104static void patmResetStat(PVM pVM, void *pvSample);
105static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf);
106#endif
107
108#define patmPatchHCPtr2PatchGCPtr(pVM, pHC) (pVM->patm.s.pPatchMemGC + (pHC - pVM->patm.s.pPatchMemHC))
109#define patmPatchGCPtr2PatchHCPtr(pVM, pGC) (pVM->patm.s.pPatchMemHC + (pGC - pVM->patm.s.pPatchMemGC))
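/* Editor's note (illustrative sketch, not part of the original source): the two
 * macros above are plain offset translations between the ring-3 (HC) and raw-mode
 * context (GC) mappings of the patch memory block, so for any byte in patch memory:
 *
 *     uint8_t *pbPatchHC = pVM->patm.s.pPatchMemHC + offSomePatch;    // hypothetical offset
 *     RTRCPTR  GCPtr     = patmPatchHCPtr2PatchGCPtr(pVM, pbPatchHC);
 *     Assert(patmPatchGCPtr2PatchHCPtr(pVM, GCPtr) == pbPatchHC);
 */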
110
111static int patmReinit(PVM pVM);
112static DECLCALLBACK(int) patmR3RelocatePatches(PAVLOU32NODECORE pNode, void *pParam);
113static RTRCPTR patmR3GuestGCPtrToPatchGCPtrSimple(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC);
114static int patmR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch);
115
116#ifdef VBOX_WITH_DEBUGGER
117static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM);
118static FNDBGCCMD patmr3CmdOn;
119static FNDBGCCMD patmr3CmdOff;
120
121/** Command descriptors. */
122static const DBGCCMD g_aCmds[] =
123{
124 /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, fFlags, pfnHandler, pszSyntax, pszDescription */
125 { "patmon", 0, 0, NULL, 0, 0, patmr3CmdOn, "", "Enable patching." },
126 { "patmoff", 0, 0, NULL, 0, 0, patmr3CmdOff, "", "Disable patching." },
127};
128#endif
129
130/* Don't want to break saved states, so put it here as a global variable. */
131static unsigned int cIDTHandlersDisabled = 0;
132
133/**
134 * Initializes the PATM.
135 *
136 * @returns VBox status code.
137 * @param pVM Pointer to the VM.
138 */
139VMMR3_INT_DECL(int) PATMR3Init(PVM pVM)
140{
141 int rc;
142
143 /*
144 * We only need a saved state dummy loader if HM is enabled.
145 */
146 if (HMIsEnabled(pVM))
147 {
148 pVM->fPATMEnabled = false;
149 return SSMR3RegisterStub(pVM, "PATM", 0);
150 }
151
152 /*
153 * Raw-mode.
154 */
155 Log(("PATMR3Init: Patch record size %d\n", sizeof(PATCHINFO)));
156
157 /* These values can't change as they are hardcoded in patch code (old saved states!) */
158 AssertCompile(VMCPU_FF_TIMER == RT_BIT_32(2));
159 AssertCompile(VM_FF_REQUEST == VMCPU_FF_REQUEST);
160 AssertCompile(VMCPU_FF_INTERRUPT_APIC == RT_BIT_32(0));
161 AssertCompile(VMCPU_FF_INTERRUPT_PIC == RT_BIT_32(1));
162
163 AssertReleaseMsg(g_fPatmInterruptFlag == (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST),
164 ("Interrupt flags out of sync!! g_fPatmInterruptFlag=%#x expected %#x. broken assembler?\n", g_fPatmInterruptFlag, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST));
165
166 /* Allocate patch memory and GC patch state memory. */
167 pVM->patm.s.cbPatchMem = PATCH_MEMORY_SIZE;
168 /* Add another page in case the generated code is much larger than expected. */
169 /** @todo bad safety precaution */
170 rc = MMR3HyperAllocOnceNoRel(pVM, PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE + PATM_STAT_MEMSIZE, PAGE_SIZE, MM_TAG_PATM, (void **)&pVM->patm.s.pPatchMemHC);
171 if (RT_FAILURE(rc))
172 {
173 Log(("MMHyperAlloc failed with %Rrc\n", rc));
174 return rc;
175 }
176 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
177
178 /* PATM stack page for call instruction execution. (2 parts: one for our private stack and one to store the original return address.) */
179 pVM->patm.s.pGCStackHC = (RTRCPTR *)(pVM->patm.s.pPatchMemHC + PATCH_MEMORY_SIZE + PAGE_SIZE);
180 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
181
182 patmR3DbgInit(pVM);
183
184 /*
185 * Hypervisor memory for GC status data (read/write)
186 *
187 * Note1: This is non-critical data; if trashed by the guest, then it will only cause problems for itself
188 * Note2: This doesn't really belong here, but we need access to it for relocation purposes
189 *
190 */
191 Assert(sizeof(PATMGCSTATE) < PAGE_SIZE); /* Note: hardcoded dependencies on this exist. */
192 pVM->patm.s.pGCStateHC = (PPATMGCSTATE)((uint8_t *)pVM->patm.s.pGCStackHC + PATM_STACK_TOTAL_SIZE);
193 pVM->patm.s.pGCStateGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
194
195 /* Hypervisor memory for patch statistics */
196 pVM->patm.s.pStatsHC = (PSTAMRATIOU32)((uint8_t *)pVM->patm.s.pGCStateHC + PAGE_SIZE);
197 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
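 /* Editor's note (layout sketch derived from the code above; not part of the
  * original source). The single MMR3HyperAllocOnceNoRel() block is carved up as:
  *
  *     pPatchMemHC  +0                                          patch code       (PATCH_MEMORY_SIZE)
  *                  +PATCH_MEMORY_SIZE                          slack page       (PAGE_SIZE, see @todo above)
  *     pGCStackHC   +PATCH_MEMORY_SIZE + PAGE_SIZE              PATM stack       (PATM_STACK_TOTAL_SIZE)
  *     pGCStateHC   + ... + PATM_STACK_TOTAL_SIZE               GC state         (PAGE_SIZE)
  *     pStatsHC     + ... + PAGE_SIZE                           patch statistics (PATM_STAT_MEMSIZE)
  */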
198
199 /* Memory for patch lookup trees. */
200 rc = MMHyperAlloc(pVM, sizeof(*pVM->patm.s.PatchLookupTreeHC), 0, MM_TAG_PATM, (void **)&pVM->patm.s.PatchLookupTreeHC);
201 AssertRCReturn(rc, rc);
202 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
203
204#ifdef RT_ARCH_AMD64 /* see patmReinit(). */
205 /* Check CFGM option. */
206 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "PATMEnabled", &pVM->fPATMEnabled);
207 if (RT_FAILURE(rc))
208# ifdef PATM_DISABLE_ALL
209 pVM->fPATMEnabled = false;
210# else
211 pVM->fPATMEnabled = true;
212# endif
213#endif
214
215 rc = patmReinit(pVM);
216 AssertRC(rc);
217 if (RT_FAILURE(rc))
218 return rc;
219
220 /*
221 * Register save and load state notifiers.
222 */
223 rc = SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SAVED_STATE_VERSION, sizeof(pVM->patm.s) + PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE,
224 NULL, NULL, NULL,
225 NULL, patmR3Save, NULL,
226 NULL, patmR3Load, NULL);
227 AssertRCReturn(rc, rc);
228
229#ifdef VBOX_WITH_DEBUGGER
230 /*
231 * Debugger commands.
232 */
233 static bool s_fRegisteredCmds = false;
234 if (!s_fRegisteredCmds)
235 {
236 int rc2 = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
237 if (RT_SUCCESS(rc2))
238 s_fRegisteredCmds = true;
239 }
240#endif
241
242#ifdef VBOX_WITH_STATISTICS
243 STAM_REG(pVM, &pVM->patm.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/PATM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
244 STAM_REG(pVM, &pVM->patm.s.StatPATMMemoryUsed,STAMTYPE_COUNTER, "/PATM/MemoryUsed", STAMUNIT_OCCURENCES, "The amount of hypervisor heap used for patches.");
245 STAM_REG(pVM, &pVM->patm.s.StatDisabled, STAMTYPE_COUNTER, "/PATM/Patch/Disabled", STAMUNIT_OCCURENCES, "Number of times patches were disabled.");
246 STAM_REG(pVM, &pVM->patm.s.StatEnabled, STAMTYPE_COUNTER, "/PATM/Patch/Enabled", STAMUNIT_OCCURENCES, "Number of times patches were enabled.");
247 STAM_REG(pVM, &pVM->patm.s.StatDirty, STAMTYPE_COUNTER, "/PATM/Patch/Dirty", STAMUNIT_OCCURENCES, "Number of times patches were marked dirty.");
248 STAM_REG(pVM, &pVM->patm.s.StatUnusable, STAMTYPE_COUNTER, "/PATM/Patch/Unusable", STAMUNIT_OCCURENCES, "Number of unusable patches (conflicts).");
249 STAM_REG(pVM, &pVM->patm.s.StatInstalled, STAMTYPE_COUNTER, "/PATM/Patch/Installed", STAMUNIT_OCCURENCES, "Number of installed patches.");
250 STAM_REG(pVM, &pVM->patm.s.StatInt3Callable, STAMTYPE_COUNTER, "/PATM/Patch/Int3Callable", STAMUNIT_OCCURENCES, "Number of cli patches turned into int3 patches.");
251
252 STAM_REG(pVM, &pVM->patm.s.StatInt3BlockRun, STAMTYPE_COUNTER, "/PATM/Patch/Run/Int3", STAMUNIT_OCCURENCES, "Number of times an int3 block patch was executed.");
253 STAMR3RegisterF(pVM, &pVM->patm.s.pGCStateHC->uPatchCalls, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Patch/Run/Normal");
254
255 STAM_REG(pVM, &pVM->patm.s.StatInstalledFunctionPatches, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Function", STAMUNIT_OCCURENCES, "Number of installed function duplication patches.");
256 STAM_REG(pVM, &pVM->patm.s.StatInstalledTrampoline, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Trampoline", STAMUNIT_OCCURENCES, "Number of installed trampoline patches.");
257 STAM_REG(pVM, &pVM->patm.s.StatInstalledJump, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Jump", STAMUNIT_OCCURENCES, "Number of installed jump patches.");
258
259 STAM_REG(pVM, &pVM->patm.s.StatOverwritten, STAMTYPE_COUNTER, "/PATM/Patch/Overwritten", STAMUNIT_OCCURENCES, "Number of overwritten patches.");
260 STAM_REG(pVM, &pVM->patm.s.StatFixedConflicts,STAMTYPE_COUNTER, "/PATM/Patch/ConflictFixed", STAMUNIT_OCCURENCES, "Number of fixed conflicts.");
261 STAM_REG(pVM, &pVM->patm.s.StatFlushed, STAMTYPE_COUNTER, "/PATM/Patch/Flushed", STAMUNIT_OCCURENCES, "Number of flushes of pages with patch jumps.");
262 STAM_REG(pVM, &pVM->patm.s.StatMonitored, STAMTYPE_COUNTER, "/PATM/Patch/Monitored", STAMUNIT_OCCURENCES, "Number of patches in monitored patch pages.");
263 STAM_REG(pVM, &pVM->patm.s.StatPageBoundaryCrossed, STAMTYPE_COUNTER, "/PATM/Patch/BoundaryCross", STAMUNIT_OCCURENCES, "Number of refused patches due to patch jump crossing page boundary.");
264
265 STAM_REG(pVM, &pVM->patm.s.StatHandleTrap, STAMTYPE_PROFILE, "/PATM/HandleTrap", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3HandleTrap");
266 STAM_REG(pVM, &pVM->patm.s.StatPushTrap, STAMTYPE_COUNTER, "/PATM/HandleTrap/PushWP", STAMUNIT_OCCURENCES, "Number of traps due to monitored stack pages.");
267
268 STAM_REG(pVM, &pVM->patm.s.StatSwitchBack, STAMTYPE_COUNTER, "/PATM/SwitchBack", STAMUNIT_OCCURENCES, "Switch back to original guest code when IF=1 & executing PATM instructions");
269 STAM_REG(pVM, &pVM->patm.s.StatSwitchBackFail,STAMTYPE_COUNTER, "/PATM/SwitchBackFail", STAMUNIT_OCCURENCES, "Failed switch back to original guest code when IF=1 & executing PATM instructions");
270
271 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQFailed, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Failed", STAMUNIT_OCCURENCES, "Nr of failed PATMR3DuplicateFunctionRequest calls");
272 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQSuccess, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Success", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls");
273 STAM_REG(pVM, &pVM->patm.s.StatDuplicateUseExisting,STAMTYPE_COUNTER, "/PATM/Function/DupREQ/UseExist", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls when using an existing patch");
274
275 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupInsert, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Insert", STAMUNIT_OCCURENCES, "Nr of successful function address insertions");
276 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupReplace, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Replace", STAMUNIT_OCCURENCES, "Nr of successful function address replacements");
277 STAM_REG(pVM, &pVM->patm.s.StatU32FunctionMaxSlotsUsed, STAMTYPE_U32_RESET,"/PATM/Function/Lookup/MaxSlots", STAMUNIT_OCCURENCES, "Maximum nr of lookup slots used in all call patches");
278
279 STAM_REG(pVM, &pVM->patm.s.StatFunctionFound, STAMTYPE_COUNTER, "/PATM/Function/Found", STAMUNIT_OCCURENCES, "Nr of successful function patch lookups in GC");
280 STAM_REG(pVM, &pVM->patm.s.StatFunctionNotFound, STAMTYPE_COUNTER, "/PATM/Function/NotFound", STAMUNIT_OCCURENCES, "Nr of failed function patch lookups in GC");
281
282 STAM_REG(pVM, &pVM->patm.s.StatPatchWrite, STAMTYPE_PROFILE, "/PATM/Write/Handle", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3PatchWrite");
283 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteDetect, STAMTYPE_PROFILE, "/PATM/Write/Detect", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMIsWriteToPatchPage");
284 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpreted, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Success", STAMUNIT_OCCURENCES, "Nr of interpreted patch writes.");
285 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpretedFailed, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Failed", STAMUNIT_OCCURENCES, "Nr of failed interpreted patch writes.");
286
287 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshSuccess, STAMTYPE_COUNTER, "/PATM/Refresh/Success", STAMUNIT_OCCURENCES, "Successful patch refreshes");
288 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshFailed, STAMTYPE_COUNTER, "/PATM/Refresh/Failure", STAMUNIT_OCCURENCES, "Failed patch refreshes");
289
290 STAM_REG(pVM, &pVM->patm.s.StatPatchPageInserted, STAMTYPE_COUNTER, "/PATM/Page/Inserted", STAMUNIT_OCCURENCES, "Nr of inserted guest pages that were patched");
291 STAM_REG(pVM, &pVM->patm.s.StatPatchPageRemoved, STAMTYPE_COUNTER, "/PATM/Page/Removed", STAMUNIT_OCCURENCES, "Nr of removed guest pages that were patched");
292
293 STAM_REG(pVM, &pVM->patm.s.StatInstrDirty, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Detected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty.");
294 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyGood, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Corrected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and corrected later on.");
295 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyBad, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Failed", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and we were not able to correct them.");
296
297 STAM_REG(pVM, &pVM->patm.s.StatSysEnter, STAMTYPE_COUNTER, "/PATM/Emul/SysEnter", STAMUNIT_OCCURENCES, "Number of times sysenter was emulated.");
298 STAM_REG(pVM, &pVM->patm.s.StatSysExit, STAMTYPE_COUNTER, "/PATM/Emul/SysExit" , STAMUNIT_OCCURENCES, "Number of times sysexit was emulated.");
299 STAM_REG(pVM, &pVM->patm.s.StatEmulIret, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Success", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
300 STAM_REG(pVM, &pVM->patm.s.StatEmulIretFailed, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Failed", STAMUNIT_OCCURENCES, "Number of times iret emulation failed.");
301
302 STAM_REG(pVM, &pVM->patm.s.StatGenRet, STAMTYPE_COUNTER, "/PATM/Gen/Ret" , STAMUNIT_OCCURENCES, "Number of generated ret instructions.");
303 STAM_REG(pVM, &pVM->patm.s.StatGenRetReused, STAMTYPE_COUNTER, "/PATM/Gen/RetReused" , STAMUNIT_OCCURENCES, "Number of reused ret instructions.");
304 STAM_REG(pVM, &pVM->patm.s.StatGenCall, STAMTYPE_COUNTER, "/PATM/Gen/Call", STAMUNIT_OCCURENCES, "Number of generated call instructions.");
305 STAM_REG(pVM, &pVM->patm.s.StatGenJump, STAMTYPE_COUNTER, "/PATM/Gen/Jmp" , STAMUNIT_OCCURENCES, "Number of generated indirect jump instructions.");
306 STAM_REG(pVM, &pVM->patm.s.StatGenPopf, STAMTYPE_COUNTER, "/PATM/Gen/Popf" , STAMUNIT_OCCURENCES, "Number of generated popf instructions.");
307
308 STAM_REG(pVM, &pVM->patm.s.StatCheckPendingIRQ, STAMTYPE_COUNTER, "/PATM/GC/CheckIRQ" , STAMUNIT_OCCURENCES, "Number of traps that ask to check for pending irqs.");
309#endif /* VBOX_WITH_STATISTICS */
310
311 Log(("g_patmCallRecord.cbFunction %u\n", g_patmCallRecord.cbFunction));
312 Log(("g_patmCallIndirectRecord.cbFunction %u\n", g_patmCallIndirectRecord.cbFunction));
313 Log(("g_patmRetRecord.cbFunction %u\n", g_patmRetRecord.cbFunction));
314 Log(("g_patmJumpIndirectRecord.cbFunction %u\n", g_patmJumpIndirectRecord.cbFunction));
315 Log(("g_patmPopf32Record.cbFunction %u\n", g_patmPopf32Record.cbFunction));
316 Log(("g_patmIretRecord.cbFunction %u\n", g_patmIretRecord.cbFunction));
317 Log(("g_patmStiRecord.cbFunction %u\n", g_patmStiRecord.cbFunction));
318 Log(("g_patmCheckIFRecord.cbFunction %u\n", g_patmCheckIFRecord.cbFunction));
319
320 return rc;
321}
322
323/**
324 * Finalizes HMA page attributes.
325 *
326 * @returns VBox status code.
327 * @param pVM Pointer to the VM.
328 */
329VMMR3_INT_DECL(int) PATMR3InitFinalize(PVM pVM)
330{
331 if (HMIsEnabled(pVM))
332 return VINF_SUCCESS;
333
334 /*
335 * The GC state, stack and statistics must be read/write for the guest
336 * (supervisor only of course).
337 *
338 * Remember, we run guest code at ring-1 and ring-2 levels, which are
339 * considered supervisor levels by the paging structures. We run the VMM
340 * in ring-0 with CR0.WP=0 and mapping all VMM structures as read-only
341 * pages. The following structures are exceptions and must be mapped with
342 * write access so the ring-1 and ring-2 code can modify them.
343 */
344 int rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStateGC, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
345 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to make the GCState accessible to ring-1 and ring-2 code: %Rrc\n", rc), rc);
346
347 rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStackGC, PATM_STACK_TOTAL_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
348 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to make the GCStack accessible to ring-1 and ring-2 code: %Rrc\n", rc), rc);
349
350 rc = PGMMapSetPage(pVM, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
351 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to make the stats struct accessible to ring-1 and ring-2 code: %Rrc\n", rc), rc);
352
353 /*
354 * Find the patch helper segment so we can identify code running there as patch code.
355 */
356 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_PatchHlpBegin", &pVM->patm.s.pbPatchHelpersRC);
357 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to resolve g_PatchHlpBegin: %Rrc\n", rc), rc);
358 pVM->patm.s.pbPatchHelpersR3 = (uint8_t *)MMHyperRCToR3(pVM, pVM->patm.s.pbPatchHelpersRC);
359 AssertLogRelReturn(pVM->patm.s.pbPatchHelpersR3 != NULL, VERR_INTERNAL_ERROR_3);
360
361 RTRCPTR RCPtrEnd;
362 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_PatchHlpEnd", &RCPtrEnd);
363 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to resolve g_PatchHlpEnd: %Rrc\n", rc), rc);
364
365 pVM->patm.s.cbPatchHelpers = RCPtrEnd - pVM->patm.s.pbPatchHelpersRC;
366 AssertLogRelMsgReturn(pVM->patm.s.cbPatchHelpers < _128K,
367 ("%RRv-%RRv => %#x\n", pVM->patm.s.pbPatchHelpersRC, RCPtrEnd, pVM->patm.s.cbPatchHelpers),
368 VERR_INTERNAL_ERROR_4);
369 return rc;
370}
371
372/**
373 * (Re)initializes PATM
374 *
375 * @param pVM The VM.
376 */
377static int patmReinit(PVM pVM)
378{
379 int rc;
380
381 /*
382 * Assert alignment and sizes.
383 */
384 AssertRelease(!(RT_OFFSETOF(VM, patm.s) & 31));
385 AssertRelease(sizeof(pVM->patm.s) <= sizeof(pVM->patm.padding));
386
387 /*
388 * Setup any fixed pointers and offsets.
389 */
390 pVM->patm.s.offVM = RT_OFFSETOF(VM, patm);
391
392#ifndef RT_ARCH_AMD64 /* would be nice if this was changed everywhere. was driving me crazy on AMD64. */
393#ifndef PATM_DISABLE_ALL
394 pVM->fPATMEnabled = true;
395#endif
396#endif
397
398 Assert(pVM->patm.s.pGCStateHC);
399 memset(pVM->patm.s.pGCStateHC, 0, PAGE_SIZE);
400 AssertReleaseMsg(pVM->patm.s.pGCStateGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStateGC));
401
402 Log(("Patch memory allocated at %p - %RRv\n", pVM->patm.s.pPatchMemHC, pVM->patm.s.pPatchMemGC));
403 pVM->patm.s.pGCStateHC->uVMFlags = X86_EFL_IF;
404
405 Assert(pVM->patm.s.pGCStackHC);
406 memset(pVM->patm.s.pGCStackHC, 0, PAGE_SIZE);
407 AssertReleaseMsg(pVM->patm.s.pGCStackGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStackGC));
408 pVM->patm.s.pGCStateHC->Psp = PATM_STACK_SIZE;
409 pVM->patm.s.pGCStateHC->fPIF = 1; /* PATM Interrupt Flag */
410
411 Assert(pVM->patm.s.pStatsHC);
412 memset(pVM->patm.s.pStatsHC, 0, PATM_STAT_MEMSIZE);
413 AssertReleaseMsg(pVM->patm.s.pStatsGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pStatsGC));
414
415 Assert(pVM->patm.s.pPatchMemHC);
416 Assert(pVM->patm.s.pPatchMemGC == MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC));
417 memset(pVM->patm.s.pPatchMemHC, 0, PATCH_MEMORY_SIZE);
418 AssertReleaseMsg(pVM->patm.s.pPatchMemGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pPatchMemHC));
419
420 /* Needed for future patching of sldt/sgdt/sidt/str etc. */
421 pVM->patm.s.pCPUMCtxGC = VM_RC_ADDR(pVM, CPUMQueryGuestCtxPtr(VMMGetCpu(pVM)));
422
423 Assert(pVM->patm.s.PatchLookupTreeHC);
424 Assert(pVM->patm.s.PatchLookupTreeGC == MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC));
425
426 /*
427 * (Re)Initialize PATM structure
428 */
429 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
430 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr);
431 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
432 pVM->patm.s.offPatchMem = 16; /* don't start with zero here */
433 pVM->patm.s.uCurrentPatchIdx = 1; /* Index zero is a dummy */
434 pVM->patm.s.pvFaultMonitor = 0;
435 pVM->patm.s.deltaReloc = 0;
436
437 /* Lowest and highest patched instruction */
438 pVM->patm.s.pPatchedInstrGCLowest = ~0;
439 pVM->patm.s.pPatchedInstrGCHighest = 0;
440
441 pVM->patm.s.PatchLookupTreeHC->PatchTree = 0;
442 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
443 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
444
445 pVM->patm.s.pfnSysEnterPatchGC = 0;
446 pVM->patm.s.pfnSysEnterGC = 0;
447
448 pVM->patm.s.fOutOfMemory = false;
449
450 pVM->patm.s.pfnHelperCallGC = 0;
451 patmR3DbgReset(pVM);
452
453 /* Generate all global functions to be used by future patches. */
454 /* We generate a fake patch in order to use the existing code for relocation. */
455 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pVM->patm.s.pGlobalPatchRec);
456 if (RT_FAILURE(rc))
457 {
458 Log(("Out of memory!!!!\n"));
459 return VERR_NO_MEMORY;
460 }
461 pVM->patm.s.pGlobalPatchRec->patch.flags = PATMFL_GLOBAL_FUNCTIONS;
462 pVM->patm.s.pGlobalPatchRec->patch.uState = PATCH_ENABLED;
463 pVM->patm.s.pGlobalPatchRec->patch.pPatchBlockOffset = pVM->patm.s.offPatchMem;
464
465 rc = patmPatchGenGlobalFunctions(pVM, &pVM->patm.s.pGlobalPatchRec->patch);
466 AssertRC(rc);
467
468 /* Update free pointer in patch memory. */
469 pVM->patm.s.offPatchMem += pVM->patm.s.pGlobalPatchRec->patch.uCurPatchOffset;
470 /* Round to next 8 byte boundary. */
471 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
472
473
474 return rc;
475}
476
477
478/**
479 * Applies relocations to data and code managed by this
480 * component. This function will be called at init and
481 * whenever the VMM needs to relocate itself inside the GC.
482 *
483 * The PATM will update the addresses used by the switcher.
484 *
485 * @param pVM The VM.
486 * @param offDelta The relocation delta.
487 */
488VMMR3_INT_DECL(void) PATMR3Relocate(PVM pVM, RTRCINTPTR offDelta)
489{
490 if (HMIsEnabled(pVM))
491 return;
492
493 RTRCPTR GCPtrNew = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
494 Assert((RTRCINTPTR)(GCPtrNew - pVM->patm.s.pGCStateGC) == offDelta);
495
496 Log(("PATMR3Relocate from %RRv to %RRv - delta %08X\n", pVM->patm.s.pGCStateGC, GCPtrNew, offDelta));
497 if (offDelta)
498 {
499 PCPUMCTX pCtx;
500
501 /* Update CPUMCTX guest context pointer. */
502 pVM->patm.s.pCPUMCtxGC += offDelta;
503
504 pVM->patm.s.deltaReloc = offDelta;
505 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmR3RelocatePatches, (void *)pVM);
506
507 pVM->patm.s.pGCStateGC = GCPtrNew;
508 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
509 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
510 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
511 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
512
513 if (pVM->patm.s.pfnSysEnterPatchGC)
514 pVM->patm.s.pfnSysEnterPatchGC += offDelta;
515
516 /* If we are running patch code right now, then also adjust EIP. */
517 pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));
518 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
519 pCtx->eip += offDelta;
520
521 /* Deal with the global patch functions. */
522 pVM->patm.s.pfnHelperCallGC += offDelta;
523 pVM->patm.s.pfnHelperRetGC += offDelta;
524 pVM->patm.s.pfnHelperIretGC += offDelta;
525 pVM->patm.s.pfnHelperJumpGC += offDelta;
526
527 pVM->patm.s.pbPatchHelpersRC += offDelta;
528
529 patmR3RelocatePatches(&pVM->patm.s.pGlobalPatchRec->Core, (void *)pVM);
530 }
531}
532
533
534/**
535 * Terminates the PATM.
536 *
537 * Termination means cleaning up and freeing all resources;
538 * the VM itself is at this point powered off or suspended.
539 *
540 * @returns VBox status code.
541 * @param pVM Pointer to the VM.
542 */
543VMMR3_INT_DECL(int) PATMR3Term(PVM pVM)
544{
545 if (HMIsEnabled(pVM))
546 return VINF_SUCCESS;
547
548 patmR3DbgTerm(pVM);
549
550 /* Memory was all allocated from the two MM heaps and requires no freeing. */
551 return VINF_SUCCESS;
552}
553
554
555/**
556 * PATM reset callback.
557 *
558 * @returns VBox status code.
559 * @param pVM The VM which is reset.
560 */
561VMMR3_INT_DECL(int) PATMR3Reset(PVM pVM)
562{
563 Log(("PATMR3Reset\n"));
564 if (HMIsEnabled(pVM))
565 return VINF_SUCCESS;
566
567 /* Free all patches. */
568 for (;;)
569 {
570 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32RemoveBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, 0, true);
571 if (pPatchRec)
572 patmR3RemovePatch(pVM, pPatchRec, true);
573 else
574 break;
575 }
576 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
577 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
578 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
579 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
580
581 int rc = patmReinit(pVM);
582 if (RT_SUCCESS(rc))
583 rc = PATMR3InitFinalize(pVM); /* paranoia */
584
585 return rc;
586}
587
588/**
589 * @callback_method_impl{FNDISREADBYTES}
590 */
591static DECLCALLBACK(int) patmReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
592{
593 PATMDISASM *pDisInfo = (PATMDISASM *)pDis->pvUser;
594
595/** @todo change this to read more! */
596 /*
597 * Trap/interrupt handlers typically call common code on entry, which might already have patches inserted.
598 * As we currently don't support calling patch code from patch code, we'll let it read the original opcode bytes instead.
599 */
600 /** @todo could change in the future! */
601 if (pDisInfo->fReadFlags & PATMREAD_ORGCODE)
602 {
603 size_t cbRead = cbMaxRead;
604 RTUINTPTR uSrcAddr = pDis->uInstrAddr + offInstr;
605 int rc = PATMR3ReadOrgInstr(pDisInfo->pVM, pDis->uInstrAddr + offInstr, &pDis->abInstr[offInstr], cbRead, &cbRead);
606 if (RT_SUCCESS(rc))
607 {
608 if (cbRead >= cbMinRead)
609 {
610 pDis->cbCachedInstr = offInstr + (uint8_t)cbRead;
611 return VINF_SUCCESS;
612 }
613
614 cbMinRead -= (uint8_t)cbRead;
615 cbMaxRead -= (uint8_t)cbRead;
616 offInstr += (uint8_t)cbRead;
617 uSrcAddr += cbRead;
618 }
619
620#ifdef VBOX_STRICT
621 if ( !(pDisInfo->pPatchInfo->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER))
622 && !(pDisInfo->fReadFlags & PATMREAD_NOCHECK))
623 {
624 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pDis->uInstrAddr + offInstr, NULL) == false);
625 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pDis->uInstrAddr + offInstr + cbMinRead-1, NULL) == false);
626 }
627#endif
628 }
629
630 int rc = VINF_SUCCESS;
631 RTGCPTR32 uSrcAddr = (RTGCPTR32)pDis->uInstrAddr + offInstr;
632 if ( !pDisInfo->pbInstrHC
633 || ( PAGE_ADDRESS(pDisInfo->pInstrGC) != PAGE_ADDRESS(uSrcAddr + cbMinRead - 1)
634 && !PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr)))
635 {
636 Assert(!PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr));
637 rc = PGMPhysSimpleReadGCPtr(&pDisInfo->pVM->aCpus[0], &pDis->abInstr[offInstr], uSrcAddr, cbMinRead);
638 offInstr += cbMinRead;
639 }
640 else
641 {
642 /*
643 * pbInstrHC is the base address; adjust according to the GC pointer.
644 *
645 * Try read the max number of bytes here. Since the disassembler only
646 * ever uses these bytes for the current instruction, it doesn't matter
647 * much if we accidentally read the start of the next instruction even
648 * if it happens to be a patch jump or int3.
649 */
650 uint8_t const *pbInstrHC = pDisInfo->pbInstrHC; AssertPtr(pbInstrHC);
651 pbInstrHC += uSrcAddr - pDisInfo->pInstrGC;
652
653 size_t cbMaxRead1 = PAGE_SIZE - (uSrcAddr & PAGE_OFFSET_MASK);
654 size_t cbMaxRead2 = PAGE_SIZE - ((uintptr_t)pbInstrHC & PAGE_OFFSET_MASK);
655 size_t cbToRead = RT_MIN(cbMaxRead1, RT_MAX(cbMaxRead2, cbMinRead));
656 if (cbToRead > cbMaxRead)
657 cbToRead = cbMaxRead;
658
659 memcpy(&pDis->abInstr[offInstr], pbInstrHC, cbToRead);
660 offInstr += (uint8_t)cbToRead;
661 }
662
663 pDis->cbCachedInstr = offInstr;
664 return rc;
665}
666
667
668DECLINLINE(bool) patmR3DisInstrToStr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
669 PDISCPUSTATE pCpu, uint32_t *pcbInstr, char *pszOutput, size_t cbOutput)
670{
671 PATMDISASM disinfo;
672 disinfo.pVM = pVM;
673 disinfo.pPatchInfo = pPatch;
674 disinfo.pbInstrHC = pbInstrHC;
675 disinfo.pInstrGC = InstrGCPtr32;
676 disinfo.fReadFlags = fReadFlags;
677 return RT_SUCCESS(DISInstrToStrWithReader(InstrGCPtr32,
678 (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
679 patmReadBytes, &disinfo,
680 pCpu, pcbInstr, pszOutput, cbOutput));
681}
682
683
684DECLINLINE(bool) patmR3DisInstr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
685 PDISCPUSTATE pCpu, uint32_t *pcbInstr)
686{
687 PATMDISASM disinfo;
688 disinfo.pVM = pVM;
689 disinfo.pPatchInfo = pPatch;
690 disinfo.pbInstrHC = pbInstrHC;
691 disinfo.pInstrGC = InstrGCPtr32;
692 disinfo.fReadFlags = fReadFlags;
693 return RT_SUCCESS(DISInstrWithReader(InstrGCPtr32,
694 (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
695 patmReadBytes, &disinfo,
696 pCpu, pcbInstr));
697}
698
699
700DECLINLINE(bool) patmR3DisInstrNoStrOpMode(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC,
701 uint32_t fReadFlags,
702 PDISCPUSTATE pCpu, uint32_t *pcbInstr)
703{
704 PATMDISASM disinfo;
705 disinfo.pVM = pVM;
706 disinfo.pPatchInfo = pPatch;
707 disinfo.pbInstrHC = pbInstrHC;
708 disinfo.pInstrGC = InstrGCPtr32;
709 disinfo.fReadFlags = fReadFlags;
710 return RT_SUCCESS(DISInstrWithReader(InstrGCPtr32, pPatch->uOpMode, patmReadBytes, &disinfo,
711 pCpu, pcbInstr));
712}
713
714#ifdef LOG_ENABLED
715# define PATM_LOG_ORG_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) \
716 PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, PATMREAD_ORGCODE, a_szComment, " patch:")
717# define PATM_LOG_RAW_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) \
718 PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, PATMREAD_RAWCODE, a_szComment, " patch:")
719
720# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) \
721 do { \
722 if (LogIsEnabled()) \
723 patmLogRawPatchInstr(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2); \
724 } while (0)
725
726static void patmLogRawPatchInstr(PVM pVM, PPATCHINFO pPatch, uint32_t fFlags,
727 const char *pszComment1, const char *pszComment2)
728{
729 DISCPUSTATE DisState;
730 char szOutput[128];
731 szOutput[0] = '\0';
732 patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC, NULL, fFlags,
733 &DisState, NULL, szOutput, sizeof(szOutput));
734 Log(("%s%s %s", pszComment1, pszComment2, szOutput));
735}
736
737#else
738# define PATM_LOG_ORG_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) do { } while (0)
739# define PATM_LOG_RAW_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) do { } while (0)
740# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) do { } while (0)
741#endif
742
743
744/**
745 * Callback function for RTAvloU32DoWithAll
746 *
747 * Updates all fixups in the patches
748 *
749 * @returns VBox status code.
750 * @param pNode Current node
751 * @param pParam Pointer to the VM.
752 */
753static DECLCALLBACK(int) patmR3RelocatePatches(PAVLOU32NODECORE pNode, void *pParam)
754{
755 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
756 PVM pVM = (PVM)pParam;
757 RTRCINTPTR delta;
758 int rc;
759
760 /* Nothing to do if the patch is not active. */
761 if (pPatch->patch.uState == PATCH_REFUSED)
762 return 0;
763
764 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
765 PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Org patch jump:", "");
766
767 Log(("Nr of fixups %d\n", pPatch->patch.nrFixups));
768 delta = (RTRCINTPTR)pVM->patm.s.deltaReloc;
769
770 /*
771 * Apply fixups.
772 */
773 AVLPVKEY key = NULL;
774 for (;;)
775 {
776 /* Get the record that's closest from above (after or equal to key). */
777 PRELOCREC pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true);
778 if (!pRec)
779 break;
780
781 key = (uint8_t *)pRec->Core.Key + 1; /* search for the next record during the next round. */
782
783 switch (pRec->uType)
784 {
785 case FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL:
786 Assert(pRec->pDest == pRec->pSource); Assert(PATM_IS_ASMFIX(pRec->pSource));
787 Log(("Absolute patch template fixup type %#x at %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
788 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
789 break;
790
791 case FIXUP_ABSOLUTE:
792 Log(("Absolute fixup at %RRv %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
793 if ( !pRec->pSource
794 || PATMIsPatchGCAddr(pVM, pRec->pSource))
795 {
796 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
797 }
798 else
799 {
800 uint8_t curInstr[15];
801 uint8_t oldInstr[15];
802 Assert(pRec->pSource && pPatch->patch.cbPrivInstr <= 15);
803
804 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
805
806 memcpy(oldInstr, pPatch->patch.aPrivInstr, pPatch->patch.cbPrivInstr);
807 *(RTRCPTR *)&oldInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
808
809 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), curInstr, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPrivInstr);
810 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
811
812 pRec->pDest = (RTRCPTR)((RTRCUINTPTR)pRec->pDest + delta);
813
814 if ( rc == VERR_PAGE_NOT_PRESENT
815 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
816 {
817 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
818
819 Log(("PATM: Patch page not present -> check later!\n"));
820 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
821 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
822 }
823 else
824 if (memcmp(curInstr, oldInstr, pPatch->patch.cbPrivInstr))
825 {
826 Log(("PATM: Patch was overwritten -> disabling patch!!\n"));
827 /*
828 * Disable patch; this is not a good solution
829 */
830 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
831 pPatch->patch.uState = PATCH_DISABLED;
832 }
833 else
834 if (RT_SUCCESS(rc))
835 {
836 *(RTRCPTR *)&curInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
837 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pRec->pSource, curInstr, pPatch->patch.cbPrivInstr);
838 AssertRC(rc);
839 }
840 }
841 break;
842
843 case FIXUP_REL_JMPTOPATCH:
844 {
845 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
846
847 if ( pPatch->patch.uState == PATCH_ENABLED
848 && (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE))
849 {
850 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
851 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
852 RTRCPTR pJumpOffGC;
853 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
854 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
855
856#if 0 /** @todo '*(int32_t*)pRec->pRelocPos' crashes on restore of an XP VM here. pRelocPos=0x8000dbe2180a (bird) */
857 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
858#else
859 Log(("Relative fixup (g2p) ???????? -> %08X at %08X (source=%08x, target=%08x)\n", displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
860#endif
861
862 Assert(pRec->pSource - pPatch->patch.cbPatchJump == pPatch->patch.pPrivInstrGC);
863#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
864 if (pPatch->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
865 {
866 Assert(pPatch->patch.flags & PATMFL_JUMP_CONFLICT);
867
868 pJumpOffGC = pPatch->patch.pPrivInstrGC + 2; //two byte opcode
869 oldJump[0] = pPatch->patch.aPrivInstr[0];
870 oldJump[1] = pPatch->patch.aPrivInstr[1];
871 *(RTRCUINTPTR *)&oldJump[2] = displOld;
872 }
873 else
874#endif
875 if (pPatch->patch.cbPatchJump == SIZEOF_NEARJUMP32)
876 {
877 pJumpOffGC = pPatch->patch.pPrivInstrGC + 1; //one byte opcode
878 oldJump[0] = 0xE9;
879 *(RTRCUINTPTR *)&oldJump[1] = displOld;
880 }
881 else
882 {
883 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->patch.cbPatchJump));
884 continue; //this should never happen!!
885 }
886 Assert(pPatch->patch.cbPatchJump <= sizeof(temp));
887
888 /*
889 * Read old patch jump and compare it to the one we previously installed
890 */
891 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPatchJump);
892 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
893
894 if ( rc == VERR_PAGE_NOT_PRESENT
895 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
896 {
897 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
898
899 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
900 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
901 }
902 else
903 if (memcmp(temp, oldJump, pPatch->patch.cbPatchJump))
904 {
905 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
906 /*
907 * Disable patch; this is not a good solution
908 */
909 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
910 pPatch->patch.uState = PATCH_DISABLED;
911 }
912 else
913 if (RT_SUCCESS(rc))
914 {
915 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
916 AssertRC(rc);
917 }
918 else
919 AssertMsgFailed(("Unexpected error %d from MMR3PhysReadGCVirt\n", rc));
920 }
921 else
922 Log(("Skip the guest jump to patch code for this disabled patch %RGv - %08X\n", pPatch->patch.pPrivInstrGC, pRec->pRelocPos));
923
924 pRec->pDest = pTarget;
925 break;
926 }
927
928 case FIXUP_REL_JMPTOGUEST:
929 {
930 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
931 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
932
933 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
934 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
935 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
936 pRec->pSource = pSource;
937 break;
938 }
939
940 case FIXUP_REL_HELPER_IN_PATCH_ASM_TMPL:
941 case FIXUP_CONSTANT_IN_PATCH_ASM_TMPL:
942 /* Only applicable when loading state. */
943 Assert(pRec->pDest == pRec->pSource);
944 Assert(PATM_IS_ASMFIX(pRec->pSource));
945 break;
946
947 default:
948 AssertMsg(0, ("Invalid fixup type!!\n"));
949 return VERR_INVALID_PARAMETER;
950 }
951 }
952
953 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
954 PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Rel patch jump:", "");
955 return 0;
956}
957
958/**
959 * \#PF Handler callback for virtual access handler ranges.
960 *
961 * Important to realize that a physical page in a range can have aliases, and
962 * for ALL and WRITE handlers these will also trigger.
963 *
964 * @returns VINF_SUCCESS if the handler have carried out the operation.
965 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
966 * @param pVM Pointer to the VM.
967 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
968 * @param pvPtr The HC mapping of that address.
969 * @param pvBuf What the guest is reading/writing.
970 * @param cbBuf How much it's reading/writing.
971 * @param enmAccessType The access type.
972 * @param pvUser User argument.
973 */
974DECLCALLBACK(int) patmVirtPageHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
975 PGMACCESSTYPE enmAccessType, void *pvUser)
976{
977 Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
978 NOREF(pvPtr); NOREF(pvBuf); NOREF(cbBuf); NOREF(pvUser);
979
980 /** @todo could be the wrong virtual address (alias) */
981 pVM->patm.s.pvFaultMonitor = GCPtr;
982 PATMR3HandleMonitoredPage(pVM);
983 return VINF_PGM_HANDLER_DO_DEFAULT;
984}
985
986#ifdef VBOX_WITH_DEBUGGER
987
988/**
989 * Callback function for RTAvloU32DoWithAll
990 *
991 * Enables the patch that's being enumerated
992 *
993 * @returns 0 (continue enumeration).
994 * @param pNode Current node
995 * @param pVM Pointer to the VM.
996 */
997static DECLCALLBACK(int) EnableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
998{
999 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
1000
1001 PATMR3EnablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
1002 return 0;
1003}
1004
1005
1006/**
1007 * Callback function for RTAvloU32DoWithAll
1008 *
1009 * Disables the patch that's being enumerated
1010 *
1011 * @returns 0 (continue enumeration).
1012 * @param pNode Current node
1013 * @param pVM Pointer to the VM.
1014 */
1015static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
1016{
1017 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
1018
1019 PATMR3DisablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
1020 return 0;
1021}
1022
1023#endif /* VBOX_WITH_DEBUGGER */
1024
1025/**
1026 * Returns the host context pointer to the GC state structure.
1027 *
1028 * @returns Host context pointer to the GC state structure, or NULL if HM is active.
1029 * @param pVM Pointer to the VM.
1030 */
1031VMMR3_INT_DECL(PPATMGCSTATE) PATMR3QueryGCStateHC(PVM pVM)
1032{
1033 AssertReturn(!HMIsEnabled(pVM), NULL);
1034 return pVM->patm.s.pGCStateHC;
1035}
1036
1037
1038/**
1039 * Allows or disallows patching of privileged instructions executed by the guest OS.
1040 *
1041 * @returns VBox status code.
1042 * @param pUVM The user mode VM handle.
1043 * @param fAllowPatching Allow/disallow patching
1044 */
1045VMMR3DECL(int) PATMR3AllowPatching(PUVM pUVM, bool fAllowPatching)
1046{
1047 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1048 PVM pVM = pUVM->pVM;
1049 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1050
1051 if (!HMIsEnabled(pVM))
1052 pVM->fPATMEnabled = fAllowPatching;
1053 else
1054 Assert(!pVM->fPATMEnabled);
1055 return VINF_SUCCESS;
1056}
1057
1058
1059/**
1060 * Checks if the patch manager is enabled or not.
1061 *
1062 * @returns true if enabled, false if not (or if invalid handle).
1063 * @param pUVM The user mode VM handle.
1064 */
1065VMMR3DECL(bool) PATMR3IsEnabled(PUVM pUVM)
1066{
1067 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
1068 PVM pVM = pUVM->pVM;
1069 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
1070 return PATMIsEnabled(pVM);
1071}
1072
1073
1074/**
1075 * Convert a GC patch block pointer to a HC patch pointer
1076 *
1077 * @returns HC pointer or NULL if it's not a GC patch pointer
1078 * @param pVM Pointer to the VM.
1079 * @param pAddrGC GC pointer
1080 */
1081VMMR3_INT_DECL(void *) PATMR3GCPtrToHCPtr(PVM pVM, RTRCPTR pAddrGC)
1082{
1083 AssertReturn(!HMIsEnabled(pVM), NULL);
1084 RTRCUINTPTR offPatch = (RTRCUINTPTR)pAddrGC - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC;
1085 if (offPatch >= pVM->patm.s.cbPatchMem)
1086 {
1087 offPatch = (RTRCUINTPTR)pAddrGC - (RTRCUINTPTR)pVM->patm.s.pbPatchHelpersRC;
1088 if (offPatch >= pVM->patm.s.cbPatchHelpers)
1089 return NULL;
1090 return pVM->patm.s.pbPatchHelpersR3 + offPatch;
1091 }
1092 return pVM->patm.s.pPatchMemHC + offPatch;
1093}
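/* Editor's note (illustrative usage, not from the original source): a caller that
 * holds a raw-mode address suspected to lie in patch memory can inspect it from
 * ring-3 like this (pRCPtr is a hypothetical RTRCPTR):
 *
 *     uint8_t *pbPatch = (uint8_t *)PATMR3GCPtrToHCPtr(pVM, pRCPtr);
 *     if (pbPatch)
 *         Log(("first patch byte at %RRv: %#x\n", pRCPtr, *pbPatch));
 */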
1094
1095
1096/**
1097 * Convert guest context address to host context pointer
1098 *
1099 * @returns Host context pointer or NULL in case of an error.
1100 * @param pVM Pointer to the VM.
1101 * @param pCacheRec Address conversion cache record
1102 * @param pGCPtr Guest context pointer
1103 *
1106 */
1107R3PTRTYPE(uint8_t *) patmR3GCVirtToHCVirt(PVM pVM, PPATMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr)
1108{
1109 int rc;
1110 R3PTRTYPE(uint8_t *) pHCPtr;
1111 uint32_t offset;
1112
1113 offset = (RTRCUINTPTR)pGCPtr - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC;
1114 if (offset < pVM->patm.s.cbPatchMem)
1115 {
1116#ifdef VBOX_STRICT
1117 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1118 Assert(pPatch); Assert(offset - pPatch->pPatchBlockOffset < pPatch->cbPatchBlockSize);
1119#endif
1120 return pVM->patm.s.pPatchMemHC + offset;
1121 }
1122 /* Note! We're _not_ including the patch helpers here. */
1123
1124 offset = pGCPtr & PAGE_OFFSET_MASK;
1125 if (pCacheRec->pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
1126 return pCacheRec->pPageLocStartHC + offset;
1127
1128 /* Release previous lock if any. */
1129 if (pCacheRec->Lock.pvMap)
1130 {
1131 PGMPhysReleasePageMappingLock(pVM, &pCacheRec->Lock);
1132 pCacheRec->Lock.pvMap = NULL;
1133 }
1134
1135 rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pGCPtr, (const void **)&pHCPtr, &pCacheRec->Lock);
1136 if (rc != VINF_SUCCESS)
1137 {
1138 AssertMsg(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("MMR3PhysGCVirt2HCVirtEx failed for %08X\n", pGCPtr));
1139 return NULL;
1140 }
1141 pCacheRec->pPageLocStartHC = (R3PTRTYPE(uint8_t*))((RTHCUINTPTR)pHCPtr & PAGE_BASE_HC_MASK);
1142 pCacheRec->pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
1143 return pHCPtr;
1144}
1145
1146
1147/**
1148 * Calculates and fills in all branch targets
1149 *
1150 * @returns VBox status code.
1151 * @param pVM Pointer to the VM.
1152 * @param pPatch Current patch block pointer
1153 *
1154 */
1155static int patmr3SetBranchTargets(PVM pVM, PPATCHINFO pPatch)
1156{
1157 int32_t displ;
1158
1159 PJUMPREC pRec = 0;
1160 unsigned nrJumpRecs = 0;
1161
1162 /*
1163 * Set all branch targets inside the patch block.
1164 * We remove all jump records as they are no longer needed afterwards.
1165 */
1166 while (true)
1167 {
1168 RCPTRTYPE(uint8_t *) pInstrGC;
1169 RCPTRTYPE(uint8_t *) pBranchTargetGC = 0;
1170
1171 pRec = (PJUMPREC)RTAvlPVRemoveBestFit(&pPatch->JumpTree, 0, true);
1172 if (pRec == 0)
1173 break;
1174
1175 nrJumpRecs++;
1176
1177 /* HC in patch block to GC in patch block. */
1178 pInstrGC = patmPatchHCPtr2PatchGCPtr(pVM, pRec->pJumpHC);
1179
1180 if (pRec->opcode == OP_CALL)
1181 {
1182 /* Special case: call function replacement patch from this patch block.
1183 */
1184 PPATMPATCHREC pFunctionRec = patmQueryFunctionPatch(pVM, pRec->pTargetGC);
1185 if (!pFunctionRec)
1186 {
1187 int rc;
1188
1189 if (PATMR3HasBeenPatched(pVM, pRec->pTargetGC) == false)
1190 rc = PATMR3InstallPatch(pVM, pRec->pTargetGC, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
1191 else
1192 rc = VERR_PATCHING_REFUSED; /* exists as a normal patch; can't use it */
1193
1194 if (RT_FAILURE(rc))
1195 {
1196 uint8_t *pPatchHC;
1197 RTRCPTR pPatchGC;
1198 RTRCPTR pOrgInstrGC;
1199
1200 pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pInstrGC, 0);
1201 Assert(pOrgInstrGC);
1202
1203 /* Failure for some reason -> mark exit point with int 3. */
1204 Log(("Failed to install function replacement patch (at %x) for reason %Rrc\n", pOrgInstrGC, rc));
1205
1206 pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pOrgInstrGC);
1207 Assert(pPatchGC);
1208
1209 pPatchHC = pVM->patm.s.pPatchMemHC + (pPatchGC - pVM->patm.s.pPatchMemGC);
1210
1211 /* Set a breakpoint at the very beginning of the recompiled instruction */
1212 *pPatchHC = 0xCC;
1213
1214 continue;
1215 }
1216 }
1217 else
1218 {
1219 Log(("Patch block %RRv called as function\n", pFunctionRec->patch.pPrivInstrGC));
1220 pFunctionRec->patch.flags |= PATMFL_CODE_REFERENCED;
1221 }
1222
1223 pBranchTargetGC = PATMR3QueryPatchGCPtr(pVM, pRec->pTargetGC);
1224 }
1225 else
1226 pBranchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pRec->pTargetGC);
1227
1228 if (pBranchTargetGC == 0)
1229 {
1230 AssertMsgFailed(("patmr3SetBranchTargets: patmGuestGCPtrToPatchGCPtr failed for %08X\n", pRec->pTargetGC));
1231 return VERR_PATCHING_REFUSED;
1232 }
1233 /* Our jumps *always* have a dword displacement (to make things easier). */
1234 Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
1235 displ = pBranchTargetGC - (pInstrGC + pRec->offDispl + sizeof(RTRCPTR));
1236 *(RTRCPTR *)(pRec->pJumpHC + pRec->offDispl) = displ;
1237 Log(("Set branch target %d to %08X : %08x - (%08x + %d + %d)\n", nrJumpRecs, displ, pBranchTargetGC, pInstrGC, pRec->offDispl, sizeof(RTRCPTR)));
1238 }
1239 Assert(nrJumpRecs == pPatch->nrJumpRecs);
1240 Assert(pPatch->JumpTree == 0);
1241 return VINF_SUCCESS;
1242}
1243
1244/**
1245 * Add an illegal instruction record
1246 *
1247 * @param pVM Pointer to the VM.
1248 * @param pPatch Patch structure ptr
1249 * @param pInstrGC Guest context pointer to privileged instruction
1250 *
1251 */
1252static void patmAddIllegalInstrRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1253{
1254 PAVLPVNODECORE pRec;
1255
1256 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1257 Assert(pRec);
1258 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
1259
1260 bool ret = RTAvlPVInsert(&pPatch->pTempInfo->IllegalInstrTree, pRec);
1261 Assert(ret); NOREF(ret);
1262 pPatch->pTempInfo->nrIllegalInstr++;
1263}
1264
1265static bool patmIsIllegalInstr(PPATCHINFO pPatch, RTRCPTR pInstrGC)
1266{
1267 PAVLPVNODECORE pRec;
1268
1269 pRec = RTAvlPVGet(&pPatch->pTempInfo->IllegalInstrTree, (AVLPVKEY)(uintptr_t)pInstrGC);
1270 if (pRec)
1271 return true;
1272 else
1273 return false;
1274}
1275
1276/**
1277 * Add a patch to guest lookup record
1278 *
1279 * @param pVM Pointer to the VM.
1280 * @param pPatch Patch structure ptr
1281 * @param pPatchInstrHC Host context pointer to the patch instruction in the patch block
1282 * @param pInstrGC Guest context pointer to privileged instruction
1283 * @param enmType Lookup type
1284 * @param fDirty Dirty flag
1285 *
1286 * @note Be extremely careful with this function. Make absolutely sure the guest
1287 * address is correct! (to avoid executing instructions twice!)
1288 */
1289void patmR3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC, PATM_LOOKUP_TYPE enmType, bool fDirty)
1290{
1291 bool ret;
1292 PRECPATCHTOGUEST pPatchToGuestRec;
1293 PRECGUESTTOPATCH pGuestToPatchRec;
1294 uint32_t PatchOffset = pPatchInstrHC - pVM->patm.s.pPatchMemHC; /* Offset in memory reserved for PATM. */
1295
1296 LogFlowFunc(("pVM=%#p pPatch=%#p pPatchInstrHC=%#p pInstrGC=%#x enmType=%d fDirty=%RTbool\n",
1297 pVM, pPatch, pPatchInstrHC, pInstrGC, enmType, fDirty));
1298
1299 if (enmType == PATM_LOOKUP_PATCH2GUEST)
1300 {
1301 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1302 if (pPatchToGuestRec && pPatchToGuestRec->Core.Key == PatchOffset)
1303 return; /* already there */
1304
1305 Assert(!pPatchToGuestRec);
1306 }
1307#ifdef VBOX_STRICT
1308 else
1309 {
1310 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1311 Assert(!pPatchToGuestRec);
1312 }
1313#endif
1314
1315 pPatchToGuestRec = (PRECPATCHTOGUEST)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(RECPATCHTOGUEST) + sizeof(RECGUESTTOPATCH));
1316 Assert(pPatchToGuestRec);
1317 pPatchToGuestRec->Core.Key = PatchOffset;
1318 pPatchToGuestRec->pOrgInstrGC = pInstrGC;
1319 pPatchToGuestRec->enmType = enmType;
1320 pPatchToGuestRec->fDirty = fDirty;
1321
1322 ret = RTAvlU32Insert(&pPatch->Patch2GuestAddrTree, &pPatchToGuestRec->Core);
1323 Assert(ret);
1324
1325 /* GC to patch address */
1326 if (enmType == PATM_LOOKUP_BOTHDIR)
1327 {
1328 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
1329 if (!pGuestToPatchRec)
1330 {
1331 pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1332 pGuestToPatchRec->Core.Key = pInstrGC;
1333 pGuestToPatchRec->PatchOffset = PatchOffset;
1334
1335 ret = RTAvlU32Insert(&pPatch->Guest2PatchAddrTree, &pGuestToPatchRec->Core);
1336 Assert(ret);
1337 }
1338 }
1339
1340 pPatch->nrPatch2GuestRecs++;
1341}
1342
1343
1344/**
1345 * Removes a patch to guest lookup record
1346 *
1347 * @param pVM Pointer to the VM.
1348 * @param pPatch Patch structure ptr
1349 * @param pPatchInstrGC Guest context pointer to patch block
1350 */
1351void patmr3RemoveP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pPatchInstrGC)
1352{
1353 PAVLU32NODECORE pNode;
1354 PAVLU32NODECORE pNode2;
1355 PRECPATCHTOGUEST pPatchToGuestRec;
1356 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
1357
1358 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1359 Assert(pPatchToGuestRec);
1360 if (pPatchToGuestRec)
1361 {
1362 if (pPatchToGuestRec->enmType == PATM_LOOKUP_BOTHDIR)
1363 {
1364 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1365
1366 Assert(pGuestToPatchRec->Core.Key);
1367 pNode2 = RTAvlU32Remove(&pPatch->Guest2PatchAddrTree, pGuestToPatchRec->Core.Key);
1368 Assert(pNode2);
1369 }
1370 pNode = RTAvlU32Remove(&pPatch->Patch2GuestAddrTree, pPatchToGuestRec->Core.Key);
1371 Assert(pNode);
1372
1373 MMR3HeapFree(pPatchToGuestRec);
1374 pPatch->nrPatch2GuestRecs--;
1375 }
1376}
1377
1378
1379/**
1380 * RTAvlPVDestroy callback.
1381 */
1382static DECLCALLBACK(int) patmEmptyTreePVCallback(PAVLPVNODECORE pNode, void *)
1383{
1384 MMR3HeapFree(pNode);
1385 return 0;
1386}
1387
1388/**
1389 * Empty the specified tree (PV tree, MMR3 heap)
1390 *
1391 * @param pVM Pointer to the VM.
1392 * @param ppTree Tree to empty
1393 */
1394static void patmEmptyTree(PVM pVM, PAVLPVNODECORE *ppTree)
1395{
1396 NOREF(pVM);
1397 RTAvlPVDestroy(ppTree, patmEmptyTreePVCallback, NULL);
1398}
1399
1400
1401/**
1402 * RTAvlU32Destroy callback.
1403 */
1404static DECLCALLBACK(int) patmEmptyTreeU32Callback(PAVLU32NODECORE pNode, void *)
1405{
1406 MMR3HeapFree(pNode);
1407 return 0;
1408}
1409
1410/**
1411 * Empty the specified tree (U32 tree, MMR3 heap)
1412 *
1413 * @param pVM Pointer to the VM.
1414 * @param ppTree Tree to empty
1415 */
1416static void patmEmptyTreeU32(PVM pVM, PPAVLU32NODECORE ppTree)
1417{
1418 NOREF(pVM);
1419 RTAvlU32Destroy(ppTree, patmEmptyTreeU32Callback, NULL);
1420}
1421
1422
1423/**
1424 * Analyses the instructions following the cli for compliance with our heuristics for cli & pushf
1425 *
1426 * @returns VBox status code.
1427 * @param pVM Pointer to the VM.
1428 * @param pCpu CPU disassembly state
1429 * @param pInstrGC Guest context pointer to privileged instruction
1430 * @param pCurInstrGC Guest context pointer to the current instruction
1431 * @param pCacheRec Cache record ptr
1432 *
1433 */
1434static int patmAnalyseBlockCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1435{
1436 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1437 bool fIllegalInstr = false;
1438
1439 /*
1440 * Preliminary heuristics:
1441 * - no call instructions without a fixed displacement between cli and sti/popf
1442 * - no jumps in the instructions following cli (4+ bytes; enough for the replacement jump (5 bytes))
1443 * - no nested pushf/cli
1444 * - sti/popf should be the (eventual) target of all branches
1445 * - no near or far returns; no int xx, no into
1446 *
1447 * Note: Later on we can impose less strict guidelines if the need arises
1448 */
1449
1450 /* Bail out if the patch gets too big. */
1451 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1452 {
1453 Log(("Code block too big (%x) for patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1454 fIllegalInstr = true;
1455 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1456 }
1457 else
1458 {
1459 /* No unconditional jumps or calls without fixed displacements. */
1460 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1461 && (pCpu->pCurInstr->uOpcode == OP_JMP || pCpu->pCurInstr->uOpcode == OP_CALL)
1462 )
1463 {
1464 Assert(pCpu->Param1.cb <= 4 || pCpu->Param1.cb == 6);
1465 if ( pCpu->Param1.cb == 6 /* far call/jmp */
1466 || (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1467 || (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1468 )
1469 {
1470 fIllegalInstr = true;
1471 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1472 }
1473 }
1474
1475 /* An unconditional (short) jump right after a cli is a potential problem; we will overwrite whichever function comes afterwards */
1476 if (pPatch->opcode == OP_CLI && pCpu->pCurInstr->uOpcode == OP_JMP)
1477 {
1478 if ( pCurInstrGC > pPatch->pPrivInstrGC
1479 && pCurInstrGC + pCpu->cbInstr < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1480 {
1481 Log(("Dangerous unconditional jump ends in our generated patch jump!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1482 /* We turn this one into an int 3 callable patch. */
1483 pPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
1484 }
1485 }
1486 else
1487 /* no nested pushfs just yet; nested cli is allowed for cli patches though. */
1488 if (pPatch->opcode == OP_PUSHF)
1489 {
1490 if (pCurInstrGC != pInstrGC && pCpu->pCurInstr->uOpcode == OP_PUSHF)
1491 {
1492 fIllegalInstr = true;
1493 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1494 }
1495 }
1496
1497 /* no far returns */
1498 if (pCpu->pCurInstr->uOpcode == OP_RETF)
1499 {
1500 pPatch->pTempInfo->nrRetInstr++;
1501 fIllegalInstr = true;
1502 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1503 }
1504 else if ( pCpu->pCurInstr->uOpcode == OP_INT3
1505 || pCpu->pCurInstr->uOpcode == OP_INT
1506 || pCpu->pCurInstr->uOpcode == OP_INTO)
1507 {
1508 /* No int xx or into either. */
1509 fIllegalInstr = true;
1510 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1511 }
1512 }
1513
1514 pPatch->cbPatchBlockSize += pCpu->cbInstr;
1515
1516 /* Illegal instruction -> end of analysis phase for this code block */
1517 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1518 return VINF_SUCCESS;
1519
1520 /* Check for exit points. */
1521 switch (pCpu->pCurInstr->uOpcode)
1522 {
1523 case OP_SYSEXIT:
1524 return VINF_SUCCESS; /* duplicate it; will fault or be emulated in GC. */
1525
1526 case OP_SYSENTER:
1527 case OP_ILLUD2:
1528 /* This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further. */
1529 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1530 return VINF_SUCCESS;
1531
1532 case OP_STI:
1533 case OP_POPF:
1534 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION)));
1535 /* If our exit point lies within the generated patch jump, then we have to refuse!! */
1536 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1537 {
1538 Log(("Exit point within patch jump itself!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1539 return VERR_PATCHING_REFUSED;
1540 }
1541 if (pPatch->opcode == OP_PUSHF)
1542 {
1543 if (pCpu->pCurInstr->uOpcode == OP_POPF)
1544 {
1545 if (pPatch->cbPatchBlockSize >= SIZEOF_NEARJUMP32)
1546 return VINF_SUCCESS;
1547
1548 /* Or else we need to duplicate more instructions, because we can't jump back yet! */
1549 Log(("WARNING: End of block reached, but we need to duplicate some extra instruction to avoid a conflict with the patch jump\n"));
1550 pPatch->flags |= PATMFL_CHECK_SIZE;
1551 }
1552 break; /* sti doesn't mark the end of a pushf block; only popf does. */
1553 }
1554 /* else: fall through. */
1555 case OP_RETN: /* exit point for function replacement */
1556 return VINF_SUCCESS;
1557
1558 case OP_IRET:
1559 return VINF_SUCCESS; /* exitpoint */
1560
1561 case OP_CPUID:
1562 case OP_CALL:
1563 case OP_JMP:
1564 break;
1565
1566#ifdef VBOX_WITH_SAFE_STR /** @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table */
1567 case OP_STR:
1568 break;
1569#endif
1570
1571 default:
1572 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
1573 {
1574 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1575 return VINF_SUCCESS; /* exit point */
1576 }
1577 break;
1578 }
1579
1580 /* If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump. */
1581 if ((pPatch->flags & PATMFL_CHECK_SIZE) && pPatch->cbPatchBlockSize > SIZEOF_NEARJUMP32 && !(pCpu->pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW))
1582 {
1583 /* The end marker for this kind of patch is any instruction at a location outside our patch jump. */
1584 Log(("End of block at %RRv size %d\n", pCurInstrGC, pCpu->cbInstr));
1585 return VINF_SUCCESS;
1586 }
1587
1588 return VWRN_CONTINUE_ANALYSIS;
1589}
1590
1591/**
1592 * Analyses the instructions inside a function for compliance
1593 *
1594 * @returns VBox status code.
1595 * @param pVM Pointer to the VM.
1596 * @param pCpu CPU disassembly state
1597 * @param pInstrGC Guest context pointer to privileged instruction
1598 * @param pCurInstrGC Guest context pointer to the current instruction
1599 * @param pCacheRec Cache record ptr
1600 *
1601 */
1602static int patmAnalyseFunctionCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1603{
1604 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1605 bool fIllegalInstr = false;
1606 NOREF(pInstrGC);
1607
1608 // Preliminary heuristics:
1609 // - no call instructions
1610 // - ret ends a block
1611
1612 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
1613
1614 // bail out if the patch gets too big
1615 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1616 {
1617 Log(("Code block too big (%x) for function patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1618 fIllegalInstr = true;
1619 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1620 }
1621 else
1622 {
1623 // no unconditional jumps or calls without fixed displacements
1624 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1625 && (pCpu->pCurInstr->uOpcode == OP_JMP || pCpu->pCurInstr->uOpcode == OP_CALL)
1626 )
1627 {
1628 Assert(pCpu->Param1.cb <= 4 || pCpu->Param1.cb == 6);
1629 if ( pCpu->Param1.cb == 6 /* far call/jmp */
1630 || (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1631 || (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1632 )
1633 {
1634 fIllegalInstr = true;
1635 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1636 }
1637 }
1638 else /* no far returns */
1639 if (pCpu->pCurInstr->uOpcode == OP_RETF)
1640 {
1641 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1642 fIllegalInstr = true;
1643 }
1644 else /* no int xx or into either */
1645 if (pCpu->pCurInstr->uOpcode == OP_INT3 || pCpu->pCurInstr->uOpcode == OP_INT || pCpu->pCurInstr->uOpcode == OP_INTO)
1646 {
1647 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1648 fIllegalInstr = true;
1649 }
1650
1651 #if 0
1652 ///@todo we can handle certain in/out and privileged instructions in the guest context
1653 if (pCpu->pCurInstr->fOpType & DISOPTYPE_PRIVILEGED && pCpu->pCurInstr->uOpcode != OP_STI)
1654 {
1655 Log(("Illegal instructions for function patch!!\n"));
1656 return VERR_PATCHING_REFUSED;
1657 }
1658 #endif
1659 }
1660
1661 pPatch->cbPatchBlockSize += pCpu->cbInstr;
1662
1663 /* Illegal instruction -> end of analysis phase for this code block */
1664 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1665 {
1666 return VINF_SUCCESS;
1667 }
1668
1669 // Check for exit points
1670 switch (pCpu->pCurInstr->uOpcode)
1671 {
1672 case OP_ILLUD2:
1673 // This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further
1674 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1675 return VINF_SUCCESS;
1676
1677 case OP_IRET:
1678 case OP_SYSEXIT: /* will fault or be emulated in GC */
1679 case OP_RETN:
1680 return VINF_SUCCESS;
1681
1682#ifdef VBOX_WITH_SAFE_STR /** @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table */
1683 case OP_STR:
1684 break;
1685#endif
1686
1687 case OP_POPF:
1688 case OP_STI:
1689 return VWRN_CONTINUE_ANALYSIS;
1690 default:
1691 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
1692 {
1693 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1694 return VINF_SUCCESS; /* exit point */
1695 }
1696 return VWRN_CONTINUE_ANALYSIS;
1697 }
1698
1699 return VWRN_CONTINUE_ANALYSIS;
1700}
1701
1702/**
1703 * Recompiles the instructions in a code block
1704 *
1705 * @returns VBox status code.
1706 * @param pVM Pointer to the VM.
1707 * @param pCpu CPU disassembly state
1708 * @param pInstrGC Guest context pointer to privileged instruction
1709 * @param pCurInstrGC Guest context pointer to the current instruction
1710 * @param pCacheRec Cache record ptr
1711 *
1712 */
1713static int patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1714{
1715 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1716 int rc = VINF_SUCCESS;
1717 bool fInhibitIRQInstr = false; /* did the instruction cause PATMFL_INHIBITIRQS to be set? */
1718
1719 LogFlow(("patmRecompileCallback %RRv %RRv\n", pInstrGC, pCurInstrGC));
1720
1721 if ( patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pCurInstrGC) != 0
1722 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT)) /* do not do this when the next instruction *must* be executed! */
1723 {
1724 /*
1725 * Been there, done that; so insert a jump (we don't want to duplicate code)
1726 * no need to record this instruction as it's glue code that never crashes (it had better not!)
1727 */
1728 Log(("patmRecompileCallback: jump to code we've recompiled before %RRv!\n", pCurInstrGC));
1729 return patmPatchGenRelJump(pVM, pPatch, pCurInstrGC, OP_JMP, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1730 }
1731
1732 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1733 {
1734 rc = patmAnalyseFunctionCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1735 }
1736 else
1737 rc = patmAnalyseBlockCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1738
1739 if (RT_FAILURE(rc))
1740 return rc;
1741
1742 /* Note: Never do a direct return unless a failure is encountered! */
1743
1744 /* Clear recompilation of next instruction flag; we are doing that right here. */
1745 if (pPatch->flags & PATMFL_RECOMPILE_NEXT)
1746 pPatch->flags &= ~PATMFL_RECOMPILE_NEXT;
1747
1748 /* Add lookup record for patch to guest address translation */
1749 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
1750
1751 /* Update lowest and highest instruction address for this patch */
1752 if (pCurInstrGC < pPatch->pInstrGCLowest)
1753 pPatch->pInstrGCLowest = pCurInstrGC;
1754 else
1755 if (pCurInstrGC > pPatch->pInstrGCHighest)
1756 pPatch->pInstrGCHighest = pCurInstrGC + pCpu->cbInstr;
1757
1758 /* Illegal instruction -> end of recompile phase for this code block. */
1759 if (patmIsIllegalInstr(pPatch, pCurInstrGC))
1760 {
1761 Log(("Illegal instruction at %RRv -> mark with int 3\n", pCurInstrGC));
1762 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1763 goto end;
1764 }
1765
1766 /* For our first attempt, we'll handle only simple relative jumps (immediate offset coded in instruction).
1767 * Indirect calls are handled below.
1768 */
1769 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1770 && (pCpu->pCurInstr->uOpcode != OP_CALL || (pPatch->flags & PATMFL_SUPPORT_CALLS))
1771 && (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) == OP_PARM_J))
1772 {
1773 RCPTRTYPE(uint8_t *) pTargetGC = PATMResolveBranch(pCpu, pCurInstrGC);
1774 if (pTargetGC == 0)
1775 {
1776 Log(("We don't support far jumps here!! (%08X)\n", pCpu->Param1.fUse));
1777 return VERR_PATCHING_REFUSED;
1778 }
1779
1780 if (pCpu->pCurInstr->uOpcode == OP_CALL)
1781 {
1782 Assert(!PATMIsPatchGCAddr(pVM, pTargetGC));
1783 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, pTargetGC, false);
1784 if (RT_FAILURE(rc))
1785 goto end;
1786 }
1787 else
1788 rc = patmPatchGenRelJump(pVM, pPatch, pTargetGC, pCpu->pCurInstr->uOpcode, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1789
1790 if (RT_SUCCESS(rc))
1791 rc = VWRN_CONTINUE_RECOMPILE;
1792
1793 goto end;
1794 }
1795
1796 switch (pCpu->pCurInstr->uOpcode)
1797 {
1798 case OP_CLI:
1799 {
1800 /* If a cli is found while duplicating instructions for another patch, then it's of vital importance to continue
1801 * until we've found the proper exit point(s).
1802 */
1803 if ( pCurInstrGC != pInstrGC
1804 && !(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1805 )
1806 {
1807 Log(("cli instruction found in other instruction patch block; force it to continue & find an exit point\n"));
1808 pPatch->flags &= ~(PATMFL_CHECK_SIZE | PATMFL_SINGLE_INSTRUCTION);
1809 }
1810 /* Set by irq inhibition; no longer valid now. */
1811 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1812
1813 rc = patmPatchGenCli(pVM, pPatch);
1814 if (RT_SUCCESS(rc))
1815 rc = VWRN_CONTINUE_RECOMPILE;
1816 break;
1817 }
1818
1819 case OP_MOV:
1820 if (pCpu->pCurInstr->fOpType & DISOPTYPE_POTENTIALLY_DANGEROUS)
1821 {
1822 /* mov ss, src? */
1823 if ( (pCpu->Param1.fUse & DISUSE_REG_SEG)
1824 && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS))
1825 {
1826 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1827 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1828 /** @todo this could cause a fault (ring 0 selector being loaded in ring 1) */
1829 }
1830#if 0 /* necessary for Haiku */
1831 else
1832 if ( (pCpu->Param2.fUse & DISUSE_REG_SEG)
1833 && (pCpu->Param2.Base.idxSegReg == USE_REG_SS)
1834 && (pCpu->Param1.fUse & (DISUSE_REG_GEN32|DISUSE_REG_GEN16))) /** @todo memory operand must in theory be handled too */
1835 {
1836 /* mov GPR, ss */
1837 rc = patmPatchGenMovFromSS(pVM, pPatch, pCpu, pCurInstrGC);
1838 if (RT_SUCCESS(rc))
1839 rc = VWRN_CONTINUE_RECOMPILE;
1840 break;
1841 }
1842#endif
1843 }
1844 goto duplicate_instr;
1845
1846 case OP_POP:
1847 /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS)) */
1848 if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_SS)
1849 {
1850 Assert(pCpu->pCurInstr->fOpType & DISOPTYPE_INHIBIT_IRQS);
1851
1852 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1853 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1854 }
1855 goto duplicate_instr;
1856
1857 case OP_STI:
1858 {
1859 RTRCPTR pNextInstrGC = 0; /* by default no inhibit irq */
1860
1861 /* In a sequence of instructions that inhibit irqs, only the first one actually inhibits irqs. */
1862 if (!(pPatch->flags & PATMFL_INHIBIT_IRQS))
1863 {
1864 pPatch->flags |= PATMFL_INHIBIT_IRQS | PATMFL_GENERATE_JUMPTOGUEST;
1865 fInhibitIRQInstr = true;
1866 pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
1867 Log(("Inhibit irqs for instruction OP_STI at %RRv\n", pCurInstrGC));
1868 }
1869 rc = patmPatchGenSti(pVM, pPatch, pCurInstrGC, pNextInstrGC);
1870
1871 if (RT_SUCCESS(rc))
1872 {
1873 DISCPUSTATE cpu = *pCpu;
1874 unsigned cbInstr;
1875 int disret;
1876 RCPTRTYPE(uint8_t *) pReturnInstrGC;
1877
1878 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1879
1880 pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
1881 { /* Force pNextInstrHC out of scope after using it */
1882 uint8_t *pNextInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
1883 if (pNextInstrHC == NULL)
1884 {
1885 AssertFailed();
1886 return VERR_PATCHING_REFUSED;
1887 }
1888
1889 // Disassemble the next instruction
1890 disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
1891 }
1892 if (disret == false)
1893 {
1894 AssertMsgFailed(("STI: Disassembly failed (probably page not present) -> return to caller\n"));
1895 return VERR_PATCHING_REFUSED;
1896 }
1897 pReturnInstrGC = pNextInstrGC + cbInstr;
1898
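/* Acceptable when this is a function duplication patch, or when the instruction following the sti-protected one starts before or beyond the 5 bytes we overwrite at the patch site; otherwise the sti occurred too soon and the patch is refused below. */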
1899 if ( (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1900 || pReturnInstrGC <= pInstrGC
1901 || pReturnInstrGC - pInstrGC >= SIZEOF_NEARJUMP32
1902 )
1903 {
1904 /* Not an exit point for function duplication patches */
1905 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
1906 && RT_SUCCESS(rc))
1907 {
1908 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST; /* Don't generate a jump back */
1909 rc = VWRN_CONTINUE_RECOMPILE;
1910 }
1911 else
1912 rc = VINF_SUCCESS; //exit point
1913 }
1914 else {
1915 Log(("PATM: sti occurred too soon; refusing patch!\n"));
1916 rc = VERR_PATCHING_REFUSED; //not allowed!!
1917 }
1918 }
1919 break;
1920 }
1921
1922 case OP_POPF:
1923 {
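/* Only jump back to the guest if the instruction after the popf already lies beyond the 5 bytes we overwrite with the patch jump; returning any earlier would land inside that jump. */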
1924 bool fGenerateJmpBack = (pCurInstrGC + pCpu->cbInstr - pInstrGC >= SIZEOF_NEARJUMP32);
1925
1926 /* Not an exit point for IDT handler or function replacement patches */
1927 /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
1928 if (pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_DUPLICATE_FUNCTION))
1929 fGenerateJmpBack = false;
1930
1931 rc = patmPatchGenPopf(pVM, pPatch, pCurInstrGC + pCpu->cbInstr, !!(pCpu->fPrefix & DISPREFIX_OPSIZE), fGenerateJmpBack);
1932 if (RT_SUCCESS(rc))
1933 {
1934 if (fGenerateJmpBack == false)
1935 {
1936 /* Not an exit point for IDT handler or function replacement patches */
1937 rc = VWRN_CONTINUE_RECOMPILE;
1938 }
1939 else
1940 {
1941 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1942 rc = VINF_SUCCESS; /* exit point! */
1943 }
1944 }
1945 break;
1946 }
1947
1948 case OP_PUSHF:
1949 rc = patmPatchGenPushf(pVM, pPatch, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1950 if (RT_SUCCESS(rc))
1951 rc = VWRN_CONTINUE_RECOMPILE;
1952 break;
1953
1954 case OP_PUSH:
1955 /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS)) */
1956 if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_CS)
1957 {
1958 rc = patmPatchGenPushCS(pVM, pPatch);
1959 if (RT_SUCCESS(rc))
1960 rc = VWRN_CONTINUE_RECOMPILE;
1961 break;
1962 }
1963 goto duplicate_instr;
1964
1965 case OP_IRET:
1966 Log(("IRET at %RRv\n", pCurInstrGC));
1967 rc = patmPatchGenIret(pVM, pPatch, pCurInstrGC, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1968 if (RT_SUCCESS(rc))
1969 {
1970 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1971 rc = VINF_SUCCESS; /* exit point by definition */
1972 }
1973 break;
1974
1975 case OP_ILLUD2:
1976 /* This appears to be some kind of kernel panic in Linux 2.4; no point to continue */
1977 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1978 if (RT_SUCCESS(rc))
1979 rc = VINF_SUCCESS; /* exit point by definition */
1980 Log(("Illegal opcode (0xf 0xb)\n"));
1981 break;
1982
1983 case OP_CPUID:
1984 rc = patmPatchGenCpuid(pVM, pPatch, pCurInstrGC);
1985 if (RT_SUCCESS(rc))
1986 rc = VWRN_CONTINUE_RECOMPILE;
1987 break;
1988
1989 case OP_STR:
1990#ifdef VBOX_WITH_SAFE_STR /* @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table and move OP_STR into #ifndef */
1991 /* Now safe because our shadow TR entry is identical to the guest's. */
1992 goto duplicate_instr;
1993#endif
1994 case OP_SLDT:
1995 rc = patmPatchGenSldtStr(pVM, pPatch, pCpu, pCurInstrGC);
1996 if (RT_SUCCESS(rc))
1997 rc = VWRN_CONTINUE_RECOMPILE;
1998 break;
1999
2000 case OP_SGDT:
2001 case OP_SIDT:
2002 rc = patmPatchGenSxDT(pVM, pPatch, pCpu, pCurInstrGC);
2003 if (RT_SUCCESS(rc))
2004 rc = VWRN_CONTINUE_RECOMPILE;
2005 break;
2006
2007 case OP_RETN:
2008 /* retn is an exit point for function patches */
2009 rc = patmPatchGenRet(pVM, pPatch, pCpu, pCurInstrGC);
2010 if (RT_SUCCESS(rc))
2011 rc = VINF_SUCCESS; /* exit point by definition */
2012 break;
2013
2014 case OP_SYSEXIT:
2015 /* Duplicate it, so it can be emulated in GC (or fault). */
2016 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
2017 if (RT_SUCCESS(rc))
2018 rc = VINF_SUCCESS; /* exit point by definition */
2019 break;
2020
2021 case OP_CALL:
2022 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
2023 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
2024 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
2025 */
2026 Assert(pCpu->Param1.cb == 4 || pCpu->Param1.cb == 6);
2027 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->Param1.cb == 4 /* no far calls! */)
2028 {
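/* Indirect call: the real target is only known at run time, so a dummy address (0xDEADBEEF) is passed; presumably the final 'true' argument tells patmPatchGenCall to generate the indirect-call variant. */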
2029 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, (RTRCPTR)0xDEADBEEF, true);
2030 if (RT_SUCCESS(rc))
2031 {
2032 rc = VWRN_CONTINUE_RECOMPILE;
2033 }
2034 break;
2035 }
2036 goto gen_illegal_instr;
2037
2038 case OP_JMP:
2039 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
2040 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
2041 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
2042 */
2043 Assert(pCpu->Param1.cb == 4 || pCpu->Param1.cb == 6);
2044 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->Param1.cb == 4 /* no far jumps! */)
2045 {
2046 rc = patmPatchGenJump(pVM, pPatch, pCpu, pCurInstrGC);
2047 if (RT_SUCCESS(rc))
2048 rc = VINF_SUCCESS; /* end of branch */
2049 break;
2050 }
2051 goto gen_illegal_instr;
2052
2053 case OP_INT3:
2054 case OP_INT:
2055 case OP_INTO:
2056 goto gen_illegal_instr;
2057
2058 case OP_MOV_DR:
2059 /* Note: currently we let DRx writes cause a trap d; our trap handler will decide to interpret it or not. */
2060 if (pCpu->pCurInstr->fParam2 == OP_PARM_Dd)
2061 {
2062 rc = patmPatchGenMovDebug(pVM, pPatch, pCpu);
2063 if (RT_SUCCESS(rc))
2064 rc = VWRN_CONTINUE_RECOMPILE;
2065 break;
2066 }
2067 goto duplicate_instr;
2068
2069 case OP_MOV_CR:
2070 /* Note: currently we let CRx writes cause a trap d; our trap handler will decide to interpret it or not. */
2071 if (pCpu->pCurInstr->fParam2 == OP_PARM_Cd)
2072 {
2073 rc = patmPatchGenMovControl(pVM, pPatch, pCpu);
2074 if (RT_SUCCESS(rc))
2075 rc = VWRN_CONTINUE_RECOMPILE;
2076 break;
2077 }
2078 goto duplicate_instr;
2079
2080 default:
2081 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_CONTROLFLOW | DISOPTYPE_PRIVILEGED_NOTRAP))
2082 {
2083gen_illegal_instr:
2084 rc = patmPatchGenIllegalInstr(pVM, pPatch);
2085 if (RT_SUCCESS(rc))
2086 rc = VINF_SUCCESS; /* exit point by definition */
2087 }
2088 else
2089 {
2090duplicate_instr:
2091 Log(("patmPatchGenDuplicate\n"));
2092 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
2093 if (RT_SUCCESS(rc))
2094 rc = VWRN_CONTINUE_RECOMPILE;
2095 }
2096 break;
2097 }
2098
2099end:
2100
2101 if ( !fInhibitIRQInstr
2102 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2103 {
2104 int rc2;
2105 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
2106
2107 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2108 Log(("Clear inhibit IRQ flag at %RRv\n", pCurInstrGC));
2109 if (pPatch->flags & PATMFL_GENERATE_JUMPTOGUEST)
2110 {
2111 Log(("patmRecompileCallback: generate jump back to guest (%RRv) after fused instruction\n", pNextInstrGC));
2112
2113 rc2 = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2114 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
2115 rc = VINF_SUCCESS; /* end of the line */
2116 }
2117 else
2118 {
2119 rc2 = patmPatchGenClearInhibitIRQ(pVM, pPatch, pNextInstrGC);
2120 }
2121 if (RT_FAILURE(rc2))
2122 rc = rc2;
2123 }
2124
2125 if (RT_SUCCESS(rc))
2126 {
2127 // If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
2128 if ( (pPatch->flags & PATMFL_CHECK_SIZE)
2129 && pCurInstrGC + pCpu->cbInstr - pInstrGC >= SIZEOF_NEARJUMP32
2130 && !(pCpu->pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW)
2131 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT) /* do not do this when the next instruction *must* be executed! */
2132 )
2133 {
2134 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
2135
2136 // The end marker for this kind of patch is any instruction at a location outside our patch jump
2137 Log(("patmRecompileCallback: end found for single instruction patch at %RRv cbInstr %d\n", pNextInstrGC, pCpu->cbInstr));
2138
2139 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC);
2140 AssertRC(rc);
2141 }
2142 }
2143 return rc;
2144}
2145
2146
2147#ifdef LOG_ENABLED
2148
2149/**
2150 * Add a disasm jump record (temporary, to prevent duplicate analysis)
2151 *
2152 * @param pVM Pointer to the VM.
2153 * @param pPatch Patch structure ptr
2154 * @param pInstrGC Guest context pointer to privileged instruction
2155 *
2156 */
2157static void patmPatchAddDisasmJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
2158{
2159 PAVLPVNODECORE pRec;
2160
2161 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
2162 Assert(pRec);
2163 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
2164
2165 int ret = RTAvlPVInsert(&pPatch->pTempInfo->DisasmJumpTree, pRec);
2166 Assert(ret);
2167}
2168
2169/**
2170 * Checks if jump target has been analysed before.
2171 *
2172 * @returns true if the jump target has already been analysed, false otherwise.
2173 * @param pPatch Patch struct
2174 * @param pInstrGC Jump target
2175 *
2176 */
2177static bool patmIsKnownDisasmJump(PPATCHINFO pPatch, RTRCPTR pInstrGC)
2178{
2179 PAVLPVNODECORE pRec;
2180
2181 pRec = RTAvlPVGet(&pPatch->pTempInfo->DisasmJumpTree, (AVLPVKEY)(uintptr_t)pInstrGC);
2182 if (pRec)
2183 return true;
2184 return false;
2185}
2186
2187/**
2188 * For proper disassembly of the final patch block
2189 *
2190 * @returns VBox status code.
2191 * @param pVM Pointer to the VM.
2192 * @param pCpu CPU disassembly state
2193 * @param pInstrGC Guest context pointer to privileged instruction
2194 * @param pCurInstrGC Guest context pointer to the current instruction
2195 * @param pCacheRec Cache record ptr
2196 *
2197 */
2198int patmr3DisasmCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
2199{
2200 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2201 NOREF(pInstrGC);
2202
2203 if (pCpu->pCurInstr->uOpcode == OP_INT3)
2204 {
2205 /* Could be an int3 inserted in a call patch. Check to be sure */
2206 DISCPUSTATE cpu;
2207 RTRCPTR pOrgJumpGC;
2208
2209 pOrgJumpGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2210
2211 { /* Force pOrgJumpHC out of scope after using it */
2212 uint8_t *pOrgJumpHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pOrgJumpGC);
2213
2214 bool disret = patmR3DisInstr(pVM, pPatch, pOrgJumpGC, pOrgJumpHC, PATMREAD_ORGCODE, &cpu, NULL);
2215 if (!disret || cpu.pCurInstr->uOpcode != OP_CALL || cpu.Param1.cb != 4 /* only near calls */)
2216 return VINF_SUCCESS;
2217 }
2218 return VWRN_CONTINUE_ANALYSIS;
2219 }
2220
2221 if ( pCpu->pCurInstr->uOpcode == OP_ILLUD2
2222 && PATMIsPatchGCAddr(pVM, pCurInstrGC))
2223 {
2224 /* the indirect call patch contains a 0xF/0xB illegal instruction to call for assistance; check for this and continue */
2225 return VWRN_CONTINUE_ANALYSIS;
2226 }
2227
2228 if ( (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
2229 || pCpu->pCurInstr->uOpcode == OP_INT
2230 || pCpu->pCurInstr->uOpcode == OP_IRET
2231 || pCpu->pCurInstr->uOpcode == OP_RETN
2232 || pCpu->pCurInstr->uOpcode == OP_RETF
2233 )
2234 {
2235 return VINF_SUCCESS;
2236 }
2237
2238 if (pCpu->pCurInstr->uOpcode == OP_ILLUD2)
2239 return VINF_SUCCESS;
2240
2241 return VWRN_CONTINUE_ANALYSIS;
2242}
2243
2244
2245/**
2246 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2247 *
2248 * @returns VBox status code.
2249 * @param pVM Pointer to the VM.
2250 * @param pInstrGC Guest context pointer to the initial privileged instruction
2251 * @param pCurInstrGC Guest context pointer to the current instruction
2252 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2253 * @param pCacheRec Cache record ptr
2254 *
2255 */
2256int patmr3DisasmCode(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2257{
2258 DISCPUSTATE cpu;
2259 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2260 int rc = VWRN_CONTINUE_ANALYSIS;
2261 uint32_t cbInstr, delta;
2262 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2263 bool disret;
2264 char szOutput[256];
2265
2266 Assert(pCurInstrHC != PATCHCODE_PTR_HC(pPatch) || pPatch->pTempInfo->DisasmJumpTree == 0);
2267
2268 /* We need this to determine branch targets (and for disassembling). */
2269 delta = pVM->patm.s.pPatchMemGC - (uintptr_t)pVM->patm.s.pPatchMemHC;
2270
2271 while (rc == VWRN_CONTINUE_ANALYSIS)
2272 {
2273 pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2274 if (pCurInstrHC == NULL)
2275 {
2276 rc = VERR_PATCHING_REFUSED;
2277 goto end;
2278 }
2279
2280 disret = patmR3DisInstrToStr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_RAWCODE,
2281 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2282 if (PATMIsPatchGCAddr(pVM, pCurInstrGC))
2283 {
2284 RTRCPTR pOrgInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2285
2286 if (pOrgInstrGC != pPatch->pTempInfo->pLastDisasmInstrGC)
2287 Log(("DIS %RRv<-%s", pOrgInstrGC, szOutput));
2288 else
2289 Log(("DIS %s", szOutput));
2290
2291 pPatch->pTempInfo->pLastDisasmInstrGC = pOrgInstrGC;
2292 if (patmIsIllegalInstr(pPatch, pOrgInstrGC))
2293 {
2294 rc = VINF_SUCCESS;
2295 goto end;
2296 }
2297 }
2298 else
2299 Log(("DIS: %s", szOutput));
2300
2301 if (disret == false)
2302 {
2303 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2304 rc = VINF_SUCCESS;
2305 goto end;
2306 }
2307
2308 rc = pfnPATMR3Disasm(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2309 if (rc != VWRN_CONTINUE_ANALYSIS) {
2310 break; //done!
2311 }
2312
2313 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction) */
2314 if ( (cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2315 && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J)
2316 && cpu.pCurInstr->uOpcode != OP_CALL /* complete functions are replaced; don't bother here. */
2317 )
2318 {
2319 RTRCPTR pTargetGC = PATMResolveBranch(&cpu, pCurInstrGC);
2320 RTRCPTR pOrgTargetGC;
2321
2322 if (pTargetGC == 0)
2323 {
2324 Log(("We don't support far jumps here!! (%08X)\n", cpu.Param1.fUse));
2325 rc = VERR_PATCHING_REFUSED;
2326 break;
2327 }
2328
2329 if (!PATMIsPatchGCAddr(pVM, pTargetGC))
2330 {
2331 //jump back to guest code
2332 rc = VINF_SUCCESS;
2333 goto end;
2334 }
2335 pOrgTargetGC = PATMR3PatchToGCPtr(pVM, pTargetGC, 0);
2336
2337 if (patmIsCommonIDTHandlerPatch(pVM, pOrgTargetGC))
2338 {
2339 rc = VINF_SUCCESS;
2340 goto end;
2341 }
2342
2343 if (patmIsKnownDisasmJump(pPatch, pTargetGC) == false)
2344 {
2345 /* New jump, let's check it. */
2346 patmPatchAddDisasmJump(pVM, pPatch, pTargetGC);
2347
2348 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2349 rc = patmr3DisasmCode(pVM, pInstrGC, pTargetGC, pfnPATMR3Disasm, pCacheRec);
2350 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2351
2352 if (rc != VINF_SUCCESS) {
2353 break; //done!
2354 }
2355 }
2356 if (cpu.pCurInstr->uOpcode == OP_JMP)
2357 {
2358 /* Unconditional jump; return to caller. */
2359 rc = VINF_SUCCESS;
2360 goto end;
2361 }
2362
2363 rc = VWRN_CONTINUE_ANALYSIS;
2364 }
2365 pCurInstrGC += cbInstr;
2366 }
2367end:
2368 return rc;
2369}
2370
2371/**
2372 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2373 *
2374 * @returns VBox status code.
2375 * @param pVM Pointer to the VM.
2376 * @param pInstrGC Guest context pointer to the initial privileged instruction
2377 * @param pCurInstrGC Guest context pointer to the current instruction
2378 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2379 * @param pCacheRec Cache record ptr
2380 *
2381 */
2382int patmr3DisasmCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2383{
2384 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2385
2386 int rc = patmr3DisasmCode(pVM, pInstrGC, pCurInstrGC, pfnPATMR3Disasm, pCacheRec);
2387 /* Free all disasm jump records. */
2388 patmEmptyTree(pVM, &pPatch->pTempInfo->DisasmJumpTree);
2389 return rc;
2390}
2391
2392#endif /* LOG_ENABLED */
2393
2394/**
2395 * Detects whether the specified address falls within a 5 byte jump generated for an active patch.
2396 * If so, this patch is permanently disabled.
2397 *
2398 * @param pVM Pointer to the VM.
2399 * @param pInstrGC Guest context pointer to instruction
2400 * @param pConflictGC Guest context pointer to check
2401 *
2402 * @note also checks for patch hints to make sure they can never be enabled if a conflict is present.
2403 *
2404 */
2405VMMR3_INT_DECL(int) PATMR3DetectConflict(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictGC)
2406{
2407 AssertReturn(!HMIsEnabled(pVM), VERR_PATCH_NO_CONFLICT);
2408 PPATCHINFO pTargetPatch = patmFindActivePatchByEntrypoint(pVM, pConflictGC, true /* include patch hints */);
2409 if (pTargetPatch)
2410 return patmDisableUnusablePatch(pVM, pInstrGC, pConflictGC, pTargetPatch);
2411 return VERR_PATCH_NO_CONFLICT;
2412}
2413
2414/**
2415 * Recompile the code stream until the callback function detects a failure or decides everything is acceptable
2416 *
2417 * @returns VBox status code.
2418 * @param pVM Pointer to the VM.
2419 * @param pInstrGC Guest context pointer to privileged instruction
2420 * @param pCurInstrGC Guest context pointer to the current instruction
2421 * @param pfnPATMR3Recompile Callback for testing the disassembled instruction
2422 * @param pCacheRec Cache record ptr
2423 *
2424 */
2425static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Recompile, PPATMP2GLOOKUPREC pCacheRec)
2426{
2427 DISCPUSTATE cpu;
2428 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2429 int rc = VWRN_CONTINUE_ANALYSIS;
2430 uint32_t cbInstr;
2431 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2432 bool disret;
2433#ifdef LOG_ENABLED
2434 char szOutput[256];
2435#endif
2436
2437 while (rc == VWRN_CONTINUE_RECOMPILE)
2438 {
2439 pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2440 if (pCurInstrHC == NULL)
2441 {
2442 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2443 goto end;
2444 }
2445#ifdef LOG_ENABLED
2446 disret = patmR3DisInstrToStr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE,
2447 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2448 Log(("Recompile: %s", szOutput));
2449#else
2450 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
2451#endif
2452 if (disret == false)
2453 {
2454 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2455
2456 /* Add lookup record for patch to guest address translation */
2457 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
2458 patmPatchGenIllegalInstr(pVM, pPatch);
2459 rc = VINF_SUCCESS; /* Note: don't fail here; we might refuse an important patch!! */
2460 goto end;
2461 }
2462
2463 rc = pfnPATMR3Recompile(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2464 if (rc != VWRN_CONTINUE_RECOMPILE)
2465 {
2466 /* If irqs are inhibited because of the current instruction, then we must make sure the next one is executed! */
2467 if ( rc == VINF_SUCCESS
2468 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2469 {
2470 DISCPUSTATE cpunext;
2471 uint32_t opsizenext;
2472 uint8_t *pNextInstrHC;
2473 RTRCPTR pNextInstrGC = pCurInstrGC + cbInstr;
2474
2475 Log(("patmRecompileCodeStream: irqs inhibited by instruction %RRv\n", pNextInstrGC));
2476
2477 /* Certain instructions (e.g. sti) force the next instruction to be executed before any interrupts can occur.
2478 * Recompile the next instruction as well
2479 */
2480 pNextInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
2481 if (pNextInstrHC == NULL)
2482 {
2483 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2484 goto end;
2485 }
2486 disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpunext, &opsizenext);
2487 if (disret == false)
2488 {
2489 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2490 goto end;
2491 }
2492 switch(cpunext.pCurInstr->uOpcode)
2493 {
2494 case OP_IRET: /* inhibit cleared in generated code */
2495 case OP_SYSEXIT: /* faults; inhibit should be cleared in HC handling */
2496 case OP_HLT:
2497 break; /* recompile these */
2498
2499 default:
2500 if (cpunext.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2501 {
2502 Log(("Unexpected control flow instruction after inhibit irq instruction\n"));
2503
2504 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2505 AssertRC(rc);
2506 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2507 goto end; /** @todo should be ok to ignore instruction fusing in this case */
2508 }
2509 break;
2510 }
2511
2512 /* Note: after a cli we must continue to a proper exit point */
2513 if (cpunext.pCurInstr->uOpcode != OP_CLI)
2514 {
2515 rc = pfnPATMR3Recompile(pVM, &cpunext, pInstrGC, pNextInstrGC, pCacheRec);
2516 if (RT_SUCCESS(rc))
2517 {
2518 rc = VINF_SUCCESS;
2519 goto end;
2520 }
2521 break;
2522 }
2523 else
2524 rc = VWRN_CONTINUE_RECOMPILE;
2525 }
2526 else
2527 break; /* done! */
2528 }
2529
2530 /** @todo continue with the instructions following the jump and then recompile the jump target code */
2531
2532
2533 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction). */
2534 if ( (cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2535 && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J)
2536 && cpu.pCurInstr->uOpcode != OP_CALL /* complete functions are replaced; don't bother here. */
2537 )
2538 {
2539 RCPTRTYPE(uint8_t *) addr = PATMResolveBranch(&cpu, pCurInstrGC);
2540 if (addr == 0)
2541 {
2542 Log(("We don't support far jumps here!! (%08X)\n", cpu.Param1.fUse));
2543 rc = VERR_PATCHING_REFUSED;
2544 break;
2545 }
2546
2547 Log(("Jump encountered target %RRv\n", addr));
2548
2549 /* We don't check if the branch target lies in a valid page as we've already done that in the analysis phase. */
2550 if (!(cpu.pCurInstr->fOpType & DISOPTYPE_UNCOND_CONTROLFLOW))
2551 {
2552 Log(("patmRecompileCodeStream continue passed conditional jump\n"));
2553 /* First we need to finish this linear code stream until the next exit point. */
2554 rc = patmRecompileCodeStream(pVM, pInstrGC, pCurInstrGC+cbInstr, pfnPATMR3Recompile, pCacheRec);
2555 if (RT_FAILURE(rc))
2556 {
2557 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2558 break; //fatal error
2559 }
2560 }
2561
2562 if (patmGuestGCPtrToPatchGCPtr(pVM, pPatch, addr) == 0)
2563 {
2564 /* New code; let's recompile it. */
2565 Log(("patmRecompileCodeStream continue with jump\n"));
2566
2567 /*
2568 * If we are jumping to an existing patch (or within 5 bytes of the entrypoint), then we must temporarily disable
2569 * this patch so we can continue our analysis
2570 *
2571 * We rely on CSAM to detect and resolve conflicts
2572 */
2573 PPATCHINFO pTargetPatch = patmFindActivePatchByEntrypoint(pVM, addr);
2574 if(pTargetPatch)
2575 {
2576 Log(("Found active patch at target %RRv (%RRv) -> temporarily disabling it!!\n", addr, pTargetPatch->pPrivInstrGC));
2577 PATMR3DisablePatch(pVM, pTargetPatch->pPrivInstrGC);
2578 }
2579
2580 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2581 rc = patmRecompileCodeStream(pVM, pInstrGC, addr, pfnPATMR3Recompile, pCacheRec);
2582 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2583
2584 if(pTargetPatch)
2585 PATMR3EnablePatch(pVM, pTargetPatch->pPrivInstrGC);
2586
2587 if (RT_FAILURE(rc))
2588 {
2589 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2590 break; //done!
2591 }
2592 }
2593 /* Always return to caller here; we're done! */
2594 rc = VINF_SUCCESS;
2595 goto end;
2596 }
2597 else
2598 if (cpu.pCurInstr->fOpType & DISOPTYPE_UNCOND_CONTROLFLOW)
2599 {
2600 rc = VINF_SUCCESS;
2601 goto end;
2602 }
2603 pCurInstrGC += cbInstr;
2604 }
2605end:
2606 Assert(!(pPatch->flags & PATMFL_RECOMPILE_NEXT));
2607 return rc;
2608}
2609
2610
2611/**
2612 * Generate the jump from guest to patch code
2613 *
2614 * @returns VBox status code.
2615 * @param pVM Pointer to the VM.
2616 * @param pPatch Patch record
2617 * @param pCacheRec Guest translation lookup cache record
2618 */
2619static int patmGenJumpToPatch(PVM pVM, PPATCHINFO pPatch, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2620{
2621 uint8_t temp[8];
2622 uint8_t *pPB;
2623 int rc;
2624
2625 Assert(pPatch->cbPatchJump <= sizeof(temp));
2626 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
2627
2628 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2629 Assert(pPB);
2630
2631#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
2632 if (pPatch->flags & PATMFL_JUMP_CONFLICT)
2633 {
2634 Assert(pPatch->pPatchJumpDestGC);
2635
2636 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
2637 {
2638 // jmp [PatchCode]
2639 if (fAddFixup)
2640 {
2641 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump,
2642 pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2643 {
2644 Log(("Relocation failed for the jump in the guest code!!\n"));
2645 return VERR_PATCHING_REFUSED;
2646 }
2647 }
2648
2649 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2650 *(uint32_t *)&temp[1] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
2651 }
2652 else
2653 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
2654 {
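/* Six byte patch jump: presumably a two-byte opcode (0x0F 0x8x near conditional jump) followed by a rel32, so both the fixup and the displacement start at offset 2. */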
2655 // jmp [PatchCode]
2656 if (fAddFixup)
2657 {
2658 if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump,
2659 pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2660 {
2661 Log(("Relocation failed for the jump in the guest code!!\n"));
2662 return VERR_PATCHING_REFUSED;
2663 }
2664 }
2665
2666 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2667 temp[1] = pPatch->aPrivInstr[1]; //jump opcode copied from original instruction
2668 *(uint32_t *)&temp[2] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
2669 }
2670 else
2671 {
2672 Assert(0);
2673 return VERR_PATCHING_REFUSED;
2674 }
2675 }
2676 else
2677#endif
2678 {
2679 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2680
2681 // jmp [PatchCode]
2682 if (fAddFixup)
2683 {
2684 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32,
2685 PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
2686 {
2687 Log(("Relocation failed for the jump in the guest code!!\n"));
2688 return VERR_PATCHING_REFUSED;
2689 }
2690 }
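/* Emit a near jmp rel32; the displacement is measured from the first byte after the 5-byte jump (pPrivInstrGC + SIZEOF_NEARJUMP32). */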
2691 temp[0] = 0xE9; //jmp
2692 *(uint32_t *)&temp[1] = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
2693 }
2694 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2695 AssertRC(rc);
2696
2697 if (rc == VINF_SUCCESS)
2698 pPatch->flags |= PATMFL_PATCHED_GUEST_CODE;
2699
2700 return rc;
2701}
2702
2703/**
2704 * Remove the jump from guest to patch code
2705 *
2706 * @returns VBox status code.
2707 * @param pVM Pointer to the VM.
2708 * @param pPatch Patch record
2709 */
2710static int patmRemoveJumpToPatch(PVM pVM, PPATCHINFO pPatch)
2711{
2712#ifdef DEBUG
2713 DISCPUSTATE cpu;
2714 char szOutput[256];
2715 uint32_t cbInstr, i = 0;
2716 bool disret;
2717
2718 while (i < pPatch->cbPrivInstr)
2719 {
2720 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
2721 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2722 if (disret == false)
2723 break;
2724
2725 Log(("Org patch jump: %s", szOutput));
2726 Assert(cbInstr);
2727 i += cbInstr;
2728 }
2729#endif
2730
2731 /* Restore original code (privileged instruction + following instructions that were overwritten because of the 5/6 byte jmp). */
2732 int rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, pPatch->cbPatchJump);
2733#ifdef DEBUG
2734 if (rc == VINF_SUCCESS)
2735 {
2736 i = 0;
2737 while (i < pPatch->cbPrivInstr)
2738 {
2739 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
2740 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2741 if (disret == false)
2742 break;
2743
2744 Log(("Org instr: %s", szOutput));
2745 Assert(cbInstr);
2746 i += cbInstr;
2747 }
2748 }
2749#endif
2750 pPatch->flags &= ~PATMFL_PATCHED_GUEST_CODE;
2751 return rc;
2752}
2753
2754/**
2755 * Generate the call from guest to patch code
2756 *
2757 * @returns VBox status code.
2758 * @param pVM Pointer to the VM.
2759 * @param pPatch Patch record
2760 * @param pTargetGC Guest context address of the target patch code
2761 * @param pCacheRec Guest translation cache record
2762 */
2763static int patmGenCallToPatch(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTargetGC, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2764{
2765 uint8_t temp[8];
2766 uint8_t *pPB;
2767 int rc;
2768
2769 Assert(pPatch->cbPatchJump <= sizeof(temp));
2770
2771 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2772 Assert(pPB);
2773
2774 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2775
2776 // jmp [PatchCode]
2777 if (fAddFixup)
2778 {
2779 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH,
2780 pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
2781 {
2782 Log(("Relocation failed for the jump in the guest code!!\n"));
2783 return VERR_PATCHING_REFUSED;
2784 }
2785 }
2786
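/* Reuse the original near call/jmp opcode byte and only rewrite its rel32 displacement so it now lands in the target patch code. */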
2787 Assert(pPatch->aPrivInstr[0] == 0xE8 || pPatch->aPrivInstr[0] == 0xE9); /* call or jmp */
2788 temp[0] = pPatch->aPrivInstr[0];
2789 *(uint32_t *)&temp[1] = (uint32_t)pTargetGC - ((uint32_t)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
2790
2791 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2792 AssertRC(rc);
2793
2794 return rc;
2795}
2796
2797
2798/**
2799 * Patch cli/sti pushf/popf instruction block at specified location
2800 *
2801 * @returns VBox status code.
2802 * @param pVM Pointer to the VM.
2803 * @param pInstrGC Guest context point to privileged instruction
2804 * @param pInstrHC Host context point to privileged instruction
2805 * @param uOpcode Instruction opcode
2806 * @param uOpSize Size of starting instruction
2807 * @param pPatchRec Patch record
2808 *
2809 * @note returns failure if patching is not allowed or possible
2810 *
2811 */
2812static int patmR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2813 uint32_t uOpcode, uint32_t uOpSize, PPATMPATCHREC pPatchRec)
2814{
2815 PPATCHINFO pPatch = &pPatchRec->patch;
2816 int rc = VERR_PATCHING_REFUSED;
2817 uint32_t orgOffsetPatchMem = ~0;
2818 RTRCPTR pInstrStart;
2819 bool fInserted;
2820 NOREF(pInstrHC); NOREF(uOpSize);
2821
2822 /* Save original offset (in case of failures later on) */
2823 /** @todo use the hypervisor heap (that has quite a few consequences for save/restore though) */
2824 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2825
2826 Assert(!(pPatch->flags & (PATMFL_GUEST_SPECIFIC|PATMFL_USER_MODE|PATMFL_TRAPHANDLER)));
2827 switch (uOpcode)
2828 {
2829 case OP_MOV:
2830 break;
2831
2832 case OP_CLI:
2833 case OP_PUSHF:
2834 /* We can 'call' a cli or pushf patch. It will either return to the original guest code when IF is set again, or fault. */
2835 /* Note: special precautions are taken when disabling and enabling such patches. */
2836 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
2837 break;
2838
2839 default:
2840 if (!(pPatch->flags & PATMFL_IDTHANDLER))
2841 {
2842 AssertMsg(0, ("patmR3PatchBlock: Invalid opcode %x\n", uOpcode));
2843 return VERR_INVALID_PARAMETER;
2844 }
2845 }
2846
2847 if (!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)))
2848 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
2849
2850 /* If we're going to insert a patch jump, then the jump itself is not allowed to cross a page boundary. */
2851 if ( (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2852 && PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + SIZEOF_NEARJUMP32)
2853 )
2854 {
2855 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
2856 Log(("Patch jump would cross page boundary -> refuse!!\n"));
2857 rc = VERR_PATCHING_REFUSED;
2858 goto failure;
2859 }
2860
2861 pPatch->nrPatch2GuestRecs = 0;
2862 pInstrStart = pInstrGC;
2863
2864#ifdef PATM_ENABLE_CALL
2865 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
2866#endif
2867
2868 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2869 pPatch->uCurPatchOffset = 0;
2870
2871 if ((pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER)) == PATMFL_IDTHANDLER)
2872 {
2873 Assert(pPatch->flags & PATMFL_INTHANDLER);
2874
2875 /* Install fake cli patch (to clear the virtual IF and check int xx parameters) */
2876 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2877 if (RT_FAILURE(rc))
2878 goto failure;
2879 }
2880
2881 /***************************************************************************************************************************/
2882 /* Note: We can't insert *any* code before a sysenter handler; some linux guests have an invalid stack at this point!!!!! */
2883 /***************************************************************************************************************************/
2884#ifdef VBOX_WITH_STATISTICS
2885 if (!(pPatch->flags & PATMFL_SYSENTER))
2886 {
2887 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2888 if (RT_FAILURE(rc))
2889 goto failure;
2890 }
2891#endif
2892
2893 PATMP2GLOOKUPREC cacheRec;
2894 RT_ZERO(cacheRec);
2895 cacheRec.pPatch = pPatch;
2896
2897 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, &cacheRec);
2898 /* Free leftover lock if any. */
2899 if (cacheRec.Lock.pvMap)
2900 {
2901 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2902 cacheRec.Lock.pvMap = NULL;
2903 }
2904 if (rc != VINF_SUCCESS)
2905 {
2906 Log(("PATMR3PatchCli: patmRecompileCodeStream failed with %d\n", rc));
2907 goto failure;
2908 }
2909
2910 /* Calculated during analysis. */
2911 if (pPatch->cbPatchBlockSize < SIZEOF_NEARJUMP32)
2912 {
2913 /* Most likely cause: we encountered an illegal instruction very early on. */
2914 /** @todo could turn it into an int3 callable patch. */
2915 Log(("patmR3PatchBlock: patch block too small -> refuse\n"));
2916 rc = VERR_PATCHING_REFUSED;
2917 goto failure;
2918 }
2919
2920 /* size of patch block */
2921 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2922
2923
2924 /* Update free pointer in patch memory. */
2925 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2926 /* Round to next 8 byte boundary. */
2927 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2928
2929 /*
2930 * Insert into patch to guest lookup tree
2931 */
2932 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2933 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2934 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2935 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
2936 if (!fInserted)
2937 {
2938 rc = VERR_PATCHING_REFUSED;
2939 goto failure;
2940 }
2941
2942 /* Note that patmr3SetBranchTargets can install additional patches!! */
2943 rc = patmr3SetBranchTargets(pVM, pPatch);
2944 if (rc != VINF_SUCCESS)
2945 {
2946 Log(("PATMR3PatchCli: patmr3SetBranchTargets failed with %d\n", rc));
2947 goto failure;
2948 }
2949
2950#ifdef LOG_ENABLED
2951 Log(("Patch code ----------------------------------------------------------\n"));
2952 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, &cacheRec);
2953 /* Free leftover lock if any. */
2954 if (cacheRec.Lock.pvMap)
2955 {
2956 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2957 cacheRec.Lock.pvMap = NULL;
2958 }
2959 Log(("Patch code ends -----------------------------------------------------\n"));
2960#endif
2961
2962 /* make a copy of the guest code bytes that will be overwritten */
2963 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
2964
2965 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
2966 AssertRC(rc);
2967
2968 if (pPatch->flags & PATMFL_INT3_REPLACEMENT_BLOCK)
2969 {
2970 /*uint8_t bASMInt3 = 0xCC; - unused */
2971
2972 Log(("patmR3PatchBlock %RRv -> int 3 callable patch.\n", pPatch->pPrivInstrGC));
2973 /* Replace first opcode byte with 'int 3'. */
2974 rc = patmActivateInt3Patch(pVM, pPatch);
2975 if (RT_FAILURE(rc))
2976 goto failure;
2977
2978 /* normal patch can be turned into an int3 patch -> clear patch jump installation flag. */
2979 pPatch->flags &= ~PATMFL_MUST_INSTALL_PATCHJMP;
2980
2981 pPatch->flags &= ~PATMFL_INSTR_HINT;
2982 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
2983 }
2984 else
2985 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2986 {
2987 Assert(!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)));
2988 /* now insert a jump in the guest code */
2989 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
2990 AssertRC(rc);
2991 if (RT_FAILURE(rc))
2992 goto failure;
2993
2994 }
2995
2996 patmR3DbgAddPatch(pVM, pPatchRec);
2997
2998 PATM_LOG_RAW_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
2999
3000 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3001 pPatch->pTempInfo->nrIllegalInstr = 0;
3002
3003 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
3004
3005 pPatch->uState = PATCH_ENABLED;
3006 return VINF_SUCCESS;
3007
3008failure:
3009 if (pPatchRec->CoreOffset.Key)
3010 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3011
3012 patmEmptyTree(pVM, &pPatch->FixupTree);
3013 pPatch->nrFixups = 0;
3014
3015 patmEmptyTree(pVM, &pPatch->JumpTree);
3016 pPatch->nrJumpRecs = 0;
3017
3018 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3019 pPatch->pTempInfo->nrIllegalInstr = 0;
3020
3021 /* Turn this cli patch into a dummy. */
3022 pPatch->uState = PATCH_REFUSED;
3023 pPatch->pPatchBlockOffset = 0;
3024
3025 // Give back the patch memory we no longer need
3026 Assert(orgOffsetPatchMem != (uint32_t)~0);
3027 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3028
3029 return rc;
3030}
3031
3032/**
3033 * Patch IDT handler
3034 *
3035 * @returns VBox status code.
3036 * @param pVM Pointer to the VM.
3037 * @param pInstrGC Guest context point to privileged instruction
3038 * @param uOpSize Size of starting instruction
3039 * @param pPatchRec Patch record
3040 * @param pCacheRec Cache record ptr
3041 *
3042 * @note returns failure if patching is not allowed or possible
3043 *
3044 */
3045static int patmIdtHandler(PVM pVM, RTRCPTR pInstrGC, uint32_t uOpSize, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3046{
3047 PPATCHINFO pPatch = &pPatchRec->patch;
3048 bool disret;
3049 DISCPUSTATE cpuPush, cpuJmp;
3050 uint32_t cbInstr;
3051 RTRCPTR pCurInstrGC = pInstrGC;
3052 uint8_t *pCurInstrHC, *pInstrHC;
3053 uint32_t orgOffsetPatchMem = ~0;
3054
3055 pInstrHC = pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
3056 AssertReturn(pCurInstrHC, VERR_PAGE_NOT_PRESENT);
3057
3058 /*
3059 * In Linux it's often the case that many interrupt handlers push a predefined value onto the stack
3060 * and then jump to a common entrypoint. In order not to waste a lot of memory, we will check for this
3061 * condition here and only patch the common entrypoint once.
3062 */
3063 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuPush, &cbInstr);
3064 Assert(disret);
3065 if (disret && cpuPush.pCurInstr->uOpcode == OP_PUSH)
3066 {
3067 RTRCPTR pJmpInstrGC;
3068 int rc;
3069 pCurInstrGC += cbInstr;
3070
3071 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuJmp, &cbInstr);
3072 if ( disret
3073 && cpuJmp.pCurInstr->uOpcode == OP_JMP
3074 && (pJmpInstrGC = PATMResolveBranch(&cpuJmp, pCurInstrGC))
3075 )
3076 {
3077 bool fInserted;
3078 PPATMPATCHREC pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
3079 if (pJmpPatch == 0)
3080 {
3081 /* Patch it first! */
3082 rc = PATMR3InstallPatch(pVM, pJmpInstrGC, pPatch->flags | PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT);
3083 if (rc != VINF_SUCCESS)
3084 goto failure;
3085 pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
3086 Assert(pJmpPatch);
3087 }
3088 if (pJmpPatch->patch.uState != PATCH_ENABLED)
3089 goto failure;
3090
3091 /* save original offset (in case of failures later on) */
3092 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3093
3094 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3095 pPatch->uCurPatchOffset = 0;
3096 pPatch->nrPatch2GuestRecs = 0;
3097
3098#ifdef VBOX_WITH_STATISTICS
3099 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3100 if (RT_FAILURE(rc))
3101 goto failure;
3102#endif
3103
3104 /* Install fake cli patch (to clear the virtual IF) */
3105 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
3106 if (RT_FAILURE(rc))
3107 goto failure;
3108
3109 /* Add lookup record for patch to guest address translation (for the push) */
3110 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pInstrGC, PATM_LOOKUP_BOTHDIR);
3111
3112 /* Duplicate push. */
3113 rc = patmPatchGenDuplicate(pVM, pPatch, &cpuPush, pInstrGC);
3114 if (RT_FAILURE(rc))
3115 goto failure;
3116
3117 /* Generate jump to common entrypoint. */
3118 rc = patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, PATCHCODE_PTR_GC(&pJmpPatch->patch));
3119 if (RT_FAILURE(rc))
3120 goto failure;
3121
3122 /* size of patch block */
3123 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3124
3125 /* Update free pointer in patch memory. */
3126 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3127 /* Round to next 8 byte boundary */
3128 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3129
3130 /* There's no jump from guest to patch code. */
3131 pPatch->cbPatchJump = 0;
3132
3133
3134#ifdef LOG_ENABLED
3135 Log(("Patch code ----------------------------------------------------------\n"));
3136 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3137 Log(("Patch code ends -----------------------------------------------------\n"));
3138#endif
3139 Log(("Successfully installed IDT handler patch at %RRv\n", pInstrGC));
3140
3141 /*
3142 * Insert into patch to guest lookup tree
3143 */
3144 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3145 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3146 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3147            AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3148 patmR3DbgAddPatch(pVM, pPatchRec);
3149
3150 pPatch->uState = PATCH_ENABLED;
3151
3152 return VINF_SUCCESS;
3153 }
3154 }
3155failure:
3156 /* Give back the patch memory we no longer need */
3157 if (orgOffsetPatchMem != (uint32_t)~0)
3158 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3159
3160 return patmR3PatchBlock(pVM, pInstrGC, pInstrHC, OP_CLI, uOpSize, pPatchRec);
3161}
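
/*
 * Editorial sketch (comment only): how the push+jmp pattern handled above can be recognized.
 * It mirrors the logic in patmIdtHandler: disassemble the first instruction; if it is a PUSH,
 * disassemble the following one and resolve its branch target, which is the shared entrypoint.
 * The helper name is hypothetical; a return value of 0 means no shared entrypoint was found.
 *
 *   static RTRCPTR patmSketchFindCommonEntry(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC, uint8_t *pInstrHC)
 *   {
 *       DISCPUSTATE Cpu;
 *       uint32_t    cbInstr;
 *       if (   patmR3DisInstr(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE, &Cpu, &cbInstr)
 *           && Cpu.pCurInstr->uOpcode == OP_PUSH)
 *       {
 *           RTRCPTR pNextGC = pInstrGC + cbInstr;
 *           if (   patmR3DisInstr(pVM, pPatch, pNextGC, pInstrHC, PATMREAD_ORGCODE, &Cpu, &cbInstr)
 *               && Cpu.pCurInstr->uOpcode == OP_JMP)
 *               return PATMResolveBranch(&Cpu, pNextGC);   // 0 for indirect/far jumps
 *       }
 *       return 0;
 *   }
 */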
3162
3163/**
3164 * Install a trampoline to call a guest trap handler directly
3165 *
3166 * @returns VBox status code.
3167 * @param pVM Pointer to the VM.
3168 * @param pInstrGC Guest context pointer to privileged instruction
3169 * @param pPatchRec Patch record
3170 * @param pCacheRec Cache record ptr
3171 *
3172 */
3173static int patmInstallTrapTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3174{
3175 PPATCHINFO pPatch = &pPatchRec->patch;
3176 int rc = VERR_PATCHING_REFUSED;
3177 uint32_t orgOffsetPatchMem = ~0;
3178 bool fInserted;
3179
3180 // save original offset (in case of failures later on)
3181 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3182
3183 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3184 pPatch->uCurPatchOffset = 0;
3185 pPatch->nrPatch2GuestRecs = 0;
3186
3187#ifdef VBOX_WITH_STATISTICS
3188 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3189 if (RT_FAILURE(rc))
3190 goto failure;
3191#endif
3192
3193 rc = patmPatchGenTrapEntry(pVM, pPatch, pInstrGC);
3194 if (RT_FAILURE(rc))
3195 goto failure;
3196
3197 /* size of patch block */
3198 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3199
3200 /* Update free pointer in patch memory. */
3201 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3202 /* Round to next 8 byte boundary */
3203 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3204
3205 /* There's no jump from guest to patch code. */
3206 pPatch->cbPatchJump = 0;
3207
3208#ifdef LOG_ENABLED
3209 Log(("Patch code ----------------------------------------------------------\n"));
3210 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3211 Log(("Patch code ends -----------------------------------------------------\n"));
3212#endif
3213 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "TRAP handler");
3214 Log(("Successfully installed Trap Trampoline patch at %RRv\n", pInstrGC));
3215
3216 /*
3217 * Insert into patch to guest lookup tree
3218 */
3219 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3220 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3221 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3222    AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3223 patmR3DbgAddPatch(pVM, pPatchRec);
3224
3225 pPatch->uState = PATCH_ENABLED;
3226 return VINF_SUCCESS;
3227
3228failure:
3229 AssertMsgFailed(("Failed to install trap handler trampoline!!\n"));
3230
3231 /* Turn this cli patch into a dummy. */
3232 pPatch->uState = PATCH_REFUSED;
3233 pPatch->pPatchBlockOffset = 0;
3234
3235 /* Give back the patch memory we no longer need */
3236 Assert(orgOffsetPatchMem != (uint32_t)~0);
3237 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3238
3239 return rc;
3240}
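
/*
 * Editorial sketch (comment only): the patch-memory bookkeeping shared by the generators above.
 * The free offset is snapshotted first; on success it is advanced by the emitted block size and
 * rounded up to an 8-byte boundary, on failure it is simply restored so the memory is reused.
 * The fSucceeded flag is a hypothetical stand-in for whichever code generation steps the real
 * function performs.
 *
 *   uint32_t offSaved = pVM->patm.s.offPatchMem;             // snapshot for rollback
 *   pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
 *   pPatch->uCurPatchOffset   = 0;
 *   // ... emit patch code, advancing pPatch->uCurPatchOffset ...
 *   if (fSucceeded)
 *   {
 *       pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
 *       pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
 *       pVM->patm.s.offPatchMem  = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
 *   }
 *   else
 *       pVM->patm.s.offPatchMem = offSaved;                  // give the memory back
 */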
3241
3242
3243#ifdef LOG_ENABLED
3244/**
3245 * Check if the instruction is patched as a common idt handler
3246 *
3247 * @returns true or false
3248 * @param pVM Pointer to the VM.
3249 * @param pInstrGC Guest context pointer to the instruction
3250 *
3251 */
3252static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC)
3253{
3254 PPATMPATCHREC pRec;
3255
3256 pRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
3257 if (pRec && pRec->patch.flags & PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT)
3258 return true;
3259 return false;
3260}
3261#endif //LOG_ENABLED
3262
3263
3264/**
3265 * Duplicates a complete function
3266 *
3267 * @returns VBox status code.
3268 * @param pVM Pointer to the VM.
3269 * @param pInstrGC Guest context pointer to privileged instruction
3270 * @param pPatchRec Patch record
3271 * @param pCacheRec Cache record ptr
3272 *
3273 */
3274static int patmDuplicateFunction(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3275{
3276 PPATCHINFO pPatch = &pPatchRec->patch;
3277 int rc = VERR_PATCHING_REFUSED;
3278 uint32_t orgOffsetPatchMem = ~0;
3279 bool fInserted;
3280
3281 Log(("patmDuplicateFunction %RRv\n", pInstrGC));
3282 /* Save original offset (in case of failures later on). */
3283 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3284
3285 /* We will not go on indefinitely with call instruction handling. */
3286 if (pVM->patm.s.ulCallDepth > PATM_MAX_CALL_DEPTH)
3287 {
3288        Log(("patmDuplicateFunction: maximum call depth reached!!\n"));
3289 return VERR_PATCHING_REFUSED;
3290 }
3291
3292 pVM->patm.s.ulCallDepth++;
3293
3294#ifdef PATM_ENABLE_CALL
3295 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
3296#endif
3297
3298 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
3299
3300 pPatch->nrPatch2GuestRecs = 0;
3301 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3302 pPatch->uCurPatchOffset = 0;
3303
3304 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3305 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3306 if (RT_FAILURE(rc))
3307 goto failure;
3308
3309#ifdef VBOX_WITH_STATISTICS
3310 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3311 if (RT_FAILURE(rc))
3312 goto failure;
3313#endif
3314
3315 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pCacheRec);
3316 if (rc != VINF_SUCCESS)
3317 {
3318        Log(("patmDuplicateFunction: patmRecompileCodeStream failed with %d\n", rc));
3319 goto failure;
3320 }
3321
3322    /* Size of patch block. */
3323    pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3324
3325    /* Update free pointer in patch memory. */
3326 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3327 /* Round to next 8 byte boundary. */
3328 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3329
3330 pPatch->uState = PATCH_ENABLED;
3331
3332 /*
3333 * Insert into patch to guest lookup tree
3334 */
3335 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3336 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3337 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3338 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3339 if (!fInserted)
3340 {
3341 rc = VERR_PATCHING_REFUSED;
3342 goto failure;
3343 }
3344
3345 /* Note that patmr3SetBranchTargets can install additional patches!! */
3346 rc = patmr3SetBranchTargets(pVM, pPatch);
3347 if (rc != VINF_SUCCESS)
3348 {
3349        Log(("patmDuplicateFunction: patmr3SetBranchTargets failed with %d\n", rc));
3350 goto failure;
3351 }
3352
3353 patmR3DbgAddPatch(pVM, pPatchRec);
3354
3355#ifdef LOG_ENABLED
3356 Log(("Patch code ----------------------------------------------------------\n"));
3357 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3358 Log(("Patch code ends -----------------------------------------------------\n"));
3359#endif
3360
3361 Log(("Successfully installed function duplication patch at %RRv\n", pInstrGC));
3362
3363 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3364 pPatch->pTempInfo->nrIllegalInstr = 0;
3365
3366 pVM->patm.s.ulCallDepth--;
3367 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledFunctionPatches);
3368 return VINF_SUCCESS;
3369
3370failure:
3371 if (pPatchRec->CoreOffset.Key)
3372 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3373
3374 patmEmptyTree(pVM, &pPatch->FixupTree);
3375 pPatch->nrFixups = 0;
3376
3377 patmEmptyTree(pVM, &pPatch->JumpTree);
3378 pPatch->nrJumpRecs = 0;
3379
3380 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3381 pPatch->pTempInfo->nrIllegalInstr = 0;
3382
3383 /* Turn this cli patch into a dummy. */
3384 pPatch->uState = PATCH_REFUSED;
3385 pPatch->pPatchBlockOffset = 0;
3386
3387 // Give back the patch memory we no longer need
3388 Assert(orgOffsetPatchMem != (uint32_t)~0);
3389 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3390
3391 pVM->patm.s.ulCallDepth--;
3392    Log(("patmDuplicateFunction %RRv failed!!\n", pInstrGC));
3393 return rc;
3394}
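
/*
 * Editorial sketch (comment only): the recursion guard used by patmDuplicateFunction. Setting
 * branch targets may install further patches, so the call depth is bumped before recompiling
 * and decremented on every exit path; beyond PATM_MAX_CALL_DEPTH the request is refused.
 * patmSketchDuplicateBody is a hypothetical stand-in for the work done above.
 *
 *   if (pVM->patm.s.ulCallDepth > PATM_MAX_CALL_DEPTH)
 *       return VERR_PATCHING_REFUSED;              // stop runaway call chains
 *   pVM->patm.s.ulCallDepth++;
 *   int rc = patmSketchDuplicateBody(pVM, pInstrGC);
 *   pVM->patm.s.ulCallDepth--;                     // balanced on both success and failure paths
 *   return rc;
 */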
3395
3396/**
3397 * Creates trampoline code to jump inside an existing patch
3398 *
3399 * @returns VBox status code.
3400 * @param pVM Pointer to the VM.
3401 * @param pInstrGC Guest context pointer to privileged instruction
3402 * @param pPatchRec Patch record
3403 *
3404 */
3405static int patmCreateTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3406{
3407 PPATCHINFO pPatch = &pPatchRec->patch;
3408 RTRCPTR pPage, pPatchTargetGC = 0;
3409 uint32_t orgOffsetPatchMem = ~0;
3410 int rc = VERR_PATCHING_REFUSED;
3411 PPATCHINFO pPatchToJmp = NULL; /**< Patch the trampoline jumps to. */
3412 PTRAMPREC pTrampRec = NULL; /**< Trampoline record used to find the patch. */
3413 bool fInserted = false;
3414
3415 Log(("patmCreateTrampoline %RRv\n", pInstrGC));
3416 /* Save original offset (in case of failures later on). */
3417 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3418
3419 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3420 /** @todo we already checked this before */
3421 pPage = pInstrGC & PAGE_BASE_GC_MASK;
3422
3423 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3424 if (pPatchPage)
3425 {
3426 uint32_t i;
3427
3428 for (i=0;i<pPatchPage->cCount;i++)
3429 {
3430 if (pPatchPage->papPatch[i])
3431 {
3432 pPatchToJmp = pPatchPage->papPatch[i];
3433
3434 if ( (pPatchToJmp->flags & PATMFL_DUPLICATE_FUNCTION)
3435 && pPatchToJmp->uState == PATCH_ENABLED)
3436 {
3437 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatchToJmp, pInstrGC);
3438 if (pPatchTargetGC)
3439 {
3440 uint32_t offsetPatch = pPatchTargetGC - pVM->patm.s.pPatchMemGC;
3441 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatchToJmp->Patch2GuestAddrTree, offsetPatch, false);
3442 Assert(pPatchToGuestRec);
3443
3444 pPatchToGuestRec->fJumpTarget = true;
3445 Assert(pPatchTargetGC != pPatchToJmp->pPrivInstrGC);
3446 Log(("patmCreateTrampoline: generating jump to code inside patch at %RRv (patch target %RRv)\n", pPatchToJmp->pPrivInstrGC, pPatchTargetGC));
3447 break;
3448 }
3449 }
3450 }
3451 }
3452 }
3453 AssertReturn(pPatchPage && pPatchTargetGC && pPatchToJmp, VERR_PATCHING_REFUSED);
3454
3455 /*
3456     * Only record the trampoline patch if this is the first trampoline into the target
3457     * or if other trampolines have already been recorded for it.
3458     * The goal is to refuse refreshing function duplicates if the guest
3459     * modifies code after a saved state was loaded, because it is not possible
3460     * to save the relation between trampoline and target without changing the
3461     * saved state version.
3462 */
3463 if ( !(pPatchToJmp->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
3464 || pPatchToJmp->pTrampolinePatchesHead)
3465 {
3466 pPatchToJmp->flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
3467 pTrampRec = (PTRAMPREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pTrampRec));
3468 if (!pTrampRec)
3469 return VERR_NO_MEMORY; /* or better return VERR_PATCHING_REFUSED to let the VM continue? */
3470
3471 pTrampRec->pPatchTrampoline = pPatchRec;
3472 }
3473
3474 pPatch->nrPatch2GuestRecs = 0;
3475 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3476 pPatch->uCurPatchOffset = 0;
3477
3478 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3479 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3480 if (RT_FAILURE(rc))
3481 goto failure;
3482
3483#ifdef VBOX_WITH_STATISTICS
3484 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3485 if (RT_FAILURE(rc))
3486 goto failure;
3487#endif
3488
3489 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC);
3490 if (RT_FAILURE(rc))
3491 goto failure;
3492
3493 /*
3494 * Insert into patch to guest lookup tree
3495 */
3496 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3497 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3498 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3499 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3500 if (!fInserted)
3501 {
3502 rc = VERR_PATCHING_REFUSED;
3503 goto failure;
3504 }
3505 patmR3DbgAddPatch(pVM, pPatchRec);
3506
3507 /* size of patch block */
3508 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3509
3510 /* Update free pointer in patch memory. */
3511 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3512 /* Round to next 8 byte boundary */
3513 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3514
3515 /* There's no jump from guest to patch code. */
3516 pPatch->cbPatchJump = 0;
3517
3518 /* Enable the patch. */
3519 pPatch->uState = PATCH_ENABLED;
3520 /* We allow this patch to be called as a function. */
3521 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
3522
3523 if (pTrampRec)
3524 {
3525 pTrampRec->pNext = pPatchToJmp->pTrampolinePatchesHead;
3526 pPatchToJmp->pTrampolinePatchesHead = pTrampRec;
3527 }
3528 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledTrampoline);
3529 return VINF_SUCCESS;
3530
3531failure:
3532 if (pPatchRec->CoreOffset.Key)
3533 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3534
3535 patmEmptyTree(pVM, &pPatch->FixupTree);
3536 pPatch->nrFixups = 0;
3537
3538 patmEmptyTree(pVM, &pPatch->JumpTree);
3539 pPatch->nrJumpRecs = 0;
3540
3541 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3542 pPatch->pTempInfo->nrIllegalInstr = 0;
3543
3544 /* Turn this cli patch into a dummy. */
3545 pPatch->uState = PATCH_REFUSED;
3546 pPatch->pPatchBlockOffset = 0;
3547
3548 // Give back the patch memory we no longer need
3549 Assert(orgOffsetPatchMem != (uint32_t)~0);
3550 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3551
3552 if (pTrampRec)
3553 MMR3HeapFree(pTrampRec);
3554
3555 return rc;
3556}
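
/*
 * Editorial sketch (comment only): how patmCreateTrampoline locates an existing function
 * duplicate that already covers the requested address. All patches registered for the guest
 * page are scanned for an enabled PATMFL_DUPLICATE_FUNCTION patch whose patch-to-guest
 * mapping can translate pInstrGC into the patch block; a result of 0 means no coverage.
 *
 *   RTRCPTR        pTargetGC  = 0;
 *   PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage,
 *                                                            pInstrGC & PAGE_BASE_GC_MASK);
 *   for (uint32_t i = 0; pPatchPage && i < pPatchPage->cCount && !pTargetGC; i++)
 *   {
 *       PPATCHINFO pCand = pPatchPage->papPatch[i];
 *       if (   pCand
 *           && (pCand->flags & PATMFL_DUPLICATE_FUNCTION)
 *           && pCand->uState == PATCH_ENABLED)
 *           pTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pCand, pInstrGC);   // 0 if not covered
 *   }
 */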
3557
3558
3559/**
3560 * Patch branch target function for call/jump at specified location.
3561 * (in response to a VINF_PATM_DUPLICATE_FUNCTION GC exit reason)
3562 *
3563 * @returns VBox status code.
3564 * @param pVM Pointer to the VM.
3565 * @param pCtx Pointer to the guest CPU context.
3566 *
3567 */
3568VMMR3_INT_DECL(int) PATMR3DuplicateFunctionRequest(PVM pVM, PCPUMCTX pCtx)
3569{
3570 RTRCPTR pBranchTarget, pPage;
3571 int rc;
3572 RTRCPTR pPatchTargetGC = 0;
3573 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
3574
3575 pBranchTarget = pCtx->edx;
3576 pBranchTarget = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pBranchTarget);
3577
3578 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3579 pPage = pBranchTarget & PAGE_BASE_GC_MASK;
3580
3581 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3582 if (pPatchPage)
3583 {
3584 uint32_t i;
3585
3586 for (i=0;i<pPatchPage->cCount;i++)
3587 {
3588 if (pPatchPage->papPatch[i])
3589 {
3590 PPATCHINFO pPatch = pPatchPage->papPatch[i];
3591
3592 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
3593 && pPatch->uState == PATCH_ENABLED)
3594 {
3595 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pBranchTarget);
3596 if (pPatchTargetGC)
3597 {
3598 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateUseExisting);
3599 break;
3600 }
3601 }
3602 }
3603 }
3604 }
3605
3606 if (pPatchTargetGC)
3607 {
3608 /* Create a trampoline that also sets PATM_ASMFIX_INTERRUPTFLAG. */
3609 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_TRAMPOLINE);
3610 }
3611 else
3612 {
3613 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
3614 }
3615
3616 if (rc == VINF_SUCCESS)
3617 {
3618 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pBranchTarget);
3619 Assert(pPatchTargetGC);
3620 }
3621
3622 if (pPatchTargetGC)
3623 {
3624 pCtx->eax = pPatchTargetGC;
3625 pCtx->eax = pCtx->eax - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC; /* make it relative */
3626 }
3627 else
3628 {
3629 /* We add a dummy entry into the lookup cache so we won't get bombarded with the same requests over and over again. */
3630 pCtx->eax = 0;
3631 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQFailed);
3632 }
3633 Assert(PATMIsPatchGCAddr(pVM, pCtx->edi));
3634 rc = patmAddBranchToLookupCache(pVM, pCtx->edi, pBranchTarget, pCtx->eax);
3635 AssertRC(rc);
3636
3637 pCtx->eip += PATM_ILLEGAL_INSTR_SIZE;
3638 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQSuccess);
3639 return VINF_SUCCESS;
3640}
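
/*
 * Editorial sketch (comment only): the register protocol behind PATMR3DuplicateFunctionRequest.
 * The patch code raises the request with the branch target in EDX and a patch address in EDI,
 * and expects EAX to hold the target's offset relative to the patch memory base (0 on failure);
 * EIP is then advanced past the pseudo instruction that triggered the exit.
 *
 *   pCtx->eax  = pPatchTargetGC
 *              ? pPatchTargetGC - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC   // make it patch relative
 *              : 0;                                                      // duplication failed
 *   pCtx->eip += PATM_ILLEGAL_INSTR_SIZE;
 */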
3641
3642/**
3643 * Replaces a function call by a call to an existing function duplicate (or jmp -> jmp)
3644 *
3645 * @returns VBox status code.
3646 * @param pVM Pointer to the VM.
3647 * @param pCpu Disassembly CPU structure ptr
3648 * @param pInstrGC Guest context pointer to privileged instruction
3649 * @param pCacheRec Cache record ptr
3650 *
3651 */
3652static int patmReplaceFunctionCall(PVM pVM, DISCPUSTATE *pCpu, RTRCPTR pInstrGC, PPATMP2GLOOKUPREC pCacheRec)
3653{
3654 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3655 int rc = VERR_PATCHING_REFUSED;
3656 DISCPUSTATE cpu;
3657 RTRCPTR pTargetGC;
3658 PPATMPATCHREC pPatchFunction;
3659 uint32_t cbInstr;
3660 bool disret;
3661
3662 Assert(pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL);
3663 Assert((pCpu->pCurInstr->uOpcode == OP_CALL || pCpu->pCurInstr->uOpcode == OP_JMP) && pCpu->cbInstr == SIZEOF_NEARJUMP32);
3664
3665 if ((pCpu->pCurInstr->uOpcode != OP_CALL && pCpu->pCurInstr->uOpcode != OP_JMP) || pCpu->cbInstr != SIZEOF_NEARJUMP32)
3666 {
3667 rc = VERR_PATCHING_REFUSED;
3668 goto failure;
3669 }
3670
3671 pTargetGC = PATMResolveBranch(pCpu, pInstrGC);
3672 if (pTargetGC == 0)
3673 {
3674 Log(("We don't support far jumps here!! (%08X)\n", pCpu->Param1.fUse));
3675 rc = VERR_PATCHING_REFUSED;
3676 goto failure;
3677 }
3678
3679 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3680 if (pPatchFunction == NULL)
3681 {
3682 for(;;)
3683 {
3684 /* It could be an indirect call (call -> jmp dest).
3685 * Note that it's dangerous to assume the jump will never change...
3686 */
3687 uint8_t *pTmpInstrHC;
3688
3689 pTmpInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pTargetGC);
3690 Assert(pTmpInstrHC);
3691 if (pTmpInstrHC == 0)
3692 break;
3693
3694 disret = patmR3DisInstr(pVM, pPatch, pTargetGC, pTmpInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
3695 if (disret == false || cpu.pCurInstr->uOpcode != OP_JMP)
3696 break;
3697
3698 pTargetGC = PATMResolveBranch(&cpu, pTargetGC);
3699 if (pTargetGC == 0)
3700 {
3701 break;
3702 }
3703
3704 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3705 break;
3706 }
3707 if (pPatchFunction == 0)
3708 {
3709 AssertMsgFailed(("Unable to find duplicate function %RRv\n", pTargetGC));
3710 rc = VERR_PATCHING_REFUSED;
3711 goto failure;
3712 }
3713 }
3714
3715 // make a copy of the guest code bytes that will be overwritten
3716 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
3717
3718 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3719 AssertRC(rc);
3720
3721 /* Now replace the original call in the guest code */
3722 rc = patmGenCallToPatch(pVM, pPatch, PATCHCODE_PTR_GC(&pPatchFunction->patch), pCacheRec, true);
3723 AssertRC(rc);
3724 if (RT_FAILURE(rc))
3725 goto failure;
3726
3727 /* Lowest and highest address for write monitoring. */
3728 pPatch->pInstrGCLowest = pInstrGC;
3729 pPatch->pInstrGCHighest = pInstrGC + pCpu->cbInstr;
3730 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "Call");
3731
3732 Log(("Successfully installed function replacement patch at %RRv\n", pInstrGC));
3733
3734 pPatch->uState = PATCH_ENABLED;
3735 return VINF_SUCCESS;
3736
3737failure:
3738 /* Turn this patch into a dummy. */
3739 pPatch->uState = PATCH_REFUSED;
3740
3741 return rc;
3742}
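
/*
 * Editorial sketch (comment only, simplified): how patmReplaceFunctionCall copes with a call
 * that lands on a "jmp realTarget" stub. The duplicate is looked up for the direct target
 * first; if that fails, the stub is disassembled once and the lookup is retried with the
 * resolved jump target. Error handling for a zero branch target is omitted here.
 *
 *   PPATMPATCHREC pDup = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
 *   if (!pDup)
 *   {
 *       uint8_t    *pStubHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pTargetGC);
 *       DISCPUSTATE Cpu;
 *       uint32_t    cb;
 *       if (   pStubHC
 *           && patmR3DisInstr(pVM, pPatch, pTargetGC, pStubHC, PATMREAD_ORGCODE, &Cpu, &cb)
 *           && Cpu.pCurInstr->uOpcode == OP_JMP)
 *           pDup = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree,
 *                                              PATMResolveBranch(&Cpu, pTargetGC));
 *   }
 */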
3743
3744/**
3745 * Replace the address in an MMIO instruction with the cached version.
3746 *
3747 * @returns VBox status code.
3748 * @param pVM Pointer to the VM.
3749 * @param pInstrGC Guest context pointer to privileged instruction
3750 * @param pCpu Disassembly CPU structure ptr
3751 * @param pCacheRec Cache record ptr
3752 *
3753 * @note returns failure if patching is not allowed or possible
3754 *
3755 */
3756static int patmPatchMMIOInstr(PVM pVM, RTRCPTR pInstrGC, DISCPUSTATE *pCpu, PPATMP2GLOOKUPREC pCacheRec)
3757{
3758 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3759 uint8_t *pPB;
3760 int rc = VERR_PATCHING_REFUSED;
3761
3762 Assert(pVM->patm.s.mmio.pCachedData);
3763 if (!pVM->patm.s.mmio.pCachedData)
3764 goto failure;
3765
3766 if (pCpu->Param2.fUse != DISUSE_DISPLACEMENT32)
3767 goto failure;
3768
3769 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
3770 if (pPB == 0)
3771 goto failure;
3772
3773 /* Add relocation record for cached data access. */
3774 if (patmPatchAddReloc32(pVM, pPatch, &pPB[pCpu->cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE, pPatch->pPrivInstrGC,
3775 pVM->patm.s.mmio.pCachedData) != VINF_SUCCESS)
3776 {
3777 Log(("Relocation failed for cached mmio address!!\n"));
3778 return VERR_PATCHING_REFUSED;
3779 }
3780 PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "MMIO patch old instruction:", "");
3781
3782 /* Save original instruction. */
3783 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3784 AssertRC(rc);
3785
3786 pPatch->cbPatchJump = pPatch->cbPrivInstr; /* bit of a misnomer in this case; size of replacement instruction. */
3787
3788 /* Replace address with that of the cached item. */
3789 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pInstrGC + pCpu->cbInstr - sizeof(RTRCPTR),
3790 &pVM->patm.s.mmio.pCachedData, sizeof(RTRCPTR));
3791 AssertRC(rc);
3792 if (RT_FAILURE(rc))
3793 {
3794 goto failure;
3795 }
3796
3797 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "MMIO");
3798 pVM->patm.s.mmio.pCachedData = 0;
3799 pVM->patm.s.mmio.GCPhys = 0;
3800 pPatch->uState = PATCH_ENABLED;
3801 return VINF_SUCCESS;
3802
3803failure:
3804 /* Turn this patch into a dummy. */
3805 pPatch->uState = PATCH_REFUSED;
3806
3807 return rc;
3808}
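
/*
 * Editorial sketch (comment only): the displacement rewrite performed by the MMIO patching
 * above. Only instructions whose operand is a plain 32-bit displacement qualify, and since
 * that displacement occupies the last four bytes of the instruction, the cached address is
 * written at pInstrGC + cbInstr - sizeof(RTRCPTR).
 *
 *   if (pCpu->Param2.fUse == DISUSE_DISPLACEMENT32)
 *       rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM),
 *                                         pInstrGC + pCpu->cbInstr - sizeof(RTRCPTR),
 *                                         &pVM->patm.s.mmio.pCachedData, sizeof(RTRCPTR));
 */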
3809
3810
3811/**
3812 * Replace the address in an MMIO instruction with the cached version. (instruction is part of an existing patch)
3813 *
3814 * @returns VBox status code.
3815 * @param pVM Pointer to the VM.
3816 * @param pInstrGC Guest context pointer to privileged instruction
3817 * @param pPatch Patch record
3818 *
3819 * @note returns failure if patching is not allowed or possible
3820 *
3821 */
3822static int patmPatchPATMMMIOInstr(PVM pVM, RTRCPTR pInstrGC, PPATCHINFO pPatch)
3823{
3824 DISCPUSTATE cpu;
3825 uint32_t cbInstr;
3826 bool disret;
3827 uint8_t *pInstrHC;
3828
3829 AssertReturn(pVM->patm.s.mmio.pCachedData, VERR_INVALID_PARAMETER);
3830
3831 /* Convert GC to HC address. */
3832 pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pInstrGC);
3833 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
3834
3835 /* Disassemble mmio instruction. */
3836 disret = patmR3DisInstrNoStrOpMode(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE,
3837 &cpu, &cbInstr);
3838 if (disret == false)
3839 {
3840 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
3841 return VERR_PATCHING_REFUSED;
3842 }
3843
3844 AssertMsg(cbInstr <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", cbInstr));
3845 if (cbInstr > MAX_INSTR_SIZE)
3846 return VERR_PATCHING_REFUSED;
3847 if (cpu.Param2.fUse != DISUSE_DISPLACEMENT32)
3848 return VERR_PATCHING_REFUSED;
3849
3850 /* Add relocation record for cached data access. */
3851 if (patmPatchAddReloc32(pVM, pPatch, &pInstrHC[cpu.cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE) != VINF_SUCCESS)
3852 {
3853 Log(("Relocation failed for cached mmio address!!\n"));
3854 return VERR_PATCHING_REFUSED;
3855 }
3856 /* Replace address with that of the cached item. */
3857 *(RTRCPTR *)&pInstrHC[cpu.cbInstr - sizeof(RTRCPTR)] = pVM->patm.s.mmio.pCachedData;
3858
3859 /* Lowest and highest address for write monitoring. */
3860 pPatch->pInstrGCLowest = pInstrGC;
3861 pPatch->pInstrGCHighest = pInstrGC + cpu.cbInstr;
3862
3863 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "MMIO");
3864 pVM->patm.s.mmio.pCachedData = 0;
3865 pVM->patm.s.mmio.GCPhys = 0;
3866 return VINF_SUCCESS;
3867}
3868
3869/**
3870 * Activates an int3 patch
3871 *
3872 * @returns VBox status code.
3873 * @param pVM Pointer to the VM.
3874 * @param pPatch Patch record
3875 */
3876static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3877{
3878 uint8_t bASMInt3 = 0xCC;
3879 int rc;
3880
3881 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3882 Assert(pPatch->uState != PATCH_ENABLED);
3883
3884 /* Replace first opcode byte with 'int 3'. */
3885 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, &bASMInt3, sizeof(bASMInt3));
3886 AssertRC(rc);
3887
3888 pPatch->cbPatchJump = sizeof(bASMInt3);
3889
3890 return rc;
3891}
3892
3893/**
3894 * Deactivates an int3 patch
3895 *
3896 * @returns VBox status code.
3897 * @param pVM Pointer to the VM.
3898 * @param pPatch Patch record
3899 */
3900static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3901{
3902 uint8_t ASMInt3 = 0xCC;
3903 int rc;
3904
3905 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3906 Assert(pPatch->uState == PATCH_ENABLED || pPatch->uState == PATCH_DIRTY);
3907
3908 /* Restore first opcode byte. */
3909 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, sizeof(ASMInt3));
3910 AssertRC(rc);
3911 return rc;
3912}
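
/*
 * Editorial sketch (comment only): the int3 activate/deactivate pair in use. Activation writes
 * a single 0xCC over the first opcode byte of the privileged instruction; deactivation restores
 * the byte previously saved in aPrivInstr. Both assume the patch record was set up as an int3
 * replacement (PATMFL_INT3_REPLACEMENT or PATMFL_INT3_REPLACEMENT_BLOCK).
 *
 *   int rc = patmActivateInt3Patch(pVM, pPatch);      // guest now traps with #BP at this address
 *   // ... the #BP handler redirects execution into the patch ...
 *   if (RT_SUCCESS(rc))
 *       rc = patmDeactivateInt3Patch(pVM, pPatch);    // original first byte is written back
 */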
3913
3914/**
3915 * Replace an instruction with a breakpoint (0xCC), that is handled dynamically
3916 * in the raw-mode context.
3917 *
3918 * @returns VBox status code.
3919 * @param pVM Pointer to the VM.
3920 * @param pInstrGC Guest context pointer to privileged instruction
3921 * @param pInstrHC Host context pointer to privileged instruction
3922 * @param pCpu Disassembly CPU structure ptr
3923 * @param pPatch Patch record
3924 *
3925 * @note returns failure if patching is not allowed or possible
3926 *
3927 */
3928int patmR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATCHINFO pPatch)
3929{
3930 uint8_t bASMInt3 = 0xCC;
3931 int rc;
3932
3933    /* Note: Do not use patch memory here! It might be called during patch installation too. */
3934 PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "patmR3PatchInstrInt3:", "");
3935
3936 /* Save the original instruction. */
3937 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3938 AssertRC(rc);
3939 pPatch->cbPatchJump = sizeof(bASMInt3); /* bit of a misnomer in this case; size of replacement instruction. */
3940
3941 pPatch->flags |= PATMFL_INT3_REPLACEMENT;
3942
3943 /* Replace first opcode byte with 'int 3'. */
3944 rc = patmActivateInt3Patch(pVM, pPatch);
3945 if (RT_FAILURE(rc))
3946 goto failure;
3947
3948 /* Lowest and highest address for write monitoring. */
3949 pPatch->pInstrGCLowest = pInstrGC;
3950 pPatch->pInstrGCHighest = pInstrGC + pCpu->cbInstr;
3951
3952 pPatch->uState = PATCH_ENABLED;
3953 return VINF_SUCCESS;
3954
3955failure:
3956 /* Turn this patch into a dummy. */
3957 return VERR_PATCHING_REFUSED;
3958}
3959
3960#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
3961/**
3962 * Patch a jump instruction at specified location
3963 *
3964 * @returns VBox status code.
3965 * @param pVM Pointer to the VM.
3966 * @param pInstrGC Guest context pointer to privileged instruction
3967 * @param pInstrHC Host context pointer to privileged instruction
3968 * @param pCpu Disassembly CPU structure ptr
3969 * @param pPatchRec Patch record
3970 *
3971 * @note returns failure if patching is not allowed or possible
3972 *
3973 */
3974int patmPatchJump(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATMPATCHREC pPatchRec)
3975{
3976 PPATCHINFO pPatch = &pPatchRec->patch;
3977 int rc = VERR_PATCHING_REFUSED;
3978
3979 pPatch->pPatchBlockOffset = 0; /* doesn't use patch memory */
3980 pPatch->uCurPatchOffset = 0;
3981 pPatch->cbPatchBlockSize = 0;
3982 pPatch->flags |= PATMFL_SINGLE_INSTRUCTION;
3983
3984 /*
3985 * Instruction replacements such as these should never be interrupted. I've added code to EM.cpp to
3986 * make sure this never happens. (unless a trap is triggered (intentionally or not))
3987 */
3988 switch (pCpu->pCurInstr->uOpcode)
3989 {
3990 case OP_JO:
3991 case OP_JNO:
3992 case OP_JC:
3993 case OP_JNC:
3994 case OP_JE:
3995 case OP_JNE:
3996 case OP_JBE:
3997 case OP_JNBE:
3998 case OP_JS:
3999 case OP_JNS:
4000 case OP_JP:
4001 case OP_JNP:
4002 case OP_JL:
4003 case OP_JNL:
4004 case OP_JLE:
4005 case OP_JNLE:
4006 case OP_JMP:
4007 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
4008 Assert(pCpu->Param1.fUse & DISUSE_IMMEDIATE32_REL);
4009 if (!(pCpu->Param1.fUse & DISUSE_IMMEDIATE32_REL))
4010 goto failure;
4011
4012 Assert(pCpu->cbInstr == SIZEOF_NEARJUMP32 || pCpu->cbInstr == SIZEOF_NEAR_COND_JUMP32);
4013 if (pCpu->cbInstr != SIZEOF_NEARJUMP32 && pCpu->cbInstr != SIZEOF_NEAR_COND_JUMP32)
4014 goto failure;
4015
4016 if (PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + pCpu->cbInstr))
4017 {
4018 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
4019 AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
4020 rc = VERR_PATCHING_REFUSED;
4021 goto failure;
4022 }
4023
4024 break;
4025
4026 default:
4027 goto failure;
4028 }
4029
4030 // make a copy of the guest code bytes that will be overwritten
4031 Assert(pCpu->cbInstr <= sizeof(pPatch->aPrivInstr));
4032 Assert(pCpu->cbInstr >= SIZEOF_NEARJUMP32);
4033 pPatch->cbPatchJump = pCpu->cbInstr;
4034
4035 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
4036 AssertRC(rc);
4037
4038 /* Now insert a jump in the guest code. */
4039 /*
4040 * A conflict jump patch needs to be treated differently; we'll just replace the relative jump address with one that
4041 * references the target instruction in the conflict patch.
4042 */
4043 RTRCPTR pJmpDest = patmR3GuestGCPtrToPatchGCPtrSimple(pVM, pInstrGC + pCpu->cbInstr + (int32_t)pCpu->Param1.uValue);
4044
4045 AssertMsg(pJmpDest, ("patmR3GuestGCPtrToPatchGCPtrSimple failed for %RRv\n", pInstrGC + pCpu->cbInstr + (int32_t)pCpu->Param1.uValue));
4046 pPatch->pPatchJumpDestGC = pJmpDest;
4047
4048 PATMP2GLOOKUPREC cacheRec;
4049 RT_ZERO(cacheRec);
4050 cacheRec.pPatch = pPatch;
4051
4052    rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
4053 /* Free leftover lock if any. */
4054 if (cacheRec.Lock.pvMap)
4055 {
4056 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4057 cacheRec.Lock.pvMap = NULL;
4058 }
4059 AssertRC(rc);
4060 if (RT_FAILURE(rc))
4061 goto failure;
4062
4063 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
4064
4065 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
4066 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
4067
4068 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledJump);
4069
4070 /* Lowest and highest address for write monitoring. */
4071 pPatch->pInstrGCLowest = pInstrGC;
4072 pPatch->pInstrGCHighest = pInstrGC + pPatch->cbPatchJump;
4073
4074 pPatch->uState = PATCH_ENABLED;
4075 return VINF_SUCCESS;
4076
4077failure:
4078 /* Turn this cli patch into a dummy. */
4079 pPatch->uState = PATCH_REFUSED;
4080
4081 return rc;
4082}
4083#endif /* PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES */
4084
4085
4086/**
4087 * Gives hint to PATM about supervisor guest instructions
4088 *
4089 * @returns VBox status code.
4090 * @param pVM Pointer to the VM.
4091 * @param pInstrGC Guest context pointer to privileged instruction
4092 * @param flags Patch flags
4093 */
4094VMMR3_INT_DECL(int) PATMR3AddHint(PVM pVM, RTRCPTR pInstrGC, uint32_t flags)
4095{
4096 Assert(pInstrGC);
4097 Assert(flags == PATMFL_CODE32);
4098
4099 Log(("PATMR3AddHint %RRv\n", pInstrGC));
4100 return PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_INSTR_HINT);
4101}
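
/*
 * Editorial sketch (comment only): a supervisor-code hint in use. The hint installs a CLI patch
 * but leaves it disabled until the instruction is actually hit (see the PATMFL_INSTR_HINT
 * handling in PATMR3InstallPatch below).
 *
 *   int rc = PATMR3AddHint(pVM, pInstrGC, PATMFL_CODE32);
 *   // equivalent to: PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_INSTR_HINT);
 */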
4102
4103/**
4104 * Patch privileged instruction at specified location
4105 *
4106 * @returns VBox status code.
4107 * @param pVM Pointer to the VM.
4108 * @param pInstrGC Guest context pointer to privileged instruction (0:32 flat address)
4109 * @param flags Patch flags
4110 *
4111 * @note returns failure if patching is not allowed or possible
4112 */
4113VMMR3_INT_DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
4114{
4115 DISCPUSTATE cpu;
4116 R3PTRTYPE(uint8_t *) pInstrHC;
4117 uint32_t cbInstr;
4118 PPATMPATCHREC pPatchRec;
4119 PCPUMCTX pCtx = 0;
4120 bool disret;
4121 int rc;
4122 PVMCPU pVCpu = VMMGetCpu0(pVM);
4123 LogFlow(("PATMR3InstallPatch: %08x (%#llx)\n", pInstrGC, flags));
4124
4125 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
4126
4127 if ( !pVM
4128 || pInstrGC == 0
4129 || (flags & ~(PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_SYSENTER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_REPLACE_FUNCTION_CALL|PATMFL_GUEST_SPECIFIC|PATMFL_INT3_REPLACEMENT|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_MMIO_ACCESS|PATMFL_TRAMPOLINE|PATMFL_INSTR_HINT|PATMFL_JUMP_CONFLICT)))
4130 {
4131 AssertFailed();
4132 return VERR_INVALID_PARAMETER;
4133 }
4134
4135 if (PATMIsEnabled(pVM) == false)
4136 return VERR_PATCHING_REFUSED;
4137
4138 /* Test for patch conflict only with patches that actually change guest code. */
4139 if (!(flags & (PATMFL_GUEST_SPECIFIC|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAMPOLINE)))
4140 {
4141 PPATCHINFO pConflictPatch = patmFindActivePatchByEntrypoint(pVM, pInstrGC);
4142 AssertReleaseMsg(pConflictPatch == 0, ("Unable to patch overwritten instruction at %RRv (%RRv)\n", pInstrGC, pConflictPatch->pPrivInstrGC));
4143 if (pConflictPatch != 0)
4144 return VERR_PATCHING_REFUSED;
4145 }
4146
4147 if (!(flags & PATMFL_CODE32))
4148 {
4149 /** @todo Only 32 bits code right now */
4150 AssertMsgFailed(("PATMR3InstallPatch: We don't support 16 bits code at this moment!!\n"));
4151 return VERR_NOT_IMPLEMENTED;
4152 }
4153
4154 /* We ran out of patch memory; don't bother anymore. */
4155 if (pVM->patm.s.fOutOfMemory == true)
4156 return VERR_PATCHING_REFUSED;
4157
4158#if 1 /* DONT COMMIT ENABLED! */
4159    /* Blacklisted NT4SP1 areas - debugging why we sometimes crash early on. */
4160 if ( 0
4161 //|| (pInstrGC - 0x80010000U) < 0x10000U // NT4SP1 HAL
4162 //|| (pInstrGC - 0x80010000U) < 0x5000U // NT4SP1 HAL
4163 //|| (pInstrGC - 0x80013000U) < 0x2000U // NT4SP1 HAL
4164 //|| (pInstrGC - 0x80014000U) < 0x1000U // NT4SP1 HAL
4165 //|| (pInstrGC - 0x80014000U) < 0x800U // NT4SP1 HAL
4166 //|| (pInstrGC - 0x80014400U) < 0x400U // NT4SP1 HAL
4167 //|| (pInstrGC - 0x80014400U) < 0x200U // NT4SP1 HAL
4168 //|| (pInstrGC - 0x80014400U) < 0x100U // NT4SP1 HAL
4169 //|| (pInstrGC - 0x80014500U) < 0x100U // NT4SP1 HAL - negative
4170 //|| (pInstrGC - 0x80014400U) < 0x80U // NT4SP1 HAL
4171 //|| (pInstrGC - 0x80014400U) < 0x80U // NT4SP1 HAL
4172 //|| (pInstrGC - 0x80014440U) < 0x40U // NT4SP1 HAL
4173 //|| (pInstrGC - 0x80014440U) < 0x20U // NT4SP1 HAL
4174 || pInstrGC == 0x80014447 /* KfLowerIrql */
4175 || 0)
4176 {
4177 Log(("PATMR3InstallPatch: %08x is blacklisted\n", pInstrGC));
4178 return VERR_PATCHING_REFUSED;
4179 }
4180#endif
4181
4182 /* Make sure the code selector is wide open; otherwise refuse. */
4183 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
4184 if (CPUMGetGuestCPL(pVCpu) == 0)
4185 {
4186 RTRCPTR pInstrGCFlat = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pInstrGC);
4187 if (pInstrGCFlat != pInstrGC)
4188 {
4189 Log(("PATMR3InstallPatch: code selector not wide open: %04x:%RRv != %RRv eflags=%08x\n", pCtx->cs.Sel, pInstrGCFlat, pInstrGC, pCtx->eflags.u32));
4190 return VERR_PATCHING_REFUSED;
4191 }
4192 }
4193
4194    /* Note: the OpenBSD specific check will break if we allow additional patches to be installed (int 3). */
4195 if (!(flags & PATMFL_GUEST_SPECIFIC))
4196 {
4197 /* New code. Make sure CSAM has a go at it first. */
4198 CSAMR3CheckCode(pVM, pInstrGC);
4199 }
4200
4201 /* Note: obsolete */
4202 if ( PATMIsPatchGCAddr(pVM, pInstrGC)
4203 && (flags & PATMFL_MMIO_ACCESS))
4204 {
4205 RTRCUINTPTR offset;
4206 void *pvPatchCoreOffset;
4207
4208 /* Find the patch record. */
4209 offset = pInstrGC - pVM->patm.s.pPatchMemGC;
4210 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
4211 if (pvPatchCoreOffset == NULL)
4212 {
4213 AssertMsgFailed(("PATMR3InstallPatch: patch not found at address %RRv!!\n", pInstrGC));
4214 return VERR_PATCH_NOT_FOUND; //fatal error
4215 }
4216 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
4217
4218 return patmPatchPATMMMIOInstr(pVM, pInstrGC, &pPatchRec->patch);
4219 }
4220
4221 AssertReturn(!PATMIsPatchGCAddr(pVM, pInstrGC), VERR_PATCHING_REFUSED);
4222
4223 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4224 if (pPatchRec)
4225 {
4226 Assert(!(flags & PATMFL_TRAMPOLINE));
4227
4228 /* Hints about existing patches are ignored. */
4229 if (flags & PATMFL_INSTR_HINT)
4230 return VERR_PATCHING_REFUSED;
4231
4232 if (pPatchRec->patch.uState == PATCH_DISABLE_PENDING)
4233 {
4234 Log(("PATMR3InstallPatch: disable operation is pending for patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
4235 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
4236 Assert(pPatchRec->patch.uState == PATCH_DISABLED);
4237 }
4238
4239 if (pPatchRec->patch.uState == PATCH_DISABLED)
4240 {
4241 /* A patch, for which we previously received a hint, will be enabled and turned into a normal patch. */
4242 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4243 {
4244 Log(("Enabling HINTED patch %RRv\n", pInstrGC));
4245 pPatchRec->patch.flags &= ~PATMFL_INSTR_HINT;
4246 }
4247 else
4248 Log(("Enabling patch %RRv again\n", pInstrGC));
4249
4250 /** @todo we shouldn't disable and enable patches too often (it's relatively cheap, but pointless if it always happens) */
4251 rc = PATMR3EnablePatch(pVM, pInstrGC);
4252 if (RT_SUCCESS(rc))
4253 return VWRN_PATCH_ENABLED;
4254
4255 return rc;
4256 }
4257 if ( pPatchRec->patch.uState == PATCH_ENABLED
4258 || pPatchRec->patch.uState == PATCH_DIRTY)
4259 {
4260 /*
4261 * The patch might have been overwritten.
4262 */
4263 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
4264 if (pPatchRec->patch.uState != PATCH_REFUSED && pPatchRec->patch.uState != PATCH_UNUSABLE)
4265 {
4266 /* Patch must have been overwritten; remove it and pretend nothing happened. */
4267 Log(("Patch an existing patched instruction?!? (%RRv)\n", pInstrGC));
4268 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_MMIO_ACCESS|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
4269 {
4270 if (flags & PATMFL_IDTHANDLER)
4271 pPatchRec->patch.flags |= (flags & (PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER|PATMFL_INTHANDLER)); /* update the type */
4272
4273 return VERR_PATM_ALREADY_PATCHED; /* already done once */
4274 }
4275 }
4276 rc = PATMR3RemovePatch(pVM, pInstrGC);
4277 if (RT_FAILURE(rc))
4278 return VERR_PATCHING_REFUSED;
4279 }
4280 else
4281 {
4282 AssertMsg(pPatchRec->patch.uState == PATCH_REFUSED || pPatchRec->patch.uState == PATCH_UNUSABLE, ("Patch an existing patched instruction?!? (%RRv, state=%d)\n", pInstrGC, pPatchRec->patch.uState));
4283 /* already tried it once! */
4284 return VERR_PATCHING_REFUSED;
4285 }
4286 }
4287
4288 RTGCPHYS GCPhys;
4289 rc = PGMGstGetPage(pVCpu, pInstrGC, NULL, &GCPhys);
4290 if (rc != VINF_SUCCESS)
4291 {
4292 Log(("PGMGstGetPage failed with %Rrc\n", rc));
4293 return rc;
4294 }
4295 /* Disallow patching instructions inside ROM code; complete function duplication is allowed though. */
4296 if ( !(flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAMPOLINE))
4297 && !PGMPhysIsGCPhysNormal(pVM, GCPhys))
4298 {
4299 Log(("Code at %RGv (phys %RGp) is in a ROM, MMIO or invalid page - refused\n", pInstrGC, GCPhys));
4300 return VERR_PATCHING_REFUSED;
4301 }
4302
4303 /* Initialize cache record for guest address translations. */
4304 bool fInserted;
4305 PATMP2GLOOKUPREC cacheRec;
4306 RT_ZERO(cacheRec);
4307
4308 pInstrHC = patmR3GCVirtToHCVirt(pVM, &cacheRec, pInstrGC);
4309 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
4310
4311 /* Allocate patch record. */
4312 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
4313 if (RT_FAILURE(rc))
4314 {
4315 Log(("Out of memory!!!!\n"));
4316 return VERR_NO_MEMORY;
4317 }
4318 pPatchRec->Core.Key = pInstrGC;
4319 pPatchRec->patch.uState = PATCH_REFUSED; /* default value */
4320 /* Insert patch record into the lookup tree. */
4321 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
4322 Assert(fInserted);
4323
4324 pPatchRec->patch.pPrivInstrGC = pInstrGC;
4325 pPatchRec->patch.flags = flags;
4326 pPatchRec->patch.uOpMode = (flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT;
4327 pPatchRec->patch.pTrampolinePatchesHead = NULL;
4328
4329 pPatchRec->patch.pInstrGCLowest = pInstrGC;
4330 pPatchRec->patch.pInstrGCHighest = pInstrGC;
4331
4332 if (!(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION | PATMFL_IDTHANDLER | PATMFL_SYSENTER | PATMFL_TRAMPOLINE)))
4333 {
4334 /*
4335 * Close proximity to an unusable patch is a possible hint that this patch would turn out to be dangerous too!
4336 */
4337 PPATMPATCHREC pPatchNear = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, (pInstrGC + SIZEOF_NEARJUMP32 - 1), false);
4338 if (pPatchNear)
4339 {
4340 if (pPatchNear->patch.uState == PATCH_UNUSABLE && pInstrGC < pPatchNear->patch.pPrivInstrGC && pInstrGC + SIZEOF_NEARJUMP32 > pPatchNear->patch.pPrivInstrGC)
4341 {
4342 Log(("Dangerous patch; would overwrite the unusable patch at %RRv\n", pPatchNear->patch.pPrivInstrGC));
4343
4344 pPatchRec->patch.uState = PATCH_UNUSABLE;
4345 /*
4346                 * Keep the new patch record (marked unusable) to prevent us from checking this instruction over and over again.
4347 */
4348 return VERR_PATCHING_REFUSED;
4349 }
4350 }
4351 }
4352
4353 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
4354 if (pPatchRec->patch.pTempInfo == 0)
4355 {
4356 Log(("Out of memory!!!!\n"));
4357 return VERR_NO_MEMORY;
4358 }
4359
4360 disret = patmR3DisInstrNoStrOpMode(pVM, &pPatchRec->patch, pInstrGC, NULL, PATMREAD_ORGCODE, &cpu, &cbInstr);
4361 if (disret == false)
4362 {
4363 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
4364 return VERR_PATCHING_REFUSED;
4365 }
4366
4367 AssertMsg(cbInstr <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", cbInstr));
4368 if (cbInstr > MAX_INSTR_SIZE)
4369 return VERR_PATCHING_REFUSED;
4370
4371 pPatchRec->patch.cbPrivInstr = cbInstr;
4372 pPatchRec->patch.opcode = cpu.pCurInstr->uOpcode;
4373
4374 /* Restricted hinting for now. */
4375 Assert(!(flags & PATMFL_INSTR_HINT) || cpu.pCurInstr->uOpcode == OP_CLI);
4376
4377 /* Initialize cache record patch pointer. */
4378 cacheRec.pPatch = &pPatchRec->patch;
4379
4380 /* Allocate statistics slot */
4381 if (pVM->patm.s.uCurrentPatchIdx < PATM_STAT_MAX_COUNTERS)
4382 {
4383 pPatchRec->patch.uPatchIdx = pVM->patm.s.uCurrentPatchIdx++;
4384 }
4385 else
4386 {
4387 Log(("WARNING: Patch index wrap around!!\n"));
4388 pPatchRec->patch.uPatchIdx = PATM_STAT_INDEX_DUMMY;
4389 }
4390
4391 if (pPatchRec->patch.flags & PATMFL_TRAPHANDLER)
4392 {
4393 rc = patmInstallTrapTrampoline(pVM, pInstrGC, pPatchRec, &cacheRec);
4394 }
4395 else
4396    if (pPatchRec->patch.flags & PATMFL_DUPLICATE_FUNCTION)
4397 {
4398 rc = patmDuplicateFunction(pVM, pInstrGC, pPatchRec, &cacheRec);
4399 }
4400 else
4401 if (pPatchRec->patch.flags & PATMFL_TRAMPOLINE)
4402 {
4403 rc = patmCreateTrampoline(pVM, pInstrGC, pPatchRec);
4404 }
4405 else
4406 if (pPatchRec->patch.flags & PATMFL_REPLACE_FUNCTION_CALL)
4407 {
4408 rc = patmReplaceFunctionCall(pVM, &cpu, pInstrGC, &cacheRec);
4409 }
4410 else
4411 if (pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT)
4412 {
4413 rc = patmR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4414 }
4415 else
4416 if (pPatchRec->patch.flags & PATMFL_MMIO_ACCESS)
4417 {
4418 rc = patmPatchMMIOInstr(pVM, pInstrGC, &cpu, &cacheRec);
4419 }
4420 else
4421 if (pPatchRec->patch.flags & (PATMFL_IDTHANDLER|PATMFL_SYSENTER))
4422 {
4423 if (pPatchRec->patch.flags & PATMFL_SYSENTER)
4424 pPatchRec->patch.flags |= PATMFL_IDTHANDLER; /* we treat a sysenter handler as an IDT handler */
4425
4426 rc = patmIdtHandler(pVM, pInstrGC, cbInstr, pPatchRec, &cacheRec);
4427#ifdef VBOX_WITH_STATISTICS
4428 if ( rc == VINF_SUCCESS
4429 && (pPatchRec->patch.flags & PATMFL_SYSENTER))
4430 {
4431 pVM->patm.s.uSysEnterPatchIdx = pPatchRec->patch.uPatchIdx;
4432 }
4433#endif
4434 }
4435 else
4436 if (pPatchRec->patch.flags & PATMFL_GUEST_SPECIFIC)
4437 {
4438 switch (cpu.pCurInstr->uOpcode)
4439 {
4440 case OP_SYSENTER:
4441 case OP_PUSH:
4442 rc = patmR3InstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4443 if (rc == VINF_SUCCESS)
4444 {
4445                    Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4447 return rc;
4448 }
4449 break;
4450
4451 default:
4452 rc = VERR_NOT_IMPLEMENTED;
4453 break;
4454 }
4455 }
4456 else
4457 {
4458 switch (cpu.pCurInstr->uOpcode)
4459 {
4460 case OP_SYSENTER:
4461 rc = patmR3InstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4462 if (rc == VINF_SUCCESS)
4463 {
4464 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4465 return VINF_SUCCESS;
4466 }
4467 break;
4468
4469#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
4470 case OP_JO:
4471 case OP_JNO:
4472 case OP_JC:
4473 case OP_JNC:
4474 case OP_JE:
4475 case OP_JNE:
4476 case OP_JBE:
4477 case OP_JNBE:
4478 case OP_JS:
4479 case OP_JNS:
4480 case OP_JP:
4481 case OP_JNP:
4482 case OP_JL:
4483 case OP_JNL:
4484 case OP_JLE:
4485 case OP_JNLE:
4486 case OP_JECXZ:
4487 case OP_LOOP:
4488 case OP_LOOPNE:
4489 case OP_LOOPE:
4490 case OP_JMP:
4491 if (pPatchRec->patch.flags & PATMFL_JUMP_CONFLICT)
4492 {
4493 rc = patmPatchJump(pVM, pInstrGC, pInstrHC, &cpu, pPatchRec);
4494 break;
4495 }
4496 return VERR_NOT_IMPLEMENTED;
4497#endif
4498
4499 case OP_PUSHF:
4500 case OP_CLI:
4501 Log(("PATMR3InstallPatch %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4502 rc = patmR3PatchBlock(pVM, pInstrGC, pInstrHC, cpu.pCurInstr->uOpcode, cbInstr, pPatchRec);
4503 break;
4504
4505#ifndef VBOX_WITH_SAFE_STR
4506 case OP_STR:
4507#endif
4508 case OP_SGDT:
4509 case OP_SLDT:
4510 case OP_SIDT:
4511 case OP_CPUID:
4512 case OP_LSL:
4513 case OP_LAR:
4514 case OP_SMSW:
4515 case OP_VERW:
4516 case OP_VERR:
4517 case OP_IRET:
4518#ifdef VBOX_WITH_RAW_RING1
4519 case OP_MOV:
4520#endif
4521 rc = patmR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4522 break;
4523
4524 default:
4525 return VERR_NOT_IMPLEMENTED;
4526 }
4527 }
4528
4529 if (rc != VINF_SUCCESS)
4530 {
4531 if (pPatchRec && pPatchRec->patch.nrPatch2GuestRecs)
4532 {
4533 patmEmptyTreeU32(pVM, &pPatchRec->patch.Patch2GuestAddrTree);
4534 pPatchRec->patch.nrPatch2GuestRecs = 0;
4535 }
4536 pVM->patm.s.uCurrentPatchIdx--;
4537 }
4538 else
4539 {
4540 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
4541 AssertRCReturn(rc, rc);
4542
4543        /* Keep track of the lower and upper boundaries of patched instructions. */
4544 if (pPatchRec->patch.pInstrGCLowest < pVM->patm.s.pPatchedInstrGCLowest)
4545 pVM->patm.s.pPatchedInstrGCLowest = pPatchRec->patch.pInstrGCLowest;
4546 if (pPatchRec->patch.pInstrGCHighest > pVM->patm.s.pPatchedInstrGCHighest)
4547 pVM->patm.s.pPatchedInstrGCHighest = pPatchRec->patch.pInstrGCHighest;
4548
4549 Log(("Patch lowest %RRv highest %RRv\n", pPatchRec->patch.pInstrGCLowest, pPatchRec->patch.pInstrGCHighest));
4550 Log(("Global lowest %RRv highest %RRv\n", pVM->patm.s.pPatchedInstrGCLowest, pVM->patm.s.pPatchedInstrGCHighest));
4551
4552 STAM_COUNTER_ADD(&pVM->patm.s.StatInstalled, 1);
4553 STAM_COUNTER_ADD(&pVM->patm.s.StatPATMMemoryUsed, pPatchRec->patch.cbPatchBlockSize);
4554
4555 rc = VINF_SUCCESS;
4556
4557        /* Patch hints are not enabled by default. Only when they are actually encountered. */
4558 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4559 {
4560 rc = PATMR3DisablePatch(pVM, pInstrGC);
4561 AssertRCReturn(rc, rc);
4562 }
4563
4564#ifdef VBOX_WITH_STATISTICS
4565 /* Register statistics counter */
4566 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
4567 {
4568 STAMR3RegisterCallback(pVM, &pPatchRec->patch, STAMVISIBILITY_NOT_GUI, STAMUNIT_GOOD_BAD, patmResetStat, patmPrintStat, "Patch statistics",
4569 "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4570#ifndef DEBUG_sandervl
4571 /* Full breakdown for the GUI. */
4572 STAMR3RegisterF(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx], STAMTYPE_RATIO_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_GOOD_BAD, PATMPatchType(pVM, &pPatchRec->patch),
4573 "/PATM/PatchBD/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4574 STAMR3RegisterF(pVM, &pPatchRec->patch.pPatchBlockOffset,STAMTYPE_X32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/PatchBD/0x%RRv/offPatchBlock", pPatchRec->patch.pPrivInstrGC);
4575 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchBlockSize,STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/PatchBD/0x%RRv/cbPatchBlockSize", pPatchRec->patch.pPrivInstrGC);
4576 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchJump, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/PatchBD/0x%RRv/cbPatchJump", pPatchRec->patch.pPrivInstrGC);
4577 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPrivInstr, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/PatchBD/0x%RRv/cbPrivInstr", pPatchRec->patch.pPrivInstrGC);
4578 STAMR3RegisterF(pVM, &pPatchRec->patch.cCodeWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/cCodeWrites", pPatchRec->patch.pPrivInstrGC);
4579 STAMR3RegisterF(pVM, &pPatchRec->patch.cInvalidWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/cInvalidWrites", pPatchRec->patch.pPrivInstrGC);
4580 STAMR3RegisterF(pVM, &pPatchRec->patch.cTraps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/cTraps", pPatchRec->patch.pPrivInstrGC);
4581 STAMR3RegisterF(pVM, &pPatchRec->patch.flags, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/PatchBD/0x%RRv/flags", pPatchRec->patch.pPrivInstrGC);
4582 STAMR3RegisterF(pVM, &pPatchRec->patch.nrJumpRecs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/nrJumpRecs", pPatchRec->patch.pPrivInstrGC);
4583 STAMR3RegisterF(pVM, &pPatchRec->patch.nrFixups, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/nrFixups", pPatchRec->patch.pPrivInstrGC);
4584 STAMR3RegisterF(pVM, &pPatchRec->patch.opcode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/opcode", pPatchRec->patch.pPrivInstrGC);
4585 STAMR3RegisterF(pVM, &pPatchRec->patch.uOldState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/PatchBD/0x%RRv/uOldState", pPatchRec->patch.pPrivInstrGC);
4586 STAMR3RegisterF(pVM, &pPatchRec->patch.uOpMode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/PatchBD/0x%RRv/uOpMode", pPatchRec->patch.pPrivInstrGC);
4587 /// @todo change the state to be a callback so we can get a state mnemonic instead.
4588 STAMR3RegisterF(pVM, &pPatchRec->patch.uState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/PatchBD/0x%RRv/uState", pPatchRec->patch.pPrivInstrGC);
4589#endif
4590 }
4591#endif
4592
4593 /* Add debug symbol. */
4594 patmR3DbgAddPatch(pVM, pPatchRec);
4595 }
4596 /* Free leftover lock if any. */
4597 if (cacheRec.Lock.pvMap)
4598 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4599 return rc;
4600}
4601
4602/**
4603 * Query instruction size
4604 *
4605 * @returns Instruction size in bytes, or 0 if it cannot be read or disassembled.
4606 * @param pVM Pointer to the VM.
4607 * @param pPatch Patch record
4608 * @param pInstrGC Instruction address
4609 */
4610static uint32_t patmGetInstrSize(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
4611{
4612 uint8_t *pInstrHC;
4613 PGMPAGEMAPLOCK Lock;
4614
4615 int rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pInstrGC, (const void **)&pInstrHC, &Lock);
4616 if (rc == VINF_SUCCESS)
4617 {
4618 DISCPUSTATE cpu;
4619 bool disret;
4620 uint32_t cbInstr;
4621
4622 disret = patmR3DisInstr(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE | PATMREAD_NOCHECK, &cpu, &cbInstr);
4623 PGMPhysReleasePageMappingLock(pVM, &Lock);
4624 if (disret)
4625 return cbInstr;
4626 }
4627 return 0;
4628}
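
/*
 * Editor's note: the block below is an illustrative sketch added during editing and is
 * not part of the original PATM.cpp. It spells out the map/use/release idiom that
 * patmGetInstrSize (and the cacheRec.Lock cleanup elsewhere in this file) relies on:
 * every successful PGMPhysGCPtr2CCPtrReadOnly must be paired with
 * PGMPhysReleasePageMappingLock, and the mapping may only be used while the lock is
 * held. The helper name is made up for the example.
 */
static int patmSketchPeekGuestByte(PVM pVM, RTRCPTR GCPtr, uint8_t *pbByte)
{
    PGMPAGEMAPLOCK Lock;
    const void    *pvInstrHC;
    int rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), GCPtr, &pvInstrHC, &Lock);
    if (rc == VINF_SUCCESS)
    {
        *pbByte = *(uint8_t const *)pvInstrHC;      /* use the mapping only while locked */
        PGMPhysReleasePageMappingLock(pVM, &Lock);  /* always release, even on the success path */
    }
    return rc;
}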
4629
4630/**
4631 * Add patch to page record
4632 *
4633 * @returns VBox status code.
4634 * @param pVM Pointer to the VM.
4635 * @param pPage Page address
4636 * @param pPatch Patch record
4637 */
4638int patmAddPatchToPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4639{
4640 PPATMPATCHPAGE pPatchPage;
4641 int rc;
4642
4643 Log(("patmAddPatchToPage: insert patch %RHv to page %RRv\n", pPatch, pPage));
4644
4645 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4646 if (pPatchPage)
4647 {
4648 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4649 if (pPatchPage->cCount == pPatchPage->cMaxPatches)
4650 {
4651 uint32_t cMaxPatchesOld = pPatchPage->cMaxPatches;
4652 PPATCHINFO *papPatchOld = pPatchPage->papPatch;
4653
4654 pPatchPage->cMaxPatches += PATMPATCHPAGE_PREALLOC_INCREMENT;
4655 rc = MMHyperAlloc(pVM, sizeof(pPatchPage->papPatch[0]) * pPatchPage->cMaxPatches, 0, MM_TAG_PATM_PATCH,
4656 (void **)&pPatchPage->papPatch);
4657 if (RT_FAILURE(rc))
4658 {
4659 Log(("Out of memory!!!!\n"));
4660 return VERR_NO_MEMORY;
4661 }
4662 memcpy(pPatchPage->papPatch, papPatchOld, cMaxPatchesOld * sizeof(pPatchPage->papPatch[0]));
4663 MMHyperFree(pVM, papPatchOld);
4664 }
4665 pPatchPage->papPatch[pPatchPage->cCount] = pPatch;
4666 pPatchPage->cCount++;
4667 }
4668 else
4669 {
4670 bool fInserted;
4671
4672 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHPAGE), 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage);
4673 if (RT_FAILURE(rc))
4674 {
4675 Log(("Out of memory!!!!\n"));
4676 return VERR_NO_MEMORY;
4677 }
4678 pPatchPage->Core.Key = pPage;
4679 pPatchPage->cCount = 1;
4680 pPatchPage->cMaxPatches = PATMPATCHPAGE_PREALLOC_INCREMENT;
4681
4682 rc = MMHyperAlloc(pVM, sizeof(pPatchPage->papPatch[0]) * PATMPATCHPAGE_PREALLOC_INCREMENT, 0, MM_TAG_PATM_PATCH,
4683 (void **)&pPatchPage->papPatch);
4684 if (RT_FAILURE(rc))
4685 {
4686 Log(("Out of memory!!!!\n"));
4687 MMHyperFree(pVM, pPatchPage);
4688 return VERR_NO_MEMORY;
4689 }
4690 pPatchPage->papPatch[0] = pPatch;
4691
4692 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, &pPatchPage->Core);
4693 Assert(fInserted);
4694 pVM->patm.s.cPageRecords++;
4695
4696 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageInserted);
4697 }
4698 CSAMR3MonitorPage(pVM, pPage, CSAM_TAG_PATM);
4699
4700 /* Get the closest guest instruction (from below) */
4701 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4702 Assert(pGuestToPatchRec);
4703 if (pGuestToPatchRec)
4704 {
4705 LogFlow(("patmAddPatchToPage: lowest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pLowestAddrGC));
4706 if ( pPatchPage->pLowestAddrGC == 0
4707 || pPatchPage->pLowestAddrGC > (RTRCPTR)pGuestToPatchRec->Core.Key)
4708 {
4709 RTRCUINTPTR offset;
4710
4711 pPatchPage->pLowestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4712
4713 offset = pPatchPage->pLowestAddrGC & PAGE_OFFSET_MASK;
4714 /* If we're too close to the page boundary, then make sure an
4715 instruction from the previous page doesn't cross the
4716 boundary itself. */
4717 if (offset && offset < MAX_INSTR_SIZE)
4718 {
4719 /* Get the closest guest instruction (from above) */
4720 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage-1, false);
4721
4722 if (pGuestToPatchRec)
4723 {
4724 uint32_t size = patmGetInstrSize(pVM, pPatch, (RTRCPTR)pGuestToPatchRec->Core.Key);
4725 if ((RTRCUINTPTR)pGuestToPatchRec->Core.Key + size > pPage)
4726 {
4727 pPatchPage->pLowestAddrGC = pPage;
4728 LogFlow(("patmAddPatchToPage: new lowest %RRv\n", pPatchPage->pLowestAddrGC));
4729 }
4730 }
4731 }
4732 }
4733 }
4734
4735 /* Get the closest guest instruction (from above) */
4736 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage+PAGE_SIZE-1, false);
4737 Assert(pGuestToPatchRec);
4738 if (pGuestToPatchRec)
4739 {
4740 LogFlow(("patmAddPatchToPage: highest patch page address %RRv current highest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pHighestAddrGC));
4741 if ( pPatchPage->pHighestAddrGC == 0
4742 || pPatchPage->pHighestAddrGC <= (RTRCPTR)pGuestToPatchRec->Core.Key)
4743 {
4744 pPatchPage->pHighestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4745 /* Increase by instruction size. */
4746 uint32_t size = patmGetInstrSize(pVM, pPatch, pPatchPage->pHighestAddrGC);
4747//// Assert(size);
4748 pPatchPage->pHighestAddrGC += size;
4749 LogFlow(("patmAddPatchToPage: new highest %RRv\n", pPatchPage->pHighestAddrGC));
4750 }
4751 }
4752
4753 return VINF_SUCCESS;
4754}
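
/*
 * Editor's note: illustrative sketch added during editing, not part of the original
 * PATM.cpp. It shows what the pLowestAddrGC/pHighestAddrGC bookkeeping above buys us:
 * PATMR3PatchWrite further down can reject most writes with this cheap interval test
 * before walking any per-patch trees. The helper name is made up; the comparison
 * mirrors the check used in PATMR3PatchWrite.
 */
static bool patmSketchWriteTouchesPatchedRange(RTRCPTR pLowestAddrGC, RTRCPTR pHighestAddrGC,
                                               RTRCPTR GCPtrWrite, uint32_t cbWrite)
{
    /* No patched instructions recorded for this page -> nothing to do. */
    if (!pLowestAddrGC || !pHighestAddrGC)
        return false;
    /* Does [GCPtrWrite, GCPtrWrite + cbWrite) overlap the patched range of the page? */
    if (   pLowestAddrGC  > (RTRCPTR)((RTRCUINTPTR)GCPtrWrite + cbWrite - 1)
        || pHighestAddrGC < GCPtrWrite)
        return false;
    return true;
}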
4755
4756/**
4757 * Remove patch from page record
4758 *
4759 * @returns VBox status code.
4760 * @param pVM Pointer to the VM.
4761 * @param pPage Page address
4762 * @param pPatch Patch record
4763 */
4764int patmRemovePatchFromPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4765{
4766 PPATMPATCHPAGE pPatchPage;
4767 int rc;
4768
4769 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4770 Assert(pPatchPage);
4771
4772 if (!pPatchPage)
4773 return VERR_INVALID_PARAMETER;
4774
4775 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4776
4777 Log(("patmRemovePatchPage: remove patch %RHv from page %RRv\n", pPatch, pPage));
4778 if (pPatchPage->cCount > 1)
4779 {
4780 uint32_t i;
4781
4782 /* Used by multiple patches */
4783 for (i = 0; i < pPatchPage->cCount; i++)
4784 {
4785 if (pPatchPage->papPatch[i] == pPatch)
4786 {
4787 /* close the gap between the remaining pointers. */
4788 uint32_t cNew = --pPatchPage->cCount;
4789 if (i < cNew)
4790 pPatchPage->papPatch[i] = pPatchPage->papPatch[cNew];
4791 pPatchPage->papPatch[cNew] = NULL;
4792 return VINF_SUCCESS;
4793 }
4794 }
4795 AssertMsgFailed(("Unable to find patch %RHv in page %RRv\n", pPatch, pPage));
4796 }
4797 else
4798 {
4799 PPATMPATCHPAGE pPatchNode;
4800
4801 Log(("patmRemovePatchFromPage %RRv\n", pPage));
4802
4803 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageRemoved);
4804 pPatchNode = (PPATMPATCHPAGE)RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4805 Assert(pPatchNode && pPatchNode == pPatchPage);
4806
4807 Assert(pPatchPage->papPatch);
4808 rc = MMHyperFree(pVM, pPatchPage->papPatch);
4809 AssertRC(rc);
4810 rc = MMHyperFree(pVM, pPatchPage);
4811 AssertRC(rc);
4812 pVM->patm.s.cPageRecords--;
4813 }
4814 return VINF_SUCCESS;
4815}
4816
4817/**
4818 * Insert page records for all guest pages that contain instructions that were recompiled for this patch
4819 *
4820 * @returns VBox status code.
4821 * @param pVM Pointer to the VM.
4822 * @param pPatch Patch record
4823 */
4824int patmInsertPatchPages(PVM pVM, PPATCHINFO pPatch)
4825{
4826 int rc;
4827 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4828
4829 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4830 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4831 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4832
4833 /** @todo optimize better (large gaps between current and next used page) */
4834 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4835 {
4836 /* Get the closest guest instruction (from above) */
4837 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4838 if ( pGuestToPatchRec
4839 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage)
4840 )
4841 {
4842 /* Code in page really patched -> add record */
4843 rc = patmAddPatchToPage(pVM, pPage, pPatch);
4844 AssertRC(rc);
4845 }
4846 }
4847 pPatch->flags |= PATMFL_CODE_MONITORED;
4848 return VINF_SUCCESS;
4849}
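
/*
 * Editor's note: illustrative sketch added during editing, not part of the original
 * PATM.cpp. It condenses the page walk shared by patmInsertPatchPages and
 * patmRemovePatchPages: both ends of the patched instruction range are masked down to
 * page boundaries and every page in between is visited once. The real functions also
 * skip pages that contain no recompiled instructions; the helper name and the fAdd
 * parameter are made up for the example.
 */
static void patmSketchWalkPatchPages(PVM pVM, PPATCHINFO pPatch, bool fAdd)
{
    RTRCUINTPTR pPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest  & PAGE_BASE_GC_MASK;
    RTRCUINTPTR pPageEnd   = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
    for (RTRCUINTPTR pPage = pPageStart; pPage <= pPageEnd; pPage += PAGE_SIZE)
    {
        if (fAdd)
            patmAddPatchToPage(pVM, pPage, pPatch);       /* defined earlier in this file */
        else
            patmRemovePatchFromPage(pVM, pPage, pPatch);  /* defined earlier in this file */
    }
}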
4850
4851/**
4852 * Remove page records for all guest pages that contain instructions that were recompiled for this patch
4853 *
4854 * @returns VBox status code.
4855 * @param pVM Pointer to the VM.
4856 * @param pPatch Patch record
4857 */
4858static int patmRemovePatchPages(PVM pVM, PPATCHINFO pPatch)
4859{
4860 int rc;
4861 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4862
4863 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4864 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4865 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4866
4867 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4868 {
4869 /* Get the closest guest instruction (from above) */
4870 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4871 if ( pGuestToPatchRec
4872 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage) /** @todo bird: PAGE_ADDRESS is for the current context really. check out these. */
4873 )
4874 {
4875 /* Code in page really patched -> remove record */
4876 rc = patmRemovePatchFromPage(pVM, pPage, pPatch);
4877 AssertRC(rc);
4878 }
4879 }
4880 pPatch->flags &= ~PATMFL_CODE_MONITORED;
4881 return VINF_SUCCESS;
4882}
4883
4884/**
4885 * Notifies PATM about a (potential) write to code that has been patched.
4886 *
4887 * @returns VBox status code.
4888 * @param pVM Pointer to the VM.
4889 * @param GCPtr GC pointer to write address
4890 * @param cbWrite Nr of bytes to write
4891 *
4892 */
4893VMMR3_INT_DECL(int) PATMR3PatchWrite(PVM pVM, RTRCPTR GCPtr, uint32_t cbWrite)
4894{
4895 RTRCUINTPTR pWritePageStart, pWritePageEnd, pPage;
4896
4897 Log(("PATMR3PatchWrite %RRv %x\n", GCPtr, cbWrite));
4898
4899 Assert(VM_IS_EMT(pVM));
4900 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
4901
4902 /* Quick boundary check */
4903 if ( GCPtr < pVM->patm.s.pPatchedInstrGCLowest
4904 || GCPtr > pVM->patm.s.pPatchedInstrGCHighest
4905 )
4906 return VINF_SUCCESS;
4907
4908 STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWrite, a);
4909
4910 pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
4911 pWritePageEnd = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;
4912
4913 for (pPage = pWritePageStart; pPage <= pWritePageEnd; pPage += PAGE_SIZE)
4914 {
4915loop_start:
4916 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4917 if (pPatchPage)
4918 {
4919 uint32_t i;
4920 bool fValidPatchWrite = false;
4921
4922 /* Quick check to see if the write is in the patched part of the page */
4923 if ( pPatchPage->pLowestAddrGC > (RTRCPTR)((RTRCUINTPTR)GCPtr + cbWrite - 1)
4924 || pPatchPage->pHighestAddrGC < GCPtr)
4925 {
4926 break;
4927 }
4928
4929 for (i=0;i<pPatchPage->cCount;i++)
4930 {
4931 if (pPatchPage->papPatch[i])
4932 {
4933 PPATCHINFO pPatch = pPatchPage->papPatch[i];
4934 RTRCPTR pPatchInstrGC;
4935 //unused: bool fForceBreak = false;
4936
4937 Assert(pPatchPage->papPatch[i]->flags & PATMFL_CODE_MONITORED);
4938 /** @todo inefficient and includes redundant checks for multiple pages. */
4939 for (uint32_t j=0; j<cbWrite; j++)
4940 {
4941 RTRCPTR pGuestPtrGC = (RTRCPTR)((RTRCUINTPTR)GCPtr + j);
4942
4943 if ( pPatch->cbPatchJump
4944 && pGuestPtrGC >= pPatch->pPrivInstrGC
4945 && pGuestPtrGC < pPatch->pPrivInstrGC + pPatch->cbPatchJump)
4946 {
4947 /* The guest is about to overwrite the 5 byte jump to patch code. Remove the patch. */
4948 Log(("PATMR3PatchWrite: overwriting jump to patch code -> remove patch.\n"));
4949 int rc = PATMR3RemovePatch(pVM, pPatch->pPrivInstrGC);
4950 if (rc == VINF_SUCCESS)
4951 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4952 goto loop_start;
4953
4954 continue;
4955 }
4956
4957 /* Find the closest instruction from below; the above quick check ensured that we are indeed in patched code */
4958 pPatchInstrGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4959 if (!pPatchInstrGC)
4960 {
4961 RTRCPTR pClosestInstrGC;
4962 uint32_t size;
4963
4964 pPatchInstrGC = patmGuestGCPtrToClosestPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4965 if (pPatchInstrGC)
4966 {
4967 pClosestInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchInstrGC);
4968 Assert(pClosestInstrGC <= pGuestPtrGC);
4969 size = patmGetInstrSize(pVM, pPatch, pClosestInstrGC);
4970 /* Check if this is not a write into a gap between two patches */
4971 if (pClosestInstrGC + size - 1 < pGuestPtrGC)
4972 pPatchInstrGC = 0;
4973 }
4974 }
4975 if (pPatchInstrGC)
4976 {
4977 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
4978
4979 fValidPatchWrite = true;
4980
4981 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
4982 Assert(pPatchToGuestRec);
4983 if (pPatchToGuestRec && !pPatchToGuestRec->fDirty)
4984 {
4985 Log(("PATMR3PatchWrite: Found patched instruction %RRv -> %RRv\n", pGuestPtrGC, pPatchInstrGC));
4986
4987 if (++pPatch->cCodeWrites > PATM_MAX_CODE_WRITES)
4988 {
4989 LogRel(("PATM: Disable block at %RRv - write %RRv-%RRv\n", pPatch->pPrivInstrGC, pGuestPtrGC, pGuestPtrGC+cbWrite));
4990
4991 patmR3MarkDirtyPatch(pVM, pPatch);
4992
4993 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4994 goto loop_start;
4995 }
4996 else
4997 {
4998 /* Replace the patch instruction with a breakpoint; when it's hit, then we'll attempt to recompile the instruction again. */
4999 uint8_t *pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pPatchInstrGC);
5000
5001 pPatchToGuestRec->u8DirtyOpcode = *pInstrHC;
5002 pPatchToGuestRec->fDirty = true;
5003
5004 *pInstrHC = 0xCC;
5005
5006 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirty);
5007 }
5008 }
5009 /* else already marked dirty */
5010 }
5011 }
5012 }
5013 } /* for each patch */
5014
5015 if (fValidPatchWrite == false)
5016 {
5017 /* Write to a part of the page that either:
5018 * - doesn't contain any code (shared code/data); rather unlikely
5019 * - belongs to an old code page that's no longer in active use.
5020 */
5021invalid_write_loop_start:
5022 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
5023
5024 if (pPatchPage)
5025 {
5026 for (i=0;i<pPatchPage->cCount;i++)
5027 {
5028 PPATCHINFO pPatch = pPatchPage->papPatch[i];
5029
5030 if (pPatch->cInvalidWrites > PATM_MAX_INVALID_WRITES)
5031 {
5032 /* Note: possibly dangerous assumption that all future writes will be harmless. */
5033 if (pPatch->flags & PATMFL_IDTHANDLER)
5034 {
5035 LogRel(("PATM: Stop monitoring IDT handler pages at %RRv - invalid write %RRv-%RRv (this is not a fatal error)\n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
5036
5037 Assert(pPatch->flags & PATMFL_CODE_MONITORED);
5038 int rc = patmRemovePatchPages(pVM, pPatch);
5039 AssertRC(rc);
5040 }
5041 else
5042 {
5043 LogRel(("PATM: Disable block at %RRv - invalid write %RRv-%RRv \n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
5044 patmR3MarkDirtyPatch(pVM, pPatch);
5045 }
5046 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
5047 goto invalid_write_loop_start;
5048 }
5049 } /* for */
5050 }
5051 }
5052 }
5053 }
5054 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWrite, a);
5055 return VINF_SUCCESS;
5056
5057}
5058
5059/**
5060 * Disable all patches in a flushed page
5061 *
5062 * @returns VBox status code
5063 * @param pVM Pointer to the VM.
5064 * @param addr GC address of the page to flush
5065 * @note Currently only called by CSAMR3FlushPage; optimization to avoid
5066 * having to double check if the physical address has changed
5067 */
5068VMMR3_INT_DECL(int) PATMR3FlushPage(PVM pVM, RTRCPTR addr)
5069{
5070 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5071
5072 addr &= PAGE_BASE_GC_MASK;
5073
5074 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, addr);
5075 if (pPatchPage)
5076 {
5077 int i;
5078
5079 /* From top to bottom as the array is modified by PATMR3MarkDirtyPatch. */
5080 for (i=(int)pPatchPage->cCount-1;i>=0;i--)
5081 {
5082 if (pPatchPage->papPatch[i])
5083 {
5084 PPATCHINFO pPatch = pPatchPage->papPatch[i];
5085
5086 Log(("PATMR3FlushPage %RRv remove patch at %RRv\n", addr, pPatch->pPrivInstrGC));
5087 patmR3MarkDirtyPatch(pVM, pPatch);
5088 }
5089 }
5090 STAM_COUNTER_INC(&pVM->patm.s.StatFlushed);
5091 }
5092 return VINF_SUCCESS;
5093}
5094
5095/**
5096 * Checks if the instruction at the specified address has already been patched.
5097 *
5098 * @returns boolean, patched or not
5099 * @param pVM Pointer to the VM.
5100 * @param pInstrGC Guest context pointer to instruction
5101 */
5102VMMR3_INT_DECL(bool) PATMR3HasBeenPatched(PVM pVM, RTRCPTR pInstrGC)
5103{
5104 Assert(!HMIsEnabled(pVM));
5105 PPATMPATCHREC pPatchRec;
5106 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5107 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
5108 return true;
5109 return false;
5110}
5111
5112/**
5113 * Query the opcode of the original code that was overwritten by the 5 byte patch jump
5114 *
5115 * @returns VBox status code.
5116 * @param pVM Pointer to the VM.
5117 * @param pInstrGC GC address of instr
5118 * @param pByte opcode byte pointer (OUT)
5119 *
5120 */
5121VMMR3DECL(int) PATMR3QueryOpcode(PVM pVM, RTRCPTR pInstrGC, uint8_t *pByte)
5122{
5123 PPATMPATCHREC pPatchRec;
5124
5125 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
5126
5127 /* Shortcut. */
5128 if (!PATMIsEnabled(pVM))
5129 return VERR_PATCH_NOT_FOUND;
5130 Assert(!HMIsEnabled(pVM));
5131 if ( pInstrGC < pVM->patm.s.pPatchedInstrGCLowest
5132 || pInstrGC > pVM->patm.s.pPatchedInstrGCHighest)
5133 return VERR_PATCH_NOT_FOUND;
5134
5135 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5136 // if the patch is enabled and the pointer lies within 5 bytes of this priv instr ptr, then we've got a hit!
5137 if ( pPatchRec
5138 && pPatchRec->patch.uState == PATCH_ENABLED
5139 && pInstrGC >= pPatchRec->patch.pPrivInstrGC
5140 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5141 {
5142 RTRCPTR offset = pInstrGC - pPatchRec->patch.pPrivInstrGC;
5143 *pByte = pPatchRec->patch.aPrivInstr[offset];
5144
5145 if (pPatchRec->patch.cbPatchJump == 1)
5146 {
5147 Log(("PATMR3QueryOpcode: returning opcode %2X for instruction at %RRv\n", *pByte, pInstrGC));
5148 }
5149 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5150 return VINF_SUCCESS;
5151 }
5152 return VERR_PATCH_NOT_FOUND;
5153}
5154
5155/**
5156 * Read instruction bytes of the original code that was overwritten by the 5
5157 * byte patch jump.
5158 *
5159 * @returns VINF_SUCCESS or VERR_PATCH_NOT_FOUND.
5160 * @param pVM Pointer to the VM.
5161 * @param GCPtrInstr GC address of instr
5162 * @param pbDst The output buffer.
5163 * @param cbToRead The maximum number bytes to read.
5164 * @param pcbRead Where to return the actual number of bytes read.
5165 */
5166VMMR3_INT_DECL(int) PATMR3ReadOrgInstr(PVM pVM, RTGCPTR32 GCPtrInstr, uint8_t *pbDst, size_t cbToRead, size_t *pcbRead)
5167{
5168 /* Shortcut. */
5169 if (!PATMIsEnabled(pVM))
5170 return VERR_PATCH_NOT_FOUND;
5171 Assert(!HMIsEnabled(pVM));
5172 if ( GCPtrInstr < pVM->patm.s.pPatchedInstrGCLowest
5173 || GCPtrInstr > pVM->patm.s.pPatchedInstrGCHighest)
5174 return VERR_PATCH_NOT_FOUND;
5175
5176 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
5177
5178 /*
5179 * If the patch is enabled and the pointer lies within 5 bytes of this
5180 * priv instr ptr, then we've got a hit!
5181 */
5182 RTGCPTR32 off;
5183 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree,
5184 GCPtrInstr, false /*fAbove*/);
5185 if ( pPatchRec
5186 && pPatchRec->patch.uState == PATCH_ENABLED
5187 && (off = GCPtrInstr - pPatchRec->patch.pPrivInstrGC) < pPatchRec->patch.cbPatchJump)
5188 {
5189 uint8_t const *pbSrc = &pPatchRec->patch.aPrivInstr[off];
5190 uint32_t const cbMax = pPatchRec->patch.cbPatchJump - off;
5191 if (cbToRead > cbMax)
5192 cbToRead = cbMax;
5193 switch (cbToRead)
5194 {
5195 case 5: pbDst[4] = pbSrc[4];
5196 case 4: pbDst[3] = pbSrc[3];
5197 case 3: pbDst[2] = pbSrc[2];
5198 case 2: pbDst[1] = pbSrc[1];
5199 case 1: pbDst[0] = pbSrc[0];
5200 break;
5201 default:
5202 memcpy(pbDst, pbSrc, cbToRead);
5203 }
5204 *pcbRead = cbToRead;
5205
5206 if (pPatchRec->patch.cbPatchJump == 1)
5207 Log(("PATMR3ReadOrgInstr: returning opcode %.*Rhxs for instruction at %RX32\n", cbToRead, pbSrc, GCPtrInstr));
5208 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5209 return VINF_SUCCESS;
5210 }
5211
5212 return VERR_PATCH_NOT_FOUND;
5213}
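
/*
 * Editor's note: usage sketch added during editing, not part of the original PATM.cpp.
 * A hypothetical caller that prefers the saved pre-patch bytes and falls back to a
 * plain guest read when the address is not covered by a patch jump (or only partially
 * covered). PATMR3ReadOrgInstr and PGMPhysSimpleReadGCPtr are used with the signatures
 * seen in this file; the helper name is made up.
 */
static int patmSketchReadGuestBytes(PVM pVM, RTGCPTR32 GCPtrInstr, uint8_t *pbDst, size_t cbToRead)
{
    size_t cbRead = 0;
    int rc = PATMR3ReadOrgInstr(pVM, GCPtrInstr, pbDst, cbToRead, &cbRead);
    if (rc == VINF_SUCCESS && cbRead == cbToRead)
        return VINF_SUCCESS;                /* everything came from the saved original bytes */
    if (rc != VINF_SUCCESS)
        cbRead = 0;                         /* not covered by a patch jump at all */
    /* The bytes after the patch jump are unmodified, so read the rest from guest memory. */
    return PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pbDst + cbRead, GCPtrInstr + cbRead, cbToRead - cbRead);
}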
5214
5215/**
5216 * Disable patch for privileged instruction at specified location
5217 *
5218 * @returns VBox status code.
5219 * @param pVM Pointer to the VM.
5220 * @param pInstrGC Guest context pointer to the privileged instruction
5221 *
5222 * @note Returns failure if patching is not allowed or not possible.
5223 *
5224 */
5225VMMR3_INT_DECL(int) PATMR3DisablePatch(PVM pVM, RTRCPTR pInstrGC)
5226{
5227 PPATMPATCHREC pPatchRec;
5228 PPATCHINFO pPatch;
5229
5230 Log(("PATMR3DisablePatch: %RRv\n", pInstrGC));
5231 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5232 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5233 if (pPatchRec)
5234 {
5235 int rc = VINF_SUCCESS;
5236
5237 pPatch = &pPatchRec->patch;
5238
5239 /* Already disabled? */
5240 if (pPatch->uState == PATCH_DISABLED)
5241 return VINF_SUCCESS;
5242
5243 /* Clear the IDT entries for the patch we're disabling. */
5244 /* Note: very important as we clear IF in the patch itself */
5245 /** @todo this needs to be changed */
5246 if (pPatch->flags & PATMFL_IDTHANDLER)
5247 {
5248 uint32_t iGate;
5249
5250 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
5251 if (iGate != (uint32_t)~0)
5252 {
5253 TRPMR3SetGuestTrapHandler(pVM, iGate, TRPM_INVALID_HANDLER);
5254 if (++cIDTHandlersDisabled < 256)
5255 LogRel(("PATM: Disabling IDT %x patch handler %RRv\n", iGate, pInstrGC));
5256 }
5257 }
5258
5259 /* Mark the entry with a breakpoint in case somebody else calls it later on (cli patch used as a function, trampoline or IDT patches) */
5260 if ( pPatch->pPatchBlockOffset
5261 && pPatch->uState == PATCH_ENABLED)
5262 {
5263 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5264 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5265 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5266 }
5267
5268 /* IDT or function patches haven't changed any guest code. */
5269 if (pPatch->flags & PATMFL_PATCHED_GUEST_CODE)
5270 {
5271 Assert(pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP);
5272 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK)));
5273
5274 if (pPatch->uState != PATCH_REFUSED)
5275 {
5276 uint8_t temp[16];
5277
5278 Assert(pPatch->cbPatchJump < sizeof(temp));
5279
5280 /* Let's first check if the guest code is still the same. */
5281 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5282 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5283 if (rc == VINF_SUCCESS)
5284 {
5285 RTRCINTPTR displ = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);
5286
5287 if ( temp[0] != 0xE9 /* jmp opcode */
5288 || *(RTRCINTPTR *)(&temp[1]) != displ
5289 )
5290 {
5291 Log(("PATMR3DisablePatch: Can't disable a patch who's guest code has changed!!\n"));
5292 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5293 /* Remove it completely */
5294 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5295 rc = PATMR3RemovePatch(pVM, pInstrGC);
5296 AssertRC(rc);
5297 return VWRN_PATCH_REMOVED;
5298 }
5299 patmRemoveJumpToPatch(pVM, pPatch);
5300 }
5301 else
5302 {
5303 Log(("PATMR3DisablePatch: unable to disable patch -> mark PATCH_DISABLE_PENDING\n"));
5304 pPatch->uState = PATCH_DISABLE_PENDING;
5305 }
5306 }
5307 else
5308 {
5309 AssertMsgFailed(("Patch was refused!\n"));
5310 return VERR_PATCH_ALREADY_DISABLED;
5311 }
5312 }
5313 else
5314 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5315 {
5316 uint8_t temp[16];
5317
5318 Assert(pPatch->cbPatchJump < sizeof(temp));
5319
5320 /* Let's first check if the guest code is still the same. */
5321 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5322 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5323 if (rc == VINF_SUCCESS)
5324 {
5325 if (temp[0] != 0xCC)
5326 {
5327 Log(("PATMR3DisablePatch: Can't disable a patch who's guest code has changed!!\n"));
5328 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5329 /* Remove it completely */
5330 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5331 rc = PATMR3RemovePatch(pVM, pInstrGC);
5332 AssertRC(rc);
5333 return VWRN_PATCH_REMOVED;
5334 }
5335 patmDeactivateInt3Patch(pVM, pPatch);
5336 }
5337 }
5338
5339 if (rc == VINF_SUCCESS)
5340 {
5341 /* Save old state and mark this one as disabled (so it can be enabled later on). */
5342 if (pPatch->uState == PATCH_DISABLE_PENDING)
5343 {
5344 /* Just to be safe, let's make sure this one can never be reused; the patch might be marked dirty already (int3 at start) */
5345 pPatch->uState = PATCH_UNUSABLE;
5346 }
5347 else
5348 if (pPatch->uState != PATCH_DIRTY)
5349 {
5350 pPatch->uOldState = pPatch->uState;
5351 pPatch->uState = PATCH_DISABLED;
5352 }
5353 STAM_COUNTER_ADD(&pVM->patm.s.StatDisabled, 1);
5354 }
5355
5356 Log(("PATMR3DisablePatch: disabled patch at %RRv\n", pInstrGC));
5357 return VINF_SUCCESS;
5358 }
5359 Log(("Patch not found!\n"));
5360 return VERR_PATCH_NOT_FOUND;
5361}
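
/*
 * Editor's note: illustrative sketch added during editing, not part of the original
 * PATM.cpp. It makes the 5 byte near jump check performed by PATMR3DisablePatch above
 * explicit: byte 0 must be the E9h (jmp rel32) opcode and the following rel32 must
 * equal target - (source + SIZEOF_NEARJUMP32). The helper name is made up.
 */
static bool patmSketchIsJumpToPatch(uint8_t const *pbGuest, RTRCPTR pPrivInstrGC, RTRCPTR pPatchCodeGC)
{
    if (pbGuest[0] != 0xE9)     /* not a near relative jump */
        return false;
    RTRCINTPTR displ = (RTRCUINTPTR)pPatchCodeGC - ((RTRCUINTPTR)pPrivInstrGC + SIZEOF_NEARJUMP32);
    return *(RTRCINTPTR const *)&pbGuest[1] == displ;   /* 32-bit displacement still points at the patch? */
}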
5362
5363/**
5364 * Permanently disable patch for privileged instruction at specified location
5365 *
5366 * @returns VBox status code.
5367 * @param pVM Pointer to the VM.
5368 * @param pInstrGC Guest context instruction pointer
5369 * @param pConflictAddr Guest context pointer which conflicts with specified patch
5370 * @param pConflictPatch Conflicting patch
5371 *
5372 */
5373static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pConflictPatch)
5374{
5375 NOREF(pConflictAddr);
5376#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
5377 PATCHINFO patch;
5378 DISCPUSTATE cpu;
5379 R3PTRTYPE(uint8_t *) pInstrHC;
5380 uint32_t cbInstr;
5381 bool disret;
5382 int rc;
5383
5384 RT_ZERO(patch);
5385 pInstrHC = patmR3GCVirtToHCVirt(pVM, &patch, pInstrGC);
5386 disret = patmR3DisInstr(pVM, &patch, pInstrGC, pInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
5387 /*
5388 * If it's a 5 byte relative jump, then we can work around the problem by replacing the 32-bit relative offset
5389 * with one that jumps right into the conflict patch.
5390 * Otherwise we must disable the conflicting patch to avoid serious problems.
5391 */
5392 if ( disret == true
5393 && (pConflictPatch->flags & PATMFL_CODE32)
5394 && (cpu.pCurInstr->uOpcode == OP_JMP || (cpu.pCurInstr->fOpType & DISOPTYPE_COND_CONTROLFLOW))
5395 && (cpu.Param1.fUse & DISUSE_IMMEDIATE32_REL))
5396 {
5397 /* Hint patches must be enabled first. */
5398 if (pConflictPatch->flags & PATMFL_INSTR_HINT)
5399 {
5400 Log(("Enabling HINTED patch %RRv\n", pConflictPatch->pPrivInstrGC));
5401 pConflictPatch->flags &= ~PATMFL_INSTR_HINT;
5402 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5403 Assert(rc == VINF_SUCCESS || rc == VERR_PATCH_NOT_FOUND);
5404 /* Enabling might fail if the patched code has changed in the meantime. */
5405 if (rc != VINF_SUCCESS)
5406 return rc;
5407 }
5408
5409 rc = PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_JUMP_CONFLICT);
5410 if (RT_SUCCESS(rc))
5411 {
5412 Log(("PATM -> CONFLICT: Installed JMP patch for patch conflict at %RRv\n", pInstrGC));
5413 STAM_COUNTER_INC(&pVM->patm.s.StatFixedConflicts);
5414 return VINF_SUCCESS;
5415 }
5416 }
5417#endif
5418
5419 if (pConflictPatch->opcode == OP_CLI)
5420 {
5421 /* Turn it into an int3 patch; our GC trap handler will call the generated code manually. */
5422 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> turn into int 3 patch!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5423 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5424 if (rc == VWRN_PATCH_REMOVED)
5425 return VINF_SUCCESS;
5426 if (RT_SUCCESS(rc))
5427 {
5428 pConflictPatch->flags &= ~(PATMFL_MUST_INSTALL_PATCHJMP|PATMFL_INSTR_HINT);
5429 pConflictPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
5430 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5431 if (rc == VERR_PATCH_NOT_FOUND)
5432 return VINF_SUCCESS; /* removed already */
5433
5434 AssertRC(rc);
5435 if (RT_SUCCESS(rc))
5436 {
5437 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
5438 return VINF_SUCCESS;
5439 }
5440 }
5441 /* else turned into unusable patch (see below) */
5442 }
5443 else
5444 {
5445 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> DISABLING it!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5446 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5447 if (rc == VWRN_PATCH_REMOVED)
5448 return VINF_SUCCESS;
5449 }
5450
5451 /* No need to monitor the code anymore. */
5452 if (pConflictPatch->flags & PATMFL_CODE_MONITORED)
5453 {
5454 int rc = patmRemovePatchPages(pVM, pConflictPatch);
5455 AssertRC(rc);
5456 }
5457 pConflictPatch->uState = PATCH_UNUSABLE;
5458 STAM_COUNTER_INC(&pVM->patm.s.StatUnusable);
5459 return VERR_PATCH_DISABLED;
5460}
5461
5462/**
5463 * Enable patch for privileged instruction at specified location
5464 *
5465 * @returns VBox status code.
5466 * @param pVM Pointer to the VM.
5467 * @param pInstrGC Guest context pointer to the privileged instruction
5468 *
5469 * @note Returns failure if patching is not allowed or not possible.
5470 *
5471 */
5472VMMR3_INT_DECL(int) PATMR3EnablePatch(PVM pVM, RTRCPTR pInstrGC)
5473{
5474 PPATMPATCHREC pPatchRec;
5475 PPATCHINFO pPatch;
5476
5477 Log(("PATMR3EnablePatch %RRv\n", pInstrGC));
5478 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5479 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5480 if (pPatchRec)
5481 {
5482 int rc = VINF_SUCCESS;
5483
5484 pPatch = &pPatchRec->patch;
5485
5486 if (pPatch->uState == PATCH_DISABLED)
5487 {
5488 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
5489 {
5490 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
5491 uint8_t temp[16];
5492
5493 Assert(pPatch->cbPatchJump < sizeof(temp));
5494
5495 /* Let's first check if the guest code is still the same. */
5496 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5497 AssertRC(rc2);
5498 if (rc2 == VINF_SUCCESS)
5499 {
5500 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5501 {
5502 Log(("PATMR3EnablePatch: Can't enable a patch who's guest code has changed!!\n"));
5503 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5504 /* Remove it completely */
5505 rc = PATMR3RemovePatch(pVM, pInstrGC);
5506 AssertRC(rc);
5507 return VERR_PATCH_NOT_FOUND;
5508 }
5509
5510 PATMP2GLOOKUPREC cacheRec;
5511 RT_ZERO(cacheRec);
5512 cacheRec.pPatch = pPatch;
5513
5514 rc2 = patmGenJumpToPatch(pVM, pPatch, &cacheRec, false);
5515 /* Free leftover lock if any. */
5516 if (cacheRec.Lock.pvMap)
5517 {
5518 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
5519 cacheRec.Lock.pvMap = NULL;
5520 }
5521 AssertRC(rc2);
5522 if (RT_FAILURE(rc2))
5523 return rc2;
5524
5525#ifdef DEBUG
5526 {
5527 DISCPUSTATE cpu;
5528 char szOutput[256];
5529 uint32_t cbInstr;
5530 uint32_t i = 0;
5531 bool disret;
5532 while(i < pPatch->cbPatchJump)
5533 {
5534 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
5535 &cpu, &cbInstr, szOutput, sizeof(szOutput));
5536 Log(("Renewed patch instr: %s", szOutput));
5537 i += cbInstr;
5538 }
5539 }
5540#endif
5541 }
5542 }
5543 else
5544 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5545 {
5546 uint8_t temp[16];
5547
5548 Assert(pPatch->cbPatchJump < sizeof(temp));
5549
5550 /* Let's first check if the guest code is still the same. */
5551 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5552 AssertRC(rc2);
5553
5554 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5555 {
5556 Log(("PATMR3EnablePatch: Can't enable a patch who's guest code has changed!!\n"));
5557 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5558 rc = PATMR3RemovePatch(pVM, pInstrGC);
5559 AssertRC(rc);
5560 return VERR_PATCH_NOT_FOUND;
5561 }
5562
5563 rc2 = patmActivateInt3Patch(pVM, pPatch);
5564 if (RT_FAILURE(rc2))
5565 return rc2;
5566 }
5567
5568 pPatch->uState = pPatch->uOldState; //restore state
5569
5570 /* Restore the entry breakpoint with the original opcode (see PATMR3DisablePatch). */
5571 if (pPatch->pPatchBlockOffset)
5572 *PATCHCODE_PTR_HC(pPatch) = pPatch->bDirtyOpcode;
5573
5574 STAM_COUNTER_ADD(&pVM->patm.s.StatEnabled, 1);
5575 }
5576 else
5577 Log(("PATMR3EnablePatch: Unable to enable patch %RRv with state %d\n", pInstrGC, pPatch->uState));
5578
5579 return rc;
5580 }
5581 return VERR_PATCH_NOT_FOUND;
5582}
5583
5584/**
5585 * Remove patch for privileged instruction at specified location
5586 *
5587 * @returns VBox status code.
5588 * @param pVM Pointer to the VM.
5589 * @param pPatchRec Patch record
5590 * @param fForceRemove Remove *all* patches
5591 */
5592int patmR3RemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove)
5593{
5594 PPATCHINFO pPatch;
5595
5596 pPatch = &pPatchRec->patch;
5597
5598 /* Strictly forbidden to remove such patches. There can be dependencies!! */
5599 if (!fForceRemove && (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED)))
5600 {
5601 Log(("PATMRemovePatch %RRv REFUSED!\n", pPatch->pPrivInstrGC));
5602 return VERR_ACCESS_DENIED;
5603 }
5604 Log(("PATMRemovePatch %RRv\n", pPatch->pPrivInstrGC));
5605
5606 /* Note: NEVER EVER REUSE PATCH MEMORY */
5607 /* Note: PATMR3DisablePatch puts a breakpoint (0xCC) at the entry of this patch */
5608
5609 if (pPatchRec->patch.pPatchBlockOffset)
5610 {
5611 PAVLOU32NODECORE pNode;
5612
5613 pNode = RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->patch.pPatchBlockOffset);
5614 Assert(pNode);
5615 }
5616
5617 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
5618 {
5619 int rc = patmRemovePatchPages(pVM, &pPatchRec->patch);
5620 AssertRC(rc);
5621 }
5622
5623#ifdef VBOX_WITH_STATISTICS
5624 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5625 {
5626 STAMR3DeregisterF(pVM->pUVM, "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
5627 STAMR3DeregisterF(pVM->pUVM, "/PATM/PatchBD/0x%RRv*", pPatchRec->patch.pPrivInstrGC);
5628 }
5629#endif
5630
5631 /* Note: no need to free Guest2PatchAddrTree as those records share memory with Patch2GuestAddrTree records. */
5632 patmEmptyTreeU32(pVM, &pPatch->Patch2GuestAddrTree);
5633 pPatch->nrPatch2GuestRecs = 0;
5634 Assert(pPatch->Patch2GuestAddrTree == 0);
5635
5636 patmEmptyTree(pVM, &pPatch->FixupTree);
5637 pPatch->nrFixups = 0;
5638 Assert(pPatch->FixupTree == 0);
5639
5640 if (pPatchRec->patch.pTempInfo)
5641 MMR3HeapFree(pPatchRec->patch.pTempInfo);
5642
5643 /* Note: might fail, because it has already been removed (e.g. during reset). */
5644 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5645
5646 /* Free the patch record */
5647 MMHyperFree(pVM, pPatchRec);
5648 return VINF_SUCCESS;
5649}
5650
5651/**
5652 * RTAvlU32DoWithAll() worker.
5653 * Checks whether the current trampoline instruction is the jump to the target patch
5654 * and updates the displacement to jump to the new target.
5655 *
5656 * @returns VBox status code.
5657 * @retval VERR_ALREADY_EXISTS if the jump was found.
5658 * @param pNode The current patch to guest record to check.
5659 * @param pvUser The refresh state.
5660 */
5661static int patmR3PatchRefreshFindTrampolinePatch(PAVLU32NODECORE pNode, void *pvUser)
5662{
5663 PRECPATCHTOGUEST pPatch2GuestRec = (PRECPATCHTOGUEST)pNode;
5664 PPATMREFRESHPATCH pRefreshPatchState = (PPATMREFRESHPATCH)pvUser;
5665 PVM pVM = pRefreshPatchState->pVM;
5666
5667 uint8_t *pPatchInstr = (uint8_t *)(pVM->patm.s.pPatchMemHC + pPatch2GuestRec->Core.Key);
5668
5669 /*
5670 * Check if the patch instruction starts with a jump.
5671 * ASSUMES that there is no other patch to guest record that starts
5672 * with a jump.
5673 */
5674 if (*pPatchInstr == 0xE9)
5675 {
5676 /* Jump found, update the displacement. */
5677 RTRCPTR pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pRefreshPatchState->pPatchRec,
5678 pRefreshPatchState->pPatchTrampoline->pPrivInstrGC);
5679 int32_t displ = pPatchTargetGC - (pVM->patm.s.pPatchMemGC + pPatch2GuestRec->Core.Key + SIZEOF_NEARJUMP32);
5680
5681 LogFlow(("Updating trampoline patch new patch target %RRv, new displacment %d (old was %d)\n",
5682 pPatchTargetGC, displ, *(uint32_t *)&pPatchInstr[1]));
5683
5684 *(uint32_t *)&pPatchInstr[1] = displ;
5685 return VERR_ALREADY_EXISTS; /** @todo better return code */
5686 }
5687
5688 return VINF_SUCCESS;
5689}
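
/*
 * Editor's note: illustrative sketch added during editing, not part of the original
 * PATM.cpp. The displacement update above depends on the patch memory block being
 * mapped at pPatchMemHC on the host and at pPatchMemGC in the guest context: the AVL
 * key is an offset into that block, the write goes through the HC mapping, and the
 * rel32 displacement is computed with GC addresses. The helper name is made up.
 */
static void patmSketchRetargetNearJump(PVM pVM, uint32_t offJumpInstr, RTRCPTR pNewTargetGC)
{
    uint8_t *pbJumpHC = pVM->patm.s.pPatchMemHC + offJumpInstr;     /* where we write */
    RTRCPTR  pJumpGC  = pVM->patm.s.pPatchMemGC + offJumpInstr;     /* where it executes */
    Assert(*pbJumpHC == 0xE9);                                      /* jmp rel32 */
    *(int32_t *)&pbJumpHC[1] = pNewTargetGC - (pJumpGC + SIZEOF_NEARJUMP32);
}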
5690
5691/**
5692 * Attempt to refresh the patch by recompiling its entire code block
5693 *
5694 * @returns VBox status code.
5695 * @param pVM Pointer to the VM.
5696 * @param pPatchRec Patch record
5697 */
5698int patmR3RefreshPatch(PVM pVM, PPATMPATCHREC pPatchRec)
5699{
5700 PPATCHINFO pPatch;
5701 int rc;
5702 RTRCPTR pInstrGC = pPatchRec->patch.pPrivInstrGC;
5703 PTRAMPREC pTrampolinePatchesHead = NULL;
5704
5705 Log(("patmR3RefreshPatch: attempt to refresh patch at %RRv\n", pInstrGC));
5706
5707 pPatch = &pPatchRec->patch;
5708 AssertReturn(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER), VERR_PATCHING_REFUSED);
5709 if (pPatch->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
5710 {
5711 if (!pPatch->pTrampolinePatchesHead)
5712 {
5713 /*
5714 * It is sometimes possible that there are trampoline patches to this patch
5715 * but they are not recorded (after a saved state load for example).
5716 * Refuse to refresh those patches.
5717 * In theory this can hurt performance if the patched code is modified by the guest
5718 * and executed often. However, states are usually saved after the guest code was
5719 * modified and not updated again afterwards, so this shouldn't be a
5720 * big problem.
5721 */
5722 Log(("patmR3RefreshPatch: refused because external jumps to this patch exist but the jumps are not recorded\n"));
5723 return VERR_PATCHING_REFUSED;
5724 }
5725 Log(("patmR3RefreshPatch: external jumps to this patch exist, updating\n"));
5726 pTrampolinePatchesHead = pPatch->pTrampolinePatchesHead;
5727 }
5728
5729 /* Note: quite ugly to enable/disable/remove/insert old and new patches, but there's no easy way around it. */
5730
5731 rc = PATMR3DisablePatch(pVM, pInstrGC);
5732 AssertRC(rc);
5733
5734 /* Kick it out of the lookup tree to make sure PATMR3InstallPatch doesn't fail (hack alert) */
5735 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5736#ifdef VBOX_WITH_STATISTICS
5737 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5738 {
5739 STAMR3DeregisterF(pVM->pUVM, "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
5740 STAMR3DeregisterF(pVM->pUVM, "/PATM/PatchBD/0x%RRv*", pPatchRec->patch.pPrivInstrGC);
5741 }
5742#endif
5743
5744 /** Note: We don't attempt to reuse patch memory here as it's quite common that the new code block requires more memory. */
5745
5746 /* Attempt to install a new patch. */
5747 rc = PATMR3InstallPatch(pVM, pInstrGC, pPatch->flags & (PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT));
5748 if (RT_SUCCESS(rc))
5749 {
5750 RTRCPTR pPatchTargetGC;
5751 PPATMPATCHREC pNewPatchRec;
5752
5753 /* Determine target address in new patch */
5754 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pInstrGC);
5755 Assert(pPatchTargetGC);
5756 if (!pPatchTargetGC)
5757 {
5758 rc = VERR_PATCHING_REFUSED;
5759 goto failure;
5760 }
5761
5762 /* Reset offset into patch memory to put the next code blocks right at the beginning. */
5763 pPatch->uCurPatchOffset = 0;
5764
5765 /* insert jump to new patch in old patch block */
5766 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC, false /* no lookup record */);
5767 if (RT_FAILURE(rc))
5768 goto failure;
5769
5770 pNewPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5771 Assert(pNewPatchRec); /* can't fail */
5772
5773 /* Remove old patch (only do that when everything is finished) */
5774 int rc2 = patmR3RemovePatch(pVM, pPatchRec, true /* force removal */);
5775 AssertRC(rc2);
5776
5777 /* Put the new patch back into the tree, because removing the old one kicked this one out. (hack alert) */
5778 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pNewPatchRec->Core);
5779 Assert(fInserted); NOREF(fInserted);
5780
5781 Log(("PATM: patmR3RefreshPatch: succeeded to refresh patch at %RRv \n", pInstrGC));
5782 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshSuccess);
5783
5784 /* Used by another patch, so don't remove it! */
5785 pNewPatchRec->patch.flags |= PATMFL_CODE_REFERENCED;
5786
5787 if (pTrampolinePatchesHead)
5788 {
5789 /* Update all trampoline patches to jump to the new patch. */
5790 PTRAMPREC pTrampRec = NULL;
5791 PATMREFRESHPATCH RefreshPatch;
5792
5793 RefreshPatch.pVM = pVM;
5794 RefreshPatch.pPatchRec = &pNewPatchRec->patch;
5795
5796 pTrampRec = pTrampolinePatchesHead;
5797
5798 while (pTrampRec)
5799 {
5800 PPATCHINFO pPatchTrampoline = &pTrampRec->pPatchTrampoline->patch;
5801
5802 RefreshPatch.pPatchTrampoline = pPatchTrampoline;
5803 /*
5804 * We have to find the right patch2guest record because there might be others
5805 * for statistics.
5806 */
5807 rc = RTAvlU32DoWithAll(&pPatchTrampoline->Patch2GuestAddrTree, true,
5808 patmR3PatchRefreshFindTrampolinePatch, &RefreshPatch);
5809 Assert(rc == VERR_ALREADY_EXISTS);
5810 rc = VINF_SUCCESS;
5811 pTrampRec = pTrampRec->pNext;
5812 }
5813 pNewPatchRec->patch.pTrampolinePatchesHead = pTrampolinePatchesHead;
5814 pNewPatchRec->patch.flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
5815 /* Clear the list of trampoline patches for the old patch (safety precaution). */
5816 pPatchRec->patch.pTrampolinePatchesHead = NULL;
5817 }
5818 }
5819
5820failure:
5821 if (RT_FAILURE(rc))
5822 {
5823 LogRel(("PATM: patmR3RefreshPatch: failed to refresh patch at %RRv. Reactiving old one. \n", pInstrGC));
5824
5825 /* Remove the new inactive patch */
5826 rc = PATMR3RemovePatch(pVM, pInstrGC);
5827 AssertRC(rc);
5828
5829 /* Put the old patch back into the tree (or else it won't be saved) (hack alert) */
5830 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
5831 Assert(fInserted); NOREF(fInserted);
5832
5833 /* Enable again in case the dirty instruction is near the end and there are safe code paths. */
5834 int rc2 = PATMR3EnablePatch(pVM, pInstrGC);
5835 AssertRC(rc2);
5836
5837 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshFailed);
5838 }
5839 return rc;
5840}
5841
5842/**
5843 * Find patch for privileged instruction at specified location
5844 *
5845 * @returns Patch structure pointer if found; else NULL
5846 * @param pVM Pointer to the VM.
5847 * @param pInstrGC Guest context pointer to an instruction that might lie within 5 bytes of an existing patch jump
5848 * @param fIncludeHints Include hinted patches or not
5849 *
5850 */
5851PPATCHINFO patmFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncludeHints)
5852{
5853 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5854 /* if the patch is enabled, the pointer is not identical to the privileged patch ptr and it lies within 5 bytes of this priv instr ptr, then we've got a hit! */
5855 if (pPatchRec)
5856 {
5857 if ( pPatchRec->patch.uState == PATCH_ENABLED
5858 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE)
5859 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5860 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5861 {
5862 Log(("Found active patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5863 return &pPatchRec->patch;
5864 }
5865 else
5866 if ( fIncludeHints
5867 && pPatchRec->patch.uState == PATCH_DISABLED
5868 && (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
5869 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5870 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5871 {
5872 Log(("Found HINT patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5873 return &pPatchRec->patch;
5874 }
5875 }
5876 return NULL;
5877}
5878
5879/**
5880 * Checks whether the GC address is inside a generated patch jump
5881 *
5882 * @returns true -> yes, false -> no
5883 * @param pVM Pointer to the VM.
5884 * @param pAddr Guest context address.
5885 * @param pPatchAddr Guest context patch address (if true).
5886 */
5887VMMR3_INT_DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatchAddr)
5888{
5889 RTRCPTR addr;
5890 PPATCHINFO pPatch;
5891
5892 Assert(!HMIsEnabled(pVM));
5893 if (PATMIsEnabled(pVM) == false)
5894 return false;
5895
5896 if (pPatchAddr == NULL)
5897 pPatchAddr = &addr;
5898
5899 *pPatchAddr = 0;
5900
5901 pPatch = patmFindActivePatchByEntrypoint(pVM, pAddr);
5902 if (pPatch)
5903 *pPatchAddr = pPatch->pPrivInstrGC;
5904
5905 return *pPatchAddr == 0 ? false : true;
5906}
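
/*
 * Editor's note: usage sketch added during editing, not part of the original PATM.cpp.
 * It shows how a hypothetical trap-handling path could use PATMR3IsInsidePatchJump to
 * map a faulting EIP that landed in the middle of the 5 byte patch jump back to the
 * start of the overwritten privileged instruction. The helper name is made up.
 */
static RTRCPTR patmSketchNormalizeFaultAddress(PVM pVM, RTRCPTR pFaultGC)
{
    RTGCPTR32 GCPtrJumpStart = 0;
    if (PATMR3IsInsidePatchJump(pVM, pFaultGC, &GCPtrJumpStart))
        return (RTRCPTR)GCPtrJumpStart;     /* analyse from the original privileged instruction */
    return pFaultGC;                        /* not inside a generated patch jump */
}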
5907
5908/**
5909 * Remove patch for privileged instruction at specified location
5910 *
5911 * @returns VBox status code.
5912 * @param pVM Pointer to the VM.
5913 * @param pInstrGC Guest context pointer to the privileged instruction
5914 *
5915 * @note Returns failure if patching is not allowed or not possible.
5916 *
5917 */
5918VMMR3_INT_DECL(int) PATMR3RemovePatch(PVM pVM, RTRCPTR pInstrGC)
5919{
5920 PPATMPATCHREC pPatchRec;
5921
5922 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5923 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5924 if (pPatchRec)
5925 {
5926 int rc = PATMR3DisablePatch(pVM, pInstrGC);
5927 if (rc == VWRN_PATCH_REMOVED)
5928 return VINF_SUCCESS;
5929
5930 return patmR3RemovePatch(pVM, pPatchRec, false);
5931 }
5932 AssertFailed();
5933 return VERR_PATCH_NOT_FOUND;
5934}
5935
5936/**
5937 * Mark patch as dirty
5938 *
5939 * @returns VBox status code.
5940 * @param pVM Pointer to the VM.
5941 * @param pPatch Patch record
5942 *
5943 * @note returns failure if patching is not allowed or possible
5944 *
5945 */
5946static int patmR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch)
5947{
5948 if (pPatch->pPatchBlockOffset)
5949 {
5950 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5951 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5952 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5953 }
5954
5955 STAM_COUNTER_INC(&pVM->patm.s.StatDirty);
5956 /* Put back the replaced instruction. */
5957 int rc = PATMR3DisablePatch(pVM, pPatch->pPrivInstrGC);
5958 if (rc == VWRN_PATCH_REMOVED)
5959 return VINF_SUCCESS;
5960
5961 /* Note: we don't restore patch pages for patches that are not enabled! */
5962 /* Note: be careful when changing this behaviour!! */
5963
5964 /* The patch pages are no longer marked for self-modifying code detection */
5965 if (pPatch->flags & PATMFL_CODE_MONITORED)
5966 {
5967 rc = patmRemovePatchPages(pVM, pPatch);
5968 AssertRCReturn(rc, rc);
5969 }
5970 pPatch->uState = PATCH_DIRTY;
5971
5972 /* Paranoia; make sure this patch is not somewhere in the call chain, to prevent ret instructions from succeeding. */
5973 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
5974
5975 return VINF_SUCCESS;
5976}
5977
5978/**
5979 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5980 *
5981 * @returns VBox status code.
5982 * @param pVM Pointer to the VM.
5983 * @param pPatch Patch block structure pointer
5984 * @param pPatchGC GC address in patch block
5985 */
5986RTRCPTR patmPatchGCPtr2GuestGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pPatchGC)
5987{
5988 Assert(pPatch->Patch2GuestAddrTree);
5989 /* Get the closest record from below. */
5990 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->Patch2GuestAddrTree, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5991 if (pPatchToGuestRec)
5992 return pPatchToGuestRec->pOrgInstrGC;
5993
5994 return 0;
5995}
5996
5997/**
5998 * Converts Guest code GC ptr to Patch code GC ptr (if found)
5999 *
6000 * @returns corresponding GC pointer in patch block
6001 * @param pVM Pointer to the VM.
6002 * @param pPatch Current patch block pointer
6003 * @param pInstrGC Guest context pointer to privileged instruction
6004 *
6005 */
6006RTRCPTR patmGuestGCPtrToPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
6007{
6008 if (pPatch->Guest2PatchAddrTree)
6009 {
6010 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
6011 if (pGuestToPatchRec)
6012 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
6013 }
6014
6015 return 0;
6016}
6017
6018/**
6019 * Converts Guest code GC ptr to Patch code GC ptr (if found)
6020 *
6021 * @returns corresponding GC pointer in patch block
6022 * @param pVM Pointer to the VM.
6023 * @param pInstrGC Guest context pointer to privileged instruction
6024 */
6025static RTRCPTR patmR3GuestGCPtrToPatchGCPtrSimple(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC)
6026{
6027 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
6028 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && pInstrGC >= pPatchRec->patch.pPrivInstrGC)
6029 return patmGuestGCPtrToPatchGCPtr(pVM, &pPatchRec->patch, pInstrGC);
6030 return NIL_RTRCPTR;
6031}
6032
6033/**
6034 * Converts Guest code GC ptr to Patch code GC ptr (or nearest from below if no
6035 * identical match)
6036 *
6037 * @returns corresponding GC pointer in patch block
6038 * @param pVM Pointer to the VM.
6039 * @param pPatch Current patch block pointer
6040 * @param pInstrGC Guest context pointer to privileged instruction
6041 *
6042 */
6043RTRCPTR patmGuestGCPtrToClosestPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
6044{
6045 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pInstrGC, false);
6046 if (pGuestToPatchRec)
6047 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
6048 return NIL_RTRCPTR;
6049}
6050
6051/**
6052 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
6053 *
6054 * @returns original GC instruction pointer or 0 if not found
6055 * @param pVM Pointer to the VM.
6056 * @param pPatchGC GC address in patch block
6057 * @param pEnmState State of the translated address (out)
6058 *
6059 */
6060VMMR3_INT_DECL(RTRCPTR) PATMR3PatchToGCPtr(PVM pVM, RTRCPTR pPatchGC, PATMTRANSSTATE *pEnmState)
6061{
6062 PPATMPATCHREC pPatchRec;
6063 void *pvPatchCoreOffset;
6064 RTRCPTR pPrivInstrGC;
6065
6066 Assert(PATMIsPatchGCAddr(pVM, pPatchGC));
6067 Assert(!HMIsEnabled(pVM));
6068 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchGC - pVM->patm.s.pPatchMemGC, false);
6069 if (pvPatchCoreOffset == 0)
6070 {
6071 Log(("PATMR3PatchToGCPtr failed for %RRv offset %x\n", pPatchGC, pPatchGC - pVM->patm.s.pPatchMemGC));
6072 return 0;
6073 }
6074 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6075 pPrivInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, &pPatchRec->patch, pPatchGC);
6076 if (pEnmState)
6077 {
6078 AssertMsg(pPrivInstrGC && ( pPatchRec->patch.uState == PATCH_ENABLED
6079 || pPatchRec->patch.uState == PATCH_DIRTY
6080 || pPatchRec->patch.uState == PATCH_DISABLE_PENDING
6081 || pPatchRec->patch.uState == PATCH_UNUSABLE),
6082 ("pPrivInstrGC=%RRv uState=%d\n", pPrivInstrGC, pPatchRec->patch.uState));
6083
6084 if ( !pPrivInstrGC
6085 || pPatchRec->patch.uState == PATCH_UNUSABLE
6086 || pPatchRec->patch.uState == PATCH_REFUSED)
6087 {
6088 pPrivInstrGC = 0;
6089 *pEnmState = PATMTRANS_FAILED;
6090 }
6091 else
6092 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pPrivInstrGC)
6093 {
6094 *pEnmState = PATMTRANS_INHIBITIRQ;
6095 }
6096 else
6097 if ( pPatchRec->patch.uState == PATCH_ENABLED
6098 && !(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE))
6099 && pPrivInstrGC > pPatchRec->patch.pPrivInstrGC
6100 && pPrivInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
6101 {
6102 *pEnmState = PATMTRANS_OVERWRITTEN;
6103 }
6104 else
6105 if (patmFindActivePatchByEntrypoint(pVM, pPrivInstrGC))
6106 {
6107 *pEnmState = PATMTRANS_OVERWRITTEN;
6108 }
6109 else
6110 if (pPrivInstrGC == pPatchRec->patch.pPrivInstrGC)
6111 {
6112 *pEnmState = PATMTRANS_PATCHSTART;
6113 }
6114 else
6115 *pEnmState = PATMTRANS_SAFE;
6116 }
6117 return pPrivInstrGC;
6118}
6119
6120/**
6121 * Returns the GC pointer of the patch for the specified GC address
6122 *
6123 * @returns Patch code GC pointer, or NIL_RTRCPTR if no enabled or dirty patch exists at the address.
6124 * @param pVM Pointer to the VM.
6125 * @param pAddrGC Guest context address
6126 */
6127VMMR3_INT_DECL(RTRCPTR) PATMR3QueryPatchGCPtr(PVM pVM, RTRCPTR pAddrGC)
6128{
6129 PPATMPATCHREC pPatchRec;
6130
6131 Assert(!HMIsEnabled(pVM));
6132
6133 /* Find the patch record. */
6134 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pAddrGC);
6135 /** @todo we should only use patches that are enabled! always did this, but it's incorrect! */
6136 if (pPatchRec && (pPatchRec->patch.uState == PATCH_ENABLED || pPatchRec->patch.uState == PATCH_DIRTY))
6137 return PATCHCODE_PTR_GC(&pPatchRec->patch);
6138 return NIL_RTRCPTR;
6139}
6140
6141/**
6142 * Attempt to recover dirty instructions
6143 *
6144 * @returns VBox status code.
6145 * @param pVM Pointer to the VM.
6146 * @param pCtx Pointer to the guest CPU context.
6147 * @param pPatch Patch record.
6148 * @param pPatchToGuestRec Patch to guest address record.
6149 * @param pEip GC pointer of trapping instruction.
6150 */
6151static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch, PRECPATCHTOGUEST pPatchToGuestRec, RTRCPTR pEip)
6152{
6153 DISCPUSTATE CpuOld, CpuNew;
6154 uint8_t *pPatchInstrHC, *pCurPatchInstrHC;
6155 int rc;
6156 RTRCPTR pCurInstrGC, pCurPatchInstrGC;
6157 uint32_t cbDirty;
6158 PRECPATCHTOGUEST pRec;
6159 RTRCPTR const pOrgInstrGC = pPatchToGuestRec->pOrgInstrGC;
6160 PVMCPU pVCpu = VMMGetCpu0(pVM);
6161 Log(("patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv)\n", pEip, pOrgInstrGC));
6162
6163 pRec = pPatchToGuestRec;
6164 pCurInstrGC = pOrgInstrGC;
6165 pCurPatchInstrGC = pEip;
6166 cbDirty = 0;
6167 pPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6168
6169 /* Find all adjacent dirty instructions */
6170 while (true)
6171 {
6172 if (pRec->fJumpTarget)
6173 {
6174 LogRel(("PATM: patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv) ignored, because instruction in function was reused as target of jump\n", pEip, pOrgInstrGC));
6175 pRec->fDirty = false;
6176 return VERR_PATCHING_REFUSED;
6177 }
6178
6179 /* Restore original instruction opcode byte so we can check if the write was indeed safe. */
6180 pCurPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6181 *pCurPatchInstrHC = pRec->u8DirtyOpcode;
6182
6183 /* Only harmless instructions are acceptable. */
6184 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurPatchInstrGC, &CpuOld, 0);
6185 if ( RT_FAILURE(rc)
6186 || !(CpuOld.pCurInstr->fOpType & DISOPTYPE_HARMLESS))
6187 {
6188 if (RT_SUCCESS(rc))
6189 cbDirty += CpuOld.cbInstr;
6190 else
6191 if (!cbDirty)
6192 cbDirty = 1;
6193 break;
6194 }
6195
6196#ifdef DEBUG
6197 char szBuf[256];
6198 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6199 szBuf, sizeof(szBuf), NULL);
6200 Log(("DIRTY: %s\n", szBuf));
6201#endif
6202 /* Mark as clean; if we fail we'll let it always fault. */
6203 pRec->fDirty = false;
6204
6205 /* Remove old lookup record. */
6206 patmr3RemoveP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrGC);
6207 pPatchToGuestRec = NULL;
6208
6209 pCurPatchInstrGC += CpuOld.cbInstr;
6210 cbDirty += CpuOld.cbInstr;
6211
6212 /* Let's see if there's another dirty instruction right after. */
6213 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6214 if (!pRec || !pRec->fDirty)
6215 break; /* no more dirty instructions */
6216
6217 /* In case of complex instructions the next guest instruction could be quite far off. */
6218 pCurPatchInstrGC = pRec->Core.Key + pVM->patm.s.pPatchMemGC;
6219 }
6220
6221 if ( RT_SUCCESS(rc)
6222 && (CpuOld.pCurInstr->fOpType & DISOPTYPE_HARMLESS)
6223 )
6224 {
6225 uint32_t cbLeft;
6226
6227 pCurPatchInstrHC = pPatchInstrHC;
6228 pCurPatchInstrGC = pEip;
6229 cbLeft = cbDirty;
6230
6231 while (cbLeft && RT_SUCCESS(rc))
6232 {
6233 bool fValidInstr;
6234
6235 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurInstrGC, &CpuNew, 0);
6236
6237 fValidInstr = !!(CpuNew.pCurInstr->fOpType & DISOPTYPE_HARMLESS);
6238 if ( !fValidInstr
6239 && (CpuNew.pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW)
6240 )
6241 {
6242 RTRCPTR pTargetGC = PATMResolveBranch(&CpuNew, pCurInstrGC);
6243
6244 if ( pTargetGC >= pOrgInstrGC
6245 && pTargetGC <= pOrgInstrGC + cbDirty
6246 )
6247 {
6248 /* A relative jump to an instruction inside or to the end of the dirty block is acceptable. */
6249 fValidInstr = true;
6250 }
6251 }
6252
6253 /* If the instruction is completely harmless (which implies a 1:1 patch copy). */
6254 if ( rc == VINF_SUCCESS
6255 && CpuNew.cbInstr <= cbLeft /* must still fit */
6256 && fValidInstr
6257 )
6258 {
6259#ifdef DEBUG
6260 char szBuf[256];
6261 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6262 szBuf, sizeof(szBuf), NULL);
6263 Log(("NEW: %s\n", szBuf));
6264#endif
6265
6266 /* Copy the new instruction. */
6267 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pCurPatchInstrHC, pCurInstrGC, CpuNew.cbInstr);
6268 AssertRC(rc);
6269
6270 /* Add a new lookup record for the duplicated instruction. */
6271 patmR3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6272 }
6273 else
6274 {
6275#ifdef DEBUG
6276 char szBuf[256];
6277 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6278 szBuf, sizeof(szBuf), NULL);
6279 Log(("NEW: %s (FAILED)\n", szBuf));
6280#endif
6281 /* Restore the old lookup record for the duplicated instruction. */
6282 patmR3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6283
6284 /** @todo in theory we need to restore the lookup records for the remaining dirty instructions too! */
6285 rc = VERR_PATCHING_REFUSED;
6286 break;
6287 }
6288 pCurInstrGC += CpuNew.cbInstr;
6289 pCurPatchInstrHC += CpuNew.cbInstr;
6290 pCurPatchInstrGC += CpuNew.cbInstr;
6291 cbLeft -= CpuNew.cbInstr;
6292
6293 /* Check if we expanded a complex guest instruction into a patch stream (e.g. call) */
6294 if (!cbLeft)
6295 {
6296 /* If the next patch instruction doesn't correspond to the next guest instruction, then we have some extra room to fill. */
6297 if (RTAvlU32Get(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC) == NULL)
6298 {
6299 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6300 if (pRec)
6301 {
6302 unsigned cbFiller = pRec->Core.Key + pVM->patm.s.pPatchMemGC - pCurPatchInstrGC;
6303 uint8_t *pPatchFillHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6304
6305 Assert(!pRec->fDirty);
6306
6307 Log(("Room left in patched instruction stream (%d bytes)\n", cbFiller));
6308 if (cbFiller >= SIZEOF_NEARJUMP32)
6309 {
6310 pPatchFillHC[0] = 0xE9;
6311 *(uint32_t *)&pPatchFillHC[1] = cbFiller - SIZEOF_NEARJUMP32;
6312#ifdef DEBUG
6313 char szBuf[256];
6314 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC,
6315 DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6316 Log(("FILL: %s\n", szBuf));
6317#endif
6318 }
6319 else
6320 {
6321 for (unsigned i = 0; i < cbFiller; i++)
6322 {
6323 pPatchFillHC[i] = 0x90; /* NOP */
6324#ifdef DEBUG
6325 char szBuf[256];
6326 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC + i,
6327 DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6328 Log(("FILL: %s\n", szBuf));
6329#endif
6330 }
6331 }
6332 }
6333 }
6334 }
6335 }
6336 }
6337 else
6338 rc = VERR_PATCHING_REFUSED;
6339
6340 if (RT_SUCCESS(rc))
6341 {
6342 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyGood);
6343 }
6344 else
6345 {
6346 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyBad);
6347 Assert(cbDirty);
6348
6349 /* Mark the whole instruction stream with breakpoints. */
6350 if (cbDirty)
6351 memset(pPatchInstrHC, 0xCC, cbDirty);
6352
6353 if ( pVM->patm.s.fOutOfMemory == false
6354 && (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER)))
6355 {
6356 rc = patmR3RefreshPatch(pVM, pPatch);
6357 if (RT_FAILURE(rc))
6358 {
6359 LogRel(("PATM: Failed to refresh dirty patch at %RRv. Disabling it.\n", pPatch->patch.pPrivInstrGC));
6360 }
6361 /* Even if we succeed, we must go back to the original instruction as the patched one could be invalid. */
6362 rc = VERR_PATCHING_REFUSED;
6363 }
6364 }
6365 return rc;
6366}
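/*
 * Illustrative sketch, not part of the sources ('exampleFillPatchGap' is a
 * hypothetical helper; it relies on the fixed width types and memcpy/memset
 * already pulled in by this file): the gap filler used above, in isolation.
 * A leftover hole in the patch stream is bridged with a 5 byte near jump
 * (0xE9 + rel32, displacement measured from the end of the jump) when it
 * fits, and padded with single byte NOPs otherwise.
 */
static void exampleFillPatchGap(uint8_t *pbFill, uint32_t cbFiller)
{
    if (cbFiller >= SIZEOF_NEARJUMP32)
    {
        uint32_t const offRel32 = cbFiller - SIZEOF_NEARJUMP32;
        pbFill[0] = 0xE9;                                /* jmp rel32 */
        memcpy(&pbFill[1], &offRel32, sizeof(offRel32)); /* lands on the first byte after the gap */
    }
    else
        memset(pbFill, 0x90 /* NOP */, cbFiller);        /* too small for a near jump */
}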
6367
6368/**
6369 * Handle trap inside patch code
6370 *
6371 * @returns VBox status code.
6372 * @param pVM Pointer to the VM.
6373 * @param pCtx Pointer to the guest CPU context.
6374 * @param pEip GC pointer of trapping instruction.
6375 * @param ppNewEip GC pointer to new instruction.
6376 */
6377VMMR3_INT_DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *ppNewEip)
6378{
6379 PPATMPATCHREC pPatch = 0;
6380 void *pvPatchCoreOffset;
6381 RTRCUINTPTR offset;
6382 RTRCPTR pNewEip;
6383 int rc;
6384 PRECPATCHTOGUEST pPatchToGuestRec = 0;
6385 PVMCPU pVCpu = VMMGetCpu0(pVM);
6386
6387 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
6388 Assert(pVM->cCpus == 1);
6389
6390 pNewEip = 0;
6391 *ppNewEip = 0;
6392
6393 STAM_PROFILE_ADV_START(&pVM->patm.s.StatHandleTrap, a);
6394
6395 /* Find the patch record. */
6396 /* Note: there might not be a patch to guest translation record (global function) */
6397 offset = pEip - pVM->patm.s.pPatchMemGC;
6398 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
6399 if (pvPatchCoreOffset)
6400 {
6401 pPatch = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6402
6403 Assert(offset >= pPatch->patch.pPatchBlockOffset && offset < pPatch->patch.pPatchBlockOffset + pPatch->patch.cbPatchBlockSize);
6404
6405 if (pPatch->patch.uState == PATCH_DIRTY)
6406 {
6407 Log(("PATMR3HandleTrap: trap in dirty patch at %RRv\n", pEip));
6408 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6409 {
6410 /* Function duplication patches set fPIF to 1 on entry */
6411 pVM->patm.s.pGCStateHC->fPIF = 1;
6412 }
6413 }
6414 else
6415 if (pPatch->patch.uState == PATCH_DISABLED)
6416 {
6417 Log(("PATMR3HandleTrap: trap in disabled patch at %RRv\n", pEip));
6418 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6419 {
6420 /* Function duplication patches set fPIF to 1 on entry */
6421 pVM->patm.s.pGCStateHC->fPIF = 1;
6422 }
6423 }
6424 else
6425 if (pPatch->patch.uState == PATCH_DISABLE_PENDING)
6426 {
6427 RTRCPTR pPrivInstrGC = pPatch->patch.pPrivInstrGC;
6428
6429 Log(("PATMR3HandleTrap: disable operation is pending for patch at %RRv\n", pPatch->patch.pPrivInstrGC));
6430 rc = PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6431 AssertReleaseMsg(rc != VWRN_PATCH_REMOVED, ("PATMR3DisablePatch removed patch at %RRv\n", pPrivInstrGC));
6432 AssertMsg(pPatch->patch.uState == PATCH_DISABLED || pPatch->patch.uState == PATCH_UNUSABLE, ("Unexpected failure to disable patch state=%d rc=%Rrc\n", pPatch->patch.uState, rc));
6433 }
6434
6435 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, offset, false);
6436 AssertReleaseMsg(pPatchToGuestRec, ("PATMR3HandleTrap: Unable to find corresponding guest address for %RRv (offset %x)\n", pEip, offset));
6437
6438 pNewEip = pPatchToGuestRec->pOrgInstrGC;
6439 pPatch->patch.cTraps++;
6440 PATM_STAT_FAULT_INC(&pPatch->patch);
6441 }
6442 else
6443 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 0, ("PATMR3HandleTrap: Unable to find translation record for %RRv (PIF=0)\n", pEip));
6444
6445 /* Check if we were interrupted in PATM generated instruction code. */
6446 if (pVM->patm.s.pGCStateHC->fPIF == 0)
6447 {
6448 DISCPUSTATE Cpu;
6449 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pEip, &Cpu, "PIF Trap: ");
6450 AssertRC(rc);
6451
6452 if ( rc == VINF_SUCCESS
6453 && ( Cpu.pCurInstr->uOpcode == OP_PUSHF
6454 || Cpu.pCurInstr->uOpcode == OP_PUSH
6455 || Cpu.pCurInstr->uOpcode == OP_CALL)
6456 )
6457 {
6458 uint64_t fFlags;
6459
6460 STAM_COUNTER_INC(&pVM->patm.s.StatPushTrap);
6461
6462 if (Cpu.pCurInstr->uOpcode == OP_PUSH)
6463 {
6464 rc = PGMShwGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6465 if ( rc == VINF_SUCCESS
6466 && ((fFlags & (X86_PTE_P|X86_PTE_RW)) == (X86_PTE_P|X86_PTE_RW)) )
6467 {
6468 /* The stack address is fine, so the push argument is a pointer -> emulate this instruction */
6469
6470 /* Reset the PATM stack. */
6471 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6472
6473 pVM->patm.s.pGCStateHC->fPIF = 1;
6474
6475 Log(("Faulting push -> go back to the original instruction\n"));
6476
6477 /* continue at the original instruction */
6478 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6479 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6480 return VINF_SUCCESS;
6481 }
6482 }
6483
6484 /* Typical pushf (most patches)/push (call patch) trap because of a monitored page. */
6485 rc = PGMShwMakePageWritable(pVCpu, pCtx->esp, 0 /*fFlags*/);
6486 AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
6487 if (rc == VINF_SUCCESS)
6488 {
6489 /* The guest page *must* be present. */
6490 rc = PGMGstGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6491 if ( rc == VINF_SUCCESS
6492 && (fFlags & X86_PTE_P))
6493 {
6494 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6495 return VINF_PATCH_CONTINUE;
6496 }
6497 }
6498 }
6499 else
6500 if (pPatch && pPatch->patch.pPrivInstrGC == pNewEip)
6501 {
6502 /* Invalidated patch or first instruction overwritten.
6503 * We can ignore the fPIF state in this case.
6504 */
6505 /* Reset the PATM stack. */
6506 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6507
6508 Log(("Call to invalidated patch -> go back to the original instruction\n"));
6509
6510 pVM->patm.s.pGCStateHC->fPIF = 1;
6511
6512 /* continue at the original instruction */
6513 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6514 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6515 return VINF_SUCCESS;
6516 }
6517
6518 char szBuf[256];
6519 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pEip, DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6520
6521 /* Very bad: we crashed in emitted patch code, most likely because of a guest stack problem. */
6522 if (pPatch)
6523 {
6524 AssertLogRelMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6525 ("Crash in patch code %RRv (%RRv) esp=%RX32\nPatch state=%x flags=%RX64 fDirty=%d\n%s\n",
6526 pEip, pNewEip, CPUMGetGuestESP(pVCpu), pPatch->patch.uState, pPatch->patch.flags,
6527 pPatchToGuestRec->fDirty, szBuf));
6528 }
6529 else
6530 AssertLogRelMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6531 ("Crash in patch code %RRv (%RRv) esp=%RX32\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), szBuf));
6532 EMR3FatalError(pVCpu, VERR_PATM_IPE_TRAP_IN_PATCH_CODE);
6533 }
6534
6535 /* From here on, we must have a valid patch to guest translation. */
6536 if (pvPatchCoreOffset == 0)
6537 {
6538 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6539 AssertMsgFailed(("PATMR3HandleTrap: patch not found at address %RRv!!\n", pEip));
6540 return VERR_PATCH_NOT_FOUND;
6541 }
6542
6543 /* Take care of dirty/changed instructions. */
6544 if (pPatchToGuestRec->fDirty)
6545 {
6546 Assert(pPatchToGuestRec->Core.Key == offset);
6547 Assert(pVM->patm.s.pGCStateHC->fPIF == 1);
6548
6549 rc = patmR3HandleDirtyInstr(pVM, pCtx, pPatch, pPatchToGuestRec, pEip);
6550 if (RT_SUCCESS(rc))
6551 {
6552 /* Retry the current instruction. */
6553 pNewEip = pEip;
6554 rc = VINF_PATCH_CONTINUE; /* Continue at current patch instruction. */
6555 }
6556 else
6557 {
6558 /* Reset the PATM stack. */
6559 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6560
6561 rc = VINF_SUCCESS; /* Continue at original instruction. */
6562 }
6563
6564 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6565 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6566 return rc;
6567 }
6568
6569#ifdef VBOX_STRICT
6570 if (pPatch->patch.flags & PATMFL_DUPLICATE_FUNCTION)
6571 {
6572 DISCPUSTATE cpu;
6573 bool disret;
6574 uint32_t cbInstr;
6575 PATMP2GLOOKUPREC cacheRec;
6576 RT_ZERO(cacheRec);
6577 cacheRec.pPatch = &pPatch->patch;
6578
6579 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
6580 &cpu, &cbInstr);
6581 if (cacheRec.Lock.pvMap)
6582 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6583
6584 if (disret && cpu.pCurInstr->uOpcode == OP_RETN)
6585 {
6586 RTRCPTR retaddr;
6587 PCPUMCTX pCtx2;
6588
6589 pCtx2 = CPUMQueryGuestCtxPtr(pVCpu);
6590
6591 rc = PGMPhysSimpleReadGCPtr(pVCpu, &retaddr, pCtx2->esp, sizeof(retaddr));
6592 AssertRC(rc);
6593
6594 Log(("Return failed at %RRv (%RRv)\n", pEip, pNewEip));
6595 Log(("Expected return address %RRv found address %RRv Psp=%x\n", pVM->patm.s.pGCStackHC[(pVM->patm.s.pGCStateHC->Psp+PATM_STACK_SIZE)/sizeof(RTRCPTR)], retaddr, pVM->patm.s.pGCStateHC->Psp));
6596 }
6597 }
6598#endif
6599
6600 /* Return the original address, corrected by subtracting the CS base address. */
6601 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6602
6603 /* Reset the PATM stack. */
6604 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6605
6606 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pNewEip)
6607 {
6608 /* Must be a faulting instruction after sti; currently only sysexit, hlt or iret */
6609 Log(("PATMR3HandleTrap %RRv -> inhibit irqs set!\n", pEip));
6610#ifdef VBOX_STRICT
6611 DISCPUSTATE cpu;
6612 bool disret;
6613 uint32_t cbInstr;
6614 PATMP2GLOOKUPREC cacheRec;
6615 RT_ZERO(cacheRec);
6616 cacheRec.pPatch = &pPatch->patch;
6617
6618 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_ORGCODE,
6619 &cpu, &cbInstr);
6620 if (cacheRec.Lock.pvMap)
6621 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6622
6623 if (disret && (cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_INT3))
6624 {
6625 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
6626 &cpu, &cbInstr);
6627 if (cacheRec.Lock.pvMap)
6628 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6629
6630 Assert(cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_IRET);
6631 }
6632#endif
6633 EMSetInhibitInterruptsPC(pVCpu, pNewEip);
6634 pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts = 0;
6635 }
6636
6637 Log2(("pPatchBlockGC %RRv - pEip %RRv corresponding GC address %RRv\n", PATCHCODE_PTR_GC(&pPatch->patch), pEip, pNewEip));
6638 DBGFR3_DISAS_INSTR_LOG(pVCpu, pCtx->cs.Sel, pNewEip, "PATCHRET: ");
6639 if (pNewEip >= pPatch->patch.pPrivInstrGC && pNewEip < pPatch->patch.pPrivInstrGC + pPatch->patch.cbPatchJump)
6640 {
6641 /* We can't jump back to code that we've overwritten with a 5 byte jump! */
6642 Log(("Disabling patch at location %RRv due to trap too close to the privileged instruction \n", pPatch->patch.pPrivInstrGC));
6643 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6644 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6645 return VERR_PATCH_DISABLED;
6646 }
6647
6648#ifdef PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
6649 /** @todo compare to nr of successful runs. add some aging algorithm and determine the best time to disable the patch */
6650 if (pPatch->patch.cTraps > MAX_PATCH_TRAPS)
6651 {
6652 Log(("Disabling patch at location %RRv due to too many traps inside patch code\n", pPatch->patch.pPrivInstrGC));
6653 // We are only wasting time; back out the patch.
6654 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6655 pTrapRec->pNextPatchInstr = 0;
6656 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6657 return VERR_PATCH_DISABLED;
6658 }
6659#endif
6660
6661 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6662 return VINF_SUCCESS;
6663}
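/*
 * Illustrative sketch, not from the sources ('exampleFlatToCsRelative' is a
 * hypothetical helper): the EIP returned through ppNewEip above is the flat
 * guest address minus the CS base, which is what subtracting
 * SELMToFlat(pVM, DISSELREG_CS, ..., 0) amounts to.
 */
static RTGCPTR exampleFlatToCsRelative(RTGCPTR GCPtrFlat, RTGCPTR GCPtrCsBase)
{
    return GCPtrFlat - GCPtrCsBase; /* eip = flat address - segment base */
}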
6664
6665
6666/**
6667 * Handle a page fault in a monitored page.
6668 *
6669 * @returns VBox status code.
6670 * @param pVM Pointer to the VM.
6671 */
6672VMMR3_INT_DECL(int) PATMR3HandleMonitoredPage(PVM pVM)
6673{
6674 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
6675
6676 RTRCPTR addr = pVM->patm.s.pvFaultMonitor;
6677 addr &= PAGE_BASE_GC_MASK;
6678
6679 int rc = PGMHandlerVirtualDeregister(pVM, addr);
6680 AssertRC(rc); NOREF(rc);
6681
6682 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, false);
6683 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) == PAGE_ADDRESS(addr))
6684 {
6685 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6686 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6687 rc = PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6688 if (rc == VWRN_PATCH_REMOVED)
6689 return VINF_SUCCESS;
6690
6691 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6692
6693 if (addr == pPatchRec->patch.pPrivInstrGC)
6694 addr++;
6695 }
6696
6697 for (;;)
6698 {
6699 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, true);
6700
6701 if (!pPatchRec || PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) != PAGE_ADDRESS(addr))
6702 break;
6703
6704 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
6705 {
6706 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6707 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6708 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6709 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6710 }
6711 addr = pPatchRec->patch.pPrivInstrGC + 1;
6712 }
6713
6714 pVM->patm.s.pvFaultMonitor = 0;
6715 return VINF_SUCCESS;
6716}
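/*
 * Illustrative sketch, not from the sources (the callback, the 'example'
 * names and the 4K page mask are hypothetical stand-ins for the AVL tree
 * lookup and PAGE_ADDRESS() used above): the scan pattern for visiting every
 * record whose key lies on one guest page - repeatedly ask for the closest
 * key at or above the current address and stop once the hit leaves the page.
 */
static uint32_t exampleCountRecordsOnPage(uint32_t (*pfnGetBestFitAbove)(uint32_t uKey), uint32_t uPageAddr)
{
    uint32_t const uPage = uPageAddr & ~(uint32_t)0xfff; /* hypothetical 4K page mask */
    uint32_t       cHits = 0;
    uint32_t       uKey  = uPage;
    for (;;)
    {
        uint32_t const uHit = pfnGetBestFitAbove(uKey);  /* smallest key >= uKey, 0 if none */
        if (!uHit || (uHit & ~(uint32_t)0xfff) != uPage)
            break;                                       /* no record left on this page */
        cHits++;
        uKey = uHit + 1;                                 /* continue right after the hit */
    }
    return cHits;
}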
6717
6718
6719#ifdef VBOX_WITH_STATISTICS
6720
6721static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch)
6722{
6723 if (pPatch->flags & PATMFL_SYSENTER)
6724 {
6725 return "SYSENT";
6726 }
6727 else
6728 if (pPatch->flags & (PATMFL_TRAPHANDLER|PATMFL_INTHANDLER))
6729 {
6730 static char szTrap[16];
6731 uint32_t iGate;
6732
6733 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
6734 if (iGate < 256)
6735 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-%2X" : "TRAP-%2X", iGate);
6736 else
6737 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-??" : "TRAP-??");
6738 return szTrap;
6739 }
6740 else
6741 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
6742 return "DUPFUNC";
6743 else
6744 if (pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL)
6745 return "FUNCCALL";
6746 else
6747 if (pPatch->flags & PATMFL_TRAMPOLINE)
6748 return "TRAMP";
6749 else
6750 return patmGetInstructionString(pPatch->opcode, pPatch->flags);
6751}
6752
6753static const char *PATMPatchState(PVM pVM, PPATCHINFO pPatch)
6754{
6755 NOREF(pVM);
6756 switch (pPatch->uState)
6757 {
6758 case PATCH_ENABLED:
6759 return "ENA";
6760 case PATCH_DISABLED:
6761 return "DIS";
6762 case PATCH_DIRTY:
6763 return "DIR";
6764 case PATCH_UNUSABLE:
6765 return "UNU";
6766 case PATCH_REFUSED:
6767 return "REF";
6768 case PATCH_DISABLE_PENDING:
6769 return "DIP";
6770 default:
6771 AssertFailed();
6772 return " ";
6773 }
6774}
6775
6776/**
6777 * Resets the sample.
6778 * @param pVM Pointer to the VM.
6779 * @param pvSample The sample registered using STAMR3RegisterCallback.
6780 */
6781static void patmResetStat(PVM pVM, void *pvSample)
6782{
6783 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6784 Assert(pPatch);
6785
6786 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A = 0;
6787 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B = 0;
6788}
6789
6790/**
6791 * Prints the sample into the buffer.
6792 *
6793 * @param pVM Pointer to the VM.
6794 * @param pvSample The sample registered using STAMR3RegisterCallback.
6795 * @param pszBuf The buffer to print into.
6796 * @param cchBuf The size of the buffer.
6797 */
6798static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf)
6799{
6800 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6801 Assert(pPatch);
6802
6803 Assert(pPatch->uState != PATCH_REFUSED);
6804 Assert(!(pPatch->flags & (PATMFL_REPLACE_FUNCTION_CALL|PATMFL_MMIO_ACCESS)));
6805
6806 RTStrPrintf(pszBuf, cchBuf, "size %04x ->%3s %8s - %08d - %08d",
6807 pPatch->cbPatchBlockSize, PATMPatchState(pVM, pPatch), PATMPatchType(pVM, pPatch),
6808 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A, pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B);
6809}
6810
6811/**
6812 * Returns the GC address of the corresponding patch statistics counter
6813 *
6814 * @returns Guest context address of the patch statistics counter.
6815 * @param pVM Pointer to the VM.
6816 * @param pPatch Patch structure.
6817 */
6818RTRCPTR patmPatchQueryStatAddress(PVM pVM, PPATCHINFO pPatch)
6819{
6820 Assert(pPatch->uPatchIdx != PATM_STAT_INDEX_NONE);
6821 return pVM->patm.s.pStatsGC + sizeof(STAMRATIOU32) * pPatch->uPatchIdx + RT_OFFSETOF(STAMRATIOU32, u32A);
6822}
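/*
 * Worked example (hypothetical numbers; assumes STAMRATIOU32 holds just the
 * two 32-bit counters u32A/u32B, i.e. 8 bytes, with u32A first): for
 * uPatchIdx = 3 the expression above yields pStatsGC + 3 * 8 + 0, i.e. the
 * guest context equivalent of &paStats[3].u32A.
 */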
6823
6824#endif /* VBOX_WITH_STATISTICS */
6825#ifdef VBOX_WITH_DEBUGGER
6826
6827/**
6828 * The '.patmoff' command.
6829 *
6830 * @returns VBox status code.
6831 * @param pCmd Pointer to the command descriptor (as registered).
6832 * @param pCmdHlp Pointer to command helper functions.
6833 * @param pUVM The user mode VM handle.
6834 * @param paArgs Pointer to (read-only) array of arguments.
6835 * @param cArgs Number of arguments in the array.
6836 */
6837static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
6838{
6839 /*
6840 * Validate input.
6841 */
6842 NOREF(cArgs); NOREF(paArgs);
6843 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
6844 PVM pVM = pUVM->pVM;
6845 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
6846
6847 if (HMIsEnabled(pVM))
6848 return DBGCCmdHlpPrintf(pCmdHlp, "PATM is permanently disabled by HM.\n");
6849
6850 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, DisableAllPatches, pVM);
6851 PATMR3AllowPatching(pVM->pUVM, false);
6852 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching disabled\n");
6853}
6854
6855/**
6856 * The '.patmon' command.
6857 *
6858 * @returns VBox status code.
6859 * @param pCmd Pointer to the command descriptor (as registered).
6860 * @param pCmdHlp Pointer to command helper functions.
6861 * @param pUVM The user mode VM handle.
6862 * @param paArgs Pointer to (read-only) array of arguments.
6863 * @param cArgs Number of arguments in the array.
6864 */
6865static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
6866{
6867 /*
6868 * Validate input.
6869 */
6870 NOREF(cArgs); NOREF(paArgs);
6871 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
6872 PVM pVM = pUVM->pVM;
6873 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
6874
6875 if (HMIsEnabled(pVM))
6876 return DBGCCmdHlpPrintf(pCmdHlp, "PATM is permanently disabled by HM.\n");
6877
6878 PATMR3AllowPatching(pVM->pUVM, true);
6879 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, EnableAllPatches, pVM);
6880 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching enabled\n");
6881}
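/*
 * Usage sketch (illustrative): from the VirtualBox debugger console both
 * commands above take no arguments - '.patmoff' disables every existing patch
 * and turns patching off, while '.patmon' allows patching again and re-enables
 * the existing patches.
 */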
6882
6883#endif /* VBOX_WITH_DEBUGGER */