VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PATM.cpp@55896

Last change on this file since 55896 was 55895, checked in by vboxsync, 10 years ago

Added pvUser to the raw-mode context virtual handler callbacks.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 263.4 KB
1/* $Id: PATM.cpp 55895 2015-05-17 19:42:38Z vboxsync $ */
2/** @file
3 * PATM - Dynamic Guest OS Patching Manager
4 *
5 * @note Never ever reuse patch memory!!
6 */
7
8/*
9 * Copyright (C) 2006-2014 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_PATM
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/stam.h>
26#include <VBox/vmm/pdmapi.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/cpum.h>
29#include <VBox/vmm/cpumdis.h>
30#include <VBox/vmm/iom.h>
31#include <VBox/vmm/mm.h>
32#include <VBox/vmm/em.h>
33#include <VBox/vmm/hm.h>
34#include <VBox/vmm/ssm.h>
35#include <VBox/vmm/trpm.h>
36#include <VBox/vmm/cfgm.h>
37#include <VBox/param.h>
38#include <VBox/vmm/selm.h>
39#include <VBox/vmm/csam.h>
40#include <iprt/avl.h>
41#include "PATMInternal.h"
42#include "PATMPatch.h"
43#include <VBox/vmm/vm.h>
44#include <VBox/vmm/uvm.h>
45#include <VBox/dbg.h>
46#include <VBox/err.h>
47#include <VBox/log.h>
48#include <iprt/assert.h>
49#include <iprt/asm.h>
50#include <VBox/dis.h>
51#include <VBox/disopcode.h>
52#include "internal/pgm.h"
53
54#include <iprt/string.h>
55#include "PATMA.h"
56
57//#define PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
58//#define PATM_DISABLE_ALL
59
60/**
61 * Refresh trampoline patch state.
62 */
63typedef struct PATMREFRESHPATCH
64{
65 /** Pointer to the VM structure. */
66 PVM pVM;
67 /** The trampoline patch record. */
68 PPATCHINFO pPatchTrampoline;
69 /** The new patch we want to jump to. */
70 PPATCHINFO pPatchRec;
71} PATMREFRESHPATCH, *PPATMREFRESHPATCH;
72
73
74#define PATMREAD_RAWCODE 1 /* read code as-is */
75#define PATMREAD_ORGCODE 2 /* read original guest opcode bytes; not the patched bytes */
76#define PATMREAD_NOCHECK 4 /* don't check for patch conflicts */
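/* Illustrative sketch (this particular call site is hypothetical): the flags above are
 * OR'ed together and passed as fReadFlags to the patmR3DisInstr* helpers defined further
 * down, e.g. to disassemble the original, unpatched bytes without conflict checking:
 *
 *     DISCPUSTATE cpu;
 *     uint32_t    cbInstr;
 *     bool fOk = patmR3DisInstr(pVM, pPatch, pInstrGC, NULL /*pbInstrHC*/,
 *                               PATMREAD_ORGCODE | PATMREAD_NOCHECK, &cpu, &cbInstr);
 */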
77
78/*
79 * Private structure used during disassembly
80 */
81typedef struct
82{
83 PVM pVM;
84 PPATCHINFO pPatchInfo;
85 R3PTRTYPE(uint8_t *) pbInstrHC;
86 RTRCPTR pInstrGC;
87 uint32_t fReadFlags;
88} PATMDISASM, *PPATMDISASM;
89
90
91/*******************************************************************************
92* Internal Functions *
93*******************************************************************************/
94static FNPGMR3VIRTHANDLER patmR3VirtPageHandler;
95
96static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pPatch);
97static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
98static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
99
100#ifdef LOG_ENABLED // keep gcc quiet
101static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC);
102#endif
103#ifdef VBOX_WITH_STATISTICS
104static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch);
105static void patmResetStat(PVM pVM, void *pvSample);
106static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf);
107#endif
108
109#define patmPatchHCPtr2PatchGCPtr(pVM, pHC) (pVM->patm.s.pPatchMemGC + (pHC - pVM->patm.s.pPatchMemHC))
110#define patmPatchGCPtr2PatchHCPtr(pVM, pGC) (pVM->patm.s.pPatchMemHC + (pGC - pVM->patm.s.pPatchMemGC))
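/* For illustration: the two macros above are inverse translations between the ring-3 (HC)
 * and raw-mode (GC) mappings of the patch memory block, so for any address inside the
 * block a round trip is expected to return the original pointer:
 *
 *     uint8_t *pbInstrHC = pVM->patm.s.pPatchMemHC + offPatch;
 *     RTRCPTR  InstrGC   = patmPatchHCPtr2PatchGCPtr(pVM, pbInstrHC);
 *     Assert(patmPatchGCPtr2PatchHCPtr(pVM, InstrGC) == pbInstrHC);
 */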
111
112static int patmReinit(PVM pVM);
113static DECLCALLBACK(int) patmR3RelocatePatches(PAVLOU32NODECORE pNode, void *pParam);
114static RTRCPTR patmR3GuestGCPtrToPatchGCPtrSimple(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC);
115static int patmR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch);
116
117#ifdef VBOX_WITH_DEBUGGER
118static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM);
119static FNDBGCCMD patmr3CmdOn;
120static FNDBGCCMD patmr3CmdOff;
121
122/** Command descriptors. */
123static const DBGCCMD g_aCmds[] =
124{
 125 /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, fFlags, pfnHandler, pszSyntax, pszDescription */
126 { "patmon", 0, 0, NULL, 0, 0, patmr3CmdOn, "", "Enable patching." },
127 { "patmoff", 0, 0, NULL, 0, 0, patmr3CmdOff, "", "Disable patching." },
128};
129#endif
130
131/* Don't want to break saved states, so put it here as a global variable. */
132static unsigned int cIDTHandlersDisabled = 0;
133
134/**
135 * Initializes the PATM.
136 *
137 * @returns VBox status code.
138 * @param pVM Pointer to the VM.
139 */
140VMMR3_INT_DECL(int) PATMR3Init(PVM pVM)
141{
142 int rc;
143
144 /*
145 * We only need a saved state dummy loader if HM is enabled.
146 */
147 if (HMIsEnabled(pVM))
148 {
149 pVM->fPATMEnabled = false;
150 return SSMR3RegisterStub(pVM, "PATM", 0);
151 }
152
153 /*
154 * Raw-mode.
155 */
156 Log(("PATMR3Init: Patch record size %d\n", sizeof(PATCHINFO)));
157
158 /* These values can't change as they are hardcoded in patch code (old saved states!) */
159 AssertCompile(VMCPU_FF_TIMER == RT_BIT_32(2));
160 AssertCompile(VM_FF_REQUEST == VMCPU_FF_REQUEST);
161 AssertCompile(VMCPU_FF_INTERRUPT_APIC == RT_BIT_32(0));
162 AssertCompile(VMCPU_FF_INTERRUPT_PIC == RT_BIT_32(1));
163
164 AssertReleaseMsg(g_fPatmInterruptFlag == (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST),
165 ("Interrupt flags out of sync!! g_fPatmInterruptFlag=%#x expected %#x. broken assembler?\n", g_fPatmInterruptFlag, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST));
166
167 /* Allocate patch memory and GC patch state memory. */
168 pVM->patm.s.cbPatchMem = PATCH_MEMORY_SIZE;
169 /* Add another page in case the generated code is much larger than expected. */
170 /** @todo bad safety precaution */
171 rc = MMR3HyperAllocOnceNoRel(pVM, PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE + PATM_STAT_MEMSIZE, PAGE_SIZE, MM_TAG_PATM, (void **)&pVM->patm.s.pPatchMemHC);
172 if (RT_FAILURE(rc))
173 {
174 Log(("MMHyperAlloc failed with %Rrc\n", rc));
175 return rc;
176 }
177 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
178
 179 /* PATM stack page for call instruction execution. (2 parts: one for our private stack and one to store the original return address.) */
180 pVM->patm.s.pGCStackHC = (RTRCPTR *)(pVM->patm.s.pPatchMemHC + PATCH_MEMORY_SIZE + PAGE_SIZE);
181 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
182
183 patmR3DbgInit(pVM);
184
185 /*
186 * Hypervisor memory for GC status data (read/write)
187 *
188 * Note1: This is non-critical data; if trashed by the guest, then it will only cause problems for itself
189 * Note2: This doesn't really belong here, but we need access to it for relocation purposes
190 *
191 */
192 Assert(sizeof(PATMGCSTATE) < PAGE_SIZE); /* Note: hardcoded dependencies on this exist. */
193 pVM->patm.s.pGCStateHC = (PPATMGCSTATE)((uint8_t *)pVM->patm.s.pGCStackHC + PATM_STACK_TOTAL_SIZE);
194 pVM->patm.s.pGCStateGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
195
196 /* Hypervisor memory for patch statistics */
197 pVM->patm.s.pStatsHC = (PSTAMRATIOU32)((uint8_t *)pVM->patm.s.pGCStateHC + PAGE_SIZE);
198 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
199
200 /* Memory for patch lookup trees. */
201 rc = MMHyperAlloc(pVM, sizeof(*pVM->patm.s.PatchLookupTreeHC), 0, MM_TAG_PATM, (void **)&pVM->patm.s.PatchLookupTreeHC);
202 AssertRCReturn(rc, rc);
203 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
204
205#ifdef RT_ARCH_AMD64 /* see patmReinit(). */
206 /* Check CFGM option. */
207 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "PATMEnabled", &pVM->fPATMEnabled);
208 if (RT_FAILURE(rc))
209# ifdef PATM_DISABLE_ALL
210 pVM->fPATMEnabled = false;
211# else
212 pVM->fPATMEnabled = true;
213# endif
214#endif
215
216 rc = patmReinit(pVM);
217 AssertRC(rc);
218 if (RT_FAILURE(rc))
219 return rc;
220
221 /*
222 * Register the virtual page access handler type.
223 */
224 rc = PGMR3HandlerVirtualTypeRegister(pVM, PGMVIRTHANDLERKIND_ALL, false /*fRelocUserRC*/,
225 NULL /*pfnInvalidateR3*/,
226 patmR3VirtPageHandler,
227 "patmRCVirtPagePfHandler", NULL /*pszModRC*/,
228 "PATMMonitorPatchJump", &pVM->patm.s.hMonitorPageType);
229 AssertRCReturn(rc, rc);
230
231 /*
232 * Register save and load state notifiers.
233 */
234 rc = SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SAVED_STATE_VERSION, sizeof(pVM->patm.s) + PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE,
235 NULL, NULL, NULL,
236 NULL, patmR3Save, NULL,
237 NULL, patmR3Load, NULL);
238 AssertRCReturn(rc, rc);
239
240#ifdef VBOX_WITH_DEBUGGER
241 /*
242 * Debugger commands.
243 */
244 static bool s_fRegisteredCmds = false;
245 if (!s_fRegisteredCmds)
246 {
247 int rc2 = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
248 if (RT_SUCCESS(rc2))
249 s_fRegisteredCmds = true;
250 }
251#endif
252
253#ifdef VBOX_WITH_STATISTICS
254 STAM_REG(pVM, &pVM->patm.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/PATM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
255 STAM_REG(pVM, &pVM->patm.s.StatPATMMemoryUsed,STAMTYPE_COUNTER, "/PATM/MemoryUsed", STAMUNIT_OCCURENCES, "The amount of hypervisor heap used for patches.");
256 STAM_REG(pVM, &pVM->patm.s.StatDisabled, STAMTYPE_COUNTER, "/PATM/Patch/Disabled", STAMUNIT_OCCURENCES, "Number of times patches were disabled.");
257 STAM_REG(pVM, &pVM->patm.s.StatEnabled, STAMTYPE_COUNTER, "/PATM/Patch/Enabled", STAMUNIT_OCCURENCES, "Number of times patches were enabled.");
258 STAM_REG(pVM, &pVM->patm.s.StatDirty, STAMTYPE_COUNTER, "/PATM/Patch/Dirty", STAMUNIT_OCCURENCES, "Number of times patches were marked dirty.");
259 STAM_REG(pVM, &pVM->patm.s.StatUnusable, STAMTYPE_COUNTER, "/PATM/Patch/Unusable", STAMUNIT_OCCURENCES, "Number of unusable patches (conflicts).");
260 STAM_REG(pVM, &pVM->patm.s.StatInstalled, STAMTYPE_COUNTER, "/PATM/Patch/Installed", STAMUNIT_OCCURENCES, "Number of installed patches.");
261 STAM_REG(pVM, &pVM->patm.s.StatInt3Callable, STAMTYPE_COUNTER, "/PATM/Patch/Int3Callable", STAMUNIT_OCCURENCES, "Number of cli patches turned into int3 patches.");
262
263 STAM_REG(pVM, &pVM->patm.s.StatInt3BlockRun, STAMTYPE_COUNTER, "/PATM/Patch/Run/Int3", STAMUNIT_OCCURENCES, "Number of times an int3 block patch was executed.");
264 STAMR3RegisterF(pVM, &pVM->patm.s.pGCStateHC->uPatchCalls, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Patch/Run/Normal");
265
266 STAM_REG(pVM, &pVM->patm.s.StatInstalledFunctionPatches, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Function", STAMUNIT_OCCURENCES, "Number of installed function duplication patches.");
267 STAM_REG(pVM, &pVM->patm.s.StatInstalledTrampoline, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Trampoline", STAMUNIT_OCCURENCES, "Number of installed trampoline patches.");
268 STAM_REG(pVM, &pVM->patm.s.StatInstalledJump, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Jump", STAMUNIT_OCCURENCES, "Number of installed jump patches.");
269
270 STAM_REG(pVM, &pVM->patm.s.StatOverwritten, STAMTYPE_COUNTER, "/PATM/Patch/Overwritten", STAMUNIT_OCCURENCES, "Number of overwritten patches.");
271 STAM_REG(pVM, &pVM->patm.s.StatFixedConflicts,STAMTYPE_COUNTER, "/PATM/Patch/ConflictFixed", STAMUNIT_OCCURENCES, "Number of fixed conflicts.");
272 STAM_REG(pVM, &pVM->patm.s.StatFlushed, STAMTYPE_COUNTER, "/PATM/Patch/Flushed", STAMUNIT_OCCURENCES, "Number of flushes of pages with patch jumps.");
273 STAM_REG(pVM, &pVM->patm.s.StatMonitored, STAMTYPE_COUNTER, "/PATM/Patch/Monitored", STAMUNIT_OCCURENCES, "Number of patches in monitored patch pages.");
274 STAM_REG(pVM, &pVM->patm.s.StatPageBoundaryCrossed, STAMTYPE_COUNTER, "/PATM/Patch/BoundaryCross", STAMUNIT_OCCURENCES, "Number of refused patches due to patch jump crossing page boundary.");
275
276 STAM_REG(pVM, &pVM->patm.s.StatHandleTrap, STAMTYPE_PROFILE, "/PATM/HandleTrap", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3HandleTrap");
277 STAM_REG(pVM, &pVM->patm.s.StatPushTrap, STAMTYPE_COUNTER, "/PATM/HandleTrap/PushWP", STAMUNIT_OCCURENCES, "Number of traps due to monitored stack pages.");
278
279 STAM_REG(pVM, &pVM->patm.s.StatSwitchBack, STAMTYPE_COUNTER, "/PATM/SwitchBack", STAMUNIT_OCCURENCES, "Switch back to original guest code when IF=1 & executing PATM instructions");
280 STAM_REG(pVM, &pVM->patm.s.StatSwitchBackFail,STAMTYPE_COUNTER, "/PATM/SwitchBackFail", STAMUNIT_OCCURENCES, "Failed switch back to original guest code when IF=1 & executing PATM instructions");
281
282 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQFailed, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Failed", STAMUNIT_OCCURENCES, "Nr of failed PATMR3DuplicateFunctionRequest calls");
283 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQSuccess, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Success", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls");
284 STAM_REG(pVM, &pVM->patm.s.StatDuplicateUseExisting,STAMTYPE_COUNTER, "/PATM/Function/DupREQ/UseExist", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls when using an existing patch");
285
286 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupInsert, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Insert", STAMUNIT_OCCURENCES, "Nr of successful function address insertions");
287 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupReplace, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Replace", STAMUNIT_OCCURENCES, "Nr of successful function address replacements");
288 STAM_REG(pVM, &pVM->patm.s.StatU32FunctionMaxSlotsUsed, STAMTYPE_U32_RESET,"/PATM/Function/Lookup/MaxSlots", STAMUNIT_OCCURENCES, "Maximum nr of lookup slots used in all call patches");
289
290 STAM_REG(pVM, &pVM->patm.s.StatFunctionFound, STAMTYPE_COUNTER, "/PATM/Function/Found", STAMUNIT_OCCURENCES, "Nr of successful function patch lookups in GC");
291 STAM_REG(pVM, &pVM->patm.s.StatFunctionNotFound, STAMTYPE_COUNTER, "/PATM/Function/NotFound", STAMUNIT_OCCURENCES, "Nr of failed function patch lookups in GC");
292
293 STAM_REG(pVM, &pVM->patm.s.StatPatchWrite, STAMTYPE_PROFILE, "/PATM/Write/Handle", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3PatchWrite");
294 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteDetect, STAMTYPE_PROFILE, "/PATM/Write/Detect", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMIsWriteToPatchPage");
295 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpreted, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Success", STAMUNIT_OCCURENCES, "Nr of interpreted patch writes.");
296 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpretedFailed, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Failed", STAMUNIT_OCCURENCES, "Nr of failed interpreted patch writes.");
297
298 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshSuccess, STAMTYPE_COUNTER, "/PATM/Refresh/Success", STAMUNIT_OCCURENCES, "Successful patch refreshes");
299 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshFailed, STAMTYPE_COUNTER, "/PATM/Refresh/Failure", STAMUNIT_OCCURENCES, "Failed patch refreshes");
300
301 STAM_REG(pVM, &pVM->patm.s.StatPatchPageInserted, STAMTYPE_COUNTER, "/PATM/Page/Inserted", STAMUNIT_OCCURENCES, "Nr of inserted guest pages that were patched");
302 STAM_REG(pVM, &pVM->patm.s.StatPatchPageRemoved, STAMTYPE_COUNTER, "/PATM/Page/Removed", STAMUNIT_OCCURENCES, "Nr of removed guest pages that were patched");
303
304 STAM_REG(pVM, &pVM->patm.s.StatInstrDirty, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Detected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty.");
305 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyGood, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Corrected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and corrected later on.");
306 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyBad, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Failed", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and we were not able to correct them.");
307
308 STAM_REG(pVM, &pVM->patm.s.StatSysEnter, STAMTYPE_COUNTER, "/PATM/Emul/SysEnter", STAMUNIT_OCCURENCES, "Number of times sysenter was emulated.");
309 STAM_REG(pVM, &pVM->patm.s.StatSysExit, STAMTYPE_COUNTER, "/PATM/Emul/SysExit" , STAMUNIT_OCCURENCES, "Number of times sysexit was emulated.");
310 STAM_REG(pVM, &pVM->patm.s.StatEmulIret, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Success", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
311 STAM_REG(pVM, &pVM->patm.s.StatEmulIretFailed, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Failed", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
312
313 STAM_REG(pVM, &pVM->patm.s.StatGenRet, STAMTYPE_COUNTER, "/PATM/Gen/Ret" , STAMUNIT_OCCURENCES, "Number of generated ret instructions.");
314 STAM_REG(pVM, &pVM->patm.s.StatGenRetReused, STAMTYPE_COUNTER, "/PATM/Gen/RetReused" , STAMUNIT_OCCURENCES, "Number of reused ret instructions.");
315 STAM_REG(pVM, &pVM->patm.s.StatGenCall, STAMTYPE_COUNTER, "/PATM/Gen/Call", STAMUNIT_OCCURENCES, "Number of generated call instructions.");
316 STAM_REG(pVM, &pVM->patm.s.StatGenJump, STAMTYPE_COUNTER, "/PATM/Gen/Jmp" , STAMUNIT_OCCURENCES, "Number of generated indirect jump instructions.");
317 STAM_REG(pVM, &pVM->patm.s.StatGenPopf, STAMTYPE_COUNTER, "/PATM/Gen/Popf" , STAMUNIT_OCCURENCES, "Number of generated popf instructions.");
318
319 STAM_REG(pVM, &pVM->patm.s.StatCheckPendingIRQ, STAMTYPE_COUNTER, "/PATM/GC/CheckIRQ" , STAMUNIT_OCCURENCES, "Number of traps that ask to check for pending irqs.");
320#endif /* VBOX_WITH_STATISTICS */
321
322 Log(("g_patmCallRecord.cbFunction %u\n", g_patmCallRecord.cbFunction));
323 Log(("g_patmCallIndirectRecord.cbFunction %u\n", g_patmCallIndirectRecord.cbFunction));
324 Log(("g_patmRetRecord.cbFunction %u\n", g_patmRetRecord.cbFunction));
325 Log(("g_patmJumpIndirectRecord.cbFunction %u\n", g_patmJumpIndirectRecord.cbFunction));
326 Log(("g_patmPopf32Record.cbFunction %u\n", g_patmPopf32Record.cbFunction));
327 Log(("g_patmIretRecord.cbFunction %u\n", g_patmIretRecord.cbFunction));
328 Log(("g_patmStiRecord.cbFunction %u\n", g_patmStiRecord.cbFunction));
329 Log(("g_patmCheckIFRecord.cbFunction %u\n", g_patmCheckIFRecord.cbFunction));
330
331 return rc;
332}
333
334/**
335 * Finalizes HMA page attributes.
336 *
337 * @returns VBox status code.
338 * @param pVM Pointer to the VM.
339 */
340VMMR3_INT_DECL(int) PATMR3InitFinalize(PVM pVM)
341{
342 if (HMIsEnabled(pVM))
343 return VINF_SUCCESS;
344
345 /*
346 * The GC state, stack and statistics must be read/write for the guest
347 * (supervisor only of course).
348 *
349 * Remember, we run guest code at ring-1 and ring-2 levels, which are
350 * considered supervisor levels by the paging structures. We run the VMM
351 * in ring-0 with CR0.WP=0 and mapping all VMM structures as read-only
352 * pages. The following structures are exceptions and must be mapped with
353 * write access so the ring-1 and ring-2 code can modify them.
354 */
355 int rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStateGC, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
356 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to make the GCState accessible to ring-1 and ring-2 code: %Rrc\n", rc), rc);
357
358 rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStackGC, PATM_STACK_TOTAL_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
359 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to make the GCStack accessible to ring-1 and ring-2 code: %Rrc\n", rc), rc);
360
361 rc = PGMMapSetPage(pVM, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
362 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to make the stats struct accessible to ring-1 and ring-2 code: %Rrc\n", rc), rc);
363
364 /*
365 * Find the patch helper segment so we can identify code running there as patch code.
366 */
367 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_PatchHlpBegin", &pVM->patm.s.pbPatchHelpersRC);
368 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to resolve g_PatchHlpBegin: %Rrc\n", rc), rc);
369 pVM->patm.s.pbPatchHelpersR3 = (uint8_t *)MMHyperRCToR3(pVM, pVM->patm.s.pbPatchHelpersRC);
370 AssertLogRelReturn(pVM->patm.s.pbPatchHelpersR3 != NULL, VERR_INTERNAL_ERROR_3);
371
372 RTRCPTR RCPtrEnd;
373 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_PatchHlpEnd", &RCPtrEnd);
374 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to resolve g_PatchHlpEnd: %Rrc\n", rc), rc);
375
376 pVM->patm.s.cbPatchHelpers = RCPtrEnd - pVM->patm.s.pbPatchHelpersRC;
377 AssertLogRelMsgReturn(pVM->patm.s.cbPatchHelpers < _128K,
378 ("%RRv-%RRv => %#x\n", pVM->patm.s.pbPatchHelpersRC, RCPtrEnd, pVM->patm.s.cbPatchHelpers),
379 VERR_INTERNAL_ERROR_4);
380
381
382 return VINF_SUCCESS;
383}
384
385/**
386 * (Re)initializes PATM
387 *
388 * @param pVM The VM.
389 */
390static int patmReinit(PVM pVM)
391{
392 int rc;
393
394 /*
395 * Assert alignment and sizes.
396 */
397 AssertRelease(!(RT_OFFSETOF(VM, patm.s) & 31));
398 AssertRelease(sizeof(pVM->patm.s) <= sizeof(pVM->patm.padding));
399
400 /*
401 * Setup any fixed pointers and offsets.
402 */
403 pVM->patm.s.offVM = RT_OFFSETOF(VM, patm);
404
405#ifndef RT_ARCH_AMD64 /* would be nice if this was changed everywhere. was driving me crazy on AMD64. */
406#ifndef PATM_DISABLE_ALL
407 pVM->fPATMEnabled = true;
408#endif
409#endif
410
411 Assert(pVM->patm.s.pGCStateHC);
412 memset(pVM->patm.s.pGCStateHC, 0, PAGE_SIZE);
413 AssertReleaseMsg(pVM->patm.s.pGCStateGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStateGC));
414
415 Log(("Patch memory allocated at %p - %RRv\n", pVM->patm.s.pPatchMemHC, pVM->patm.s.pPatchMemGC));
416 pVM->patm.s.pGCStateHC->uVMFlags = X86_EFL_IF;
417
418 Assert(pVM->patm.s.pGCStackHC);
419 memset(pVM->patm.s.pGCStackHC, 0, PAGE_SIZE);
420 AssertReleaseMsg(pVM->patm.s.pGCStackGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStackGC));
421 pVM->patm.s.pGCStateHC->Psp = PATM_STACK_SIZE;
422 pVM->patm.s.pGCStateHC->fPIF = 1; /* PATM Interrupt Flag */
423
424 Assert(pVM->patm.s.pStatsHC);
425 memset(pVM->patm.s.pStatsHC, 0, PATM_STAT_MEMSIZE);
426 AssertReleaseMsg(pVM->patm.s.pStatsGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pStatsGC));
427
428 Assert(pVM->patm.s.pPatchMemHC);
429 Assert(pVM->patm.s.pPatchMemGC == MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC));
430 memset(pVM->patm.s.pPatchMemHC, 0, PATCH_MEMORY_SIZE);
431 AssertReleaseMsg(pVM->patm.s.pPatchMemGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pPatchMemHC));
432
433 /* Needed for future patching of sldt/sgdt/sidt/str etc. */
434 pVM->patm.s.pCPUMCtxGC = VM_RC_ADDR(pVM, CPUMQueryGuestCtxPtr(VMMGetCpu(pVM)));
435
436 Assert(pVM->patm.s.PatchLookupTreeHC);
437 Assert(pVM->patm.s.PatchLookupTreeGC == MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC));
438
439 /*
440 * (Re)Initialize PATM structure
441 */
442 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
443 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr);
444 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
445 pVM->patm.s.offPatchMem = 16; /* don't start with zero here */
446 pVM->patm.s.uCurrentPatchIdx = 1; /* Index zero is a dummy */
447 pVM->patm.s.pvFaultMonitor = 0;
448 pVM->patm.s.deltaReloc = 0;
449
450 /* Lowest and highest patched instruction */
451 pVM->patm.s.pPatchedInstrGCLowest = ~0;
452 pVM->patm.s.pPatchedInstrGCHighest = 0;
453
454 pVM->patm.s.PatchLookupTreeHC->PatchTree = 0;
455 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
456 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
457
458 pVM->patm.s.pfnSysEnterPatchGC = 0;
459 pVM->patm.s.pfnSysEnterGC = 0;
460
461 pVM->patm.s.fOutOfMemory = false;
462
463 pVM->patm.s.pfnHelperCallGC = 0;
464 patmR3DbgReset(pVM);
465
466 /* Generate all global functions to be used by future patches. */
467 /* We generate a fake patch in order to use the existing code for relocation. */
468 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pVM->patm.s.pGlobalPatchRec);
469 if (RT_FAILURE(rc))
470 {
471 Log(("Out of memory!!!!\n"));
472 return VERR_NO_MEMORY;
473 }
474 pVM->patm.s.pGlobalPatchRec->patch.flags = PATMFL_GLOBAL_FUNCTIONS;
475 pVM->patm.s.pGlobalPatchRec->patch.uState = PATCH_ENABLED;
476 pVM->patm.s.pGlobalPatchRec->patch.pPatchBlockOffset = pVM->patm.s.offPatchMem;
477
478 rc = patmPatchGenGlobalFunctions(pVM, &pVM->patm.s.pGlobalPatchRec->patch);
479 AssertRC(rc);
480
481 /* Update free pointer in patch memory. */
482 pVM->patm.s.offPatchMem += pVM->patm.s.pGlobalPatchRec->patch.uCurPatchOffset;
483 /* Round to next 8 byte boundary. */
484 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
485
486
487 return rc;
488}
489
490
491/**
492 * Applies relocations to data and code managed by this
493 * component. This function will be called at init and
 494 * whenever the VMM needs to relocate itself inside the GC.
495 *
496 * The PATM will update the addresses used by the switcher.
497 *
498 * @param pVM The VM.
499 * @param offDelta The relocation delta.
500 */
501VMMR3_INT_DECL(void) PATMR3Relocate(PVM pVM, RTRCINTPTR offDelta)
502{
503 if (HMIsEnabled(pVM))
504 return;
505
506 RTRCPTR GCPtrNew = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
507 Assert((RTRCINTPTR)(GCPtrNew - pVM->patm.s.pGCStateGC) == offDelta);
508
509 Log(("PATMR3Relocate from %RRv to %RRv - delta %08X\n", pVM->patm.s.pGCStateGC, GCPtrNew, offDelta));
510 if (offDelta)
511 {
512 PCPUMCTX pCtx;
513
514 /* Update CPUMCTX guest context pointer. */
515 pVM->patm.s.pCPUMCtxGC += offDelta;
516
517 pVM->patm.s.deltaReloc = offDelta;
518 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmR3RelocatePatches, (void *)pVM);
519
520 pVM->patm.s.pGCStateGC = GCPtrNew;
521 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
522 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
523 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
524 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
525
526 if (pVM->patm.s.pfnSysEnterPatchGC)
527 pVM->patm.s.pfnSysEnterPatchGC += offDelta;
528
529 /* If we are running patch code right now, then also adjust EIP. */
530 pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));
531 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
532 pCtx->eip += offDelta;
533
534 /* Deal with the global patch functions. */
535 pVM->patm.s.pfnHelperCallGC += offDelta;
536 pVM->patm.s.pfnHelperRetGC += offDelta;
537 pVM->patm.s.pfnHelperIretGC += offDelta;
538 pVM->patm.s.pfnHelperJumpGC += offDelta;
539
540 pVM->patm.s.pbPatchHelpersRC += offDelta;
541
542 patmR3RelocatePatches(&pVM->patm.s.pGlobalPatchRec->Core, (void *)pVM);
543 }
544}
545
546
547/**
548 * Terminates the PATM.
549 *
 550 * Termination means cleaning up and freeing all resources;
 551 * the VM itself is at this point powered off or suspended.
552 *
553 * @returns VBox status code.
554 * @param pVM Pointer to the VM.
555 */
556VMMR3_INT_DECL(int) PATMR3Term(PVM pVM)
557{
558 if (HMIsEnabled(pVM))
559 return VINF_SUCCESS;
560
561 patmR3DbgTerm(pVM);
562
563 /* Memory was all allocated from the two MM heaps and requires no freeing. */
564 return VINF_SUCCESS;
565}
566
567
568/**
569 * PATM reset callback.
570 *
571 * @returns VBox status code.
572 * @param pVM The VM which is reset.
573 */
574VMMR3_INT_DECL(int) PATMR3Reset(PVM pVM)
575{
576 Log(("PATMR3Reset\n"));
577 if (HMIsEnabled(pVM))
578 return VINF_SUCCESS;
579
580 /* Free all patches. */
581 for (;;)
582 {
583 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32RemoveBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, 0, true);
584 if (pPatchRec)
585 patmR3RemovePatch(pVM, pPatchRec, true);
586 else
587 break;
588 }
589 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
590 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
591 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
592 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
593
594 int rc = patmReinit(pVM);
595 if (RT_SUCCESS(rc))
596 rc = PATMR3InitFinalize(pVM); /* paranoia */
597
598 return rc;
599}
600
601/**
602 * @callback_method_impl{FNDISREADBYTES}
603 */
604static DECLCALLBACK(int) patmReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
605{
606 PATMDISASM *pDisInfo = (PATMDISASM *)pDis->pvUser;
607
608/** @todo change this to read more! */
609 /*
 610 * Trap/interrupt handlers typically call common code on entry, which might already have patches inserted.
611 * As we currently don't support calling patch code from patch code, we'll let it read the original opcode bytes instead.
612 */
613 /** @todo could change in the future! */
614 if (pDisInfo->fReadFlags & PATMREAD_ORGCODE)
615 {
616 size_t cbRead = cbMaxRead;
617 RTUINTPTR uSrcAddr = pDis->uInstrAddr + offInstr;
618 int rc = PATMR3ReadOrgInstr(pDisInfo->pVM, pDis->uInstrAddr + offInstr, &pDis->abInstr[offInstr], cbRead, &cbRead);
619 if (RT_SUCCESS(rc))
620 {
621 if (cbRead >= cbMinRead)
622 {
623 pDis->cbCachedInstr = offInstr + (uint8_t)cbRead;
624 return VINF_SUCCESS;
625 }
626
627 cbMinRead -= (uint8_t)cbRead;
628 cbMaxRead -= (uint8_t)cbRead;
629 offInstr += (uint8_t)cbRead;
630 uSrcAddr += cbRead;
631 }
632
633#ifdef VBOX_STRICT
634 if ( !(pDisInfo->pPatchInfo->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER))
635 && !(pDisInfo->fReadFlags & PATMREAD_NOCHECK))
636 {
637 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pDis->uInstrAddr + offInstr, NULL) == false);
638 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pDis->uInstrAddr + offInstr + cbMinRead-1, NULL) == false);
639 }
640#endif
641 }
642
643 int rc = VINF_SUCCESS;
644 RTGCPTR32 uSrcAddr = (RTGCPTR32)pDis->uInstrAddr + offInstr;
645 if ( !pDisInfo->pbInstrHC
646 || ( PAGE_ADDRESS(pDisInfo->pInstrGC) != PAGE_ADDRESS(uSrcAddr + cbMinRead - 1)
647 && !PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr)))
648 {
649 Assert(!PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr));
650 rc = PGMPhysSimpleReadGCPtr(&pDisInfo->pVM->aCpus[0], &pDis->abInstr[offInstr], uSrcAddr, cbMinRead);
651 offInstr += cbMinRead;
652 }
653 else
654 {
655 /*
656 * pbInstrHC is the base address; adjust according to the GC pointer.
657 *
 658 * Try to read the max number of bytes here. Since the disassembler only
659 * ever uses these bytes for the current instruction, it doesn't matter
660 * much if we accidentally read the start of the next instruction even
661 * if it happens to be a patch jump or int3.
662 */
663 uint8_t const *pbInstrHC = pDisInfo->pbInstrHC; AssertPtr(pbInstrHC);
664 pbInstrHC += uSrcAddr - pDisInfo->pInstrGC;
665
666 size_t cbMaxRead1 = PAGE_SIZE - (uSrcAddr & PAGE_OFFSET_MASK);
667 size_t cbMaxRead2 = PAGE_SIZE - ((uintptr_t)pbInstrHC & PAGE_OFFSET_MASK);
668 size_t cbToRead = RT_MIN(cbMaxRead1, RT_MAX(cbMaxRead2, cbMinRead));
669 if (cbToRead > cbMaxRead)
670 cbToRead = cbMaxRead;
671
672 memcpy(&pDis->abInstr[offInstr], pbInstrHC, cbToRead);
673 offInstr += (uint8_t)cbToRead;
674 }
675
676 pDis->cbCachedInstr = offInstr;
677 return rc;
678}
679
680
681DECLINLINE(bool) patmR3DisInstrToStr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
682 PDISCPUSTATE pCpu, uint32_t *pcbInstr, char *pszOutput, size_t cbOutput)
683{
684 PATMDISASM disinfo;
685 disinfo.pVM = pVM;
686 disinfo.pPatchInfo = pPatch;
687 disinfo.pbInstrHC = pbInstrHC;
688 disinfo.pInstrGC = InstrGCPtr32;
689 disinfo.fReadFlags = fReadFlags;
690 return RT_SUCCESS(DISInstrToStrWithReader(InstrGCPtr32,
691 (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
692 patmReadBytes, &disinfo,
693 pCpu, pcbInstr, pszOutput, cbOutput));
694}
695
696
697DECLINLINE(bool) patmR3DisInstr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
698 PDISCPUSTATE pCpu, uint32_t *pcbInstr)
699{
700 PATMDISASM disinfo;
701 disinfo.pVM = pVM;
702 disinfo.pPatchInfo = pPatch;
703 disinfo.pbInstrHC = pbInstrHC;
704 disinfo.pInstrGC = InstrGCPtr32;
705 disinfo.fReadFlags = fReadFlags;
706 return RT_SUCCESS(DISInstrWithReader(InstrGCPtr32,
707 (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
708 patmReadBytes, &disinfo,
709 pCpu, pcbInstr));
710}
711
712
713DECLINLINE(bool) patmR3DisInstrNoStrOpMode(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC,
714 uint32_t fReadFlags,
715 PDISCPUSTATE pCpu, uint32_t *pcbInstr)
716{
717 PATMDISASM disinfo;
718 disinfo.pVM = pVM;
719 disinfo.pPatchInfo = pPatch;
720 disinfo.pbInstrHC = pbInstrHC;
721 disinfo.pInstrGC = InstrGCPtr32;
722 disinfo.fReadFlags = fReadFlags;
723 return RT_SUCCESS(DISInstrWithReader(InstrGCPtr32, pPatch->uOpMode, patmReadBytes, &disinfo,
724 pCpu, pcbInstr));
725}
726
727#ifdef LOG_ENABLED
728# define PATM_LOG_ORG_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) \
729 PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, PATMREAD_ORGCODE, a_szComment, " patch:")
730# define PATM_LOG_RAW_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) \
731 PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, PATMREAD_RAWCODE, a_szComment, " patch:")
732
733# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) \
734 do { \
735 if (LogIsEnabled()) \
736 patmLogRawPatchInstr(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2); \
737 } while (0)
738
739static void patmLogRawPatchInstr(PVM pVM, PPATCHINFO pPatch, uint32_t fFlags,
740 const char *pszComment1, const char *pszComment2)
741{
742 DISCPUSTATE DisState;
743 char szOutput[128];
744 szOutput[0] = '\0';
745 patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC, NULL, fFlags,
746 &DisState, NULL, szOutput, sizeof(szOutput));
747 Log(("%s%s %s", pszComment1, pszComment2, szOutput));
748}
749
750#else
751# define PATM_LOG_ORG_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) do { } while (0)
752# define PATM_LOG_RAW_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) do { } while (0)
753# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) do { } while (0)
754#endif
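/* A typical (illustrative) use of the logging macros above, mirroring the call further down
 * in patmR3RelocatePatches, which dumps the current raw bytes at the patched location:
 *
 *     PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Org patch jump:", "");
 */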
755
756
757/**
758 * Callback function for RTAvloU32DoWithAll
759 *
760 * Updates all fixups in the patches
761 *
762 * @returns VBox status code.
763 * @param pNode Current node
764 * @param pParam Pointer to the VM.
765 */
766static DECLCALLBACK(int) patmR3RelocatePatches(PAVLOU32NODECORE pNode, void *pParam)
767{
768 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
769 PVM pVM = (PVM)pParam;
770 RTRCINTPTR delta;
771 int rc;
772
773 /* Nothing to do if the patch is not active. */
774 if (pPatch->patch.uState == PATCH_REFUSED)
775 return 0;
776
777 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
778 PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Org patch jump:", "");
779
780 Log(("Nr of fixups %d\n", pPatch->patch.nrFixups));
781 delta = (RTRCINTPTR)pVM->patm.s.deltaReloc;
782
783 /*
784 * Apply fixups.
785 */
786 AVLPVKEY key = NULL;
787 for (;;)
788 {
789 /* Get the record that's closest from above (after or equal to key). */
790 PRELOCREC pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true);
791 if (!pRec)
792 break;
793
794 key = (uint8_t *)pRec->Core.Key + 1; /* search for the next record during the next round. */
795
796 switch (pRec->uType)
797 {
798 case FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL:
799 Assert(pRec->pDest == pRec->pSource); Assert(PATM_IS_ASMFIX(pRec->pSource));
800 Log(("Absolute patch template fixup type %#x at %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
801 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
802 break;
803
804 case FIXUP_ABSOLUTE:
805 Log(("Absolute fixup at %RRv %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
806 if ( !pRec->pSource
807 || PATMIsPatchGCAddr(pVM, pRec->pSource))
808 {
809 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
810 }
811 else
812 {
813 uint8_t curInstr[15];
814 uint8_t oldInstr[15];
815 Assert(pRec->pSource && pPatch->patch.cbPrivInstr <= 15);
816
817 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
818
819 memcpy(oldInstr, pPatch->patch.aPrivInstr, pPatch->patch.cbPrivInstr);
820 *(RTRCPTR *)&oldInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
821
822 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), curInstr, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPrivInstr);
823 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
824
825 pRec->pDest = (RTRCPTR)((RTRCUINTPTR)pRec->pDest + delta);
826
827 if ( rc == VERR_PAGE_NOT_PRESENT
828 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
829 {
830 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
831
832 Log(("PATM: Patch page not present -> check later!\n"));
833 rc = PGMR3HandlerVirtualRegister(pVM, VMMGetCpu(pVM), pVM->patm.s.hMonitorPageType,
834 pPage,
835 pPage + (PAGE_SIZE - 1) /* inclusive! */,
836 (void *)(uintptr_t)pPage, pPage, NULL /*pszDesc*/);
837 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
838 }
839 else
840 if (memcmp(curInstr, oldInstr, pPatch->patch.cbPrivInstr))
841 {
842 Log(("PATM: Patch was overwritten -> disabling patch!!\n"));
843 /*
844 * Disable patch; this is not a good solution
845 */
846 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
847 pPatch->patch.uState = PATCH_DISABLED;
848 }
849 else
850 if (RT_SUCCESS(rc))
851 {
852 *(RTRCPTR *)&curInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
853 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pRec->pSource, curInstr, pPatch->patch.cbPrivInstr);
854 AssertRC(rc);
855 }
856 }
857 break;
858
859 case FIXUP_REL_JMPTOPATCH:
860 {
861 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
862
863 if ( pPatch->patch.uState == PATCH_ENABLED
864 && (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE))
865 {
866 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
867 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
868 RTRCPTR pJumpOffGC;
869 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
870 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
871
872#if 0 /** @todo '*(int32_t*)pRec->pRelocPos' crashes on restore of an XP VM here. pRelocPos=0x8000dbe2180a (bird) */
873 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
874#else
875 Log(("Relative fixup (g2p) ???????? -> %08X at %08X (source=%08x, target=%08x)\n", displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
876#endif
877
878 Assert(pRec->pSource - pPatch->patch.cbPatchJump == pPatch->patch.pPrivInstrGC);
879#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
880 if (pPatch->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
881 {
882 Assert(pPatch->patch.flags & PATMFL_JUMP_CONFLICT);
883
884 pJumpOffGC = pPatch->patch.pPrivInstrGC + 2; //two byte opcode
885 oldJump[0] = pPatch->patch.aPrivInstr[0];
886 oldJump[1] = pPatch->patch.aPrivInstr[1];
887 *(RTRCUINTPTR *)&oldJump[2] = displOld;
888 }
889 else
890#endif
891 if (pPatch->patch.cbPatchJump == SIZEOF_NEARJUMP32)
892 {
893 pJumpOffGC = pPatch->patch.pPrivInstrGC + 1; //one byte opcode
894 oldJump[0] = 0xE9;
895 *(RTRCUINTPTR *)&oldJump[1] = displOld;
896 }
897 else
898 {
899 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->patch.cbPatchJump));
900 continue; //this should never happen!!
901 }
902 Assert(pPatch->patch.cbPatchJump <= sizeof(temp));
903
904 /*
905 * Read old patch jump and compare it to the one we previously installed
906 */
907 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPatchJump);
908 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
909
910 if ( rc == VERR_PAGE_NOT_PRESENT
911 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
912 {
913 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
914 Log(("PATM: Patch page not present -> check later!\n"));
915 rc = PGMR3HandlerVirtualRegister(pVM, VMMGetCpu(pVM), pVM->patm.s.hMonitorPageType,
916 pPage,
917 pPage + (PAGE_SIZE - 1) /* inclusive! */,
918 (void *)(uintptr_t)pPage, pPage, NULL /*pszDesc*/);
919 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
920 }
921 else
922 if (memcmp(temp, oldJump, pPatch->patch.cbPatchJump))
923 {
924 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
925 /*
926 * Disable patch; this is not a good solution
927 */
928 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
929 pPatch->patch.uState = PATCH_DISABLED;
930 }
931 else
932 if (RT_SUCCESS(rc))
933 {
934 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
935 AssertRC(rc);
936 }
937 else
938 AssertMsgFailed(("Unexpected error %d from MMR3PhysReadGCVirt\n", rc));
939 }
940 else
941 Log(("Skip the guest jump to patch code for this disabled patch %RGv - %08X\n", pPatch->patch.pPrivInstrGC, pRec->pRelocPos));
942
943 pRec->pDest = pTarget;
944 break;
945 }
946
947 case FIXUP_REL_JMPTOGUEST:
948 {
949 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
950 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
951
952 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
953 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
954 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
955 pRec->pSource = pSource;
956 break;
957 }
958
959 case FIXUP_REL_HELPER_IN_PATCH_ASM_TMPL:
960 case FIXUP_CONSTANT_IN_PATCH_ASM_TMPL:
961 /* Only applicable when loading state. */
962 Assert(pRec->pDest == pRec->pSource);
963 Assert(PATM_IS_ASMFIX(pRec->pSource));
964 break;
965
966 default:
967 AssertMsg(0, ("Invalid fixup type!!\n"));
968 return VERR_INVALID_PARAMETER;
969 }
970 }
971
972 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
973 PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Rel patch jump:", "");
974 return 0;
975}
976
977/**
978 * \#PF Handler callback for virtual access handler ranges.
979 *
980 * Important to realize that a physical page in a range can have aliases, and
981 * for ALL and WRITE handlers these will also trigger.
982 *
 983 * @returns VINF_SUCCESS if the handler has carried out the operation.
984 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
985 * @param pVM Pointer to the VM.
986 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
987 * @param pvPtr The HC mapping of that address.
988 * @param pvBuf What the guest is reading/writing.
989 * @param cbBuf How much it's reading/writing.
990 * @param enmAccessType The access type.
991 * @param pvUser User argument.
992 */
993static DECLCALLBACK(int) patmR3VirtPageHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
994 PGMACCESSTYPE enmAccessType, void *pvUser)
995{
996 Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
997 NOREF(pvPtr); NOREF(pvBuf); NOREF(cbBuf); NOREF(pvUser);
998
999 /** @todo could be the wrong virtual address (alias) */
1000 pVM->patm.s.pvFaultMonitor = GCPtr;
1001 PATMR3HandleMonitoredPage(pVM);
1002 return VINF_PGM_HANDLER_DO_DEFAULT;
1003}
1004
1005#ifdef VBOX_WITH_DEBUGGER
1006
1007/**
1008 * Callback function for RTAvloU32DoWithAll
1009 *
1010 * Enables the patch that's being enumerated
1011 *
1012 * @returns 0 (continue enumeration).
1013 * @param pNode Current node
1014 * @param pVM Pointer to the VM.
1015 */
1016static DECLCALLBACK(int) EnableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
1017{
1018 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
1019
1020 PATMR3EnablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
1021 return 0;
1022}
1023
1024
1025/**
1026 * Callback function for RTAvloU32DoWithAll
1027 *
1028 * Disables the patch that's being enumerated
1029 *
1030 * @returns 0 (continue enumeration).
1031 * @param pNode Current node
1032 * @param pVM Pointer to the VM.
1033 */
1034static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
1035{
1036 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
1037
1038 PATMR3DisablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
1039 return 0;
1040}
1041
1042#endif /* VBOX_WITH_DEBUGGER */
1043
1044/**
1045 * Returns the host context pointer of the GC context structure
1046 *
 1047 * @returns Host context pointer to the PATM GC state structure, or NULL when HM is enabled.
1048 * @param pVM Pointer to the VM.
1049 */
1050VMMR3_INT_DECL(PPATMGCSTATE) PATMR3QueryGCStateHC(PVM pVM)
1051{
1052 AssertReturn(!HMIsEnabled(pVM), NULL);
1053 return pVM->patm.s.pGCStateHC;
1054}
1055
1056
1057/**
 1058 * Allows or disallows patching of privileged instructions executed by the guest OS.
1059 *
1060 * @returns VBox status code.
1061 * @param pUVM The user mode VM handle.
1062 * @param fAllowPatching Allow/disallow patching
1063 */
1064VMMR3DECL(int) PATMR3AllowPatching(PUVM pUVM, bool fAllowPatching)
1065{
1066 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1067 PVM pVM = pUVM->pVM;
1068 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1069
1070 if (!HMIsEnabled(pVM))
1071 pVM->fPATMEnabled = fAllowPatching;
1072 else
1073 Assert(!pVM->fPATMEnabled);
1074 return VINF_SUCCESS;
1075}
1076
1077
1078/**
1079 * Checks if the patch manager is enabled or not.
1080 *
1081 * @returns true if enabled, false if not (or if invalid handle).
1082 * @param pUVM The user mode VM handle.
1083 */
1084VMMR3DECL(bool) PATMR3IsEnabled(PUVM pUVM)
1085{
1086 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
1087 PVM pVM = pUVM->pVM;
1088 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
1089 return PATMIsEnabled(pVM);
1090}
1091
1092
1093/**
1094 * Convert a GC patch block pointer to a HC patch pointer
1095 *
1096 * @returns HC pointer or NULL if it's not a GC patch pointer
1097 * @param pVM Pointer to the VM.
1098 * @param pAddrGC GC pointer
1099 */
1100VMMR3_INT_DECL(void *) PATMR3GCPtrToHCPtr(PVM pVM, RTRCPTR pAddrGC)
1101{
1102 AssertReturn(!HMIsEnabled(pVM), NULL);
1103 RTRCUINTPTR offPatch = (RTRCUINTPTR)pAddrGC - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC;
1104 if (offPatch >= pVM->patm.s.cbPatchMem)
1105 {
1106 offPatch = (RTRCUINTPTR)pAddrGC - (RTRCUINTPTR)pVM->patm.s.pbPatchHelpersRC;
1107 if (offPatch >= pVM->patm.s.cbPatchHelpers)
1108 return NULL;
1109 return pVM->patm.s.pbPatchHelpersR3 + offPatch;
1110 }
1111 return pVM->patm.s.pPatchMemHC + offPatch;
1112}
1113
1114
1115/**
1116 * Convert guest context address to host context pointer
1117 *
 1118 * @returns Host context pointer or NULL in case of an error.
 1119 *
 1120 * @param pVM Pointer to the VM.
 1121 * @param pCacheRec Address conversion cache record
 1122 * @param pGCPtr Guest context pointer
 1123 *
 1124 *
1125 */
1126R3PTRTYPE(uint8_t *) patmR3GCVirtToHCVirt(PVM pVM, PPATMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr)
1127{
1128 int rc;
1129 R3PTRTYPE(uint8_t *) pHCPtr;
1130 uint32_t offset;
1131
1132 offset = (RTRCUINTPTR)pGCPtr - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC;
1133 if (offset < pVM->patm.s.cbPatchMem)
1134 {
1135#ifdef VBOX_STRICT
1136 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1137 Assert(pPatch); Assert(offset - pPatch->pPatchBlockOffset < pPatch->cbPatchBlockSize);
1138#endif
1139 return pVM->patm.s.pPatchMemHC + offset;
1140 }
1141 /* Note! We're _not_ including the patch helpers here. */
1142
1143 offset = pGCPtr & PAGE_OFFSET_MASK;
1144 if (pCacheRec->pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
1145 return pCacheRec->pPageLocStartHC + offset;
1146
1147 /* Release previous lock if any. */
1148 if (pCacheRec->Lock.pvMap)
1149 {
1150 PGMPhysReleasePageMappingLock(pVM, &pCacheRec->Lock);
1151 pCacheRec->Lock.pvMap = NULL;
1152 }
1153
1154 rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pGCPtr, (const void **)&pHCPtr, &pCacheRec->Lock);
1155 if (rc != VINF_SUCCESS)
1156 {
1157 AssertMsg(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("MMR3PhysGCVirt2HCVirtEx failed for %08X\n", pGCPtr));
1158 return NULL;
1159 }
1160 pCacheRec->pPageLocStartHC = (R3PTRTYPE(uint8_t*))((RTHCUINTPTR)pHCPtr & PAGE_BASE_HC_MASK);
1161 pCacheRec->pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
1162 return pHCPtr;
1163}
1164
1165
1166/**
1167 * Calculates and fills in all branch targets
1168 *
1169 * @returns VBox status code.
1170 * @param pVM Pointer to the VM.
1171 * @param pPatch Current patch block pointer
1172 *
1173 */
1174static int patmr3SetBranchTargets(PVM pVM, PPATCHINFO pPatch)
1175{
1176 int32_t displ;
1177
1178 PJUMPREC pRec = 0;
1179 unsigned nrJumpRecs = 0;
1180
1181 /*
1182 * Set all branch targets inside the patch block.
1183 * We remove all jump records as they are no longer needed afterwards.
1184 */
1185 while (true)
1186 {
1187 RCPTRTYPE(uint8_t *) pInstrGC;
1188 RCPTRTYPE(uint8_t *) pBranchTargetGC = 0;
1189
1190 pRec = (PJUMPREC)RTAvlPVRemoveBestFit(&pPatch->JumpTree, 0, true);
1191 if (pRec == 0)
1192 break;
1193
1194 nrJumpRecs++;
1195
1196 /* HC in patch block to GC in patch block. */
1197 pInstrGC = patmPatchHCPtr2PatchGCPtr(pVM, pRec->pJumpHC);
1198
1199 if (pRec->opcode == OP_CALL)
1200 {
1201 /* Special case: call function replacement patch from this patch block.
1202 */
1203 PPATMPATCHREC pFunctionRec = patmQueryFunctionPatch(pVM, pRec->pTargetGC);
1204 if (!pFunctionRec)
1205 {
1206 int rc;
1207
1208 if (PATMR3HasBeenPatched(pVM, pRec->pTargetGC) == false)
1209 rc = PATMR3InstallPatch(pVM, pRec->pTargetGC, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
1210 else
1211 rc = VERR_PATCHING_REFUSED; /* exists as a normal patch; can't use it */
1212
1213 if (RT_FAILURE(rc))
1214 {
1215 uint8_t *pPatchHC;
1216 RTRCPTR pPatchGC;
1217 RTRCPTR pOrgInstrGC;
1218
1219 pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pInstrGC, 0);
1220 Assert(pOrgInstrGC);
1221
1222 /* Failure for some reason -> mark exit point with int 3. */
1223 Log(("Failed to install function replacement patch (at %x) for reason %Rrc\n", pOrgInstrGC, rc));
1224
1225 pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pOrgInstrGC);
1226 Assert(pPatchGC);
1227
1228 pPatchHC = pVM->patm.s.pPatchMemHC + (pPatchGC - pVM->patm.s.pPatchMemGC);
1229
1230 /* Set a breakpoint at the very beginning of the recompiled instruction */
1231 *pPatchHC = 0xCC;
1232
1233 continue;
1234 }
1235 }
1236 else
1237 {
1238 Log(("Patch block %RRv called as function\n", pFunctionRec->patch.pPrivInstrGC));
1239 pFunctionRec->patch.flags |= PATMFL_CODE_REFERENCED;
1240 }
1241
1242 pBranchTargetGC = PATMR3QueryPatchGCPtr(pVM, pRec->pTargetGC);
1243 }
1244 else
1245 pBranchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pRec->pTargetGC);
1246
1247 if (pBranchTargetGC == 0)
1248 {
1249 AssertMsgFailed(("patmr3SetBranchTargets: patmGuestGCPtrToPatchGCPtr failed for %08X\n", pRec->pTargetGC));
1250 return VERR_PATCHING_REFUSED;
1251 }
1252 /* Our jumps *always* have a dword displacement (to make things easier). */
1253 Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
1254 displ = pBranchTargetGC - (pInstrGC + pRec->offDispl + sizeof(RTRCPTR));
1255 *(RTRCPTR *)(pRec->pJumpHC + pRec->offDispl) = displ;
1256 Log(("Set branch target %d to %08X : %08x - (%08x + %d + %d)\n", nrJumpRecs, displ, pBranchTargetGC, pInstrGC, pRec->offDispl, sizeof(RTRCPTR)));
1257 }
1258 Assert(nrJumpRecs == pPatch->nrJumpRecs);
1259 Assert(pPatch->JumpTree == 0);
1260 return VINF_SUCCESS;
1261}
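/* Worked example for the displacement computed above (addresses are made up): a recompiled
 * near jump at patch address 0xa0001000 whose 32-bit displacement field starts at offset 1
 * (pRec->offDispl == 1) and whose branch target is 0xa0001020 gets
 *
 *     displ = 0xa0001020 - (0xa0001000 + 1 + sizeof(RTRCPTR)) = 0x1B
 *
 * i.e. the displacement is relative to the first byte following the dword field. */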
1262
1263/**
1264 * Add an illegal instruction record
1265 *
1266 * @param pVM Pointer to the VM.
1267 * @param pPatch Patch structure ptr
1268 * @param pInstrGC Guest context pointer to privileged instruction
1269 *
1270 */
1271static void patmAddIllegalInstrRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1272{
1273 PAVLPVNODECORE pRec;
1274
1275 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1276 Assert(pRec);
1277 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
1278
1279 bool ret = RTAvlPVInsert(&pPatch->pTempInfo->IllegalInstrTree, pRec);
1280 Assert(ret); NOREF(ret);
1281 pPatch->pTempInfo->nrIllegalInstr++;
1282}
1283
1284static bool patmIsIllegalInstr(PPATCHINFO pPatch, RTRCPTR pInstrGC)
1285{
1286 PAVLPVNODECORE pRec;
1287
1288 pRec = RTAvlPVGet(&pPatch->pTempInfo->IllegalInstrTree, (AVLPVKEY)(uintptr_t)pInstrGC);
1289 if (pRec)
1290 return true;
1291 else
1292 return false;
1293}
1294
1295/**
1296 * Add a patch to guest lookup record
1297 *
1298 * @param pVM Pointer to the VM.
1299 * @param pPatch Patch structure ptr
 1300 * @param pPatchInstrHC Host context pointer to the patch instruction in the patch block
1301 * @param pInstrGC Guest context pointer to privileged instruction
1302 * @param enmType Lookup type
1303 * @param fDirty Dirty flag
1304 *
1305 * @note Be extremely careful with this function. Make absolutely sure the guest
1306 * address is correct! (to avoid executing instructions twice!)
1307 */
1308void patmR3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC, PATM_LOOKUP_TYPE enmType, bool fDirty)
1309{
1310 bool ret;
1311 PRECPATCHTOGUEST pPatchToGuestRec;
1312 PRECGUESTTOPATCH pGuestToPatchRec;
1313 uint32_t PatchOffset = pPatchInstrHC - pVM->patm.s.pPatchMemHC; /* Offset in memory reserved for PATM. */
1314
1315 LogFlowFunc(("pVM=%#p pPatch=%#p pPatchInstrHC=%#p pInstrGC=%#x enmType=%d fDirty=%RTbool\n",
1316 pVM, pPatch, pPatchInstrHC, pInstrGC, enmType, fDirty));
1317
1318 if (enmType == PATM_LOOKUP_PATCH2GUEST)
1319 {
1320 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1321 if (pPatchToGuestRec && pPatchToGuestRec->Core.Key == PatchOffset)
1322 return; /* already there */
1323
1324 Assert(!pPatchToGuestRec);
1325 }
1326#ifdef VBOX_STRICT
1327 else
1328 {
1329 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1330 Assert(!pPatchToGuestRec);
1331 }
1332#endif
1333
1334 pPatchToGuestRec = (PRECPATCHTOGUEST)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(RECPATCHTOGUEST) + sizeof(RECGUESTTOPATCH));
1335 Assert(pPatchToGuestRec);
1336 pPatchToGuestRec->Core.Key = PatchOffset;
1337 pPatchToGuestRec->pOrgInstrGC = pInstrGC;
1338 pPatchToGuestRec->enmType = enmType;
1339 pPatchToGuestRec->fDirty = fDirty;
1340
1341 ret = RTAvlU32Insert(&pPatch->Patch2GuestAddrTree, &pPatchToGuestRec->Core);
1342 Assert(ret);
1343
1344 /* GC to patch address */
1345 if (enmType == PATM_LOOKUP_BOTHDIR)
1346 {
1347 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
1348 if (!pGuestToPatchRec)
1349 {
1350 pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1351 pGuestToPatchRec->Core.Key = pInstrGC;
1352 pGuestToPatchRec->PatchOffset = PatchOffset;
1353
1354 ret = RTAvlU32Insert(&pPatch->Guest2PatchAddrTree, &pGuestToPatchRec->Core);
1355 Assert(ret);
1356 }
1357 }
1358
1359 pPatch->nrPatch2GuestRecs++;
1360}
1361
1362
1363/**
1364 * Removes a patch to guest lookup record
1365 *
1366 * @param pVM Pointer to the VM.
1367 * @param pPatch Patch structure ptr
1368 * @param pPatchInstrGC Guest context pointer to patch block
1369 */
1370void patmr3RemoveP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pPatchInstrGC)
1371{
1372 PAVLU32NODECORE pNode;
1373 PAVLU32NODECORE pNode2;
1374 PRECPATCHTOGUEST pPatchToGuestRec;
1375 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
1376
1377 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1378 Assert(pPatchToGuestRec);
1379 if (pPatchToGuestRec)
1380 {
1381 if (pPatchToGuestRec->enmType == PATM_LOOKUP_BOTHDIR)
1382 {
1383 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1384
1385 Assert(pGuestToPatchRec->Core.Key);
1386 pNode2 = RTAvlU32Remove(&pPatch->Guest2PatchAddrTree, pGuestToPatchRec->Core.Key);
1387 Assert(pNode2);
1388 }
1389 pNode = RTAvlU32Remove(&pPatch->Patch2GuestAddrTree, pPatchToGuestRec->Core.Key);
1390 Assert(pNode);
1391
1392 MMR3HeapFree(pPatchToGuestRec);
1393 pPatch->nrPatch2GuestRecs--;
1394 }
1395}
1396
1397
1398/**
1399 * RTAvlPVDestroy callback.
1400 */
1401static DECLCALLBACK(int) patmEmptyTreePVCallback(PAVLPVNODECORE pNode, void *)
1402{
1403 MMR3HeapFree(pNode);
1404 return 0;
1405}
1406
1407/**
1408 * Empty the specified tree (PV tree, MMR3 heap)
1409 *
1410 * @param pVM Pointer to the VM.
1411 * @param ppTree Tree to empty
1412 */
1413static void patmEmptyTree(PVM pVM, PAVLPVNODECORE *ppTree)
1414{
1415 NOREF(pVM);
1416 RTAvlPVDestroy(ppTree, patmEmptyTreePVCallback, NULL);
1417}
1418
1419
1420/**
1421 * RTAvlU32Destroy callback.
1422 */
1423static DECLCALLBACK(int) patmEmptyTreeU32Callback(PAVLU32NODECORE pNode, void *)
1424{
1425 MMR3HeapFree(pNode);
1426 return 0;
1427}
1428
1429/**
1430 * Empty the specified tree (U32 tree, MMR3 heap)
1431 *
1432 * @param pVM Pointer to the VM.
1433 * @param ppTree Tree to empty
1434 */
1435static void patmEmptyTreeU32(PVM pVM, PPAVLU32NODECORE ppTree)
1436{
1437 NOREF(pVM);
1438 RTAvlU32Destroy(ppTree, patmEmptyTreeU32Callback, NULL);
1439}
1440
1441
1442/**
1443 * Analyses the instructions following the cli for compliance with our heuristics for cli & pushf
1444 *
1445 * @returns VBox status code.
1446 * @param pVM Pointer to the VM.
1447 * @param pCpu CPU disassembly state
1448 * @param pInstrGC Guest context pointer to privileged instruction
1449 * @param pCurInstrGC Guest context pointer to the current instruction
1450 * @param pCacheRec Cache record ptr
1451 *
1452 */
1453static int patmAnalyseBlockCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1454{
1455 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1456 bool fIllegalInstr = false;
1457
1458 /*
1459 * Preliminary heuristics:
1460 * - no call instructions without a fixed displacement between cli and sti/popf
1461 * - no jumps in the instructions following cli (4+ bytes; enough for the replacement jump (5 bytes))
1462 * - no nested pushf/cli
1463 * - sti/popf should be the (eventual) target of all branches
1464 * - no near or far returns; no int xx, no into
1465 *
1466 * Note: later on we can relax these guidelines if the need arises.
1467 */
1468
1469 /* Bail out if the patch gets too big. */
1470 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1471 {
1472 Log(("Code block too big (%x) for patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1473 fIllegalInstr = true;
1474 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1475 }
1476 else
1477 {
1478 /* No unconditional jumps or calls without fixed displacements. */
1479 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1480 && (pCpu->pCurInstr->uOpcode == OP_JMP || pCpu->pCurInstr->uOpcode == OP_CALL)
1481 )
1482 {
1483 Assert(pCpu->Param1.cb <= 4 || pCpu->Param1.cb == 6);
1484 if ( pCpu->Param1.cb == 6 /* far call/jmp */
1485 || (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1486 || (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1487 )
1488 {
1489 fIllegalInstr = true;
1490 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1491 }
1492 }
1493
1494 /* An unconditional (short) jump right after a cli is a potential problem; we will overwrite whichever function comes afterwards */
1495 if (pPatch->opcode == OP_CLI && pCpu->pCurInstr->uOpcode == OP_JMP)
1496 {
1497 if ( pCurInstrGC > pPatch->pPrivInstrGC
1498 && pCurInstrGC + pCpu->cbInstr < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1499 {
1500 Log(("Dangerous unconditional jump ends in our generated patch jump!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1501 /* We turn this one into a int 3 callable patch. */
1502 pPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
1503 }
1504 }
1505 else
1506 /* no nested pushfs just yet; nested cli is allowed for cli patches though. */
1507 if (pPatch->opcode == OP_PUSHF)
1508 {
1509 if (pCurInstrGC != pInstrGC && pCpu->pCurInstr->uOpcode == OP_PUSHF)
1510 {
1511 fIllegalInstr = true;
1512 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1513 }
1514 }
1515
1516 /* no far returns */
1517 if (pCpu->pCurInstr->uOpcode == OP_RETF)
1518 {
1519 pPatch->pTempInfo->nrRetInstr++;
1520 fIllegalInstr = true;
1521 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1522 }
1523 else if ( pCpu->pCurInstr->uOpcode == OP_INT3
1524 || pCpu->pCurInstr->uOpcode == OP_INT
1525 || pCpu->pCurInstr->uOpcode == OP_INTO)
1526 {
1527 /* No int xx or into either. */
1528 fIllegalInstr = true;
1529 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1530 }
1531 }
1532
1533 pPatch->cbPatchBlockSize += pCpu->cbInstr;
1534
1535 /* Illegal instruction -> end of analysis phase for this code block */
1536 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1537 return VINF_SUCCESS;
1538
1539 /* Check for exit points. */
1540 switch (pCpu->pCurInstr->uOpcode)
1541 {
1542 case OP_SYSEXIT:
1543 return VINF_SUCCESS; /* duplicate it; will fault or emulated in GC. */
1544
1545 case OP_SYSENTER:
1546 case OP_ILLUD2:
1547 /* This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further. */
1548 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1549 return VINF_SUCCESS;
1550
1551 case OP_STI:
1552 case OP_POPF:
1553 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION)));
1554 /* If our exit point lies within the generated patch jump, then we have to refuse!! */
1555 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1556 {
1557 Log(("Exit point within patch jump itself!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1558 return VERR_PATCHING_REFUSED;
1559 }
1560 if (pPatch->opcode == OP_PUSHF)
1561 {
1562 if (pCpu->pCurInstr->uOpcode == OP_POPF)
1563 {
1564 if (pPatch->cbPatchBlockSize >= SIZEOF_NEARJUMP32)
1565 return VINF_SUCCESS;
1566
1567 /* Or else we need to duplicate more instructions, because we can't jump back yet! */
1568 Log(("WARNING: End of block reached, but we need to duplicate some extra instruction to avoid a conflict with the patch jump\n"));
1569 pPatch->flags |= PATMFL_CHECK_SIZE;
1570 }
1571 break; /* sti doesn't mark the end of a pushf block; only popf does. */
1572 }
1573 /* else: fall through. */
1574 case OP_RETN: /* exit point for function replacement */
1575 return VINF_SUCCESS;
1576
1577 case OP_IRET:
1578 return VINF_SUCCESS; /* exitpoint */
1579
1580 case OP_CPUID:
1581 case OP_CALL:
1582 case OP_JMP:
1583 break;
1584
1585#ifdef VBOX_WITH_SAFE_STR /** @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table */
1586 case OP_STR:
1587 break;
1588#endif
1589
1590 default:
1591 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
1592 {
1593 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1594 return VINF_SUCCESS; /* exit point */
1595 }
1596 break;
1597 }
1598
1599 /* If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump. */
1600 if ((pPatch->flags & PATMFL_CHECK_SIZE) && pPatch->cbPatchBlockSize > SIZEOF_NEARJUMP32 && !(pCpu->pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW))
1601 {
1602 /* The end marker for this kind of patch is any instruction at a location outside our patch jump. */
1603 Log(("End of block at %RRv size %d\n", pCurInstrGC, pCpu->cbInstr));
1604 return VINF_SUCCESS;
1605 }
1606
1607 return VWRN_CONTINUE_ANALYSIS;
1608}
1609
1610/**
1611 * Analyses the instructions inside a function for compliance
1612 *
1613 * @returns VBox status code.
1614 * @param pVM Pointer to the VM.
1615 * @param pCpu CPU disassembly state
1616 * @param pInstrGC Guest context pointer to privileged instruction
1617 * @param pCurInstrGC Guest context pointer to the current instruction
1618 * @param pCacheRec Cache record ptr
1619 *
1620 */
1621static int patmAnalyseFunctionCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1622{
1623 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1624 bool fIllegalInstr = false;
1625 NOREF(pInstrGC);
1626
1627 // Preliminary heuristics:
1628 // - no call instructions
1629 // - ret ends a block
1630
1631 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
1632
1633 // bail out if the patch gets too big
1634 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1635 {
1636 Log(("Code block too big (%x) for function patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1637 fIllegalInstr = true;
1638 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1639 }
1640 else
1641 {
1642 // no unconditional jumps or calls without fixed displacements
1643 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1644 && (pCpu->pCurInstr->uOpcode == OP_JMP || pCpu->pCurInstr->uOpcode == OP_CALL)
1645 )
1646 {
1647 Assert(pCpu->Param1.cb <= 4 || pCpu->Param1.cb == 6);
1648 if ( pCpu->Param1.cb == 6 /* far call/jmp */
1649 || (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1650 || (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1651 )
1652 {
1653 fIllegalInstr = true;
1654 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1655 }
1656 }
1657 else /* no far returns */
1658 if (pCpu->pCurInstr->uOpcode == OP_RETF)
1659 {
1660 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1661 fIllegalInstr = true;
1662 }
1663 else /* no int xx or into either */
1664 if (pCpu->pCurInstr->uOpcode == OP_INT3 || pCpu->pCurInstr->uOpcode == OP_INT || pCpu->pCurInstr->uOpcode == OP_INTO)
1665 {
1666 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1667 fIllegalInstr = true;
1668 }
1669
1670 #if 0
1671 ///@todo we can handle certain in/out and privileged instructions in the guest context
1672 if (pCpu->pCurInstr->fOpType & DISOPTYPE_PRIVILEGED && pCpu->pCurInstr->uOpcode != OP_STI)
1673 {
1674 Log(("Illegal instructions for function patch!!\n"));
1675 return VERR_PATCHING_REFUSED;
1676 }
1677 #endif
1678 }
1679
1680 pPatch->cbPatchBlockSize += pCpu->cbInstr;
1681
1682 /* Illegal instruction -> end of analysis phase for this code block */
1683 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1684 {
1685 return VINF_SUCCESS;
1686 }
1687
1688 // Check for exit points
1689 switch (pCpu->pCurInstr->uOpcode)
1690 {
1691 case OP_ILLUD2:
1692 // This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further
1693 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1694 return VINF_SUCCESS;
1695
1696 case OP_IRET:
1697 case OP_SYSEXIT: /* will fault or emulated in GC */
1698 case OP_RETN:
1699 return VINF_SUCCESS;
1700
1701#ifdef VBOX_WITH_SAFE_STR /** @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table */
1702 case OP_STR:
1703 break;
1704#endif
1705
1706 case OP_POPF:
1707 case OP_STI:
1708 return VWRN_CONTINUE_ANALYSIS;
1709 default:
1710 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
1711 {
1712 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1713 return VINF_SUCCESS; /* exit point */
1714 }
1715 return VWRN_CONTINUE_ANALYSIS;
1716 }
1717
1718 return VWRN_CONTINUE_ANALYSIS;
1719}
1720
1721/**
1722 * Recompiles the instructions in a code block
1723 *
1724 * @returns VBox status code.
1725 * @param pVM Pointer to the VM.
1726 * @param pCpu CPU disassembly state
1727 * @param pInstrGC Guest context pointer to privileged instruction
1728 * @param pCurInstrGC Guest context pointer to the current instruction
1729 * @param pCacheRec Cache record ptr
1730 *
1731 */
1732static int patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1733{
1734 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1735 int rc = VINF_SUCCESS;
1736 bool fInhibitIRQInstr = false; /* did the instruction cause PATMFL_INHIBITIRQS to be set? */
1737
1738 LogFlow(("patmRecompileCallback %RRv %RRv\n", pInstrGC, pCurInstrGC));
1739
1740 if ( patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pCurInstrGC) != 0
1741 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT)) /* do not do this when the next instruction *must* be executed! */
1742 {
1743 /*
1744 * Been there, done that; so insert a jump (we don't want to duplicate code)
1745 * no need to record this instruction as it's glue code that never crashes (it had better not!)
1746 */
1747 Log(("patmRecompileCallback: jump to code we've recompiled before %RRv!\n", pCurInstrGC));
1748 return patmPatchGenRelJump(pVM, pPatch, pCurInstrGC, OP_JMP, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1749 }
1750
1751 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1752 {
1753 rc = patmAnalyseFunctionCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1754 }
1755 else
1756 rc = patmAnalyseBlockCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1757
1758 if (RT_FAILURE(rc))
1759 return rc;
1760
1761 /* Note: Never do a direct return unless a failure is encountered! */
1762
1763 /* Clear recompilation of next instruction flag; we are doing that right here. */
1764 if (pPatch->flags & PATMFL_RECOMPILE_NEXT)
1765 pPatch->flags &= ~PATMFL_RECOMPILE_NEXT;
1766
1767 /* Add lookup record for patch to guest address translation */
1768 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
1769
1770 /* Update lowest and highest instruction address for this patch */
1771 if (pCurInstrGC < pPatch->pInstrGCLowest)
1772 pPatch->pInstrGCLowest = pCurInstrGC;
1773 else
1774 if (pCurInstrGC > pPatch->pInstrGCHighest)
1775 pPatch->pInstrGCHighest = pCurInstrGC + pCpu->cbInstr;
1776
1777 /* Illegal instruction -> end of recompile phase for this code block. */
1778 if (patmIsIllegalInstr(pPatch, pCurInstrGC))
1779 {
1780 Log(("Illegal instruction at %RRv -> mark with int 3\n", pCurInstrGC));
1781 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1782 goto end;
1783 }
1784
1785 /* For our first attempt, we'll handle only simple relative jumps (immediate offset coded in instruction).
1786 * Indirect calls are handled below.
1787 */
1788 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1789 && (pCpu->pCurInstr->uOpcode != OP_CALL || (pPatch->flags & PATMFL_SUPPORT_CALLS))
1790 && (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) == OP_PARM_J))
1791 {
1792 RCPTRTYPE(uint8_t *) pTargetGC = PATMResolveBranch(pCpu, pCurInstrGC);
1793 if (pTargetGC == 0)
1794 {
1795 Log(("We don't support far jumps here!! (%08X)\n", pCpu->Param1.fUse));
1796 return VERR_PATCHING_REFUSED;
1797 }
1798
1799 if (pCpu->pCurInstr->uOpcode == OP_CALL)
1800 {
1801 Assert(!PATMIsPatchGCAddr(pVM, pTargetGC));
1802 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, pTargetGC, false);
1803 if (RT_FAILURE(rc))
1804 goto end;
1805 }
1806 else
1807 rc = patmPatchGenRelJump(pVM, pPatch, pTargetGC, pCpu->pCurInstr->uOpcode, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1808
1809 if (RT_SUCCESS(rc))
1810 rc = VWRN_CONTINUE_RECOMPILE;
1811
1812 goto end;
1813 }
1814
1815 switch (pCpu->pCurInstr->uOpcode)
1816 {
1817 case OP_CLI:
1818 {
1819 /* If a cli is found while duplicating instructions for another patch, then it's of vital importance to continue
1820 * until we've found the proper exit point(s).
1821 */
1822 if ( pCurInstrGC != pInstrGC
1823 && !(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1824 )
1825 {
1826 Log(("cli instruction found in other instruction patch block; force it to continue & find an exit point\n"));
1827 pPatch->flags &= ~(PATMFL_CHECK_SIZE | PATMFL_SINGLE_INSTRUCTION);
1828 }
1829 /* Set by irq inhibition; no longer valid now. */
1830 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1831
1832 rc = patmPatchGenCli(pVM, pPatch);
1833 if (RT_SUCCESS(rc))
1834 rc = VWRN_CONTINUE_RECOMPILE;
1835 break;
1836 }
1837
1838 case OP_MOV:
1839 if (pCpu->pCurInstr->fOpType & DISOPTYPE_POTENTIALLY_DANGEROUS)
1840 {
1841 /* mov ss, src? */
1842 if ( (pCpu->Param1.fUse & DISUSE_REG_SEG)
1843 && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS))
1844 {
1845 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1846 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1847 /** @todo this could cause a fault (ring 0 selector being loaded in ring 1) */
1848 }
1849#if 0 /* necessary for Haiku */
1850 else
1851 if ( (pCpu->Param2.fUse & DISUSE_REG_SEG)
1852 && (pCpu->Param2.Base.idxSegReg == USE_REG_SS)
1853 && (pCpu->Param1.fUse & (DISUSE_REG_GEN32|DISUSE_REG_GEN16))) /** @todo memory operand must in theory be handled too */
1854 {
1855 /* mov GPR, ss */
1856 rc = patmPatchGenMovFromSS(pVM, pPatch, pCpu, pCurInstrGC);
1857 if (RT_SUCCESS(rc))
1858 rc = VWRN_CONTINUE_RECOMPILE;
1859 break;
1860 }
1861#endif
1862 }
1863 goto duplicate_instr;
1864
1865 case OP_POP:
1866 /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS)) */
1867 if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_SS)
1868 {
1869 Assert(pCpu->pCurInstr->fOpType & DISOPTYPE_INHIBIT_IRQS);
1870
1871 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1872 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1873 }
1874 goto duplicate_instr;
1875
1876 case OP_STI:
1877 {
1878 RTRCPTR pNextInstrGC = 0; /* by default no inhibit irq */
1879
1880 /* In a sequence of instructions that inhibit irqs, only the first one actually inhibits irqs. */
1881 if (!(pPatch->flags & PATMFL_INHIBIT_IRQS))
1882 {
1883 pPatch->flags |= PATMFL_INHIBIT_IRQS | PATMFL_GENERATE_JUMPTOGUEST;
1884 fInhibitIRQInstr = true;
1885 pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
1886 Log(("Inhibit irqs for instruction OP_STI at %RRv\n", pCurInstrGC));
1887 }
1888 rc = patmPatchGenSti(pVM, pPatch, pCurInstrGC, pNextInstrGC);
1889
1890 if (RT_SUCCESS(rc))
1891 {
1892 DISCPUSTATE cpu = *pCpu;
1893 unsigned cbInstr;
1894 int disret;
1895 RCPTRTYPE(uint8_t *) pReturnInstrGC;
1896
1897 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1898
1899 pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
1900 { /* Force pNextInstrHC out of scope after using it */
1901 uint8_t *pNextInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
1902 if (pNextInstrHC == NULL)
1903 {
1904 AssertFailed();
1905 return VERR_PATCHING_REFUSED;
1906 }
1907
1908 // Disassemble the next instruction
1909 disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
1910 }
1911 if (disret == false)
1912 {
1913 AssertMsgFailed(("STI: Disassembly failed (probably page not present) -> return to caller\n"));
1914 return VERR_PATCHING_REFUSED;
1915 }
1916 pReturnInstrGC = pNextInstrGC + cbInstr;
1917
1918 if ( (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1919 || pReturnInstrGC <= pInstrGC
1920 || pReturnInstrGC - pInstrGC >= SIZEOF_NEARJUMP32
1921 )
1922 {
1923 /* Not an exit point for function duplication patches */
1924 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
1925 && RT_SUCCESS(rc))
1926 {
1927 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST; /* Don't generate a jump back */
1928 rc = VWRN_CONTINUE_RECOMPILE;
1929 }
1930 else
1931 rc = VINF_SUCCESS; //exit point
1932 }
1933 else {
1934 Log(("PATM: sti occurred too soon; refusing patch!\n"));
1935 rc = VERR_PATCHING_REFUSED; //not allowed!!
1936 }
1937 }
1938 break;
1939 }
1940
1941 case OP_POPF:
1942 {
1943 bool fGenerateJmpBack = (pCurInstrGC + pCpu->cbInstr - pInstrGC >= SIZEOF_NEARJUMP32);
1944
1945 /* Not an exit point for IDT handler or function replacement patches */
1946 /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
1947 if (pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_DUPLICATE_FUNCTION))
1948 fGenerateJmpBack = false;
1949
1950 rc = patmPatchGenPopf(pVM, pPatch, pCurInstrGC + pCpu->cbInstr, !!(pCpu->fPrefix & DISPREFIX_OPSIZE), fGenerateJmpBack);
1951 if (RT_SUCCESS(rc))
1952 {
1953 if (fGenerateJmpBack == false)
1954 {
1955 /* Not an exit point for IDT handler or function replacement patches */
1956 rc = VWRN_CONTINUE_RECOMPILE;
1957 }
1958 else
1959 {
1960 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1961 rc = VINF_SUCCESS; /* exit point! */
1962 }
1963 }
1964 break;
1965 }
1966
1967 case OP_PUSHF:
1968 rc = patmPatchGenPushf(pVM, pPatch, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1969 if (RT_SUCCESS(rc))
1970 rc = VWRN_CONTINUE_RECOMPILE;
1971 break;
1972
1973 case OP_PUSH:
1974 /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS)) */
1975 if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_CS)
1976 {
1977 rc = patmPatchGenPushCS(pVM, pPatch);
1978 if (RT_SUCCESS(rc))
1979 rc = VWRN_CONTINUE_RECOMPILE;
1980 break;
1981 }
1982 goto duplicate_instr;
1983
1984 case OP_IRET:
1985 Log(("IRET at %RRv\n", pCurInstrGC));
1986 rc = patmPatchGenIret(pVM, pPatch, pCurInstrGC, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1987 if (RT_SUCCESS(rc))
1988 {
1989 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1990 rc = VINF_SUCCESS; /* exit point by definition */
1991 }
1992 break;
1993
1994 case OP_ILLUD2:
1995 /* This appears to be some kind of kernel panic in Linux 2.4; no point in continuing */
1996 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1997 if (RT_SUCCESS(rc))
1998 rc = VINF_SUCCESS; /* exit point by definition */
1999 Log(("Illegal opcode (0xf 0xb)\n"));
2000 break;
2001
2002 case OP_CPUID:
2003 rc = patmPatchGenCpuid(pVM, pPatch, pCurInstrGC);
2004 if (RT_SUCCESS(rc))
2005 rc = VWRN_CONTINUE_RECOMPILE;
2006 break;
2007
2008 case OP_STR:
2009#ifdef VBOX_WITH_SAFE_STR /* @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table and move OP_STR into #ifndef */
2010 /* Now safe because our shadow TR entry is identical to the guest's. */
2011 goto duplicate_instr;
2012#endif
2013 case OP_SLDT:
2014 rc = patmPatchGenSldtStr(pVM, pPatch, pCpu, pCurInstrGC);
2015 if (RT_SUCCESS(rc))
2016 rc = VWRN_CONTINUE_RECOMPILE;
2017 break;
2018
2019 case OP_SGDT:
2020 case OP_SIDT:
2021 rc = patmPatchGenSxDT(pVM, pPatch, pCpu, pCurInstrGC);
2022 if (RT_SUCCESS(rc))
2023 rc = VWRN_CONTINUE_RECOMPILE;
2024 break;
2025
2026 case OP_RETN:
2027 /* retn is an exit point for function patches */
2028 rc = patmPatchGenRet(pVM, pPatch, pCpu, pCurInstrGC);
2029 if (RT_SUCCESS(rc))
2030 rc = VINF_SUCCESS; /* exit point by definition */
2031 break;
2032
2033 case OP_SYSEXIT:
2034 /* Duplicate it, so it can be emulated in GC (or fault). */
2035 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
2036 if (RT_SUCCESS(rc))
2037 rc = VINF_SUCCESS; /* exit point by definition */
2038 break;
2039
2040 case OP_CALL:
2041 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
2042 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
2043 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
2044 */
2045 Assert(pCpu->Param1.cb == 4 || pCpu->Param1.cb == 6);
2046 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->Param1.cb == 4 /* no far calls! */)
2047 {
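            /* The 0xDEADBEEF target below is only a placeholder; for indirect calls the
             * actual destination is presumably resolved at run time by the generated
             * patch code rather than fixed up here. */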
2048 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, (RTRCPTR)0xDEADBEEF, true);
2049 if (RT_SUCCESS(rc))
2050 {
2051 rc = VWRN_CONTINUE_RECOMPILE;
2052 }
2053 break;
2054 }
2055 goto gen_illegal_instr;
2056
2057 case OP_JMP:
2058 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
2059 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
2060 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
2061 */
2062 Assert(pCpu->Param1.cb == 4 || pCpu->Param1.cb == 6);
2063 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->Param1.cb == 4 /* no far jumps! */)
2064 {
2065 rc = patmPatchGenJump(pVM, pPatch, pCpu, pCurInstrGC);
2066 if (RT_SUCCESS(rc))
2067 rc = VINF_SUCCESS; /* end of branch */
2068 break;
2069 }
2070 goto gen_illegal_instr;
2071
2072 case OP_INT3:
2073 case OP_INT:
2074 case OP_INTO:
2075 goto gen_illegal_instr;
2076
2077 case OP_MOV_DR:
2078 /* Note: currently we let DRx writes cause a trap 0d (#GP); our trap handler will decide whether to interpret it or not. */
2079 if (pCpu->pCurInstr->fParam2 == OP_PARM_Dd)
2080 {
2081 rc = patmPatchGenMovDebug(pVM, pPatch, pCpu);
2082 if (RT_SUCCESS(rc))
2083 rc = VWRN_CONTINUE_RECOMPILE;
2084 break;
2085 }
2086 goto duplicate_instr;
2087
2088 case OP_MOV_CR:
2089 /* Note: currently we let CRx writes cause a trap 0d (#GP); our trap handler will decide whether to interpret it or not. */
2090 if (pCpu->pCurInstr->fParam2 == OP_PARM_Cd)
2091 {
2092 rc = patmPatchGenMovControl(pVM, pPatch, pCpu);
2093 if (RT_SUCCESS(rc))
2094 rc = VWRN_CONTINUE_RECOMPILE;
2095 break;
2096 }
2097 goto duplicate_instr;
2098
2099 default:
2100 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_CONTROLFLOW | DISOPTYPE_PRIVILEGED_NOTRAP))
2101 {
2102gen_illegal_instr:
2103 rc = patmPatchGenIllegalInstr(pVM, pPatch);
2104 if (RT_SUCCESS(rc))
2105 rc = VINF_SUCCESS; /* exit point by definition */
2106 }
2107 else
2108 {
2109duplicate_instr:
2110 Log(("patmPatchGenDuplicate\n"));
2111 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
2112 if (RT_SUCCESS(rc))
2113 rc = VWRN_CONTINUE_RECOMPILE;
2114 }
2115 break;
2116 }
2117
2118end:
2119
2120 if ( !fInhibitIRQInstr
2121 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2122 {
2123 int rc2;
2124 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
2125
2126 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2127 Log(("Clear inhibit IRQ flag at %RRv\n", pCurInstrGC));
2128 if (pPatch->flags & PATMFL_GENERATE_JUMPTOGUEST)
2129 {
2130 Log(("patmRecompileCallback: generate jump back to guest (%RRv) after fused instruction\n", pNextInstrGC));
2131
2132 rc2 = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2133 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
2134 rc = VINF_SUCCESS; /* end of the line */
2135 }
2136 else
2137 {
2138 rc2 = patmPatchGenClearInhibitIRQ(pVM, pPatch, pNextInstrGC);
2139 }
2140 if (RT_FAILURE(rc2))
2141 rc = rc2;
2142 }
2143
2144 if (RT_SUCCESS(rc))
2145 {
2146 // If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
2147 if ( (pPatch->flags & PATMFL_CHECK_SIZE)
2148 && pCurInstrGC + pCpu->cbInstr - pInstrGC >= SIZEOF_NEARJUMP32
2149 && !(pCpu->pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW)
2150 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT) /* do not do this when the next instruction *must* be executed! */
2151 )
2152 {
2153 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
2154
2155 // The end marker for this kind of patch is any instruction at a location outside our patch jump
2156 Log(("patmRecompileCallback: end found for single instruction patch at %RRv cbInstr %d\n", pNextInstrGC, pCpu->cbInstr));
2157
2158 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC);
2159 AssertRC(rc);
2160 }
2161 }
2162 return rc;
2163}
2164
2165
2166#ifdef LOG_ENABLED
2167
2168/**
2169 * Adds a disasm jump record (temporary; used to prevent duplicate analysis)
2170 *
2171 * @param pVM Pointer to the VM.
2172 * @param pPatch Patch structure ptr
2173 * @param pInstrGC Guest context pointer to privileged instruction
2174 *
2175 */
2176static void patmPatchAddDisasmJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
2177{
2178 PAVLPVNODECORE pRec;
2179
2180 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
2181 Assert(pRec);
2182 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
2183
2184 int ret = RTAvlPVInsert(&pPatch->pTempInfo->DisasmJumpTree, pRec);
2185 Assert(ret);
2186}
2187
2188/**
2189 * Checks if jump target has been analysed before.
2190 *
2191 * @returns true if the jump target has been analysed before, false otherwise.
2192 * @param pPatch Patch struct
2193 * @param pInstrGC Jump target
2194 *
2195 */
2196static bool patmIsKnownDisasmJump(PPATCHINFO pPatch, RTRCPTR pInstrGC)
2197{
2198 PAVLPVNODECORE pRec;
2199
2200 pRec = RTAvlPVGet(&pPatch->pTempInfo->DisasmJumpTree, (AVLPVKEY)(uintptr_t)pInstrGC);
2201 if (pRec)
2202 return true;
2203 return false;
2204}
2205
2206/**
2207 * For proper disassembly of the final patch block
2208 *
2209 * @returns VBox status code.
2210 * @param pVM Pointer to the VM.
2211 * @param pCpu CPU disassembly state
2212 * @param pInstrGC Guest context pointer to privileged instruction
2213 * @param pCurInstrGC Guest context pointer to the current instruction
2214 * @param pCacheRec Cache record ptr
2215 *
2216 */
2217int patmr3DisasmCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
2218{
2219 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2220 NOREF(pInstrGC);
2221
2222 if (pCpu->pCurInstr->uOpcode == OP_INT3)
2223 {
2224 /* Could be an int3 inserted in a call patch. Check to be sure */
2225 DISCPUSTATE cpu;
2226 RTRCPTR pOrgJumpGC;
2227
2228 pOrgJumpGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2229
2230 { /* Force pOrgJumpHC out of scope after using it */
2231 uint8_t *pOrgJumpHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pOrgJumpGC);
2232
2233 bool disret = patmR3DisInstr(pVM, pPatch, pOrgJumpGC, pOrgJumpHC, PATMREAD_ORGCODE, &cpu, NULL);
2234 if (!disret || cpu.pCurInstr->uOpcode != OP_CALL || cpu.Param1.cb != 4 /* only near calls */)
2235 return VINF_SUCCESS;
2236 }
2237 return VWRN_CONTINUE_ANALYSIS;
2238 }
2239
2240 if ( pCpu->pCurInstr->uOpcode == OP_ILLUD2
2241 && PATMIsPatchGCAddr(pVM, pCurInstrGC))
2242 {
2243 /* the indirect call patch contains an 0xF/0xB illegal instr to call for assistance; check for this and continue */
2244 return VWRN_CONTINUE_ANALYSIS;
2245 }
2246
2247 if ( (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
2248 || pCpu->pCurInstr->uOpcode == OP_INT
2249 || pCpu->pCurInstr->uOpcode == OP_IRET
2250 || pCpu->pCurInstr->uOpcode == OP_RETN
2251 || pCpu->pCurInstr->uOpcode == OP_RETF
2252 )
2253 {
2254 return VINF_SUCCESS;
2255 }
2256
2257 if (pCpu->pCurInstr->uOpcode == OP_ILLUD2)
2258 return VINF_SUCCESS;
2259
2260 return VWRN_CONTINUE_ANALYSIS;
2261}
2262
2263
2264/**
2265 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2266 *
2267 * @returns VBox status code.
2268 * @param pVM Pointer to the VM.
2269 * @param pInstrGC Guest context pointer to the initial privileged instruction
2270 * @param pCurInstrGC Guest context pointer to the current instruction
2271 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2272 * @param pCacheRec Cache record ptr
2273 *
2274 */
2275int patmr3DisasmCode(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2276{
2277 DISCPUSTATE cpu;
2278 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2279 int rc = VWRN_CONTINUE_ANALYSIS;
2280 uint32_t cbInstr, delta;
2281 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2282 bool disret;
2283 char szOutput[256];
2284
2285 Assert(pCurInstrHC != PATCHCODE_PTR_HC(pPatch) || pPatch->pTempInfo->DisasmJumpTree == 0);
2286
2287 /* We need this to determine branch targets (and for disassembling). */
2288 delta = pVM->patm.s.pPatchMemGC - (uintptr_t)pVM->patm.s.pPatchMemHC;
2289
2290 while (rc == VWRN_CONTINUE_ANALYSIS)
2291 {
2292 pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2293 if (pCurInstrHC == NULL)
2294 {
2295 rc = VERR_PATCHING_REFUSED;
2296 goto end;
2297 }
2298
2299 disret = patmR3DisInstrToStr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_RAWCODE,
2300 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2301 if (PATMIsPatchGCAddr(pVM, pCurInstrGC))
2302 {
2303 RTRCPTR pOrgInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2304
2305 if (pOrgInstrGC != pPatch->pTempInfo->pLastDisasmInstrGC)
2306 Log(("DIS %RRv<-%s", pOrgInstrGC, szOutput));
2307 else
2308 Log(("DIS %s", szOutput));
2309
2310 pPatch->pTempInfo->pLastDisasmInstrGC = pOrgInstrGC;
2311 if (patmIsIllegalInstr(pPatch, pOrgInstrGC))
2312 {
2313 rc = VINF_SUCCESS;
2314 goto end;
2315 }
2316 }
2317 else
2318 Log(("DIS: %s", szOutput));
2319
2320 if (disret == false)
2321 {
2322 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2323 rc = VINF_SUCCESS;
2324 goto end;
2325 }
2326
2327 rc = pfnPATMR3Disasm(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2328 if (rc != VWRN_CONTINUE_ANALYSIS) {
2329 break; //done!
2330 }
2331
2332 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction) */
2333 if ( (cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2334 && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J)
2335 && cpu.pCurInstr->uOpcode != OP_CALL /* complete functions are replaced; don't bother here. */
2336 )
2337 {
2338 RTRCPTR pTargetGC = PATMResolveBranch(&cpu, pCurInstrGC);
2339 RTRCPTR pOrgTargetGC;
2340
2341 if (pTargetGC == 0)
2342 {
2343 Log(("We don't support far jumps here!! (%08X)\n", cpu.Param1.fUse));
2344 rc = VERR_PATCHING_REFUSED;
2345 break;
2346 }
2347
2348 if (!PATMIsPatchGCAddr(pVM, pTargetGC))
2349 {
2350 //jump back to guest code
2351 rc = VINF_SUCCESS;
2352 goto end;
2353 }
2354 pOrgTargetGC = PATMR3PatchToGCPtr(pVM, pTargetGC, 0);
2355
2356 if (patmIsCommonIDTHandlerPatch(pVM, pOrgTargetGC))
2357 {
2358 rc = VINF_SUCCESS;
2359 goto end;
2360 }
2361
2362 if (patmIsKnownDisasmJump(pPatch, pTargetGC) == false)
2363 {
2364 /* New jump, let's check it. */
2365 patmPatchAddDisasmJump(pVM, pPatch, pTargetGC);
2366
2367 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2368 rc = patmr3DisasmCode(pVM, pInstrGC, pTargetGC, pfnPATMR3Disasm, pCacheRec);
2369 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2370
2371 if (rc != VINF_SUCCESS) {
2372 break; //done!
2373 }
2374 }
2375 if (cpu.pCurInstr->uOpcode == OP_JMP)
2376 {
2377 /* Unconditional jump; return to caller. */
2378 rc = VINF_SUCCESS;
2379 goto end;
2380 }
2381
2382 rc = VWRN_CONTINUE_ANALYSIS;
2383 }
2384 pCurInstrGC += cbInstr;
2385 }
2386end:
2387 return rc;
2388}
2389
2390/**
2391 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2392 *
2393 * @returns VBox status code.
2394 * @param pVM Pointer to the VM.
2395 * @param pInstrGC Guest context pointer to the initial privileged instruction
2396 * @param pCurInstrGC Guest context pointer to the current instruction
2397 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2398 * @param pCacheRec Cache record ptr
2399 *
2400 */
2401int patmr3DisasmCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2402{
2403 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2404
2405 int rc = patmr3DisasmCode(pVM, pInstrGC, pCurInstrGC, pfnPATMR3Disasm, pCacheRec);
2406 /* Free all disasm jump records. */
2407 patmEmptyTree(pVM, &pPatch->pTempInfo->DisasmJumpTree);
2408 return rc;
2409}
2410
2411#endif /* LOG_ENABLED */
2412
2413/**
2414 * Detects if the specified address falls within a 5-byte jump generated for an active patch.
2415 * If so, this patch is permanently disabled.
2416 *
2417 * @param pVM Pointer to the VM.
2418 * @param pInstrGC Guest context pointer to instruction
2419 * @param pConflictGC Guest context pointer to check
2420 *
2421 * @note also checks for patch hints to make sure they can never be enabled if a conflict is present.
2422 *
2423 */
2424VMMR3_INT_DECL(int) PATMR3DetectConflict(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictGC)
2425{
2426 AssertReturn(!HMIsEnabled(pVM), VERR_PATCH_NO_CONFLICT);
2427 PPATCHINFO pTargetPatch = patmFindActivePatchByEntrypoint(pVM, pConflictGC, true /* include patch hints */);
2428 if (pTargetPatch)
2429 return patmDisableUnusablePatch(pVM, pInstrGC, pConflictGC, pTargetPatch);
2430 return VERR_PATCH_NO_CONFLICT;
2431}
2432
2433/**
2434 * Recompiles the code stream until the callback function detects a failure or decides everything is acceptable
2435 *
2436 * @returns VBox status code.
2437 * @param pVM Pointer to the VM.
2438 * @param pInstrGC Guest context pointer to privileged instruction
2439 * @param pCurInstrGC Guest context pointer to the current instruction
2440 * @param pfnPATMR3Recompile Callback for testing the disassembled instruction
2441 * @param pCacheRec Cache record ptr
2442 *
2443 */
2444static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Recompile, PPATMP2GLOOKUPREC pCacheRec)
2445{
2446 DISCPUSTATE cpu;
2447 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2448 int rc = VWRN_CONTINUE_ANALYSIS;
2449 uint32_t cbInstr;
2450 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2451 bool disret;
2452#ifdef LOG_ENABLED
2453 char szOutput[256];
2454#endif
2455
2456 while (rc == VWRN_CONTINUE_RECOMPILE)
2457 {
2458 pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2459 if (pCurInstrHC == NULL)
2460 {
2461 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2462 goto end;
2463 }
2464#ifdef LOG_ENABLED
2465 disret = patmR3DisInstrToStr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE,
2466 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2467 Log(("Recompile: %s", szOutput));
2468#else
2469 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
2470#endif
2471 if (disret == false)
2472 {
2473 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2474
2475 /* Add lookup record for patch to guest address translation */
2476 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
2477 patmPatchGenIllegalInstr(pVM, pPatch);
2478 rc = VINF_SUCCESS; /* Note: don't fail here; we might refuse an important patch!! */
2479 goto end;
2480 }
2481
2482 rc = pfnPATMR3Recompile(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2483 if (rc != VWRN_CONTINUE_RECOMPILE)
2484 {
2485 /* If irqs are inhibited because of the current instruction, then we must make sure the next one is executed! */
2486 if ( rc == VINF_SUCCESS
2487 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2488 {
2489 DISCPUSTATE cpunext;
2490 uint32_t opsizenext;
2491 uint8_t *pNextInstrHC;
2492 RTRCPTR pNextInstrGC = pCurInstrGC + cbInstr;
2493
2494 Log(("patmRecompileCodeStream: irqs inhibited by instruction %RRv\n", pNextInstrGC));
2495
2496 /* Certain instructions (e.g. sti) force the next instruction to be executed before any interrupts can occur.
2497 * Recompile the next instruction as well
2498 */
2499 pNextInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
2500 if (pNextInstrHC == NULL)
2501 {
2502 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2503 goto end;
2504 }
2505 disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpunext, &opsizenext);
2506 if (disret == false)
2507 {
2508 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2509 goto end;
2510 }
2511 switch(cpunext.pCurInstr->uOpcode)
2512 {
2513 case OP_IRET: /* inhibit cleared in generated code */
2514 case OP_SYSEXIT: /* faults; inhibit should be cleared in HC handling */
2515 case OP_HLT:
2516 break; /* recompile these */
2517
2518 default:
2519 if (cpunext.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2520 {
2521 Log(("Unexpected control flow instruction after inhibit irq instruction\n"));
2522
2523 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2524 AssertRC(rc);
2525 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2526 goto end; /** @todo should be ok to ignore instruction fusing in this case */
2527 }
2528 break;
2529 }
2530
2531 /* Note: after a cli we must continue to a proper exit point */
2532 if (cpunext.pCurInstr->uOpcode != OP_CLI)
2533 {
2534 rc = pfnPATMR3Recompile(pVM, &cpunext, pInstrGC, pNextInstrGC, pCacheRec);
2535 if (RT_SUCCESS(rc))
2536 {
2537 rc = VINF_SUCCESS;
2538 goto end;
2539 }
2540 break;
2541 }
2542 else
2543 rc = VWRN_CONTINUE_RECOMPILE;
2544 }
2545 else
2546 break; /* done! */
2547 }
2548
2549 /** @todo continue with the instructions following the jump and then recompile the jump target code */
2550
2551
2552 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction). */
2553 if ( (cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2554 && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J)
2555 && cpu.pCurInstr->uOpcode != OP_CALL /* complete functions are replaced; don't bother here. */
2556 )
2557 {
2558 RCPTRTYPE(uint8_t *) addr = PATMResolveBranch(&cpu, pCurInstrGC);
2559 if (addr == 0)
2560 {
2561 Log(("We don't support far jumps here!! (%08X)\n", cpu.Param1.fUse));
2562 rc = VERR_PATCHING_REFUSED;
2563 break;
2564 }
2565
2566 Log(("Jump encountered target %RRv\n", addr));
2567
2568 /* We don't check if the branch target lies in a valid page as we've already done that in the analysis phase. */
2569 if (!(cpu.pCurInstr->fOpType & DISOPTYPE_UNCOND_CONTROLFLOW))
2570 {
2571 Log(("patmRecompileCodeStream continue passed conditional jump\n"));
2572 /* First we need to finish this linear code stream until the next exit point. */
2573 rc = patmRecompileCodeStream(pVM, pInstrGC, pCurInstrGC+cbInstr, pfnPATMR3Recompile, pCacheRec);
2574 if (RT_FAILURE(rc))
2575 {
2576 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2577 break; //fatal error
2578 }
2579 }
2580
2581 if (patmGuestGCPtrToPatchGCPtr(pVM, pPatch, addr) == 0)
2582 {
2583 /* New code; let's recompile it. */
2584 Log(("patmRecompileCodeStream continue with jump\n"));
2585
2586 /*
2587 * If we are jumping to an existing patch (or within 5 bytes of the entrypoint), then we must temporarily disable
2588 * this patch so we can continue our analysis
2589 *
2590 * We rely on CSAM to detect and resolve conflicts
2591 */
2592 PPATCHINFO pTargetPatch = patmFindActivePatchByEntrypoint(pVM, addr);
2593 if(pTargetPatch)
2594 {
2595 Log(("Found active patch at target %RRv (%RRv) -> temporarily disabling it!!\n", addr, pTargetPatch->pPrivInstrGC));
2596 PATMR3DisablePatch(pVM, pTargetPatch->pPrivInstrGC);
2597 }
2598
2599 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2600 rc = patmRecompileCodeStream(pVM, pInstrGC, addr, pfnPATMR3Recompile, pCacheRec);
2601 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2602
2603 if(pTargetPatch)
2604 PATMR3EnablePatch(pVM, pTargetPatch->pPrivInstrGC);
2605
2606 if (RT_FAILURE(rc))
2607 {
2608 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2609 break; //done!
2610 }
2611 }
2612 /* Always return to caller here; we're done! */
2613 rc = VINF_SUCCESS;
2614 goto end;
2615 }
2616 else
2617 if (cpu.pCurInstr->fOpType & DISOPTYPE_UNCOND_CONTROLFLOW)
2618 {
2619 rc = VINF_SUCCESS;
2620 goto end;
2621 }
2622 pCurInstrGC += cbInstr;
2623 }
2624end:
2625 Assert(!(pPatch->flags & PATMFL_RECOMPILE_NEXT));
2626 return rc;
2627}
2628
2629
2630/**
2631 * Generate the jump from guest to patch code
2632 *
2633 * @returns VBox status code.
2634 * @param pVM Pointer to the VM.
2635 * @param pPatch Patch record
2636 * @param pCacheRec Guest translation lookup cache record
2637 */
2638static int patmGenJumpToPatch(PVM pVM, PPATCHINFO pPatch, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2639{
2640 uint8_t temp[8];
2641 uint8_t *pPB;
2642 int rc;
2643
2644 Assert(pPatch->cbPatchJump <= sizeof(temp));
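    /* The guest jump is installed at most once; PATMFL_PATCHED_GUEST_CODE (checked here
     * and set below on success) records that the original bytes have been overwritten. */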
2645 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
2646
2647 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2648 Assert(pPB);
2649
2650#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
2651 if (pPatch->flags & PATMFL_JUMP_CONFLICT)
2652 {
2653 Assert(pPatch->pPatchJumpDestGC);
2654
2655 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
2656 {
2657 // jmp [PatchCode]
2658 if (fAddFixup)
2659 {
2660 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump,
2661 pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2662 {
2663 Log(("Relocation failed for the jump in the guest code!!\n"));
2664 return VERR_PATCHING_REFUSED;
2665 }
2666 }
2667
2668 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2669 *(uint32_t *)&temp[1] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
2670 }
2671 else
2672 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
2673 {
2674 // jmp [PatchCode]
2675 if (fAddFixup)
2676 {
2677 if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump,
2678 pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2679 {
2680 Log(("Relocation failed for the jump in the guest code!!\n"));
2681 return VERR_PATCHING_REFUSED;
2682 }
2683 }
2684
2685 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2686 temp[1] = pPatch->aPrivInstr[1]; //jump opcode copied from original instruction
2687 *(uint32_t *)&temp[2] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
2688 }
2689 else
2690 {
2691 Assert(0);
2692 return VERR_PATCHING_REFUSED;
2693 }
2694 }
2695 else
2696#endif
2697 {
2698 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2699
2700 // jmp [PatchCode]
2701 if (fAddFixup)
2702 {
2703 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32,
2704 PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
2705 {
2706 Log(("Relocation failed for the jump in the guest code!!\n"));
2707 return VERR_PATCHING_REFUSED;
2708 }
2709 }
2710 temp[0] = 0xE9; //jmp
2711 *(uint32_t *)&temp[1] = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
2712 }
2713 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2714 AssertRC(rc);
2715
2716 if (rc == VINF_SUCCESS)
2717 pPatch->flags |= PATMFL_PATCHED_GUEST_CODE;
2718
2719 return rc;
2720}
2721
2722/**
2723 * Remove the jump from guest to patch code
2724 *
2725 * @returns VBox status code.
2726 * @param pVM Pointer to the VM.
2727 * @param pPatch Patch record
2728 */
2729static int patmRemoveJumpToPatch(PVM pVM, PPATCHINFO pPatch)
2730{
2731#ifdef DEBUG
2732 DISCPUSTATE cpu;
2733 char szOutput[256];
2734 uint32_t cbInstr, i = 0;
2735 bool disret;
2736
2737 while (i < pPatch->cbPrivInstr)
2738 {
2739 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
2740 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2741 if (disret == false)
2742 break;
2743
2744 Log(("Org patch jump: %s", szOutput));
2745 Assert(cbInstr);
2746 i += cbInstr;
2747 }
2748#endif
2749
2750 /* Restore original code (privileged instruction + following instructions that were overwritten because of the 5/6 byte jmp). */
2751 int rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, pPatch->cbPatchJump);
2752#ifdef DEBUG
2753 if (rc == VINF_SUCCESS)
2754 {
2755 i = 0;
2756 while (i < pPatch->cbPrivInstr)
2757 {
2758 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
2759 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2760 if (disret == false)
2761 break;
2762
2763 Log(("Org instr: %s", szOutput));
2764 Assert(cbInstr);
2765 i += cbInstr;
2766 }
2767 }
2768#endif
2769 pPatch->flags &= ~PATMFL_PATCHED_GUEST_CODE;
2770 return rc;
2771}
2772
2773/**
2774 * Generate the call from guest to patch code
2775 *
2776 * @returns VBox status code.
2777 * @param pVM Pointer to the VM.
2778 * @param pPatch Patch record
2779 * @param pTargetGC Guest context address of the patch code to call/jump to
2780 * @param pCacheRec Guest translation cache record
2781 */
2782static int patmGenCallToPatch(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTargetGC, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2783{
2784 uint8_t temp[8];
2785 uint8_t *pPB;
2786 int rc;
2787
2788 Assert(pPatch->cbPatchJump <= sizeof(temp));
2789
2790 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2791 Assert(pPB);
2792
2793 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2794
2795 // jmp [PatchCode]
2796 if (fAddFixup)
2797 {
2798 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH,
2799 pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
2800 {
2801 Log(("Relocation failed for the jump in the guest code!!\n"));
2802 return VERR_PATCHING_REFUSED;
2803 }
2804 }
2805
2806 Assert(pPatch->aPrivInstr[0] == 0xE8 || pPatch->aPrivInstr[0] == 0xE9); /* call or jmp */
2807 temp[0] = pPatch->aPrivInstr[0];
2808 *(uint32_t *)&temp[1] = (uint32_t)pTargetGC - ((uint32_t)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
2809
2810 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2811 AssertRC(rc);
2812
2813 return rc;
2814}
2815
2816
2817/**
2818 * Patch cli/sti pushf/popf instruction block at specified location
2819 *
2820 * @returns VBox status code.
2821 * @param pVM Pointer to the VM.
2822 * @param pInstrGC Guest context pointer to privileged instruction
2823 * @param pInstrHC Host context pointer to privileged instruction
2824 * @param uOpcode Instruction opcode
2825 * @param uOpSize Size of starting instruction
2826 * @param pPatchRec Patch record
2827 *
2828 * @note returns failure if patching is not allowed or possible
2829 *
2830 */
2831static int patmR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2832 uint32_t uOpcode, uint32_t uOpSize, PPATMPATCHREC pPatchRec)
2833{
2834 PPATCHINFO pPatch = &pPatchRec->patch;
2835 int rc = VERR_PATCHING_REFUSED;
2836 uint32_t orgOffsetPatchMem = ~0;
2837 RTRCPTR pInstrStart;
2838 bool fInserted;
2839 NOREF(pInstrHC); NOREF(uOpSize);
2840
2841 /* Save original offset (in case of failures later on) */
2842 /** @todo use the hypervisor heap (that has quite a few consequences for save/restore though) */
2843 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2844
2845 Assert(!(pPatch->flags & (PATMFL_GUEST_SPECIFIC|PATMFL_USER_MODE|PATMFL_TRAPHANDLER)));
2846 switch (uOpcode)
2847 {
2848 case OP_MOV:
2849 break;
2850
2851 case OP_CLI:
2852 case OP_PUSHF:
2853 /* We can 'call' a cli or pushf patch. It will either return to the original guest code when IF is set again, or fault. */
2854 /* Note: special precautions are taken when disabling and enabling such patches. */
2855 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
2856 break;
2857
2858 default:
2859 if (!(pPatch->flags & PATMFL_IDTHANDLER))
2860 {
2861 AssertMsg(0, ("patmR3PatchBlock: Invalid opcode %x\n", uOpcode));
2862 return VERR_INVALID_PARAMETER;
2863 }
2864 }
2865
2866 if (!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)))
2867 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
2868
2869 /* If we're going to insert a patch jump, then the jump itself is not allowed to cross a page boundary. */
2870 if ( (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2871 && PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + SIZEOF_NEARJUMP32)
2872 )
2873 {
2874 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
2875 Log(("Patch jump would cross page boundary -> refuse!!\n"));
2876 rc = VERR_PATCHING_REFUSED;
2877 goto failure;
2878 }
2879
2880 pPatch->nrPatch2GuestRecs = 0;
2881 pInstrStart = pInstrGC;
2882
2883#ifdef PATM_ENABLE_CALL
2884 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
2885#endif
2886
2887 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2888 pPatch->uCurPatchOffset = 0;
2889
2890 if ((pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER)) == PATMFL_IDTHANDLER)
2891 {
2892 Assert(pPatch->flags & PATMFL_INTHANDLER);
2893
2894 /* Install fake cli patch (to clear the virtual IF and check int xx parameters) */
2895 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2896 if (RT_FAILURE(rc))
2897 goto failure;
2898 }
2899
2900 /***************************************************************************************************************************/
2901 /* Note: We can't insert *any* code before a sysenter handler; some linux guests have an invalid stack at this point!!!!! */
2902 /***************************************************************************************************************************/
2903#ifdef VBOX_WITH_STATISTICS
2904 if (!(pPatch->flags & PATMFL_SYSENTER))
2905 {
2906 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2907 if (RT_FAILURE(rc))
2908 goto failure;
2909 }
2910#endif
2911
2912 PATMP2GLOOKUPREC cacheRec;
2913 RT_ZERO(cacheRec);
2914 cacheRec.pPatch = pPatch;
2915
2916 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, &cacheRec);
2917 /* Free leftover lock if any. */
2918 if (cacheRec.Lock.pvMap)
2919 {
2920 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2921 cacheRec.Lock.pvMap = NULL;
2922 }
2923 if (rc != VINF_SUCCESS)
2924 {
2925 Log(("PATMR3PatchCli: patmRecompileCodeStream failed with %d\n", rc));
2926 goto failure;
2927 }
2928
2929 /* Calculated during analysis. */
2930 if (pPatch->cbPatchBlockSize < SIZEOF_NEARJUMP32)
2931 {
2932 /* Most likely cause: we encountered an illegal instruction very early on. */
2933 /** @todo could turn it into an int3 callable patch. */
2934 Log(("patmR3PatchBlock: patch block too small -> refuse\n"));
2935 rc = VERR_PATCHING_REFUSED;
2936 goto failure;
2937 }
2938
2939 /* size of patch block */
2940 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2941
2942
2943 /* Update free pointer in patch memory. */
2944 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2945 /* Round to next 8 byte boundary. */
2946 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2947
2948 /*
2949 * Insert into patch to guest lookup tree
2950 */
2951 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2952 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2953 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2954 AssertMsg(fInserted, ("RTAvlULInsert failed for %x\n", pPatchRec->CoreOffset.Key));
2955 if (!fInserted)
2956 {
2957 rc = VERR_PATCHING_REFUSED;
2958 goto failure;
2959 }
2960
2961 /* Note that patmr3SetBranchTargets can install additional patches!! */
2962 rc = patmr3SetBranchTargets(pVM, pPatch);
2963 if (rc != VINF_SUCCESS)
2964 {
2965 Log(("PATMR3PatchCli: patmr3SetBranchTargets failed with %d\n", rc));
2966 goto failure;
2967 }
2968
2969#ifdef LOG_ENABLED
2970 Log(("Patch code ----------------------------------------------------------\n"));
2971 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, &cacheRec);
2972 /* Free leftover lock if any. */
2973 if (cacheRec.Lock.pvMap)
2974 {
2975 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2976 cacheRec.Lock.pvMap = NULL;
2977 }
2978 Log(("Patch code ends -----------------------------------------------------\n"));
2979#endif
2980
2981 /* make a copy of the guest code bytes that will be overwritten */
2982 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
2983
2984 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
2985 AssertRC(rc);
2986
2987 if (pPatch->flags & PATMFL_INT3_REPLACEMENT_BLOCK)
2988 {
2989 /*uint8_t bASMInt3 = 0xCC; - unused */
2990
2991 Log(("patmR3PatchBlock %RRv -> int 3 callable patch.\n", pPatch->pPrivInstrGC));
2992 /* Replace first opcode byte with 'int 3'. */
2993 rc = patmActivateInt3Patch(pVM, pPatch);
2994 if (RT_FAILURE(rc))
2995 goto failure;
2996
2997 /* normal patch can be turned into an int3 patch -> clear patch jump installation flag. */
2998 pPatch->flags &= ~PATMFL_MUST_INSTALL_PATCHJMP;
2999
3000 pPatch->flags &= ~PATMFL_INSTR_HINT;
3001 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
3002 }
3003 else
3004 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
3005 {
3006 Assert(!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)));
3007 /* now insert a jump in the guest code */
3008 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
3009 AssertRC(rc);
3010 if (RT_FAILURE(rc))
3011 goto failure;
3012
3013 }
3014
3015 patmR3DbgAddPatch(pVM, pPatchRec);
3016
3017 PATM_LOG_RAW_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
3018
3019 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3020 pPatch->pTempInfo->nrIllegalInstr = 0;
3021
3022 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
3023
3024 pPatch->uState = PATCH_ENABLED;
3025 return VINF_SUCCESS;
3026
3027failure:
3028 if (pPatchRec->CoreOffset.Key)
3029 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3030
3031 patmEmptyTree(pVM, &pPatch->FixupTree);
3032 pPatch->nrFixups = 0;
3033
3034 patmEmptyTree(pVM, &pPatch->JumpTree);
3035 pPatch->nrJumpRecs = 0;
3036
3037 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3038 pPatch->pTempInfo->nrIllegalInstr = 0;
3039
3040 /* Turn this cli patch into a dummy. */
3041 pPatch->uState = PATCH_REFUSED;
3042 pPatch->pPatchBlockOffset = 0;
3043
3044 // Give back the patch memory we no longer need
3045 Assert(orgOffsetPatchMem != (uint32_t)~0);
3046 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3047
3048 return rc;
3049}
3050
3051/**
3052 * Patch IDT handler
3053 *
3054 * @returns VBox status code.
3055 * @param pVM Pointer to the VM.
3056 * @param pInstrGC Guest context pointer to privileged instruction
3057 * @param uOpSize Size of starting instruction
3058 * @param pPatchRec Patch record
3059 * @param pCacheRec Cache record ptr
3060 *
3061 * @note returns failure if patching is not allowed or possible
3062 *
3063 */
3064static int patmIdtHandler(PVM pVM, RTRCPTR pInstrGC, uint32_t uOpSize, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3065{
3066 PPATCHINFO pPatch = &pPatchRec->patch;
3067 bool disret;
3068 DISCPUSTATE cpuPush, cpuJmp;
3069 uint32_t cbInstr;
3070 RTRCPTR pCurInstrGC = pInstrGC;
3071 uint8_t *pCurInstrHC, *pInstrHC;
3072 uint32_t orgOffsetPatchMem = ~0;
3073
3074 pInstrHC = pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
3075 AssertReturn(pCurInstrHC, VERR_PAGE_NOT_PRESENT);
3076
3077 /*
3078 * In Linux it's often the case that many interrupt handlers push a predefined value onto the stack
3079 * and then jump to a common entrypoint. In order not to waste a lot of memory, we will check for this
3080 * condition here and only patch the common entrypoint once.
3081 */
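    /* Illustrative guest pattern this targets (hypothetical values):
     *     push 0x20              ; vector-specific constant
     *     jmp  CommonTrapEntry   ; shared entrypoint used by many vectors
     * Only the common entrypoint gets a full patch; each vector handler then only
     * needs a small stub (duplicated push + jump into that patch) generated below. */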
3082 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuPush, &cbInstr);
3083 Assert(disret);
3084 if (disret && cpuPush.pCurInstr->uOpcode == OP_PUSH)
3085 {
3086 RTRCPTR pJmpInstrGC;
3087 int rc;
3088 pCurInstrGC += cbInstr;
3089
3090 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuJmp, &cbInstr);
3091 if ( disret
3092 && cpuJmp.pCurInstr->uOpcode == OP_JMP
3093 && (pJmpInstrGC = PATMResolveBranch(&cpuJmp, pCurInstrGC))
3094 )
3095 {
3096 bool fInserted;
3097 PPATMPATCHREC pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
3098 if (pJmpPatch == 0)
3099 {
3100 /* Patch it first! */
3101 rc = PATMR3InstallPatch(pVM, pJmpInstrGC, pPatch->flags | PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT);
3102 if (rc != VINF_SUCCESS)
3103 goto failure;
3104 pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
3105 Assert(pJmpPatch);
3106 }
3107 if (pJmpPatch->patch.uState != PATCH_ENABLED)
3108 goto failure;
3109
3110 /* save original offset (in case of failures later on) */
3111 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3112
3113 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3114 pPatch->uCurPatchOffset = 0;
3115 pPatch->nrPatch2GuestRecs = 0;
3116
3117#ifdef VBOX_WITH_STATISTICS
3118 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3119 if (RT_FAILURE(rc))
3120 goto failure;
3121#endif
3122
3123 /* Install fake cli patch (to clear the virtual IF) */
3124 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
3125 if (RT_FAILURE(rc))
3126 goto failure;
3127
3128 /* Add lookup record for patch to guest address translation (for the push) */
3129 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pInstrGC, PATM_LOOKUP_BOTHDIR);
3130
3131 /* Duplicate push. */
3132 rc = patmPatchGenDuplicate(pVM, pPatch, &cpuPush, pInstrGC);
3133 if (RT_FAILURE(rc))
3134 goto failure;
3135
3136 /* Generate jump to common entrypoint. */
3137 rc = patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, PATCHCODE_PTR_GC(&pJmpPatch->patch));
3138 if (RT_FAILURE(rc))
3139 goto failure;
3140
3141 /* size of patch block */
3142 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3143
3144 /* Update free pointer in patch memory. */
3145 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3146 /* Round to next 8 byte boundary */
3147 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3148
3149 /* There's no jump from guest to patch code. */
3150 pPatch->cbPatchJump = 0;
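            /* Presumably entry into this stub happens through the (shadow) IDT entry for the
             * vector rather than via a jump written into the guest handler, so no guest code
             * bytes are modified and cbPatchJump stays 0. */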
3151
3152
3153#ifdef LOG_ENABLED
3154 Log(("Patch code ----------------------------------------------------------\n"));
3155 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3156 Log(("Patch code ends -----------------------------------------------------\n"));
3157#endif
3158 Log(("Successfully installed IDT handler patch at %RRv\n", pInstrGC));
3159
3160 /*
3161 * Insert into patch to guest lookup tree
3162 */
3163 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3164 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3165 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3166 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3167 patmR3DbgAddPatch(pVM, pPatchRec);
3168
3169 pPatch->uState = PATCH_ENABLED;
3170
3171 return VINF_SUCCESS;
3172 }
3173 }
3174failure:
3175 /* Give back the patch memory we no longer need */
3176 if (orgOffsetPatchMem != (uint32_t)~0)
3177 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3178
3179 return patmR3PatchBlock(pVM, pInstrGC, pInstrHC, OP_CLI, uOpSize, pPatchRec);
3180}
3181
3182/**
3183 * Install a trampoline to call a guest trap handler directly
3184 *
3185 * @returns VBox status code.
3186 * @param pVM Pointer to the VM.
3187 * @param pInstrGC Guest context pointer to privileged instruction
3188 * @param pPatchRec Patch record
3189 * @param pCacheRec Cache record ptr
3190 *
3191 */
3192static int patmInstallTrapTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3193{
3194 PPATCHINFO pPatch = &pPatchRec->patch;
3195 int rc = VERR_PATCHING_REFUSED;
3196 uint32_t orgOffsetPatchMem = ~0;
3197 bool fInserted;
3198
3199 // save original offset (in case of failures later on)
3200 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3201
3202 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3203 pPatch->uCurPatchOffset = 0;
3204 pPatch->nrPatch2GuestRecs = 0;
3205
3206#ifdef VBOX_WITH_STATISTICS
3207 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3208 if (RT_FAILURE(rc))
3209 goto failure;
3210#endif
3211
3212 rc = patmPatchGenTrapEntry(pVM, pPatch, pInstrGC);
3213 if (RT_FAILURE(rc))
3214 goto failure;
3215
3216 /* size of patch block */
3217 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3218
3219 /* Update free pointer in patch memory. */
3220 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3221 /* Round to next 8 byte boundary */
3222 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3223
3224 /* There's no jump from guest to patch code. */
3225 pPatch->cbPatchJump = 0;
3226
3227#ifdef LOG_ENABLED
3228 Log(("Patch code ----------------------------------------------------------\n"));
3229 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3230 Log(("Patch code ends -----------------------------------------------------\n"));
3231#endif
3232 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "TRAP handler");
3233 Log(("Successfully installed Trap Trampoline patch at %RRv\n", pInstrGC));
3234
3235 /*
3236 * Insert into patch to guest lookup tree
3237 */
3238 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3239 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3240 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3241 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3242 patmR3DbgAddPatch(pVM, pPatchRec);
3243
3244 pPatch->uState = PATCH_ENABLED;
3245 return VINF_SUCCESS;
3246
3247failure:
3248 AssertMsgFailed(("Failed to install trap handler trampoline!!\n"));
3249
3250 /* Turn this cli patch into a dummy. */
3251 pPatch->uState = PATCH_REFUSED;
3252 pPatch->pPatchBlockOffset = 0;
3253
3254 /* Give back the patch memory we no longer need */
3255 Assert(orgOffsetPatchMem != (uint32_t)~0);
3256 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3257
3258 return rc;
3259}
3260
3261
3262#ifdef LOG_ENABLED
3263/**
3264 * Check if the instruction is patched as a common idt handler
3265 *
3266 * @returns true or false
3267 * @param pVM Pointer to the VM.
3268 * @param pInstrGC Guest context pointer to the instruction
3269 *
3270 */
3271static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC)
3272{
3273 PPATMPATCHREC pRec;
3274
3275 pRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
3276 if (pRec && pRec->patch.flags & PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT)
3277 return true;
3278 return false;
3279}
3280#endif //LOG_ENABLED
3281
3282
3283/**
3284 * Duplicates a complete function
3285 *
3286 * @returns VBox status code.
3287 * @param pVM Pointer to the VM.
3288 * @param pInstrGC Guest context pointer to privileged instruction
3289 * @param pPatchRec Patch record
3290 * @param pCacheRec Cache record ptr
3291 *
3292 */
3293static int patmDuplicateFunction(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3294{
3295 PPATCHINFO pPatch = &pPatchRec->patch;
3296 int rc = VERR_PATCHING_REFUSED;
3297 uint32_t orgOffsetPatchMem = ~0;
3298 bool fInserted;
3299
3300 Log(("patmDuplicateFunction %RRv\n", pInstrGC));
3301 /* Save original offset (in case of failures later on). */
3302 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3303
3304 /* We will not go on indefinitely with call instruction handling. */
3305 if (pVM->patm.s.ulCallDepth > PATM_MAX_CALL_DEPTH)
3306 {
3307 Log(("patmDuplicateFunction: maximum callback depth reached!!\n"));
3308 return VERR_PATCHING_REFUSED;
3309 }
3310
3311 pVM->patm.s.ulCallDepth++;
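    /* The call depth is decremented again on both the success and failure paths below; the
     * limit above guards against unbounded recursion, as the recompiler and
     * patmr3SetBranchTargets may install further function patches for nested calls. */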
3312
3313#ifdef PATM_ENABLE_CALL
3314 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
3315#endif
3316
3317 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
3318
3319 pPatch->nrPatch2GuestRecs = 0;
3320 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3321 pPatch->uCurPatchOffset = 0;
3322
3323 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3324 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3325 if (RT_FAILURE(rc))
3326 goto failure;
3327
3328#ifdef VBOX_WITH_STATISTICS
3329 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3330 if (RT_FAILURE(rc))
3331 goto failure;
3332#endif
3333
3334 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pCacheRec);
3335 if (rc != VINF_SUCCESS)
3336 {
3337 Log(("PATMR3PatchCli: patmRecompileCodeStream failed with %d\n", rc));
3338 goto failure;
3339 }
3340
3341 //size of patch block
3342 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3343
3344 //update free pointer in patch memory
3345 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3346 /* Round to next 8 byte boundary. */
3347 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3348
3349 pPatch->uState = PATCH_ENABLED;
3350
3351 /*
3352 * Insert into patch to guest lookup tree
3353 */
3354 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3355 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3356 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3357 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3358 if (!fInserted)
3359 {
3360 rc = VERR_PATCHING_REFUSED;
3361 goto failure;
3362 }
3363
3364 /* Note that patmr3SetBranchTargets can install additional patches!! */
3365 rc = patmr3SetBranchTargets(pVM, pPatch);
3366 if (rc != VINF_SUCCESS)
3367 {
3368 Log(("PATMR3PatchCli: patmr3SetBranchTargets failed with %d\n", rc));
3369 goto failure;
3370 }
3371
3372 patmR3DbgAddPatch(pVM, pPatchRec);
3373
3374#ifdef LOG_ENABLED
3375 Log(("Patch code ----------------------------------------------------------\n"));
3376 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3377 Log(("Patch code ends -----------------------------------------------------\n"));
3378#endif
3379
3380 Log(("Successfully installed function duplication patch at %RRv\n", pInstrGC));
3381
3382 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3383 pPatch->pTempInfo->nrIllegalInstr = 0;
3384
3385 pVM->patm.s.ulCallDepth--;
3386 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledFunctionPatches);
3387 return VINF_SUCCESS;
3388
3389failure:
3390 if (pPatchRec->CoreOffset.Key)
3391 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3392
3393 patmEmptyTree(pVM, &pPatch->FixupTree);
3394 pPatch->nrFixups = 0;
3395
3396 patmEmptyTree(pVM, &pPatch->JumpTree);
3397 pPatch->nrJumpRecs = 0;
3398
3399 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3400 pPatch->pTempInfo->nrIllegalInstr = 0;
3401
3402 /* Turn this cli patch into a dummy. */
3403 pPatch->uState = PATCH_REFUSED;
3404 pPatch->pPatchBlockOffset = 0;
3405
3406 // Give back the patch memory we no longer need
3407 Assert(orgOffsetPatchMem != (uint32_t)~0);
3408 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3409
3410 pVM->patm.s.ulCallDepth--;
3411 Log(("patmDupicateFunction %RRv failed!!\n", pInstrGC));
3412 return rc;
3413}
3414
3415/**
3416 * Creates trampoline code to jump inside an existing patch
3417 *
3418 * @returns VBox status code.
3419 * @param pVM Pointer to the VM.
3420 * @param pInstrGC Guest context pointer to privileged instruction
3421 * @param pPatchRec Patch record
3422 *
3423 */
3424static int patmCreateTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3425{
3426 PPATCHINFO pPatch = &pPatchRec->patch;
3427 RTRCPTR pPage, pPatchTargetGC = 0;
3428 uint32_t orgOffsetPatchMem = ~0;
3429 int rc = VERR_PATCHING_REFUSED;
3430 PPATCHINFO pPatchToJmp = NULL; /**< Patch the trampoline jumps to. */
3431 PTRAMPREC pTrampRec = NULL; /**< Trampoline record used to find the patch. */
3432 bool fInserted = false;
3433
3434 Log(("patmCreateTrampoline %RRv\n", pInstrGC));
3435 /* Save original offset (in case of failures later on). */
3436 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3437
3438 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3439 /** @todo we already checked this before */
3440 pPage = pInstrGC & PAGE_BASE_GC_MASK;
3441
3442 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3443 if (pPatchPage)
3444 {
3445 uint32_t i;
3446
3447 for (i=0;i<pPatchPage->cCount;i++)
3448 {
3449 if (pPatchPage->papPatch[i])
3450 {
3451 pPatchToJmp = pPatchPage->papPatch[i];
3452
3453 if ( (pPatchToJmp->flags & PATMFL_DUPLICATE_FUNCTION)
3454 && pPatchToJmp->uState == PATCH_ENABLED)
3455 {
3456 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatchToJmp, pInstrGC);
3457 if (pPatchTargetGC)
3458 {
3459 uint32_t offsetPatch = pPatchTargetGC - pVM->patm.s.pPatchMemGC;
3460 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatchToJmp->Patch2GuestAddrTree, offsetPatch, false);
3461 Assert(pPatchToGuestRec);
3462
3463 pPatchToGuestRec->fJumpTarget = true;
3464 Assert(pPatchTargetGC != pPatchToJmp->pPrivInstrGC);
3465 Log(("patmCreateTrampoline: generating jump to code inside patch at %RRv (patch target %RRv)\n", pPatchToJmp->pPrivInstrGC, pPatchTargetGC));
3466 break;
3467 }
3468 }
3469 }
3470 }
3471 }
3472 AssertReturn(pPatchPage && pPatchTargetGC && pPatchToJmp, VERR_PATCHING_REFUSED);
3473
3474 /*
3475 * Only record the trampoline patch if this is the first patch to the target
3476 * or we recorded other patches already.
3477 * The goal is to refuse refreshing function duplicates if the guest
3478 * modifies code after a saved state was loaded because it is not possible
3479 * to save the relation between trampoline and target without changing the
3480 * saved state version.
3481 */
3482 if ( !(pPatchToJmp->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
3483 || pPatchToJmp->pTrampolinePatchesHead)
3484 {
3485 pPatchToJmp->flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
3486 pTrampRec = (PTRAMPREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pTrampRec));
3487 if (!pTrampRec)
3488 return VERR_NO_MEMORY; /* or better return VERR_PATCHING_REFUSED to let the VM continue? */
3489
3490 pTrampRec->pPatchTrampoline = pPatchRec;
3491 }
3492
3493 pPatch->nrPatch2GuestRecs = 0;
3494 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3495 pPatch->uCurPatchOffset = 0;
3496
3497 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3498 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3499 if (RT_FAILURE(rc))
3500 goto failure;
3501
3502#ifdef VBOX_WITH_STATISTICS
3503 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3504 if (RT_FAILURE(rc))
3505 goto failure;
3506#endif
3507
3508 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC);
3509 if (RT_FAILURE(rc))
3510 goto failure;
3511
3512 /*
3513 * Insert into patch to guest lookup tree
3514 */
3515 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3516 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3517 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3518 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3519 if (!fInserted)
3520 {
3521 rc = VERR_PATCHING_REFUSED;
3522 goto failure;
3523 }
3524 patmR3DbgAddPatch(pVM, pPatchRec);
3525
3526 /* size of patch block */
3527 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3528
3529 /* Update free pointer in patch memory. */
3530 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3531 /* Round to next 8 byte boundary */
3532 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3533
3534 /* There's no jump from guest to patch code. */
3535 pPatch->cbPatchJump = 0;
3536
3537 /* Enable the patch. */
3538 pPatch->uState = PATCH_ENABLED;
3539 /* We allow this patch to be called as a function. */
3540 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
3541
3542 if (pTrampRec)
3543 {
3544 pTrampRec->pNext = pPatchToJmp->pTrampolinePatchesHead;
3545 pPatchToJmp->pTrampolinePatchesHead = pTrampRec;
3546 }
3547 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledTrampoline);
3548 return VINF_SUCCESS;
3549
3550failure:
3551 if (pPatchRec->CoreOffset.Key)
3552 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3553
3554 patmEmptyTree(pVM, &pPatch->FixupTree);
3555 pPatch->nrFixups = 0;
3556
3557 patmEmptyTree(pVM, &pPatch->JumpTree);
3558 pPatch->nrJumpRecs = 0;
3559
3560 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3561 pPatch->pTempInfo->nrIllegalInstr = 0;
3562
3563 /* Turn this cli patch into a dummy. */
3564 pPatch->uState = PATCH_REFUSED;
3565 pPatch->pPatchBlockOffset = 0;
3566
3567 // Give back the patch memory we no longer need
3568 Assert(orgOffsetPatchMem != (uint32_t)~0);
3569 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3570
3571 if (pTrampRec)
3572 MMR3HeapFree(pTrampRec);
3573
3574 return rc;
3575}
3576
3577
3578/**
3579 * Patch branch target function for call/jump at specified location.
3580 * (in response to a VINF_PATM_DUPLICATE_FUNCTION GC exit reason)
3581 *
3582 * @returns VBox status code.
3583 * @param pVM Pointer to the VM.
3584 * @param pCtx Pointer to the guest CPU context.
3585 *
3586 */
3587VMMR3_INT_DECL(int) PATMR3DuplicateFunctionRequest(PVM pVM, PCPUMCTX pCtx)
3588{
3589 RTRCPTR pBranchTarget, pPage;
3590 int rc;
3591 RTRCPTR pPatchTargetGC = 0;
3592 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
3593
3594 pBranchTarget = pCtx->edx;
3595 pBranchTarget = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pBranchTarget);
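    /* Register convention used by the patch code raising this request, as used below:
     *   EDX = branch target to duplicate, EDI = address of the patch lookup cache entry,
     *   EAX = result (patch address relative to patch memory, or 0 on failure). */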
3596
3597 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3598 pPage = pBranchTarget & PAGE_BASE_GC_MASK;
3599
3600 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3601 if (pPatchPage)
3602 {
3603 uint32_t i;
3604
3605 for (i=0;i<pPatchPage->cCount;i++)
3606 {
3607 if (pPatchPage->papPatch[i])
3608 {
3609 PPATCHINFO pPatch = pPatchPage->papPatch[i];
3610
3611 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
3612 && pPatch->uState == PATCH_ENABLED)
3613 {
3614 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pBranchTarget);
3615 if (pPatchTargetGC)
3616 {
3617 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateUseExisting);
3618 break;
3619 }
3620 }
3621 }
3622 }
3623 }
3624
3625 if (pPatchTargetGC)
3626 {
3627 /* Create a trampoline that also sets PATM_ASMFIX_INTERRUPTFLAG. */
3628 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_TRAMPOLINE);
3629 }
3630 else
3631 {
3632 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
3633 }
3634
3635 if (rc == VINF_SUCCESS)
3636 {
3637 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pBranchTarget);
3638 Assert(pPatchTargetGC);
3639 }
3640
3641 if (pPatchTargetGC)
3642 {
3643 pCtx->eax = pPatchTargetGC;
3644 pCtx->eax = pCtx->eax - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC; /* make it relative */
3645 }
3646 else
3647 {
3648 /* We add a dummy entry into the lookup cache so we won't get bombarded with the same requests over and over again. */
3649 pCtx->eax = 0;
3650 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQFailed);
3651 }
3652 Assert(PATMIsPatchGCAddr(pVM, pCtx->edi));
3653 rc = patmAddBranchToLookupCache(pVM, pCtx->edi, pBranchTarget, pCtx->eax);
3654 AssertRC(rc);
3655
3656 pCtx->eip += PATM_ILLEGAL_INSTR_SIZE;
3657 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQSuccess);
3658 return VINF_SUCCESS;
3659}
3660
3661/**
3662 * Replaces a function call by a call to an existing function duplicate (or jmp -> jmp)
3663 *
3664 * @returns VBox status code.
3665 * @param pVM Pointer to the VM.
3666 * @param pCpu Disassembly CPU structure ptr
3667 * @param pInstrGC Guest context pointer to privileged instruction
3668 * @param pCacheRec Cache record ptr
3669 *
3670 */
3671static int patmReplaceFunctionCall(PVM pVM, DISCPUSTATE *pCpu, RTRCPTR pInstrGC, PPATMP2GLOOKUPREC pCacheRec)
3672{
3673 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3674 int rc = VERR_PATCHING_REFUSED;
3675 DISCPUSTATE cpu;
3676 RTRCPTR pTargetGC;
3677 PPATMPATCHREC pPatchFunction;
3678 uint32_t cbInstr;
3679 bool disret;
3680
3681 Assert(pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL);
3682 Assert((pCpu->pCurInstr->uOpcode == OP_CALL || pCpu->pCurInstr->uOpcode == OP_JMP) && pCpu->cbInstr == SIZEOF_NEARJUMP32);
3683
3684 if ((pCpu->pCurInstr->uOpcode != OP_CALL && pCpu->pCurInstr->uOpcode != OP_JMP) || pCpu->cbInstr != SIZEOF_NEARJUMP32)
3685 {
3686 rc = VERR_PATCHING_REFUSED;
3687 goto failure;
3688 }
3689
3690 pTargetGC = PATMResolveBranch(pCpu, pInstrGC);
3691 if (pTargetGC == 0)
3692 {
3693 Log(("We don't support far jumps here!! (%08X)\n", pCpu->Param1.fUse));
3694 rc = VERR_PATCHING_REFUSED;
3695 goto failure;
3696 }
3697
3698 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3699 if (pPatchFunction == NULL)
3700 {
3701 for(;;)
3702 {
3703 /* It could be an indirect call (call -> jmp dest).
3704 * Note that it's dangerous to assume the jump will never change...
3705 */
3706 uint8_t *pTmpInstrHC;
3707
3708 pTmpInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pTargetGC);
3709 Assert(pTmpInstrHC);
3710 if (pTmpInstrHC == 0)
3711 break;
3712
3713 disret = patmR3DisInstr(pVM, pPatch, pTargetGC, pTmpInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
3714 if (disret == false || cpu.pCurInstr->uOpcode != OP_JMP)
3715 break;
3716
3717 pTargetGC = PATMResolveBranch(&cpu, pTargetGC);
3718 if (pTargetGC == 0)
3719 {
3720 break;
3721 }
3722
3723 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3724 break;
3725 }
3726 if (pPatchFunction == 0)
3727 {
3728 AssertMsgFailed(("Unable to find duplicate function %RRv\n", pTargetGC));
3729 rc = VERR_PATCHING_REFUSED;
3730 goto failure;
3731 }
3732 }
3733
3734 // make a copy of the guest code bytes that will be overwritten
3735 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
3736
3737 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3738 AssertRC(rc);
3739
3740 /* Now replace the original call in the guest code */
3741 rc = patmGenCallToPatch(pVM, pPatch, PATCHCODE_PTR_GC(&pPatchFunction->patch), pCacheRec, true);
3742 AssertRC(rc);
3743 if (RT_FAILURE(rc))
3744 goto failure;
3745
3746 /* Lowest and highest address for write monitoring. */
3747 pPatch->pInstrGCLowest = pInstrGC;
3748 pPatch->pInstrGCHighest = pInstrGC + pCpu->cbInstr;
3749 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "Call");
3750
3751 Log(("Successfully installed function replacement patch at %RRv\n", pInstrGC));
3752
3753 pPatch->uState = PATCH_ENABLED;
3754 return VINF_SUCCESS;
3755
3756failure:
3757 /* Turn this patch into a dummy. */
3758 pPatch->uState = PATCH_REFUSED;
3759
3760 return rc;
3761}
3762
3763/**
3764 * Replace the address in an MMIO instruction with the cached version.
3765 *
3766 * @returns VBox status code.
3767 * @param pVM Pointer to the VM.
3768 * @param pInstrGC Guest context pointer to privileged instruction
3769 * @param pCpu Disassembly CPU structure ptr
3770 * @param pCacheRec Cache record ptr
3771 *
3772 * @note returns failure if patching is not allowed or possible
3773 *
3774 */
3775static int patmPatchMMIOInstr(PVM pVM, RTRCPTR pInstrGC, DISCPUSTATE *pCpu, PPATMP2GLOOKUPREC pCacheRec)
3776{
3777 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3778 uint8_t *pPB;
3779 int rc = VERR_PATCHING_REFUSED;
3780
3781 Assert(pVM->patm.s.mmio.pCachedData);
3782 if (!pVM->patm.s.mmio.pCachedData)
3783 goto failure;
3784
3785 if (pCpu->Param2.fUse != DISUSE_DISPLACEMENT32)
3786 goto failure;
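    /* Only instructions whose second operand is a 32-bit displacement are handled here;
     * the displacement is assumed to occupy the last 4 bytes of the instruction, which is
     * where the fixup and rewrite below are applied (pCpu->cbInstr - sizeof(RTRCPTR)). */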
3787
3788 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
3789 if (pPB == 0)
3790 goto failure;
3791
3792 /* Add relocation record for cached data access. */
3793 if (patmPatchAddReloc32(pVM, pPatch, &pPB[pCpu->cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE, pPatch->pPrivInstrGC,
3794 pVM->patm.s.mmio.pCachedData) != VINF_SUCCESS)
3795 {
3796 Log(("Relocation failed for cached mmio address!!\n"));
3797 return VERR_PATCHING_REFUSED;
3798 }
3799 PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "MMIO patch old instruction:", "");
3800
3801 /* Save original instruction. */
3802 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3803 AssertRC(rc);
3804
3805 pPatch->cbPatchJump = pPatch->cbPrivInstr; /* bit of a misnomer in this case; size of replacement instruction. */
3806
3807 /* Replace address with that of the cached item. */
3808 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pInstrGC + pCpu->cbInstr - sizeof(RTRCPTR),
3809 &pVM->patm.s.mmio.pCachedData, sizeof(RTRCPTR));
3810 AssertRC(rc);
3811 if (RT_FAILURE(rc))
3812 {
3813 goto failure;
3814 }
3815
3816 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "MMIO");
3817 pVM->patm.s.mmio.pCachedData = 0;
3818 pVM->patm.s.mmio.GCPhys = 0;
3819 pPatch->uState = PATCH_ENABLED;
3820 return VINF_SUCCESS;
3821
3822failure:
3823 /* Turn this patch into a dummy. */
3824 pPatch->uState = PATCH_REFUSED;
3825
3826 return rc;
3827}
3828
3829
3830/**
3831 * Replace the address in an MMIO instruction with the cached version. (instruction is part of an existing patch)
3832 *
3833 * @returns VBox status code.
3834 * @param pVM Pointer to the VM.
3835 * @param pInstrGC Guest context pointer to privileged instruction
3836 * @param pPatch Patch record
3837 *
3838 * @note returns failure if patching is not allowed or possible
3839 *
3840 */
3841static int patmPatchPATMMMIOInstr(PVM pVM, RTRCPTR pInstrGC, PPATCHINFO pPatch)
3842{
3843 DISCPUSTATE cpu;
3844 uint32_t cbInstr;
3845 bool disret;
3846 uint8_t *pInstrHC;
3847
3848 AssertReturn(pVM->patm.s.mmio.pCachedData, VERR_INVALID_PARAMETER);
3849
3850 /* Convert GC to HC address. */
3851 pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pInstrGC);
3852 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
3853
3854 /* Disassemble mmio instruction. */
3855 disret = patmR3DisInstrNoStrOpMode(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE,
3856 &cpu, &cbInstr);
3857 if (disret == false)
3858 {
3859 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
3860 return VERR_PATCHING_REFUSED;
3861 }
3862
3863 AssertMsg(cbInstr <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", cbInstr));
3864 if (cbInstr > MAX_INSTR_SIZE)
3865 return VERR_PATCHING_REFUSED;
3866 if (cpu.Param2.fUse != DISUSE_DISPLACEMENT32)
3867 return VERR_PATCHING_REFUSED;
3868
3869 /* Add relocation record for cached data access. */
3870 if (patmPatchAddReloc32(pVM, pPatch, &pInstrHC[cpu.cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE) != VINF_SUCCESS)
3871 {
3872 Log(("Relocation failed for cached mmio address!!\n"));
3873 return VERR_PATCHING_REFUSED;
3874 }
3875 /* Replace address with that of the cached item. */
3876 *(RTRCPTR *)&pInstrHC[cpu.cbInstr - sizeof(RTRCPTR)] = pVM->patm.s.mmio.pCachedData;
3877
3878 /* Lowest and highest address for write monitoring. */
3879 pPatch->pInstrGCLowest = pInstrGC;
3880 pPatch->pInstrGCHighest = pInstrGC + cpu.cbInstr;
3881
3882 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "MMIO");
3883 pVM->patm.s.mmio.pCachedData = 0;
3884 pVM->patm.s.mmio.GCPhys = 0;
3885 return VINF_SUCCESS;
3886}
3887
3888/**
3889 * Activates an int3 patch
3890 *
3891 * @returns VBox status code.
3892 * @param pVM Pointer to the VM.
3893 * @param pPatch Patch record
3894 */
3895static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3896{
3897 uint8_t bASMInt3 = 0xCC;
3898 int rc;
3899
3900 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3901 Assert(pPatch->uState != PATCH_ENABLED);
3902
3903 /* Replace first opcode byte with 'int 3'. */
3904 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, &bASMInt3, sizeof(bASMInt3));
3905 AssertRC(rc);
3906
3907 pPatch->cbPatchJump = sizeof(bASMInt3);
3908
3909 return rc;
3910}
3911
3912/**
3913 * Deactivates an int3 patch
3914 *
3915 * @returns VBox status code.
3916 * @param pVM Pointer to the VM.
3917 * @param pPatch Patch record
3918 */
3919static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3920{
3921 uint8_t ASMInt3 = 0xCC;
3922 int rc;
3923
3924 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3925 Assert(pPatch->uState == PATCH_ENABLED || pPatch->uState == PATCH_DIRTY);
3926
3927 /* Restore first opcode byte. */
3928 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, sizeof(ASMInt3));
3929 AssertRC(rc);
3930 return rc;
3931}
3932
3933/**
3934 * Replace an instruction with a breakpoint (0xCC), that is handled dynamically
3935 * in the raw-mode context.
3936 *
3937 * @returns VBox status code.
3938 * @param pVM Pointer to the VM.
3939 * @param pInstrGC Guest context pointer to privileged instruction
3940 * @param pInstrHC Host context pointer to privileged instruction
3941 * @param pCpu Disassembly CPU structure ptr
3942 * @param pPatch Patch record
3943 *
3944 * @note returns failure if patching is not allowed or possible
3945 *
3946 */
3947int patmR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATCHINFO pPatch)
3948{
3949 uint8_t bASMInt3 = 0xCC;
3950 int rc;
3951
3952 /* Note: Do not use patch memory here! It might be called during patch installation too. */
3953 PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "patmR3PatchInstrInt3:", "");
3954
3955 /* Save the original instruction. */
3956 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3957 AssertRC(rc);
3958 pPatch->cbPatchJump = sizeof(bASMInt3); /* bit of a misnomer in this case; size of replacement instruction. */
3959
3960 pPatch->flags |= PATMFL_INT3_REPLACEMENT;
3961
3962 /* Replace first opcode byte with 'int 3'. */
3963 rc = patmActivateInt3Patch(pVM, pPatch);
3964 if (RT_FAILURE(rc))
3965 goto failure;
3966
3967 /* Lowest and highest address for write monitoring. */
3968 pPatch->pInstrGCLowest = pInstrGC;
3969 pPatch->pInstrGCHighest = pInstrGC + pCpu->cbInstr;
3970
3971 pPatch->uState = PATCH_ENABLED;
3972 return VINF_SUCCESS;
3973
3974failure:
3975 /* Turn this patch into a dummy. */
3976 return VERR_PATCHING_REFUSED;
3977}
3978
3979#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
3980/**
3981 * Patch a jump instruction at specified location
3982 *
3983 * @returns VBox status code.
3984 * @param pVM Pointer to the VM.
3985 * @param pInstrGC Guest context pointer to privileged instruction
3986 * @param pInstrHC Host context pointer to privileged instruction
3987 * @param pCpu Disassembly CPU structure ptr
3988 * @param pPatchRec Patch record
3989 *
3990 * @note returns failure if patching is not allowed or possible
3991 *
3992 */
3993int patmPatchJump(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATMPATCHREC pPatchRec)
3994{
3995 PPATCHINFO pPatch = &pPatchRec->patch;
3996 int rc = VERR_PATCHING_REFUSED;
3997
3998 pPatch->pPatchBlockOffset = 0; /* doesn't use patch memory */
3999 pPatch->uCurPatchOffset = 0;
4000 pPatch->cbPatchBlockSize = 0;
4001 pPatch->flags |= PATMFL_SINGLE_INSTRUCTION;
4002
4003 /*
4004 * Instruction replacements such as these should never be interrupted. I've added code to EM.cpp to
4005 * make sure this never happens. (unless a trap is triggered (intentionally or not))
4006 */
4007 switch (pCpu->pCurInstr->uOpcode)
4008 {
4009 case OP_JO:
4010 case OP_JNO:
4011 case OP_JC:
4012 case OP_JNC:
4013 case OP_JE:
4014 case OP_JNE:
4015 case OP_JBE:
4016 case OP_JNBE:
4017 case OP_JS:
4018 case OP_JNS:
4019 case OP_JP:
4020 case OP_JNP:
4021 case OP_JL:
4022 case OP_JNL:
4023 case OP_JLE:
4024 case OP_JNLE:
4025 case OP_JMP:
4026 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
4027 Assert(pCpu->Param1.fUse & DISUSE_IMMEDIATE32_REL);
4028 if (!(pCpu->Param1.fUse & DISUSE_IMMEDIATE32_REL))
4029 goto failure;
4030
4031 Assert(pCpu->cbInstr == SIZEOF_NEARJUMP32 || pCpu->cbInstr == SIZEOF_NEAR_COND_JUMP32);
4032 if (pCpu->cbInstr != SIZEOF_NEARJUMP32 && pCpu->cbInstr != SIZEOF_NEAR_COND_JUMP32)
4033 goto failure;
4034
4035 if (PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + pCpu->cbInstr))
4036 {
4037 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
4038 AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
4039 rc = VERR_PATCHING_REFUSED;
4040 goto failure;
4041 }
4042
4043 break;
4044
4045 default:
4046 goto failure;
4047 }
4048
4049 // make a copy of the guest code bytes that will be overwritten
4050 Assert(pCpu->cbInstr <= sizeof(pPatch->aPrivInstr));
4051 Assert(pCpu->cbInstr >= SIZEOF_NEARJUMP32);
4052 pPatch->cbPatchJump = pCpu->cbInstr;
4053
4054 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
4055 AssertRC(rc);
4056
4057 /* Now insert a jump in the guest code. */
4058 /*
4059 * A conflict jump patch needs to be treated differently; we'll just replace the relative jump address with one that
4060 * references the target instruction in the conflict patch.
4061 */
4062 RTRCPTR pJmpDest = patmR3GuestGCPtrToPatchGCPtrSimple(pVM, pInstrGC + pCpu->cbInstr + (int32_t)pCpu->Param1.uValue);
4063
4064 AssertMsg(pJmpDest, ("patmR3GuestGCPtrToPatchGCPtrSimple failed for %RRv\n", pInstrGC + pCpu->cbInstr + (int32_t)pCpu->Param1.uValue));
4065 pPatch->pPatchJumpDestGC = pJmpDest;
4066
4067 PATMP2GLOOKUPREC cacheRec;
4068 RT_ZERO(cacheRec);
4069 cacheRec.pPatch = pPatch;
4070
4071 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
4072 /* Free leftover lock if any. */
4073 if (cacheRec.Lock.pvMap)
4074 {
4075 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4076 cacheRec.Lock.pvMap = NULL;
4077 }
4078 AssertRC(rc);
4079 if (RT_FAILURE(rc))
4080 goto failure;
4081
4082 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
4083
4084 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
4085 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
4086
4087 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledJump);
4088
4089 /* Lowest and highest address for write monitoring. */
4090 pPatch->pInstrGCLowest = pInstrGC;
4091 pPatch->pInstrGCHighest = pInstrGC + pPatch->cbPatchJump;
4092
4093 pPatch->uState = PATCH_ENABLED;
4094 return VINF_SUCCESS;
4095
4096failure:
4097 /* Turn this cli patch into a dummy. */
4098 pPatch->uState = PATCH_REFUSED;
4099
4100 return rc;
4101}
4102#endif /* PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES */
4103
4104
4105/**
4106 * Gives hint to PATM about supervisor guest instructions
4107 *
4108 * @returns VBox status code.
4109 * @param pVM Pointer to the VM.
4110 * @param pInstrGC Guest context pointer to privileged instruction
4111 * @param flags Patch flags
4112 */
4113VMMR3_INT_DECL(int) PATMR3AddHint(PVM pVM, RTRCPTR pInstrGC, uint32_t flags)
4114{
4115 Assert(pInstrGC);
4116 Assert(flags == PATMFL_CODE32);
4117
4118 Log(("PATMR3AddHint %RRv\n", pInstrGC));
4119 return PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_INSTR_HINT);
4120}
4121
4122/**
4123 * Patch privileged instruction at specified location
4124 *
4125 * @returns VBox status code.
4126 * @param pVM Pointer to the VM.
4127 * @param pInstrGC Guest context pointer to privileged instruction (0:32 flat address)
4128 * @param flags Patch flags
4129 *
4130 * @note returns failure if patching is not allowed or possible
4131 */
4132VMMR3_INT_DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
4133{
4134 DISCPUSTATE cpu;
4135 R3PTRTYPE(uint8_t *) pInstrHC;
4136 uint32_t cbInstr;
4137 PPATMPATCHREC pPatchRec;
4138 PCPUMCTX pCtx = 0;
4139 bool disret;
4140 int rc;
4141 PVMCPU pVCpu = VMMGetCpu0(pVM);
4142 LogFlow(("PATMR3InstallPatch: %08x (%#llx)\n", pInstrGC, flags));
4143
4144 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
4145
4146 if ( !pVM
4147 || pInstrGC == 0
4148 || (flags & ~(PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_SYSENTER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_REPLACE_FUNCTION_CALL|PATMFL_GUEST_SPECIFIC|PATMFL_INT3_REPLACEMENT|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_MMIO_ACCESS|PATMFL_TRAMPOLINE|PATMFL_INSTR_HINT|PATMFL_JUMP_CONFLICT)))
4149 {
4150 AssertFailed();
4151 return VERR_INVALID_PARAMETER;
4152 }
4153
4154 if (PATMIsEnabled(pVM) == false)
4155 return VERR_PATCHING_REFUSED;
4156
4157 /* Test for patch conflict only with patches that actually change guest code. */
4158 if (!(flags & (PATMFL_GUEST_SPECIFIC|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAMPOLINE)))
4159 {
4160 PPATCHINFO pConflictPatch = patmFindActivePatchByEntrypoint(pVM, pInstrGC);
4161 AssertReleaseMsg(pConflictPatch == 0, ("Unable to patch overwritten instruction at %RRv (%RRv)\n", pInstrGC, pConflictPatch->pPrivInstrGC));
4162 if (pConflictPatch != 0)
4163 return VERR_PATCHING_REFUSED;
4164 }
4165
4166 if (!(flags & PATMFL_CODE32))
4167 {
4168 /** @todo Only 32-bit code right now */
4169 AssertMsgFailed(("PATMR3InstallPatch: We don't support 16-bit code at this moment!!\n"));
4170 return VERR_NOT_IMPLEMENTED;
4171 }
4172
4173 /* We ran out of patch memory; don't bother anymore. */
4174 if (pVM->patm.s.fOutOfMemory == true)
4175 return VERR_PATCHING_REFUSED;
4176
4177#if 1 /* DONT COMMIT ENABLED! */
4178 /* Blacklisted NT4SP1 areas - debugging why we sometimes crash early on. */
4179 if ( 0
4180 //|| (pInstrGC - 0x80010000U) < 0x10000U // NT4SP1 HAL
4181 //|| (pInstrGC - 0x80010000U) < 0x5000U // NT4SP1 HAL
4182 //|| (pInstrGC - 0x80013000U) < 0x2000U // NT4SP1 HAL
4183 //|| (pInstrGC - 0x80014000U) < 0x1000U // NT4SP1 HAL
4184 //|| (pInstrGC - 0x80014000U) < 0x800U // NT4SP1 HAL
4185 //|| (pInstrGC - 0x80014400U) < 0x400U // NT4SP1 HAL
4186 //|| (pInstrGC - 0x80014400U) < 0x200U // NT4SP1 HAL
4187 //|| (pInstrGC - 0x80014400U) < 0x100U // NT4SP1 HAL
4188 //|| (pInstrGC - 0x80014500U) < 0x100U // NT4SP1 HAL - negative
4189 //|| (pInstrGC - 0x80014400U) < 0x80U // NT4SP1 HAL
4190 //|| (pInstrGC - 0x80014400U) < 0x80U // NT4SP1 HAL
4191 //|| (pInstrGC - 0x80014440U) < 0x40U // NT4SP1 HAL
4192 //|| (pInstrGC - 0x80014440U) < 0x20U // NT4SP1 HAL
4193 || pInstrGC == 0x80014447 /* KfLowerIrql */
4194 || 0)
4195 {
4196 Log(("PATMR3InstallPatch: %08x is blacklisted\n", pInstrGC));
4197 return VERR_PATCHING_REFUSED;
4198 }
4199#endif
4200
4201 /* Make sure the code selector is wide open; otherwise refuse. */
4202 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
4203 if (CPUMGetGuestCPL(pVCpu) == 0)
4204 {
4205 RTRCPTR pInstrGCFlat = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pInstrGC);
4206 if (pInstrGCFlat != pInstrGC)
4207 {
4208 Log(("PATMR3InstallPatch: code selector not wide open: %04x:%RRv != %RRv eflags=%08x\n", pCtx->cs.Sel, pInstrGCFlat, pInstrGC, pCtx->eflags.u32));
4209 return VERR_PATCHING_REFUSED;
4210 }
4211 }
4212
4213 /* Note: the OpenBSD-specific check will break if we allow additional patches to be installed (int 3). */
4214 if (!(flags & PATMFL_GUEST_SPECIFIC))
4215 {
4216 /* New code. Make sure CSAM has a go at it first. */
4217 CSAMR3CheckCode(pVM, pInstrGC);
4218 }
4219
4220 /* Note: obsolete */
4221 if ( PATMIsPatchGCAddr(pVM, pInstrGC)
4222 && (flags & PATMFL_MMIO_ACCESS))
4223 {
4224 RTRCUINTPTR offset;
4225 void *pvPatchCoreOffset;
4226
4227 /* Find the patch record. */
4228 offset = pInstrGC - pVM->patm.s.pPatchMemGC;
4229 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
4230 if (pvPatchCoreOffset == NULL)
4231 {
4232 AssertMsgFailed(("PATMR3InstallPatch: patch not found at address %RRv!!\n", pInstrGC));
4233 return VERR_PATCH_NOT_FOUND; //fatal error
4234 }
4235 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
4236
4237 return patmPatchPATMMMIOInstr(pVM, pInstrGC, &pPatchRec->patch);
4238 }
4239
4240 AssertReturn(!PATMIsPatchGCAddr(pVM, pInstrGC), VERR_PATCHING_REFUSED);
4241
4242 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4243 if (pPatchRec)
4244 {
4245 Assert(!(flags & PATMFL_TRAMPOLINE));
4246
4247 /* Hints about existing patches are ignored. */
4248 if (flags & PATMFL_INSTR_HINT)
4249 return VERR_PATCHING_REFUSED;
4250
4251 if (pPatchRec->patch.uState == PATCH_DISABLE_PENDING)
4252 {
4253 Log(("PATMR3InstallPatch: disable operation is pending for patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
4254 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
4255 Assert(pPatchRec->patch.uState == PATCH_DISABLED);
4256 }
4257
4258 if (pPatchRec->patch.uState == PATCH_DISABLED)
4259 {
4260 /* A patch, for which we previously received a hint, will be enabled and turned into a normal patch. */
4261 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4262 {
4263 Log(("Enabling HINTED patch %RRv\n", pInstrGC));
4264 pPatchRec->patch.flags &= ~PATMFL_INSTR_HINT;
4265 }
4266 else
4267 Log(("Enabling patch %RRv again\n", pInstrGC));
4268
4269 /** @todo we shouldn't disable and enable patches too often (it's relatively cheap, but pointless if it always happens) */
4270 rc = PATMR3EnablePatch(pVM, pInstrGC);
4271 if (RT_SUCCESS(rc))
4272 return VWRN_PATCH_ENABLED;
4273
4274 return rc;
4275 }
4276 if ( pPatchRec->patch.uState == PATCH_ENABLED
4277 || pPatchRec->patch.uState == PATCH_DIRTY)
4278 {
4279 /*
4280 * The patch might have been overwritten.
4281 */
4282 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
4283 if (pPatchRec->patch.uState != PATCH_REFUSED && pPatchRec->patch.uState != PATCH_UNUSABLE)
4284 {
4285 /* Patch must have been overwritten; remove it and pretend nothing happened. */
4286 Log(("Patch an existing patched instruction?!? (%RRv)\n", pInstrGC));
4287 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_MMIO_ACCESS|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
4288 {
4289 if (flags & PATMFL_IDTHANDLER)
4290 pPatchRec->patch.flags |= (flags & (PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER|PATMFL_INTHANDLER)); /* update the type */
4291
4292 return VERR_PATM_ALREADY_PATCHED; /* already done once */
4293 }
4294 }
4295 rc = PATMR3RemovePatch(pVM, pInstrGC);
4296 if (RT_FAILURE(rc))
4297 return VERR_PATCHING_REFUSED;
4298 }
4299 else
4300 {
4301 AssertMsg(pPatchRec->patch.uState == PATCH_REFUSED || pPatchRec->patch.uState == PATCH_UNUSABLE, ("Patch an existing patched instruction?!? (%RRv, state=%d)\n", pInstrGC, pPatchRec->patch.uState));
4302 /* already tried it once! */
4303 return VERR_PATCHING_REFUSED;
4304 }
4305 }
4306
4307 RTGCPHYS GCPhys;
4308 rc = PGMGstGetPage(pVCpu, pInstrGC, NULL, &GCPhys);
4309 if (rc != VINF_SUCCESS)
4310 {
4311 Log(("PGMGstGetPage failed with %Rrc\n", rc));
4312 return rc;
4313 }
4314 /* Disallow patching instructions inside ROM code; complete function duplication is allowed though. */
4315 if ( !(flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAMPOLINE))
4316 && !PGMPhysIsGCPhysNormal(pVM, GCPhys))
4317 {
4318 Log(("Code at %RGv (phys %RGp) is in a ROM, MMIO or invalid page - refused\n", pInstrGC, GCPhys));
4319 return VERR_PATCHING_REFUSED;
4320 }
4321
4322 /* Initialize cache record for guest address translations. */
4323 bool fInserted;
4324 PATMP2GLOOKUPREC cacheRec;
4325 RT_ZERO(cacheRec);
4326
4327 pInstrHC = patmR3GCVirtToHCVirt(pVM, &cacheRec, pInstrGC);
4328 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
4329
4330 /* Allocate patch record. */
4331 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
4332 if (RT_FAILURE(rc))
4333 {
4334 Log(("Out of memory!!!!\n"));
4335 return VERR_NO_MEMORY;
4336 }
4337 pPatchRec->Core.Key = pInstrGC;
4338 pPatchRec->patch.uState = PATCH_REFUSED; /* default value */
4339 /* Insert patch record into the lookup tree. */
4340 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
4341 Assert(fInserted);
4342
4343 pPatchRec->patch.pPrivInstrGC = pInstrGC;
4344 pPatchRec->patch.flags = flags;
4345 pPatchRec->patch.uOpMode = (flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT;
4346 pPatchRec->patch.pTrampolinePatchesHead = NULL;
4347
4348 pPatchRec->patch.pInstrGCLowest = pInstrGC;
4349 pPatchRec->patch.pInstrGCHighest = pInstrGC;
4350
4351 if (!(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION | PATMFL_IDTHANDLER | PATMFL_SYSENTER | PATMFL_TRAMPOLINE)))
4352 {
4353 /*
4354 * Close proximity to an unusable patch is a possible hint that this patch would turn out to be dangerous too!
4355 */
4356 PPATMPATCHREC pPatchNear = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, (pInstrGC + SIZEOF_NEARJUMP32 - 1), false);
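        /* The best-fit lookup returns the record with the highest key <= pInstrGC + 4; the
         * check below then catches an unusable patch whose entry point lies within the
         * 5 bytes a near jump installed at pInstrGC would overwrite. */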
4357 if (pPatchNear)
4358 {
4359 if (pPatchNear->patch.uState == PATCH_UNUSABLE && pInstrGC < pPatchNear->patch.pPrivInstrGC && pInstrGC + SIZEOF_NEARJUMP32 > pPatchNear->patch.pPrivInstrGC)
4360 {
4361 Log(("Dangerous patch; would overwrite the unusable patch at %RRv\n", pPatchNear->patch.pPrivInstrGC));
4362
4363 pPatchRec->patch.uState = PATCH_UNUSABLE;
4364 /*
4365 * Leave the new patch record in place, marked unusable, to prevent us from checking it over and over again
4366 */
4367 return VERR_PATCHING_REFUSED;
4368 }
4369 }
4370 }
4371
4372 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
4373 if (pPatchRec->patch.pTempInfo == 0)
4374 {
4375 Log(("Out of memory!!!!\n"));
4376 return VERR_NO_MEMORY;
4377 }
4378
4379 disret = patmR3DisInstrNoStrOpMode(pVM, &pPatchRec->patch, pInstrGC, NULL, PATMREAD_ORGCODE, &cpu, &cbInstr);
4380 if (disret == false)
4381 {
4382 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
4383 return VERR_PATCHING_REFUSED;
4384 }
4385
4386 AssertMsg(cbInstr <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", cbInstr));
4387 if (cbInstr > MAX_INSTR_SIZE)
4388 return VERR_PATCHING_REFUSED;
4389
4390 pPatchRec->patch.cbPrivInstr = cbInstr;
4391 pPatchRec->patch.opcode = cpu.pCurInstr->uOpcode;
4392
4393 /* Restricted hinting for now. */
4394 Assert(!(flags & PATMFL_INSTR_HINT) || cpu.pCurInstr->uOpcode == OP_CLI);
4395
4396 /* Initialize cache record patch pointer. */
4397 cacheRec.pPatch = &pPatchRec->patch;
4398
4399 /* Allocate statistics slot */
4400 if (pVM->patm.s.uCurrentPatchIdx < PATM_STAT_MAX_COUNTERS)
4401 {
4402 pPatchRec->patch.uPatchIdx = pVM->patm.s.uCurrentPatchIdx++;
4403 }
4404 else
4405 {
4406 Log(("WARNING: Patch index wrap around!!\n"));
4407 pPatchRec->patch.uPatchIdx = PATM_STAT_INDEX_DUMMY;
4408 }
4409
4410 if (pPatchRec->patch.flags & PATMFL_TRAPHANDLER)
4411 {
4412 rc = patmInstallTrapTrampoline(pVM, pInstrGC, pPatchRec, &cacheRec);
4413 }
4414 else
4415 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION ))
4416 {
4417 rc = patmDuplicateFunction(pVM, pInstrGC, pPatchRec, &cacheRec);
4418 }
4419 else
4420 if (pPatchRec->patch.flags & PATMFL_TRAMPOLINE)
4421 {
4422 rc = patmCreateTrampoline(pVM, pInstrGC, pPatchRec);
4423 }
4424 else
4425 if (pPatchRec->patch.flags & PATMFL_REPLACE_FUNCTION_CALL)
4426 {
4427 rc = patmReplaceFunctionCall(pVM, &cpu, pInstrGC, &cacheRec);
4428 }
4429 else
4430 if (pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT)
4431 {
4432 rc = patmR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4433 }
4434 else
4435 if (pPatchRec->patch.flags & PATMFL_MMIO_ACCESS)
4436 {
4437 rc = patmPatchMMIOInstr(pVM, pInstrGC, &cpu, &cacheRec);
4438 }
4439 else
4440 if (pPatchRec->patch.flags & (PATMFL_IDTHANDLER|PATMFL_SYSENTER))
4441 {
4442 if (pPatchRec->patch.flags & PATMFL_SYSENTER)
4443 pPatchRec->patch.flags |= PATMFL_IDTHANDLER; /* we treat a sysenter handler as an IDT handler */
4444
4445 rc = patmIdtHandler(pVM, pInstrGC, cbInstr, pPatchRec, &cacheRec);
4446#ifdef VBOX_WITH_STATISTICS
4447 if ( rc == VINF_SUCCESS
4448 && (pPatchRec->patch.flags & PATMFL_SYSENTER))
4449 {
4450 pVM->patm.s.uSysEnterPatchIdx = pPatchRec->patch.uPatchIdx;
4451 }
4452#endif
4453 }
4454 else
4455 if (pPatchRec->patch.flags & PATMFL_GUEST_SPECIFIC)
4456 {
4457 switch (cpu.pCurInstr->uOpcode)
4458 {
4459 case OP_SYSENTER:
4460 case OP_PUSH:
4461 rc = patmR3InstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4462 if (rc == VINF_SUCCESS)
4463 {
4465 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4466 return rc;
4467 }
4468 break;
4469
4470 default:
4471 rc = VERR_NOT_IMPLEMENTED;
4472 break;
4473 }
4474 }
4475 else
4476 {
4477 switch (cpu.pCurInstr->uOpcode)
4478 {
4479 case OP_SYSENTER:
4480 rc = patmR3InstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4481 if (rc == VINF_SUCCESS)
4482 {
4483 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4484 return VINF_SUCCESS;
4485 }
4486 break;
4487
4488#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
4489 case OP_JO:
4490 case OP_JNO:
4491 case OP_JC:
4492 case OP_JNC:
4493 case OP_JE:
4494 case OP_JNE:
4495 case OP_JBE:
4496 case OP_JNBE:
4497 case OP_JS:
4498 case OP_JNS:
4499 case OP_JP:
4500 case OP_JNP:
4501 case OP_JL:
4502 case OP_JNL:
4503 case OP_JLE:
4504 case OP_JNLE:
4505 case OP_JECXZ:
4506 case OP_LOOP:
4507 case OP_LOOPNE:
4508 case OP_LOOPE:
4509 case OP_JMP:
4510 if (pPatchRec->patch.flags & PATMFL_JUMP_CONFLICT)
4511 {
4512 rc = patmPatchJump(pVM, pInstrGC, pInstrHC, &cpu, pPatchRec);
4513 break;
4514 }
4515 return VERR_NOT_IMPLEMENTED;
4516#endif
4517
4518 case OP_PUSHF:
4519 case OP_CLI:
4520 Log(("PATMR3InstallPatch %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4521 rc = patmR3PatchBlock(pVM, pInstrGC, pInstrHC, cpu.pCurInstr->uOpcode, cbInstr, pPatchRec);
4522 break;
4523
4524#ifndef VBOX_WITH_SAFE_STR
4525 case OP_STR:
4526#endif
4527 case OP_SGDT:
4528 case OP_SLDT:
4529 case OP_SIDT:
4530 case OP_CPUID:
4531 case OP_LSL:
4532 case OP_LAR:
4533 case OP_SMSW:
4534 case OP_VERW:
4535 case OP_VERR:
4536 case OP_IRET:
4537#ifdef VBOX_WITH_RAW_RING1
4538 case OP_MOV:
4539#endif
4540 rc = patmR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4541 break;
4542
4543 default:
4544 return VERR_NOT_IMPLEMENTED;
4545 }
4546 }
4547
4548 if (rc != VINF_SUCCESS)
4549 {
4550 if (pPatchRec && pPatchRec->patch.nrPatch2GuestRecs)
4551 {
4552 patmEmptyTreeU32(pVM, &pPatchRec->patch.Patch2GuestAddrTree);
4553 pPatchRec->patch.nrPatch2GuestRecs = 0;
4554 }
4555 pVM->patm.s.uCurrentPatchIdx--;
4556 }
4557 else
4558 {
4559 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
4560 AssertRCReturn(rc, rc);
4561
4562 /* Keep track of the upper and lower boundaries of patched instructions */
4563 if (pPatchRec->patch.pInstrGCLowest < pVM->patm.s.pPatchedInstrGCLowest)
4564 pVM->patm.s.pPatchedInstrGCLowest = pPatchRec->patch.pInstrGCLowest;
4565 if (pPatchRec->patch.pInstrGCHighest > pVM->patm.s.pPatchedInstrGCHighest)
4566 pVM->patm.s.pPatchedInstrGCHighest = pPatchRec->patch.pInstrGCHighest;
4567
4568 Log(("Patch lowest %RRv highest %RRv\n", pPatchRec->patch.pInstrGCLowest, pPatchRec->patch.pInstrGCHighest));
4569 Log(("Global lowest %RRv highest %RRv\n", pVM->patm.s.pPatchedInstrGCLowest, pVM->patm.s.pPatchedInstrGCHighest));
4570
4571 STAM_COUNTER_ADD(&pVM->patm.s.StatInstalled, 1);
4572 STAM_COUNTER_ADD(&pVM->patm.s.StatPATMMemoryUsed, pPatchRec->patch.cbPatchBlockSize);
4573
4574 rc = VINF_SUCCESS;
4575
4576 /* Patch hints are not enabled by default. Only when they are actually encountered. */
4577 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4578 {
4579 rc = PATMR3DisablePatch(pVM, pInstrGC);
4580 AssertRCReturn(rc, rc);
4581 }
4582
4583#ifdef VBOX_WITH_STATISTICS
4584 /* Register statistics counter */
4585 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
4586 {
4587 STAMR3RegisterCallback(pVM, &pPatchRec->patch, STAMVISIBILITY_NOT_GUI, STAMUNIT_GOOD_BAD, patmResetStat, patmPrintStat, "Patch statistics",
4588 "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4589#ifndef DEBUG_sandervl
4590 /* Full breakdown for the GUI. */
4591 STAMR3RegisterF(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx], STAMTYPE_RATIO_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_GOOD_BAD, PATMPatchType(pVM, &pPatchRec->patch),
4592 "/PATM/PatchBD/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4593 STAMR3RegisterF(pVM, &pPatchRec->patch.pPatchBlockOffset,STAMTYPE_X32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/PatchBD/0x%RRv/offPatchBlock", pPatchRec->patch.pPrivInstrGC);
4594 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchBlockSize,STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/PatchBD/0x%RRv/cbPatchBlockSize", pPatchRec->patch.pPrivInstrGC);
4595 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchJump, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/PatchBD/0x%RRv/cbPatchJump", pPatchRec->patch.pPrivInstrGC);
4596 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPrivInstr, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/PatchBD/0x%RRv/cbPrivInstr", pPatchRec->patch.pPrivInstrGC);
4597 STAMR3RegisterF(pVM, &pPatchRec->patch.cCodeWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/cCodeWrites", pPatchRec->patch.pPrivInstrGC);
4598 STAMR3RegisterF(pVM, &pPatchRec->patch.cInvalidWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/cInvalidWrites", pPatchRec->patch.pPrivInstrGC);
4599 STAMR3RegisterF(pVM, &pPatchRec->patch.cTraps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/cTraps", pPatchRec->patch.pPrivInstrGC);
4600 STAMR3RegisterF(pVM, &pPatchRec->patch.flags, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/PatchBD/0x%RRv/flags", pPatchRec->patch.pPrivInstrGC);
4601 STAMR3RegisterF(pVM, &pPatchRec->patch.nrJumpRecs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/nrJumpRecs", pPatchRec->patch.pPrivInstrGC);
4602 STAMR3RegisterF(pVM, &pPatchRec->patch.nrFixups, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/nrFixups", pPatchRec->patch.pPrivInstrGC);
4603 STAMR3RegisterF(pVM, &pPatchRec->patch.opcode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/opcode", pPatchRec->patch.pPrivInstrGC);
4604 STAMR3RegisterF(pVM, &pPatchRec->patch.uOldState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/PatchBD/0x%RRv/uOldState", pPatchRec->patch.pPrivInstrGC);
4605 STAMR3RegisterF(pVM, &pPatchRec->patch.uOpMode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/PatchBD/0x%RRv/uOpMode", pPatchRec->patch.pPrivInstrGC);
4606 /// @todo change the state to be a callback so we can get a state mnemonic instead.
4607 STAMR3RegisterF(pVM, &pPatchRec->patch.uState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/PatchBD/0x%RRv/uState", pPatchRec->patch.pPrivInstrGC);
4608#endif
4609 }
4610#endif
4611
4612 /* Add debug symbol. */
4613 patmR3DbgAddPatch(pVM, pPatchRec);
4614 }
4615 /* Free leftover lock if any. */
4616 if (cacheRec.Lock.pvMap)
4617 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4618 return rc;
4619}
4620
4621/**
4622 * Query instruction size
4623 *
4624 * @returns The instruction size in bytes, or 0 if the instruction could not be disassembled.
4625 * @param pVM Pointer to the VM.
4626 * @param pPatch Patch record
4627 * @param pInstrGC Instruction address
4628 */
4629static uint32_t patmGetInstrSize(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
4630{
4631 uint8_t *pInstrHC;
4632 PGMPAGEMAPLOCK Lock;
4633
4634 int rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pInstrGC, (const void **)&pInstrHC, &Lock);
4635 if (rc == VINF_SUCCESS)
4636 {
4637 DISCPUSTATE cpu;
4638 bool disret;
4639 uint32_t cbInstr;
4640
4641 disret = patmR3DisInstr(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE | PATMREAD_NOCHECK, &cpu, &cbInstr);
4642 PGMPhysReleasePageMappingLock(pVM, &Lock);
4643 if (disret)
4644 return cbInstr;
4645 }
4646 return 0;
4647}
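
/*
 * Illustrative sketch (not compiled, hence the #if 0): using patmGetInstrSize to
 * check whether an instruction crosses into the next page, which is the same kind
 * of check the page record code below performs.  The helper name is invented for
 * illustration only.
 */
#if 0
static bool examplePatmInstrCrossesPage(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
{
    uint32_t const cbInstr = patmGetInstrSize(pVM, pPatch, pInstrGC);
    if (!cbInstr)
        return false; /* Could not disassemble; treat as not crossing. */
    return ((RTRCUINTPTR)pInstrGC & PAGE_BASE_GC_MASK)
        != (((RTRCUINTPTR)pInstrGC + cbInstr - 1) & PAGE_BASE_GC_MASK);
}
#endif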
4648
4649/**
4650 * Add patch to page record
4651 *
4652 * @returns VBox status code.
4653 * @param pVM Pointer to the VM.
4654 * @param pPage Page address
4655 * @param pPatch Patch record
4656 */
4657int patmAddPatchToPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4658{
4659 PPATMPATCHPAGE pPatchPage;
4660 int rc;
4661
4662 Log(("patmAddPatchToPage: insert patch %RHv to page %RRv\n", pPatch, pPage));
4663
4664 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4665 if (pPatchPage)
4666 {
4667 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4668 if (pPatchPage->cCount == pPatchPage->cMaxPatches)
4669 {
4670 uint32_t cMaxPatchesOld = pPatchPage->cMaxPatches;
4671 PPATCHINFO *papPatchOld = pPatchPage->papPatch;
4672
4673 pPatchPage->cMaxPatches += PATMPATCHPAGE_PREALLOC_INCREMENT;
4674 rc = MMHyperAlloc(pVM, sizeof(pPatchPage->papPatch[0]) * pPatchPage->cMaxPatches, 0, MM_TAG_PATM_PATCH,
4675 (void **)&pPatchPage->papPatch);
4676 if (RT_FAILURE(rc))
4677 {
4678 Log(("Out of memory!!!!\n"));
4679 return VERR_NO_MEMORY;
4680 }
4681 memcpy(pPatchPage->papPatch, papPatchOld, cMaxPatchesOld * sizeof(pPatchPage->papPatch[0]));
4682 MMHyperFree(pVM, papPatchOld);
4683 }
4684 pPatchPage->papPatch[pPatchPage->cCount] = pPatch;
4685 pPatchPage->cCount++;
4686 }
4687 else
4688 {
4689 bool fInserted;
4690
4691 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHPAGE), 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage);
4692 if (RT_FAILURE(rc))
4693 {
4694 Log(("Out of memory!!!!\n"));
4695 return VERR_NO_MEMORY;
4696 }
4697 pPatchPage->Core.Key = pPage;
4698 pPatchPage->cCount = 1;
4699 pPatchPage->cMaxPatches = PATMPATCHPAGE_PREALLOC_INCREMENT;
4700
4701 rc = MMHyperAlloc(pVM, sizeof(pPatchPage->papPatch[0]) * PATMPATCHPAGE_PREALLOC_INCREMENT, 0, MM_TAG_PATM_PATCH,
4702 (void **)&pPatchPage->papPatch);
4703 if (RT_FAILURE(rc))
4704 {
4705 Log(("Out of memory!!!!\n"));
4706 MMHyperFree(pVM, pPatchPage);
4707 return VERR_NO_MEMORY;
4708 }
4709 pPatchPage->papPatch[0] = pPatch;
4710
4711 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, &pPatchPage->Core);
4712 Assert(fInserted);
4713 pVM->patm.s.cPageRecords++;
4714
4715 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageInserted);
4716 }
4717 CSAMR3MonitorPage(pVM, pPage, CSAM_TAG_PATM);
4718
4719 /* Get the closest guest instruction (from below) */
4720 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4721 Assert(pGuestToPatchRec);
4722 if (pGuestToPatchRec)
4723 {
4724 LogFlow(("patmAddPatchToPage: lowest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pLowestAddrGC));
4725 if ( pPatchPage->pLowestAddrGC == 0
4726 || pPatchPage->pLowestAddrGC > (RTRCPTR)pGuestToPatchRec->Core.Key)
4727 {
4728 RTRCUINTPTR offset;
4729
4730 pPatchPage->pLowestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4731
4732 offset = pPatchPage->pLowestAddrGC & PAGE_OFFSET_MASK;
4733 /* If we're too close to the page boundary, then make sure an
4734 instruction from the previous page doesn't cross the
4735 boundary itself. */
4736 if (offset && offset < MAX_INSTR_SIZE)
4737 {
4738 /* Get the closest guest instruction (from above) */
4739 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage-1, false);
4740
4741 if (pGuestToPatchRec)
4742 {
4743 uint32_t size = patmGetInstrSize(pVM, pPatch, (RTRCPTR)pGuestToPatchRec->Core.Key);
4744 if ((RTRCUINTPTR)pGuestToPatchRec->Core.Key + size > pPage)
4745 {
4746 pPatchPage->pLowestAddrGC = pPage;
4747 LogFlow(("patmAddPatchToPage: new lowest %RRv\n", pPatchPage->pLowestAddrGC));
4748 }
4749 }
4750 }
4751 }
4752 }
4753
4754 /* Get the closest guest instruction (from above) */
4755 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage+PAGE_SIZE-1, false);
4756 Assert(pGuestToPatchRec);
4757 if (pGuestToPatchRec)
4758 {
4759 LogFlow(("patmAddPatchToPage: highest patch page address %RRv current highest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pHighestAddrGC));
4760 if ( pPatchPage->pHighestAddrGC == 0
4761 || pPatchPage->pHighestAddrGC <= (RTRCPTR)pGuestToPatchRec->Core.Key)
4762 {
4763 pPatchPage->pHighestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4764 /* Increase by instruction size. */
4765 uint32_t size = patmGetInstrSize(pVM, pPatch, pPatchPage->pHighestAddrGC);
4766//// Assert(size);
4767 pPatchPage->pHighestAddrGC += size;
4768 LogFlow(("patmAddPatchToPage: new highest %RRv\n", pPatchPage->pHighestAddrGC));
4769 }
4770 }
4771
4772 return VINF_SUCCESS;
4773}
4774
4775/**
4776 * Remove patch from page record
4777 *
4778 * @returns VBox status code.
4779 * @param pVM Pointer to the VM.
4780 * @param pPage Page address
4781 * @param pPatch Patch record
4782 */
4783int patmRemovePatchFromPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4784{
4785 PPATMPATCHPAGE pPatchPage;
4786 int rc;
4787
4788 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4789 Assert(pPatchPage);
4790
4791 if (!pPatchPage)
4792 return VERR_INVALID_PARAMETER;
4793
4794 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4795
4796 Log(("patmRemovePatchFromPage: remove patch %RHv from page %RRv\n", pPatch, pPage));
4797 if (pPatchPage->cCount > 1)
4798 {
4799 uint32_t i;
4800
4801 /* Used by multiple patches */
4802 for (i = 0; i < pPatchPage->cCount; i++)
4803 {
4804 if (pPatchPage->papPatch[i] == pPatch)
4805 {
4806 /* close the gap between the remaining pointers. */
4807 uint32_t cNew = --pPatchPage->cCount;
4808 if (i < cNew)
4809 pPatchPage->papPatch[i] = pPatchPage->papPatch[cNew];
4810 pPatchPage->papPatch[cNew] = NULL;
4811 return VINF_SUCCESS;
4812 }
4813 }
4814 AssertMsgFailed(("Unable to find patch %RHv in page %RRv\n", pPatch, pPage));
4815 }
4816 else
4817 {
4818 PPATMPATCHPAGE pPatchNode;
4819
4820 Log(("patmRemovePatchFromPage %RRv\n", pPage));
4821
4822 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageRemoved);
4823 pPatchNode = (PPATMPATCHPAGE)RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4824 Assert(pPatchNode && pPatchNode == pPatchPage);
4825
4826 Assert(pPatchPage->papPatch);
4827 rc = MMHyperFree(pVM, pPatchPage->papPatch);
4828 AssertRC(rc);
4829 rc = MMHyperFree(pVM, pPatchPage);
4830 AssertRC(rc);
4831 pVM->patm.s.cPageRecords--;
4832 }
4833 return VINF_SUCCESS;
4834}
4835
4836/**
4837 * Insert page records for all guest pages that contain instructions that were recompiled for this patch
4838 *
4839 * @returns VBox status code.
4840 * @param pVM Pointer to the VM.
4841 * @param pPatch Patch record
4842 */
4843int patmInsertPatchPages(PVM pVM, PPATCHINFO pPatch)
4844{
4845 int rc;
4846 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4847
4848 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4849 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4850 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4851
4852 /** @todo optimize better (large gaps between current and next used page) */
4853 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4854 {
4855 /* Get the closest guest instruction (from above) */
4856 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4857 if ( pGuestToPatchRec
4858 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage)
4859 )
4860 {
4861 /* Code in page really patched -> add record */
4862 rc = patmAddPatchToPage(pVM, pPage, pPatch);
4863 AssertRC(rc);
4864 }
4865 }
4866 pPatch->flags |= PATMFL_CODE_MONITORED;
4867 return VINF_SUCCESS;
4868}
4869
4870/**
4871 * Remove page records for all guest pages that contain instructions that were recompiled for this patch
4872 *
4873 * @returns VBox status code.
4874 * @param pVM Pointer to the VM.
4875 * @param pPatch Patch record
4876 */
4877static int patmRemovePatchPages(PVM pVM, PPATCHINFO pPatch)
4878{
4879 int rc;
4880 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4881
4882 /* Remove the pages that contain patched instructions from the lookup tree used for detecting self-modifying code. */
4883 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4884 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4885
4886 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4887 {
4888 /* Get the closest guest instruction (from above) */
4889 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4890 if ( pGuestToPatchRec
4891 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage) /** @todo bird: PAGE_ADDRESS is for the current context really. check out these. */
4892 )
4893 {
4894 /* Code in page really patched -> remove record */
4895 rc = patmRemovePatchFromPage(pVM, pPage, pPatch);
4896 AssertRC(rc);
4897 }
4898 }
4899 pPatch->flags &= ~PATMFL_CODE_MONITORED;
4900 return VINF_SUCCESS;
4901}
4902
4903/**
4904 * Notifies PATM about a (potential) write to code that has been patched.
4905 *
4906 * @returns VBox status code.
4907 * @param pVM Pointer to the VM.
4908 * @param GCPtr GC pointer to write address
4909 * @param cbWrite Nr of bytes to write
4910 *
4911 */
4912VMMR3_INT_DECL(int) PATMR3PatchWrite(PVM pVM, RTRCPTR GCPtr, uint32_t cbWrite)
4913{
4914 RTRCUINTPTR pWritePageStart, pWritePageEnd, pPage;
4915
4916 Log(("PATMR3PatchWrite %RRv %x\n", GCPtr, cbWrite));
4917
4918 Assert(VM_IS_EMT(pVM));
4919 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
4920
4921 /* Quick boundary check */
4922 if ( GCPtr < pVM->patm.s.pPatchedInstrGCLowest
4923 || GCPtr > pVM->patm.s.pPatchedInstrGCHighest
4924 )
4925 return VINF_SUCCESS;
4926
4927 STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWrite, a);
4928
4929 pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
4930 pWritePageEnd = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;
4931
4932 for (pPage = pWritePageStart; pPage <= pWritePageEnd; pPage += PAGE_SIZE)
4933 {
4934loop_start:
4935 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4936 if (pPatchPage)
4937 {
4938 uint32_t i;
4939 bool fValidPatchWrite = false;
4940
4941 /* Quick check to see if the write is in the patched part of the page */
4942 if ( pPatchPage->pLowestAddrGC > (RTRCPTR)((RTRCUINTPTR)GCPtr + cbWrite - 1)
4943 || pPatchPage->pHighestAddrGC < GCPtr)
4944 {
4945 break;
4946 }
4947
4948 for (i=0;i<pPatchPage->cCount;i++)
4949 {
4950 if (pPatchPage->papPatch[i])
4951 {
4952 PPATCHINFO pPatch = pPatchPage->papPatch[i];
4953 RTRCPTR pPatchInstrGC;
4954 //unused: bool fForceBreak = false;
4955
4956 Assert(pPatchPage->papPatch[i]->flags & PATMFL_CODE_MONITORED);
4957 /** @todo inefficient and includes redundant checks for multiple pages. */
4958 for (uint32_t j=0; j<cbWrite; j++)
4959 {
4960 RTRCPTR pGuestPtrGC = (RTRCPTR)((RTRCUINTPTR)GCPtr + j);
4961
4962 if ( pPatch->cbPatchJump
4963 && pGuestPtrGC >= pPatch->pPrivInstrGC
4964 && pGuestPtrGC < pPatch->pPrivInstrGC + pPatch->cbPatchJump)
4965 {
4966 /* The guest is about to overwrite the 5-byte jump to patch code. Remove the patch. */
4967 Log(("PATMR3PatchWrite: overwriting jump to patch code -> remove patch.\n"));
4968 int rc = PATMR3RemovePatch(pVM, pPatch->pPrivInstrGC);
4969 if (rc == VINF_SUCCESS)
4970 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4971 goto loop_start;
4972
4973 continue;
4974 }
4975
4976 /* Find the closest instruction from below; the above quick check ensured that we are indeed in patched code */
4977 pPatchInstrGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4978 if (!pPatchInstrGC)
4979 {
4980 RTRCPTR pClosestInstrGC;
4981 uint32_t size;
4982
4983 pPatchInstrGC = patmGuestGCPtrToClosestPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4984 if (pPatchInstrGC)
4985 {
4986 pClosestInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchInstrGC);
4987 Assert(pClosestInstrGC <= pGuestPtrGC);
4988 size = patmGetInstrSize(pVM, pPatch, pClosestInstrGC);
4989 /* Check if this is not a write into a gap between two patches */
4990 if (pClosestInstrGC + size - 1 < pGuestPtrGC)
4991 pPatchInstrGC = 0;
4992 }
4993 }
4994 if (pPatchInstrGC)
4995 {
4996 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
4997
4998 fValidPatchWrite = true;
4999
5000 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
5001 Assert(pPatchToGuestRec);
5002 if (pPatchToGuestRec && !pPatchToGuestRec->fDirty)
5003 {
5004 Log(("PATMR3PatchWrite: Found patched instruction %RRv -> %RRv\n", pGuestPtrGC, pPatchInstrGC));
5005
5006 if (++pPatch->cCodeWrites > PATM_MAX_CODE_WRITES)
5007 {
5008 LogRel(("PATM: Disable block at %RRv - write %RRv-%RRv\n", pPatch->pPrivInstrGC, pGuestPtrGC, pGuestPtrGC+cbWrite));
5009
5010 patmR3MarkDirtyPatch(pVM, pPatch);
5011
5012 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
5013 goto loop_start;
5014 }
5015 else
5016 {
5017 /* Replace the patch instruction with a breakpoint; when it's hit, then we'll attempt to recompile the instruction again. */
5018 uint8_t *pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pPatchInstrGC);
5019
5020 pPatchToGuestRec->u8DirtyOpcode = *pInstrHC;
5021 pPatchToGuestRec->fDirty = true;
5022
5023 *pInstrHC = 0xCC;
5024
5025 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirty);
5026 }
5027 }
5028 /* else already marked dirty */
5029 }
5030 }
5031 }
5032 } /* for each patch */
5033
5034 if (fValidPatchWrite == false)
5035 {
5036 /* Write to a part of the page that either:
5037 * - doesn't contain any code (shared code/data); rather unlikely
5038 * - lies in an old code page that's no longer in active use.
5039 */
5040invalid_write_loop_start:
5041 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
5042
5043 if (pPatchPage)
5044 {
5045 for (i=0;i<pPatchPage->cCount;i++)
5046 {
5047 PPATCHINFO pPatch = pPatchPage->papPatch[i];
5048
5049 if (pPatch->cInvalidWrites > PATM_MAX_INVALID_WRITES)
5050 {
5051 /* Note: possibly dangerous assumption that all future writes will be harmless. */
5052 if (pPatch->flags & PATMFL_IDTHANDLER)
5053 {
5054 LogRel(("PATM: Stop monitoring IDT handler pages at %RRv - invalid write %RRv-%RRv (this is not a fatal error)\n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
5055
5056 Assert(pPatch->flags & PATMFL_CODE_MONITORED);
5057 int rc = patmRemovePatchPages(pVM, pPatch);
5058 AssertRC(rc);
5059 }
5060 else
5061 {
5062 LogRel(("PATM: Disable block at %RRv - invalid write %RRv-%RRv \n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
5063 patmR3MarkDirtyPatch(pVM, pPatch);
5064 }
5065 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
5066 goto invalid_write_loop_start;
5067 }
5068 } /* for */
5069 }
5070 }
5071 }
5072 }
5073 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWrite, a);
5074 return VINF_SUCCESS;
5075
5076}
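
/*
 * Illustrative sketch (not compiled, hence the #if 0): how a write notification
 * path might forward a guest write to PATMR3PatchWrite above.  The helper name is
 * an assumption; the real callers live in the access handler / CSAM code, not in
 * this file.
 */
#if 0
static int exampleNotifyPatmOfWrite(PVM pVM, RTRCPTR GCPtrWrite, uint32_t cbWrite)
{
    /* Let PATM check whether any monitored patch page is affected by the write. */
    return PATMR3PatchWrite(pVM, GCPtrWrite, cbWrite);
}
#endif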
5077
5078/**
5079 * Disable all patches in a flushed page
5080 *
5081 * @returns VBox status code
5082 * @param pVM Pointer to the VM.
5083 * @param addr GC address of the page to flush
5084 * @note Currently only called by CSAMR3FlushPage; optimization to avoid
5085 * having to double check if the physical address has changed
5086 */
5087VMMR3_INT_DECL(int) PATMR3FlushPage(PVM pVM, RTRCPTR addr)
5088{
5089 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5090
5091 addr &= PAGE_BASE_GC_MASK;
5092
5093 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, addr);
5094 if (pPatchPage)
5095 {
5096 int i;
5097
5098 /* From top to bottom as the array is modified by PATMR3MarkDirtyPatch. */
5099 for (i=(int)pPatchPage->cCount-1;i>=0;i--)
5100 {
5101 if (pPatchPage->papPatch[i])
5102 {
5103 PPATCHINFO pPatch = pPatchPage->papPatch[i];
5104
5105 Log(("PATMR3FlushPage %RRv remove patch at %RRv\n", addr, pPatch->pPrivInstrGC));
5106 patmR3MarkDirtyPatch(pVM, pPatch);
5107 }
5108 }
5109 STAM_COUNTER_INC(&pVM->patm.s.StatFlushed);
5110 }
5111 return VINF_SUCCESS;
5112}
5113
5114/**
5115 * Checks whether the instruction at the specified address has already been patched.
5116 *
5117 * @returns boolean, patched or not
5118 * @param pVM Pointer to the VM.
5119 * @param pInstrGC Guest context pointer to instruction
5120 */
5121VMMR3_INT_DECL(bool) PATMR3HasBeenPatched(PVM pVM, RTRCPTR pInstrGC)
5122{
5123 Assert(!HMIsEnabled(pVM));
5124 PPATMPATCHREC pPatchRec;
5125 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5126 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
5127 return true;
5128 return false;
5129}
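
/*
 * Illustrative sketch (not compiled, hence the #if 0): consulting PATM before
 * re-examining an instruction.  The helper name is made up; the real callers of
 * PATMR3HasBeenPatched are found in the CSAM/EM code.
 */
#if 0
static bool exampleIsSafeToRescan(PVM pVM, RTRCPTR GCPtrInstr)
{
    /* Skip addresses that already carry an enabled patch. */
    return !PATMR3HasBeenPatched(pVM, GCPtrInstr);
}
#endif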
5130
5131/**
5132 * Query the opcode of the original code that was overwritten by the 5-byte patch jump
5133 *
5134 * @returns VBox status code.
5135 * @param pVM Pointer to the VM.
5136 * @param pInstrGC GC address of instr
5137 * @param pByte opcode byte pointer (OUT)
5138 *
5139 */
5140VMMR3DECL(int) PATMR3QueryOpcode(PVM pVM, RTRCPTR pInstrGC, uint8_t *pByte)
5141{
5142 PPATMPATCHREC pPatchRec;
5143
5144 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
5145
5146 /* Shortcut. */
5147 if (!PATMIsEnabled(pVM))
5148 return VERR_PATCH_NOT_FOUND;
5149 Assert(!HMIsEnabled(pVM));
5150 if ( pInstrGC < pVM->patm.s.pPatchedInstrGCLowest
5151 || pInstrGC > pVM->patm.s.pPatchedInstrGCHighest)
5152 return VERR_PATCH_NOT_FOUND;
5153
5154 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5155 // if the patch is enabled and the pointer lies within 5 bytes of this priv instr ptr, then we've got a hit!
5156 if ( pPatchRec
5157 && pPatchRec->patch.uState == PATCH_ENABLED
5158 && pInstrGC >= pPatchRec->patch.pPrivInstrGC
5159 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5160 {
5161 RTRCPTR offset = pInstrGC - pPatchRec->patch.pPrivInstrGC;
5162 *pByte = pPatchRec->patch.aPrivInstr[offset];
5163
5164 if (pPatchRec->patch.cbPatchJump == 1)
5165 {
5166 Log(("PATMR3QueryOpcode: returning opcode %2X for instruction at %RRv\n", *pByte, pInstrGC));
5167 }
5168 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5169 return VINF_SUCCESS;
5170 }
5171 return VERR_PATCH_NOT_FOUND;
5172}
5173
5174/**
5175 * Read instruction bytes of the original code that was overwritten by the
5176 * 5-byte patch jump.
5177 *
5178 * @returns VINF_SUCCESS or VERR_PATCH_NOT_FOUND.
5179 * @param pVM Pointer to the VM.
5180 * @param GCPtrInstr GC address of instr
5181 * @param pbDst The output buffer.
5182 * @param cbToRead The maximum number bytes to read.
5183 * @param pcbRead Where to return the actual number of bytes read.
5184 */
5185VMMR3_INT_DECL(int) PATMR3ReadOrgInstr(PVM pVM, RTGCPTR32 GCPtrInstr, uint8_t *pbDst, size_t cbToRead, size_t *pcbRead)
5186{
5187 /* Shortcut. */
5188 if (!PATMIsEnabled(pVM))
5189 return VERR_PATCH_NOT_FOUND;
5190 Assert(!HMIsEnabled(pVM));
5191 if ( GCPtrInstr < pVM->patm.s.pPatchedInstrGCLowest
5192 || GCPtrInstr > pVM->patm.s.pPatchedInstrGCHighest)
5193 return VERR_PATCH_NOT_FOUND;
5194
5195 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
5196
5197 /*
5198 * If the patch is enabled and the pointer lies within 5 bytes of this
5199 * priv instr ptr, then we've got a hit!
5200 */
5201 RTGCPTR32 off;
5202 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree,
5203 GCPtrInstr, false /*fAbove*/);
5204 if ( pPatchRec
5205 && pPatchRec->patch.uState == PATCH_ENABLED
5206 && (off = GCPtrInstr - pPatchRec->patch.pPrivInstrGC) < pPatchRec->patch.cbPatchJump)
5207 {
5208 uint8_t const *pbSrc = &pPatchRec->patch.aPrivInstr[off];
5209 uint32_t const cbMax = pPatchRec->patch.cbPatchJump - off;
5210 if (cbToRead > cbMax)
5211 cbToRead = cbMax;
5212 switch (cbToRead)
5213 {
5214 case 5: pbDst[4] = pbSrc[4]; /* fall thru */
5215 case 4: pbDst[3] = pbSrc[3]; /* fall thru */
5216 case 3: pbDst[2] = pbSrc[2]; /* fall thru */
5217 case 2: pbDst[1] = pbSrc[1]; /* fall thru */
5218 case 1: pbDst[0] = pbSrc[0];
5219 break;
5220 default:
5221 memcpy(pbDst, pbSrc, cbToRead);
5222 }
5223 *pcbRead = cbToRead;
5224
5225 if (pPatchRec->patch.cbPatchJump == 1)
5226 Log(("PATMR3ReadOrgInstr: returning opcode %.*Rhxs for instruction at %RX32\n", cbToRead, pbSrc, GCPtrInstr));
5227 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5228 return VINF_SUCCESS;
5229 }
5230
5231 return VERR_PATCH_NOT_FOUND;
5232}
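
/*
 * Illustrative sketch (not compiled, hence the #if 0): reading guest code while
 * transparently substituting the original bytes hidden behind a patch jump.  The
 * helper name is invented; it merely combines PATMR3ReadOrgInstr above with a
 * plain guest read for the not-patched case.
 */
#if 0
static int exampleReadGuestCode(PVM pVM, RTGCPTR32 GCPtrInstr, uint8_t *pbDst, size_t cbToRead)
{
    size_t cbRead = 0;
    int rc = PATMR3ReadOrgInstr(pVM, GCPtrInstr, pbDst, cbToRead, &cbRead);
    if (rc == VINF_SUCCESS && cbRead == cbToRead)
        return VINF_SUCCESS;
    if (rc != VINF_SUCCESS)
        cbRead = 0; /* No patch jump covers this address; read everything directly. */

    /* Read the remaining bytes straight from guest memory. */
    return PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pbDst + cbRead, GCPtrInstr + cbRead, cbToRead - cbRead);
}
#endif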
5233
5234/**
5235 * Disable patch for privileged instruction at specified location
5236 *
5237 * @returns VBox status code.
5238 * @param pVM Pointer to the VM.
5239 * @param pInstrGC Guest context pointer to the privileged instruction
5240 *
5241 * @note Returns failure if patching is not allowed or not possible.
5242 *
5243 */
5244VMMR3_INT_DECL(int) PATMR3DisablePatch(PVM pVM, RTRCPTR pInstrGC)
5245{
5246 PPATMPATCHREC pPatchRec;
5247 PPATCHINFO pPatch;
5248
5249 Log(("PATMR3DisablePatch: %RRv\n", pInstrGC));
5250 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5251 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5252 if (pPatchRec)
5253 {
5254 int rc = VINF_SUCCESS;
5255
5256 pPatch = &pPatchRec->patch;
5257
5258 /* Already disabled? */
5259 if (pPatch->uState == PATCH_DISABLED)
5260 return VINF_SUCCESS;
5261
5262 /* Clear the IDT entries for the patch we're disabling. */
5263 /* Note: very important as we clear IF in the patch itself */
5264 /** @todo this needs to be changed */
5265 if (pPatch->flags & PATMFL_IDTHANDLER)
5266 {
5267 uint32_t iGate;
5268
5269 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
5270 if (iGate != (uint32_t)~0)
5271 {
5272 TRPMR3SetGuestTrapHandler(pVM, iGate, TRPM_INVALID_HANDLER);
5273 if (++cIDTHandlersDisabled < 256)
5274 LogRel(("PATM: Disabling IDT %x patch handler %RRv\n", iGate, pInstrGC));
5275 }
5276 }
5277
5278 /* Mark the entry with a breakpoint in case somebody else calls it later on (cli patch used as a function, duplicated function, trampoline or idt patches) */
5279 if ( pPatch->pPatchBlockOffset
5280 && pPatch->uState == PATCH_ENABLED)
5281 {
5282 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5283 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5284 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5285 }
5286
5287 /* IDT or function patches haven't changed any guest code. */
5288 if (pPatch->flags & PATMFL_PATCHED_GUEST_CODE)
5289 {
5290 Assert(pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP);
5291 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK)));
5292
5293 if (pPatch->uState != PATCH_REFUSED)
5294 {
5295 uint8_t temp[16];
5296
5297 Assert(pPatch->cbPatchJump < sizeof(temp));
5298
5299 /* Let's first check if the guest code is still the same. */
5300 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5301 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5302 if (rc == VINF_SUCCESS)
5303 {
5304 RTRCINTPTR displ = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);
5305
5306 if ( temp[0] != 0xE9 /* jmp opcode */
5307 || *(RTRCINTPTR *)(&temp[1]) != displ
5308 )
5309 {
5310 Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5311 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5312 /* Remove it completely */
5313 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5314 rc = PATMR3RemovePatch(pVM, pInstrGC);
5315 AssertRC(rc);
5316 return VWRN_PATCH_REMOVED;
5317 }
5318 patmRemoveJumpToPatch(pVM, pPatch);
5319 }
5320 else
5321 {
5322 Log(("PATMR3DisablePatch: unable to disable patch -> mark PATCH_DISABLE_PENDING\n"));
5323 pPatch->uState = PATCH_DISABLE_PENDING;
5324 }
5325 }
5326 else
5327 {
5328 AssertMsgFailed(("Patch was refused!\n"));
5329 return VERR_PATCH_ALREADY_DISABLED;
5330 }
5331 }
5332 else
5333 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5334 {
5335 uint8_t temp[16];
5336
5337 Assert(pPatch->cbPatchJump < sizeof(temp));
5338
5339 /* Let's first check if the guest code is still the same. */
5340 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5341 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5342 if (rc == VINF_SUCCESS)
5343 {
5344 if (temp[0] != 0xCC)
5345 {
5346 Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5347 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5348 /* Remove it completely */
5349 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5350 rc = PATMR3RemovePatch(pVM, pInstrGC);
5351 AssertRC(rc);
5352 return VWRN_PATCH_REMOVED;
5353 }
5354 patmDeactivateInt3Patch(pVM, pPatch);
5355 }
5356 }
5357
5358 if (rc == VINF_SUCCESS)
5359 {
5360 /* Save old state and mark this one as disabled (so it can be enabled later on). */
5361 if (pPatch->uState == PATCH_DISABLE_PENDING)
5362 {
5363 /* Just to be safe, let's make sure this one can never be reused; the patch might be marked dirty already (int3 at start) */
5364 pPatch->uState = PATCH_UNUSABLE;
5365 }
5366 else
5367 if (pPatch->uState != PATCH_DIRTY)
5368 {
5369 pPatch->uOldState = pPatch->uState;
5370 pPatch->uState = PATCH_DISABLED;
5371 }
5372 STAM_COUNTER_ADD(&pVM->patm.s.StatDisabled, 1);
5373 }
5374
5375 Log(("PATMR3DisablePatch: disabled patch at %RRv\n", pInstrGC));
5376 return VINF_SUCCESS;
5377 }
5378 Log(("Patch not found!\n"));
5379 return VERR_PATCH_NOT_FOUND;
5380}
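
/*
 * Illustrative sketch (not compiled, hence the #if 0): pairing PATMR3DisablePatch
 * with PATMR3EnablePatch around an operation that must see the original guest
 * code.  The wrapper and the callback are assumptions made for illustration; only
 * the disable/enable pairing itself is taken from the APIs in this file.
 */
#if 0
static int exampleWithPatchDisabled(PVM pVM, RTRCPTR pInstrGC, int (*pfnOp)(PVM pVM, RTRCPTR pInstrGC))
{
    int rc = PATMR3DisablePatch(pVM, pInstrGC);
    if (rc == VWRN_PATCH_REMOVED)
        return pfnOp(pVM, pInstrGC);            /* The patch is gone; nothing to re-enable. */
    if (RT_FAILURE(rc))
        return rc;

    int rcOp = pfnOp(pVM, pInstrGC);

    rc = PATMR3EnablePatch(pVM, pInstrGC);      /* Restore the previous patch state. */
    return RT_FAILURE(rcOp) ? rcOp : rc;
}
#endif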
5381
5382/**
5383 * Permanently disable patch for privileged instruction at specified location
5384 *
5385 * @returns VBox status code.
5386 * @param pVM Pointer to the VM.
5387 * @param pInstrGC Guest context instruction pointer
5388 * @param pConflictAddr Guest context pointer which conflicts with specified patch
5389 * @param pConflictPatch Conflicting patch
5390 *
5391 */
5392static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pConflictPatch)
5393{
5394 NOREF(pConflictAddr);
5395#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
5396 PATCHINFO patch;
5397 DISCPUSTATE cpu;
5398 R3PTRTYPE(uint8_t *) pInstrHC;
5399 uint32_t cbInstr;
5400 bool disret;
5401 int rc;
5402
5403 RT_ZERO(patch);
5404 pInstrHC = patmR3GCVirtToHCVirt(pVM, &patch, pInstrGC);
5405 disret = patmR3DisInstr(pVM, &patch, pInstrGC, pInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
5406 /*
5407 * If it's a 5-byte relative jump, then we can work around the problem by replacing the 32-bit relative offset
5408 * with one that jumps right into the conflict patch.
5409 * Otherwise we must disable the conflicting patch to avoid serious problems.
5410 */
5411 if ( disret == true
5412 && (pConflictPatch->flags & PATMFL_CODE32)
5413 && (cpu.pCurInstr->uOpcode == OP_JMP || (cpu.pCurInstr->fOpType & DISOPTYPE_COND_CONTROLFLOW))
5414 && (cpu.Param1.fUse & DISUSE_IMMEDIATE32_REL))
5415 {
5416 /* Hint patches must be enabled first. */
5417 if (pConflictPatch->flags & PATMFL_INSTR_HINT)
5418 {
5419 Log(("Enabling HINTED patch %RRv\n", pConflictPatch->pPrivInstrGC));
5420 pConflictPatch->flags &= ~PATMFL_INSTR_HINT;
5421 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5422 Assert(rc == VINF_SUCCESS || rc == VERR_PATCH_NOT_FOUND);
5423 /* Enabling might fail if the patched code has changed in the meantime. */
5424 if (rc != VINF_SUCCESS)
5425 return rc;
5426 }
5427
5428 rc = PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_JUMP_CONFLICT);
5429 if (RT_SUCCESS(rc))
5430 {
5431 Log(("PATM -> CONFLICT: Installed JMP patch for patch conflict at %RRv\n", pInstrGC));
5432 STAM_COUNTER_INC(&pVM->patm.s.StatFixedConflicts);
5433 return VINF_SUCCESS;
5434 }
5435 }
5436#endif
5437
5438 if (pConflictPatch->opcode == OP_CLI)
5439 {
5440 /* Turn it into an int3 patch; our GC trap handler will call the generated code manually. */
5441 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> turn into int 3 patch!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5442 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5443 if (rc == VWRN_PATCH_REMOVED)
5444 return VINF_SUCCESS;
5445 if (RT_SUCCESS(rc))
5446 {
5447 pConflictPatch->flags &= ~(PATMFL_MUST_INSTALL_PATCHJMP|PATMFL_INSTR_HINT);
5448 pConflictPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
5449 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5450 if (rc == VERR_PATCH_NOT_FOUND)
5451 return VINF_SUCCESS; /* removed already */
5452
5453 AssertRC(rc);
5454 if (RT_SUCCESS(rc))
5455 {
5456 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
5457 return VINF_SUCCESS;
5458 }
5459 }
5460 /* else turned into unusable patch (see below) */
5461 }
5462 else
5463 {
5464 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> DISABLING it!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5465 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5466 if (rc == VWRN_PATCH_REMOVED)
5467 return VINF_SUCCESS;
5468 }
5469
5470 /* No need to monitor the code anymore. */
5471 if (pConflictPatch->flags & PATMFL_CODE_MONITORED)
5472 {
5473 int rc = patmRemovePatchPages(pVM, pConflictPatch);
5474 AssertRC(rc);
5475 }
5476 pConflictPatch->uState = PATCH_UNUSABLE;
5477 STAM_COUNTER_INC(&pVM->patm.s.StatUnusable);
5478 return VERR_PATCH_DISABLED;
5479}
5480
5481/**
5482 * Enable patch for privileged instruction at specified location
5483 *
5484 * @returns VBox status code.
5485 * @param pVM Pointer to the VM.
5486 * @param pInstrGC Guest context pointer to the privileged instruction
5487 *
5488 * @note Returns failure if patching is not allowed or not possible.
5489 *
5490 */
5491VMMR3_INT_DECL(int) PATMR3EnablePatch(PVM pVM, RTRCPTR pInstrGC)
5492{
5493 PPATMPATCHREC pPatchRec;
5494 PPATCHINFO pPatch;
5495
5496 Log(("PATMR3EnablePatch %RRv\n", pInstrGC));
5497 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5498 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5499 if (pPatchRec)
5500 {
5501 int rc = VINF_SUCCESS;
5502
5503 pPatch = &pPatchRec->patch;
5504
5505 if (pPatch->uState == PATCH_DISABLED)
5506 {
5507 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
5508 {
5509 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
5510 uint8_t temp[16];
5511
5512 Assert(pPatch->cbPatchJump < sizeof(temp));
5513
5514 /* Let's first check if the guest code is still the same. */
5515 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5516 AssertRC(rc2);
5517 if (rc2 == VINF_SUCCESS)
5518 {
5519 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5520 {
5521 Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5522 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5523 /* Remove it completely */
5524 rc = PATMR3RemovePatch(pVM, pInstrGC);
5525 AssertRC(rc);
5526 return VERR_PATCH_NOT_FOUND;
5527 }
5528
5529 PATMP2GLOOKUPREC cacheRec;
5530 RT_ZERO(cacheRec);
5531 cacheRec.pPatch = pPatch;
5532
5533 rc2 = patmGenJumpToPatch(pVM, pPatch, &cacheRec, false);
5534 /* Free leftover lock if any. */
5535 if (cacheRec.Lock.pvMap)
5536 {
5537 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
5538 cacheRec.Lock.pvMap = NULL;
5539 }
5540 AssertRC(rc2);
5541 if (RT_FAILURE(rc2))
5542 return rc2;
5543
5544#ifdef DEBUG
5545 {
5546 DISCPUSTATE cpu;
5547 char szOutput[256];
5548 uint32_t cbInstr;
5549 uint32_t i = 0;
5550 bool disret;
5551 while(i < pPatch->cbPatchJump)
5552 {
5553 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
5554 &cpu, &cbInstr, szOutput, sizeof(szOutput));
5555 Log(("Renewed patch instr: %s", szOutput));
5556 i += cbInstr;
5557 }
5558 }
5559#endif
5560 }
5561 }
5562 else
5563 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5564 {
5565 uint8_t temp[16];
5566
5567 Assert(pPatch->cbPatchJump < sizeof(temp));
5568
5569 /* Let's first check if the guest code is still the same. */
5570 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5571 AssertRC(rc2);
5572
5573 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5574 {
5575 Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5576 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5577 rc = PATMR3RemovePatch(pVM, pInstrGC);
5578 AssertRC(rc);
5579 return VERR_PATCH_NOT_FOUND;
5580 }
5581
5582 rc2 = patmActivateInt3Patch(pVM, pPatch);
5583 if (RT_FAILURE(rc2))
5584 return rc2;
5585 }
5586
5587 pPatch->uState = pPatch->uOldState; //restore state
5588
5589 /* Restore the entry breakpoint with the original opcode (see PATMR3DisablePatch). */
5590 if (pPatch->pPatchBlockOffset)
5591 *PATCHCODE_PTR_HC(pPatch) = pPatch->bDirtyOpcode;
5592
5593 STAM_COUNTER_ADD(&pVM->patm.s.StatEnabled, 1);
5594 }
5595 else
5596 Log(("PATMR3EnablePatch: Unable to enable patch %RRv with state %d\n", pInstrGC, pPatch->uState));
5597
5598 return rc;
5599 }
5600 return VERR_PATCH_NOT_FOUND;
5601}
5602
5603/**
5604 * Remove patch for privileged instruction at specified location
5605 *
5606 * @returns VBox status code.
5607 * @param pVM Pointer to the VM.
5608 * @param pPatchRec Patch record
5609 * @param fForceRemove Remove *all* patches
5610 */
5611int patmR3RemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove)
5612{
5613 PPATCHINFO pPatch;
5614
5615 pPatch = &pPatchRec->patch;
5616
5617 /* Strictly forbidden to remove such patches. There can be dependencies!! */
5618 if (!fForceRemove && (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED)))
5619 {
5620 Log(("PATMRemovePatch %RRv REFUSED!\n", pPatch->pPrivInstrGC));
5621 return VERR_ACCESS_DENIED;
5622 }
5623 Log(("PATMRemovePatch %RRv\n", pPatch->pPrivInstrGC));
5624
5625 /* Note: NEVER EVER REUSE PATCH MEMORY */
5626 /* Note: PATMR3DisablePatch puts a breakpoint (0xCC) at the entry of this patch */
5627
5628 if (pPatchRec->patch.pPatchBlockOffset)
5629 {
5630 PAVLOU32NODECORE pNode;
5631
5632 pNode = RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->patch.pPatchBlockOffset);
5633 Assert(pNode);
5634 }
5635
5636 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
5637 {
5638 int rc = patmRemovePatchPages(pVM, &pPatchRec->patch);
5639 AssertRC(rc);
5640 }
5641
5642#ifdef VBOX_WITH_STATISTICS
5643 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5644 {
5645 STAMR3DeregisterF(pVM->pUVM, "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
5646 STAMR3DeregisterF(pVM->pUVM, "/PATM/PatchBD/0x%RRv*", pPatchRec->patch.pPrivInstrGC);
5647 }
5648#endif
5649
5650 /* Note: no need to free Guest2PatchAddrTree as those records share memory with Patch2GuestAddrTree records. */
5651 patmEmptyTreeU32(pVM, &pPatch->Patch2GuestAddrTree);
5652 pPatch->nrPatch2GuestRecs = 0;
5653 Assert(pPatch->Patch2GuestAddrTree == 0);
5654
5655 patmEmptyTree(pVM, &pPatch->FixupTree);
5656 pPatch->nrFixups = 0;
5657 Assert(pPatch->FixupTree == 0);
5658
5659 if (pPatchRec->patch.pTempInfo)
5660 MMR3HeapFree(pPatchRec->patch.pTempInfo);
5661
5662 /* Note: might fail, because it has already been removed (e.g. during reset). */
5663 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5664
5665 /* Free the patch record */
5666 MMHyperFree(pVM, pPatchRec);
5667 return VINF_SUCCESS;
5668}
5669
5670/**
5671 * RTAvlU32DoWithAll() worker.
5672 * Checks whether the current trampoline instruction is the jump to the target patch
5673 * and updates the displacement to jump to the new target.
5674 *
5675 * @returns VBox status code.
5676 * @retval VERR_ALREADY_EXISTS if the jump was found.
5677 * @param pNode The current patch to guest record to check.
5678 * @param pvUser The refresh state.
5679 */
5680static int patmR3PatchRefreshFindTrampolinePatch(PAVLU32NODECORE pNode, void *pvUser)
5681{
5682 PRECPATCHTOGUEST pPatch2GuestRec = (PRECPATCHTOGUEST)pNode;
5683 PPATMREFRESHPATCH pRefreshPatchState = (PPATMREFRESHPATCH)pvUser;
5684 PVM pVM = pRefreshPatchState->pVM;
5685
5686 uint8_t *pPatchInstr = (uint8_t *)(pVM->patm.s.pPatchMemHC + pPatch2GuestRec->Core.Key);
5687
5688 /*
5689 * Check if the patch instruction starts with a jump.
5690 * ASSUMES that there is no other patch to guest record that starts
5691 * with a jump.
5692 */
5693 if (*pPatchInstr == 0xE9)
5694 {
5695 /* Jump found, update the displacement. */
5696 RTRCPTR pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pRefreshPatchState->pPatchRec,
5697 pRefreshPatchState->pPatchTrampoline->pPrivInstrGC);
5698 int32_t displ = pPatchTargetGC - (pVM->patm.s.pPatchMemGC + pPatch2GuestRec->Core.Key + SIZEOF_NEARJUMP32);
5699
5700 LogFlow(("Updating trampoline patch: new patch target %RRv, new displacement %d (old was %d)\n",
5701 pPatchTargetGC, displ, *(uint32_t *)&pPatchInstr[1]));
5702
5703 *(uint32_t *)&pPatchInstr[1] = displ;
5704 return VERR_ALREADY_EXISTS; /** @todo better return code */
5705 }
5706
5707 return VINF_SUCCESS;
5708}
5709
5710/**
5711 * Attempt to refresh the patch by recompiling its entire code block
5712 *
5713 * @returns VBox status code.
5714 * @param pVM Pointer to the VM.
5715 * @param pPatchRec Patch record
5716 */
5717int patmR3RefreshPatch(PVM pVM, PPATMPATCHREC pPatchRec)
5718{
5719 PPATCHINFO pPatch;
5720 int rc;
5721 RTRCPTR pInstrGC = pPatchRec->patch.pPrivInstrGC;
5722 PTRAMPREC pTrampolinePatchesHead = NULL;
5723
5724 Log(("patmR3RefreshPatch: attempt to refresh patch at %RRv\n", pInstrGC));
5725
5726 pPatch = &pPatchRec->patch;
5727 AssertReturn(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER), VERR_PATCHING_REFUSED);
5728 if (pPatch->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
5729 {
5730 if (!pPatch->pTrampolinePatchesHead)
5731 {
5732 /*
5733 * It is sometimes possible that there are trampoline patches to this patch
5734 * but they are not recorded (after a saved state load for example).
5735 * Refuse to refresh those patches.
5736 * In theory this can hurt performance if the patched code is modified by the
5737 * guest and executed often. However, the state is usually saved after the guest
5738 * code was modified and the code is not changed again afterwards, so this
5739 * shouldn't be a big problem.
5740 */
5741 Log(("patmR3RefreshPatch: refused because external jumps to this patch exist but the jumps are not recorded\n"));
5742 return VERR_PATCHING_REFUSED;
5743 }
5744 Log(("patmR3RefreshPatch: external jumps to this patch exist, updating\n"));
5745 pTrampolinePatchesHead = pPatch->pTrampolinePatchesHead;
5746 }
5747
5748 /* Note: quite ugly to enable/disable/remove/insert old and new patches, but there's no easy way around it. */
5749
5750 rc = PATMR3DisablePatch(pVM, pInstrGC);
5751 AssertRC(rc);
5752
5753 /* Kick it out of the lookup tree to make sure PATMR3InstallPatch doesn't fail (hack alert) */
5754 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5755#ifdef VBOX_WITH_STATISTICS
5756 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5757 {
5758 STAMR3DeregisterF(pVM->pUVM, "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
5759 STAMR3DeregisterF(pVM->pUVM, "/PATM/PatchBD/0x%RRv*", pPatchRec->patch.pPrivInstrGC);
5760 }
5761#endif
5762
5763 /** Note: We don't attempt to reuse patch memory here as it's quite common that the new code block requires more memory. */
5764
5765 /* Attempt to install a new patch. */
5766 rc = PATMR3InstallPatch(pVM, pInstrGC, pPatch->flags & (PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT));
5767 if (RT_SUCCESS(rc))
5768 {
5769 RTRCPTR pPatchTargetGC;
5770 PPATMPATCHREC pNewPatchRec;
5771
5772 /* Determine target address in new patch */
5773 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pInstrGC);
5774 Assert(pPatchTargetGC);
5775 if (!pPatchTargetGC)
5776 {
5777 rc = VERR_PATCHING_REFUSED;
5778 goto failure;
5779 }
5780
5781 /* Reset offset into patch memory to put the next code blocks right at the beginning. */
5782 pPatch->uCurPatchOffset = 0;
5783
5784 /* insert jump to new patch in old patch block */
5785 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC, false /* no lookup record */);
5786 if (RT_FAILURE(rc))
5787 goto failure;
5788
5789 pNewPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5790 Assert(pNewPatchRec); /* can't fail */
5791
5792 /* Remove old patch (only do that when everything is finished) */
5793 int rc2 = patmR3RemovePatch(pVM, pPatchRec, true /* force removal */);
5794 AssertRC(rc2);
5795
5796 /* Put the new patch back into the tree, because removing the old one kicked this one out. (hack alert) */
5797 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pNewPatchRec->Core);
5798 Assert(fInserted); NOREF(fInserted);
5799
5800 Log(("PATM: patmR3RefreshPatch: successfully refreshed patch at %RRv\n", pInstrGC));
5801 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshSuccess);
5802
5803 /* Used by another patch, so don't remove it! */
5804 pNewPatchRec->patch.flags |= PATMFL_CODE_REFERENCED;
5805
5806 if (pTrampolinePatchesHead)
5807 {
5808 /* Update all trampoline patches to jump to the new patch. */
5809 PTRAMPREC pTrampRec = NULL;
5810 PATMREFRESHPATCH RefreshPatch;
5811
5812 RefreshPatch.pVM = pVM;
5813 RefreshPatch.pPatchRec = &pNewPatchRec->patch;
5814
5815 pTrampRec = pTrampolinePatchesHead;
5816
5817 while (pTrampRec)
5818 {
5819 PPATCHINFO pPatchTrampoline = &pTrampRec->pPatchTrampoline->patch;
5820
5821 RefreshPatch.pPatchTrampoline = pPatchTrampoline;
5822 /*
5823 * We have to find the right patch2guest record because there might be others
5824 * for statistics.
5825 */
5826 rc = RTAvlU32DoWithAll(&pPatchTrampoline->Patch2GuestAddrTree, true,
5827 patmR3PatchRefreshFindTrampolinePatch, &RefreshPatch);
5828 Assert(rc == VERR_ALREADY_EXISTS);
5829 rc = VINF_SUCCESS;
5830 pTrampRec = pTrampRec->pNext;
5831 }
5832 pNewPatchRec->patch.pTrampolinePatchesHead = pTrampolinePatchesHead;
5833 pNewPatchRec->patch.flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
5834 /* Clear the list of trampoline patches for the old patch (safety precaution). */
5835 pPatchRec->patch.pTrampolinePatchesHead = NULL;
5836 }
5837 }
5838
5839failure:
5840 if (RT_FAILURE(rc))
5841 {
5842 LogRel(("PATM: patmR3RefreshPatch: failed to refresh patch at %RRv. Reactivating the old one.\n", pInstrGC));
5843
5844 /* Remove the new inactive patch */
5845 rc = PATMR3RemovePatch(pVM, pInstrGC);
5846 AssertRC(rc);
5847
5848 /* Put the old patch back into the tree (or else it won't be saved) (hack alert) */
5849 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
5850 Assert(fInserted); NOREF(fInserted);
5851
5852 /* Enable again in case the dirty instruction is near the end and there are safe code paths. */
5853 int rc2 = PATMR3EnablePatch(pVM, pInstrGC);
5854 AssertRC(rc2);
5855
5856 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshFailed);
5857 }
5858 return rc;
5859}
5860
5861/**
5862 * Find patch for privileged instruction at specified location
5863 *
5864 * @returns Patch structure pointer if found; else NULL
5865 * @param pVM Pointer to the VM.
5866 * @param pInstrGC Guest context pointer to an instruction that might lie within 5 bytes of an existing patch jump
5867 * @param fIncludeHints Include hinted patches or not
5868 *
5869 */
5870PPATCHINFO patmFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncludeHints)
5871{
5872 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5873 /* if the patch is enabled, the pointer is not identical to the privileged patch ptr and it lies within 5 bytes of this priv instr ptr, then we've got a hit! */
5874 if (pPatchRec)
5875 {
5876 if ( pPatchRec->patch.uState == PATCH_ENABLED
5877 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE)
5878 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5879 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5880 {
5881 Log(("Found active patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5882 return &pPatchRec->patch;
5883 }
5884 else
5885 if ( fIncludeHints
5886 && pPatchRec->patch.uState == PATCH_DISABLED
5887 && (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
5888 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5889 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5890 {
5891 Log(("Found HINT patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5892 return &pPatchRec->patch;
5893 }
5894 }
5895 return NULL;
5896}
5897
5898/**
5899 * Checks whether the GC address is inside a generated patch jump
5900 *
5901 * @returns true -> yes, false -> no
5902 * @param pVM Pointer to the VM.
5903 * @param pAddr Guest context address.
5904 * @param pPatchAddr Where to store the guest context patch address (only valid when true is returned).
5905 */
5906VMMR3_INT_DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatchAddr)
5907{
5908 RTRCPTR addr;
5909 PPATCHINFO pPatch;
5910
5911 Assert(!HMIsEnabled(pVM));
5912 if (PATMIsEnabled(pVM) == false)
5913 return false;
5914
5915 if (pPatchAddr == NULL)
5916 pPatchAddr = &addr;
5917
5918 *pPatchAddr = 0;
5919
5920 pPatch = patmFindActivePatchByEntrypoint(pVM, pAddr);
5921 if (pPatch)
5922 *pPatchAddr = pPatch->pPrivInstrGC;
5923
5924 return *pPatchAddr == 0 ? false : true;
5925}
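
/*
 * Illustrative sketch (not compiled, hence the #if 0): mapping a faulting EIP that
 * lies in the middle of a generated patch jump back to the start of the
 * overwritten instruction.  The helper name is made up; the real consumers are
 * the trap handling paths.
 */
#if 0
static RTRCPTR exampleCanonicalizeEip(PVM pVM, RTRCPTR GCPtrEip)
{
    RTGCPTR32 GCPtrPatch;
    if (PATMR3IsInsidePatchJump(pVM, GCPtrEip, &GCPtrPatch))
        return (RTRCPTR)GCPtrPatch;     /* Start of the patched (privileged) instruction. */
    return GCPtrEip;                    /* Not inside a patch jump; leave unchanged. */
}
#endif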
5926
5927/**
5928 * Remove patch for privileged instruction at specified location
5929 *
5930 * @returns VBox status code.
5931 * @param pVM Pointer to the VM.
5932 * @param pInstrGC Guest context pointer to the privileged instruction
5933 *
5934 * @note Returns failure if patching is not allowed or not possible.
5935 *
5936 */
5937VMMR3_INT_DECL(int) PATMR3RemovePatch(PVM pVM, RTRCPTR pInstrGC)
5938{
5939 PPATMPATCHREC pPatchRec;
5940
5941 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5942 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5943 if (pPatchRec)
5944 {
5945 int rc = PATMR3DisablePatch(pVM, pInstrGC);
5946 if (rc == VWRN_PATCH_REMOVED)
5947 return VINF_SUCCESS;
5948
5949 return patmR3RemovePatch(pVM, pPatchRec, false);
5950 }
5951 AssertFailed();
5952 return VERR_PATCH_NOT_FOUND;
5953}
5954
5955/**
5956 * Mark patch as dirty
5957 *
5958 * @returns VBox status code.
5959 * @param pVM Pointer to the VM.
5960 * @param pPatch Patch record
5961 *
5962 * @note Returns failure if patching is not allowed or not possible.
5963 *
5964 */
5965static int patmR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch)
5966{
5967 if (pPatch->pPatchBlockOffset)
5968 {
5969 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5970 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5971 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5972 }
5973
5974 STAM_COUNTER_INC(&pVM->patm.s.StatDirty);
5975 /* Put back the replaced instruction. */
5976 int rc = PATMR3DisablePatch(pVM, pPatch->pPrivInstrGC);
5977 if (rc == VWRN_PATCH_REMOVED)
5978 return VINF_SUCCESS;
5979
5980 /* Note: we don't restore patch pages for patches that are not enabled! */
5981 /* Note: be careful when changing this behaviour!! */
5982
5983 /* The patch pages are no longer marked for self-modifying code detection */
5984 if (pPatch->flags & PATMFL_CODE_MONITORED)
5985 {
5986 rc = patmRemovePatchPages(pVM, pPatch);
5987 AssertRCReturn(rc, rc);
5988 }
5989 pPatch->uState = PATCH_DIRTY;
5990
5991 /* Paranoia; make sure this patch is not somewhere in the callchain, so prevent ret instructions from succeeding. */
5992 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
5993
5994 return VINF_SUCCESS;
5995}
5996
5997/**
5998 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5999 *
6000 * @returns Corresponding guest context instruction pointer, or 0 if not found.
6001 * @param pVM Pointer to the VM.
6002 * @param pPatch Patch block structure pointer
6003 * @param pPatchGC GC address in patch block
6004 */
6005RTRCPTR patmPatchGCPtr2GuestGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pPatchGC)
6006{
6007 Assert(pPatch->Patch2GuestAddrTree);
6008 /* Get the closest record from below. */
6009 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->Patch2GuestAddrTree, pPatchGC - pVM->patm.s.pPatchMemGC, false);
6010 if (pPatchToGuestRec)
6011 return pPatchToGuestRec->pOrgInstrGC;
6012
6013 return 0;
6014}
6015
6016/**
6017 * Converts Guest code GC ptr to Patch code GC ptr (if found)
6018 *
6019 * @returns corresponding GC pointer in patch block
6020 * @param pVM Pointer to the VM.
6021 * @param pPatch Current patch block pointer
6022 * @param pInstrGC Guest context pointer to privileged instruction
6023 *
6024 */
6025RTRCPTR patmGuestGCPtrToPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
6026{
6027 if (pPatch->Guest2PatchAddrTree)
6028 {
6029 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
6030 if (pGuestToPatchRec)
6031 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
6032 }
6033
6034 return 0;
6035}
6036
6037/**
6038 * Converts Guest code GC ptr to Patch code GC ptr (if found)
6039 *
6040 * @returns corresponding GC pointer in patch block
6041 * @param pVM Pointer to the VM.
6042 * @param pInstrGC Guest context pointer to privileged instruction
6043 */
6044static RTRCPTR patmR3GuestGCPtrToPatchGCPtrSimple(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC)
6045{
6046 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
6047 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && pInstrGC >= pPatchRec->patch.pPrivInstrGC)
6048 return patmGuestGCPtrToPatchGCPtr(pVM, &pPatchRec->patch, pInstrGC);
6049 return NIL_RTRCPTR;
6050}
6051
6052/**
6053 * Converts Guest code GC ptr to Patch code GC ptr (or nearest from below if no
6054 * identical match)
6055 *
6056 * @returns corresponding GC pointer in patch block
6057 * @param pVM Pointer to the VM.
6058 * @param pPatch Current patch block pointer
6059 * @param pInstrGC Guest context pointer to privileged instruction
6060 *
6061 */
6062RTRCPTR patmGuestGCPtrToClosestPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
6063{
6064 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pInstrGC, false);
6065 if (pGuestToPatchRec)
6066 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
6067 return NIL_RTRCPTR;
6068}
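
/*
 * Illustrative sketch (not compiled, hence the #if 0): round-tripping an address
 * through the translation helpers above.  The helper name is invented; it only
 * shows that guest -> patch -> guest translation yields the original address for
 * instructions that have an exact lookup record.
 */
#if 0
static bool exampleCheckRoundTrip(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
{
    RTRCPTR pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pInstrGC);
    if (pPatchGC == 0)
        return false;   /* No exact lookup record for this guest address. */
    return patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchGC) == pInstrGC;
}
#endif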
6069
6070/**
6071 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
6072 *
6073 * @returns original GC instruction pointer or 0 if not found
6074 * @param pVM Pointer to the VM.
6075 * @param pPatchGC GC address in patch block
6076 * @param pEnmState State of the translated address (out)
6077 *
6078 */
6079VMMR3_INT_DECL(RTRCPTR) PATMR3PatchToGCPtr(PVM pVM, RTRCPTR pPatchGC, PATMTRANSSTATE *pEnmState)
6080{
6081 PPATMPATCHREC pPatchRec;
6082 void *pvPatchCoreOffset;
6083 RTRCPTR pPrivInstrGC;
6084
6085 Assert(PATMIsPatchGCAddr(pVM, pPatchGC));
6086 Assert(!HMIsEnabled(pVM));
6087 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchGC - pVM->patm.s.pPatchMemGC, false);
6088 if (pvPatchCoreOffset == 0)
6089 {
6090 Log(("PATMR3PatchToGCPtr failed for %RRv offset %x\n", pPatchGC, pPatchGC - pVM->patm.s.pPatchMemGC));
6091 return 0;
6092 }
6093 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6094 pPrivInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, &pPatchRec->patch, pPatchGC);
6095 if (pEnmState)
6096 {
6097 AssertMsg(pPrivInstrGC && ( pPatchRec->patch.uState == PATCH_ENABLED
6098 || pPatchRec->patch.uState == PATCH_DIRTY
6099 || pPatchRec->patch.uState == PATCH_DISABLE_PENDING
6100 || pPatchRec->patch.uState == PATCH_UNUSABLE),
6101 ("pPrivInstrGC=%RRv uState=%d\n", pPrivInstrGC, pPatchRec->patch.uState));
6102
6103 if ( !pPrivInstrGC
6104 || pPatchRec->patch.uState == PATCH_UNUSABLE
6105 || pPatchRec->patch.uState == PATCH_REFUSED)
6106 {
6107 pPrivInstrGC = 0;
6108 *pEnmState = PATMTRANS_FAILED;
6109 }
6110 else
6111 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pPrivInstrGC)
6112 {
6113 *pEnmState = PATMTRANS_INHIBITIRQ;
6114 }
6115 else
6116 if ( pPatchRec->patch.uState == PATCH_ENABLED
6117 && !(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE))
6118 && pPrivInstrGC > pPatchRec->patch.pPrivInstrGC
6119 && pPrivInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
6120 {
6121 *pEnmState = PATMTRANS_OVERWRITTEN;
6122 }
6123 else
6124 if (patmFindActivePatchByEntrypoint(pVM, pPrivInstrGC))
6125 {
6126 *pEnmState = PATMTRANS_OVERWRITTEN;
6127 }
6128 else
6129 if (pPrivInstrGC == pPatchRec->patch.pPrivInstrGC)
6130 {
6131 *pEnmState = PATMTRANS_PATCHSTART;
6132 }
6133 else
6134 *pEnmState = PATMTRANS_SAFE;
6135 }
6136 return pPrivInstrGC;
6137}
6138
6139/**
6140 * Returns the GC pointer of the patch for the specified GC address
6141 *
6142 * @returns GC pointer of the patch code, or NIL_RTRCPTR if no enabled or dirty patch exists at that address.
6143 * @param pVM Pointer to the VM.
6144 * @param pAddrGC Guest context address
6145 */
6146VMMR3_INT_DECL(RTRCPTR) PATMR3QueryPatchGCPtr(PVM pVM, RTRCPTR pAddrGC)
6147{
6148 PPATMPATCHREC pPatchRec;
6149
6150 Assert(!HMIsEnabled(pVM));
6151
6152 /* Find the patch record. */
6153 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pAddrGC);
6154 /** @todo we should only use patches that are enabled! always did this, but it's incorrect! */
6155 if (pPatchRec && (pPatchRec->patch.uState == PATCH_ENABLED || pPatchRec->patch.uState == PATCH_DIRTY))
6156 return PATCHCODE_PTR_GC(&pPatchRec->patch);
6157 return NIL_RTRCPTR;
6158}
6159
6160/**
6161 * Attempt to recover dirty instructions
6162 *
6163 * @returns VBox status code.
6164 * @param pVM Pointer to the VM.
6165 * @param pCtx Pointer to the guest CPU context.
6166 * @param pPatch Patch record.
6167 * @param pPatchToGuestRec Patch to guest address record.
6168 * @param pEip GC pointer of trapping instruction.
6169 */
6170static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch, PRECPATCHTOGUEST pPatchToGuestRec, RTRCPTR pEip)
6171{
6172 DISCPUSTATE CpuOld, CpuNew;
6173 uint8_t *pPatchInstrHC, *pCurPatchInstrHC;
6174 int rc;
6175 RTRCPTR pCurInstrGC, pCurPatchInstrGC;
6176 uint32_t cbDirty;
6177 PRECPATCHTOGUEST pRec;
6178 RTRCPTR const pOrgInstrGC = pPatchToGuestRec->pOrgInstrGC;
6179 PVMCPU pVCpu = VMMGetCpu0(pVM);
6180 Log(("patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv)\n", pEip, pOrgInstrGC));
6181
6182 pRec = pPatchToGuestRec;
6183 pCurInstrGC = pOrgInstrGC;
6184 pCurPatchInstrGC = pEip;
6185 cbDirty = 0;
6186 pPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6187
6188 /* Find all adjacent dirty instructions */
6189 while (true)
6190 {
6191 if (pRec->fJumpTarget)
6192 {
6193 LogRel(("PATM: patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv) ignored, because instruction in function was reused as target of jump\n", pEip, pOrgInstrGC));
6194 pRec->fDirty = false;
6195 return VERR_PATCHING_REFUSED;
6196 }
6197
6198 /* Restore original instruction opcode byte so we can check if the write was indeed safe. */
6199 pCurPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6200 *pCurPatchInstrHC = pRec->u8DirtyOpcode;
6201
6202 /* Only harmless instructions are acceptable. */
6203 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurPatchInstrGC, &CpuOld, 0);
6204 if ( RT_FAILURE(rc)
6205 || !(CpuOld.pCurInstr->fOpType & DISOPTYPE_HARMLESS))
6206 {
6207 if (RT_SUCCESS(rc))
6208 cbDirty += CpuOld.cbInstr;
6209 else
6210 if (!cbDirty)
6211 cbDirty = 1;
6212 break;
6213 }
6214
6215#ifdef DEBUG
6216 char szBuf[256];
6217 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6218 szBuf, sizeof(szBuf), NULL);
6219 Log(("DIRTY: %s\n", szBuf));
6220#endif
6221 /* Mark as clean; if we fail we'll let it always fault. */
6222 pRec->fDirty = false;
6223
6224 /* Remove old lookup record. */
6225 patmr3RemoveP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrGC);
6226 pPatchToGuestRec = NULL;
6227
6228 pCurPatchInstrGC += CpuOld.cbInstr;
6229 cbDirty += CpuOld.cbInstr;
6230
6231 /* Let's see if there's another dirty instruction right after. */
6232 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6233 if (!pRec || !pRec->fDirty)
6234 break; /* no more dirty instructions */
6235
6236 /* In case of complex instructions the next guest instruction could be quite far off. */
6237 pCurPatchInstrGC = pRec->Core.Key + pVM->patm.s.pPatchMemGC;
6238 }
6239
6240 if ( RT_SUCCESS(rc)
6241 && (CpuOld.pCurInstr->fOpType & DISOPTYPE_HARMLESS)
6242 )
6243 {
6244 uint32_t cbLeft;
6245
6246 pCurPatchInstrHC = pPatchInstrHC;
6247 pCurPatchInstrGC = pEip;
6248 cbLeft = cbDirty;
6249
6250 while (cbLeft && RT_SUCCESS(rc))
6251 {
6252 bool fValidInstr;
6253
6254 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurInstrGC, &CpuNew, 0);
6255
6256 fValidInstr = !!(CpuNew.pCurInstr->fOpType & DISOPTYPE_HARMLESS);
6257 if ( !fValidInstr
6258 && (CpuNew.pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW)
6259 )
6260 {
6261 RTRCPTR pTargetGC = PATMResolveBranch(&CpuNew, pCurInstrGC);
6262
6263 if ( pTargetGC >= pOrgInstrGC
6264 && pTargetGC <= pOrgInstrGC + cbDirty
6265 )
6266 {
6267 /* A relative jump to an instruction inside or to the end of the dirty block is acceptable. */
6268 fValidInstr = true;
6269 }
6270 }
6271
6272 /* If the instruction is completely harmless (which implies a 1:1 patch copy). */
6273 if ( rc == VINF_SUCCESS
6274 && CpuNew.cbInstr <= cbLeft /* must still fit */
6275 && fValidInstr
6276 )
6277 {
6278#ifdef DEBUG
6279 char szBuf[256];
6280 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6281 szBuf, sizeof(szBuf), NULL);
6282 Log(("NEW: %s\n", szBuf));
6283#endif
6284
6285 /* Copy the new instruction. */
6286 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pCurPatchInstrHC, pCurInstrGC, CpuNew.cbInstr);
6287 AssertRC(rc);
6288
6289 /* Add a new lookup record for the duplicated instruction. */
6290 patmR3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6291 }
6292 else
6293 {
6294#ifdef DEBUG
6295 char szBuf[256];
6296 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6297 szBuf, sizeof(szBuf), NULL);
6298 Log(("NEW: %s (FAILED)\n", szBuf));
6299#endif
6300 /* Restore the old lookup record for the duplicated instruction. */
6301 patmR3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6302
6303 /** @todo in theory we need to restore the lookup records for the remaining dirty instructions too! */
6304 rc = VERR_PATCHING_REFUSED;
6305 break;
6306 }
6307 pCurInstrGC += CpuNew.cbInstr;
6308 pCurPatchInstrHC += CpuNew.cbInstr;
6309 pCurPatchInstrGC += CpuNew.cbInstr;
6310 cbLeft -= CpuNew.cbInstr;
6311
6312 /* Check if we expanded a complex guest instruction into a patch stream (e.g. call) */
6313 if (!cbLeft)
6314 {
6315 /* If the next patch instruction doesn't correspond to the next guest instruction, then we have some extra room to fill. */
6316 if (RTAvlU32Get(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC) == NULL)
6317 {
6318 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6319 if (pRec)
6320 {
6321 unsigned cbFiller = pRec->Core.Key + pVM->patm.s.pPatchMemGC - pCurPatchInstrGC;
6322 uint8_t *pPatchFillHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6323
6324 Assert(!pRec->fDirty);
6325
6326 Log(("Room left in patched instruction stream (%d bytes)\n", cbFiller));
6327 if (cbFiller >= SIZEOF_NEARJUMP32)
6328 {
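                            /* Enough room for a 5-byte near jmp (0xE9 + rel32); the displacement
                               skips the remaining filler bytes and is relative to the end of the
                               jump instruction itself. */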
6329 pPatchFillHC[0] = 0xE9;
6330 *(uint32_t *)&pPatchFillHC[1] = cbFiller - SIZEOF_NEARJUMP32;
6331#ifdef DEBUG
6332 char szBuf[256];
6333 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC,
6334 DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6335 Log(("FILL: %s\n", szBuf));
6336#endif
6337 }
6338 else
6339 {
6340 for (unsigned i = 0; i < cbFiller; i++)
6341 {
6342 pPatchFillHC[i] = 0x90; /* NOP */
6343#ifdef DEBUG
6344 char szBuf[256];
6345 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC + i,
6346 DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6347 Log(("FILL: %s\n", szBuf));
6348#endif
6349 }
6350 }
6351 }
6352 }
6353 }
6354 }
6355 }
6356 else
6357 rc = VERR_PATCHING_REFUSED;
6358
6359 if (RT_SUCCESS(rc))
6360 {
6361 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyGood);
6362 }
6363 else
6364 {
6365 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyBad);
6366 Assert(cbDirty);
6367
6368 /* Mark the whole instruction stream with breakpoints. */
6369 if (cbDirty)
6370 memset(pPatchInstrHC, 0xCC, cbDirty);
6371
6372 if ( pVM->patm.s.fOutOfMemory == false
6373 && (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER)))
6374 {
6375 rc = patmR3RefreshPatch(pVM, pPatch);
6376 if (RT_FAILURE(rc))
6377 {
6378 LogRel(("PATM: Failed to refresh dirty patch at %RRv. Disabling it.\n", pPatch->patch.pPrivInstrGC));
6379 }
6380 /* Even if we succeed, we must go back to the original instruction as the patched one could be invalid. */
6381 rc = VERR_PATCHING_REFUSED;
6382 }
6383 }
6384 return rc;
6385}
6386
6387/**
6388 * Handle trap inside patch code
6389 *
6390 * @returns VBox status code.
6391 * @param pVM Pointer to the VM.
6392 * @param pCtx Pointer to the guest CPU context.
6393 * @param pEip GC pointer of trapping instruction.
6394 * @param ppNewEip GC pointer to new instruction.
6395 */
6396VMMR3_INT_DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *ppNewEip)
6397{
6398 PPATMPATCHREC pPatch = 0;
6399 void *pvPatchCoreOffset;
6400 RTRCUINTPTR offset;
6401 RTRCPTR pNewEip;
6402    int rc;
6403 PRECPATCHTOGUEST pPatchToGuestRec = 0;
6404 PVMCPU pVCpu = VMMGetCpu0(pVM);
6405
6406 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
6407 Assert(pVM->cCpus == 1);
6408
6409 pNewEip = 0;
6410 *ppNewEip = 0;
6411
6412 STAM_PROFILE_ADV_START(&pVM->patm.s.StatHandleTrap, a);
6413
6414 /* Find the patch record. */
6415 /* Note: there might not be a patch to guest translation record (global function) */
6416 offset = pEip - pVM->patm.s.pPatchMemGC;
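    /* The offset into patch memory is the key for both the patch-address tree lookup
       below and the patch-to-guest translation lookup further down. */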
6417 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
6418 if (pvPatchCoreOffset)
6419 {
6420 pPatch = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6421
6422 Assert(offset >= pPatch->patch.pPatchBlockOffset && offset < pPatch->patch.pPatchBlockOffset + pPatch->patch.cbPatchBlockSize);
6423
6424 if (pPatch->patch.uState == PATCH_DIRTY)
6425 {
6426 Log(("PATMR3HandleTrap: trap in dirty patch at %RRv\n", pEip));
6427 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6428 {
6429 /* Function duplication patches set fPIF to 1 on entry */
6430 pVM->patm.s.pGCStateHC->fPIF = 1;
6431 }
6432 }
6433 else
6434 if (pPatch->patch.uState == PATCH_DISABLED)
6435 {
6436 Log(("PATMR3HandleTrap: trap in disabled patch at %RRv\n", pEip));
6437 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6438 {
6439 /* Function duplication patches set fPIF to 1 on entry */
6440 pVM->patm.s.pGCStateHC->fPIF = 1;
6441 }
6442 }
6443 else
6444 if (pPatch->patch.uState == PATCH_DISABLE_PENDING)
6445 {
6446 RTRCPTR pPrivInstrGC = pPatch->patch.pPrivInstrGC;
6447
6448 Log(("PATMR3HandleTrap: disable operation is pending for patch at %RRv\n", pPatch->patch.pPrivInstrGC));
6449 rc = PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6450 AssertReleaseMsg(rc != VWRN_PATCH_REMOVED, ("PATMR3DisablePatch removed patch at %RRv\n", pPrivInstrGC));
6451 AssertMsg(pPatch->patch.uState == PATCH_DISABLED || pPatch->patch.uState == PATCH_UNUSABLE, ("Unexpected failure to disable patch state=%d rc=%Rrc\n", pPatch->patch.uState, rc));
6452 }
6453
6454 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, offset, false);
6455 AssertReleaseMsg(pPatchToGuestRec, ("PATMR3HandleTrap: Unable to find corresponding guest address for %RRv (offset %x)\n", pEip, offset));
6456
6457 pNewEip = pPatchToGuestRec->pOrgInstrGC;
6458 pPatch->patch.cTraps++;
6459 PATM_STAT_FAULT_INC(&pPatch->patch);
6460 }
6461 else
6462 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 0, ("PATMR3HandleTrap: Unable to find translation record for %RRv (PIF=0)\n", pEip));
6463
6464 /* Check if we were interrupted in PATM generated instruction code. */
6465 if (pVM->patm.s.pGCStateHC->fPIF == 0)
6466 {
6467 DISCPUSTATE Cpu;
6468 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pEip, &Cpu, "PIF Trap: ");
6469 AssertRC(rc);
6470
6471 if ( rc == VINF_SUCCESS
6472 && ( Cpu.pCurInstr->uOpcode == OP_PUSHF
6473 || Cpu.pCurInstr->uOpcode == OP_PUSH
6474 || Cpu.pCurInstr->uOpcode == OP_CALL)
6475 )
6476 {
6477 uint64_t fFlags;
6478
6479 STAM_COUNTER_INC(&pVM->patm.s.StatPushTrap);
6480
6481 if (Cpu.pCurInstr->uOpcode == OP_PUSH)
6482 {
6483 rc = PGMShwGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6484 if ( rc == VINF_SUCCESS
6485 && ((fFlags & (X86_PTE_P|X86_PTE_RW)) == (X86_PTE_P|X86_PTE_RW)) )
6486 {
6487 /* The stack address is fine, so the push argument is a pointer -> emulate this instruction */
6488
6489 /* Reset the PATM stack. */
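                    /* Setting Psp back to PATM_STACK_SIZE marks the PATM stack as empty
                       (it grows downwards). */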
6490 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6491
6492 pVM->patm.s.pGCStateHC->fPIF = 1;
6493
6494 Log(("Faulting push -> go back to the original instruction\n"));
6495
6496 /* continue at the original instruction */
6497 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6498 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6499 return VINF_SUCCESS;
6500 }
6501 }
6502
6503 /* Typical pushf (most patches)/push (call patch) trap because of a monitored page. */
6504 rc = PGMShwMakePageWritable(pVCpu, pCtx->esp, 0 /*fFlags*/);
6505 AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
6506 if (rc == VINF_SUCCESS)
6507 {
6508 /* The guest page *must* be present. */
6509 rc = PGMGstGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6510 if ( rc == VINF_SUCCESS
6511 && (fFlags & X86_PTE_P))
6512 {
6513 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6514 return VINF_PATCH_CONTINUE;
6515 }
6516 }
6517 }
6518 else
6519 if (pPatch->patch.pPrivInstrGC == pNewEip)
6520 {
6521 /* Invalidated patch or first instruction overwritten.
6522 * We can ignore the fPIF state in this case.
6523 */
6524 /* Reset the PATM stack. */
6525 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6526
6527 Log(("Call to invalidated patch -> go back to the original instruction\n"));
6528
6529 pVM->patm.s.pGCStateHC->fPIF = 1;
6530
6531 /* continue at the original instruction */
6532 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6533 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6534 return VINF_SUCCESS;
6535 }
6536
6537 char szBuf[256];
6538 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pEip, DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6539
6540 /* Very bad. We crashed in emitted code. Probably stack? */
6541 if (pPatch)
6542 {
6543 AssertLogRelMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6544 ("Crash in patch code %RRv (%RRv) esp=%RX32\nPatch state=%x flags=%RX64 fDirty=%d\n%s\n",
6545 pEip, pNewEip, CPUMGetGuestESP(pVCpu), pPatch->patch.uState, pPatch->patch.flags,
6546 pPatchToGuestRec->fDirty, szBuf));
6547 }
6548 else
6549 AssertLogRelMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6550 ("Crash in patch code %RRv (%RRv) esp=%RX32\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), szBuf));
6551 EMR3FatalError(pVCpu, VERR_PATM_IPE_TRAP_IN_PATCH_CODE);
6552 }
6553
6554 /* From here on, we must have a valid patch to guest translation. */
6555 if (pvPatchCoreOffset == 0)
6556 {
6557 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6558 AssertMsgFailed(("PATMR3HandleTrap: patch not found at address %RRv!!\n", pEip));
6559 return VERR_PATCH_NOT_FOUND;
6560 }
6561
6562 /* Take care of dirty/changed instructions. */
6563 if (pPatchToGuestRec->fDirty)
6564 {
6565 Assert(pPatchToGuestRec->Core.Key == offset);
6566 Assert(pVM->patm.s.pGCStateHC->fPIF == 1);
6567
6568 rc = patmR3HandleDirtyInstr(pVM, pCtx, pPatch, pPatchToGuestRec, pEip);
6569 if (RT_SUCCESS(rc))
6570 {
6571 /* Retry the current instruction. */
6572 pNewEip = pEip;
6573 rc = VINF_PATCH_CONTINUE; /* Continue at current patch instruction. */
6574 }
6575 else
6576 {
6577 /* Reset the PATM stack. */
6578 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6579
6580 rc = VINF_SUCCESS; /* Continue at original instruction. */
6581 }
6582
6583 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6584 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6585 return rc;
6586 }
6587
6588#ifdef VBOX_STRICT
6589 if (pPatch->patch.flags & PATMFL_DUPLICATE_FUNCTION)
6590 {
6591 DISCPUSTATE cpu;
6592 bool disret;
6593 uint32_t cbInstr;
6594 PATMP2GLOOKUPREC cacheRec;
6595 RT_ZERO(cacheRec);
6596 cacheRec.pPatch = &pPatch->patch;
6597
6598 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
6599 &cpu, &cbInstr);
6600 if (cacheRec.Lock.pvMap)
6601 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6602
6603 if (disret && cpu.pCurInstr->uOpcode == OP_RETN)
6604 {
6605 RTRCPTR retaddr;
6606 PCPUMCTX pCtx2;
6607
6608 pCtx2 = CPUMQueryGuestCtxPtr(pVCpu);
6609
6610 rc = PGMPhysSimpleReadGCPtr(pVCpu, &retaddr, pCtx2->esp, sizeof(retaddr));
6611 AssertRC(rc);
6612
6613 Log(("Return failed at %RRv (%RRv)\n", pEip, pNewEip));
6614 Log(("Expected return address %RRv found address %RRv Psp=%x\n", pVM->patm.s.pGCStackHC[(pVM->patm.s.pGCStateHC->Psp+PATM_STACK_SIZE)/sizeof(RTRCPTR)], retaddr, pVM->patm.s.pGCStateHC->Psp));
6615 }
6616 }
6617#endif
6618
6619    /* Return the original address, corrected by subtracting the CS base address. */
6620 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6621
6622 /* Reset the PATM stack. */
6623 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6624
6625 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pNewEip)
6626 {
6627 /* Must be a faulting instruction after sti; currently only sysexit, hlt or iret */
6628 Log(("PATMR3HandleTrap %RRv -> inhibit irqs set!\n", pEip));
6629#ifdef VBOX_STRICT
6630 DISCPUSTATE cpu;
6631 bool disret;
6632 uint32_t cbInstr;
6633 PATMP2GLOOKUPREC cacheRec;
6634 RT_ZERO(cacheRec);
6635 cacheRec.pPatch = &pPatch->patch;
6636
6637 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_ORGCODE,
6638 &cpu, &cbInstr);
6639 if (cacheRec.Lock.pvMap)
6640 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6641
6642 if (disret && (cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_INT3))
6643 {
6644 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
6645 &cpu, &cbInstr);
6646 if (cacheRec.Lock.pvMap)
6647 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6648
6649 Assert(cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_IRET);
6650 }
6651#endif
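        /* Hand the interrupt inhibition over to EM: interrupts stay blocked while the
           guest EIP equals pNewEip (the instruction following the sti). */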
6652 EMSetInhibitInterruptsPC(pVCpu, pNewEip);
6653 pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts = 0;
6654 }
6655
6656 Log2(("pPatchBlockGC %RRv - pEip %RRv corresponding GC address %RRv\n", PATCHCODE_PTR_GC(&pPatch->patch), pEip, pNewEip));
6657 DBGFR3_DISAS_INSTR_LOG(pVCpu, pCtx->cs.Sel, pNewEip, "PATCHRET: ");
6658 if (pNewEip >= pPatch->patch.pPrivInstrGC && pNewEip < pPatch->patch.pPrivInstrGC + pPatch->patch.cbPatchJump)
6659 {
6660 /* We can't jump back to code that we've overwritten with a 5 byte jump! */
6661 Log(("Disabling patch at location %RRv due to trap too close to the privileged instruction \n", pPatch->patch.pPrivInstrGC));
6662 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6663 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6664 return VERR_PATCH_DISABLED;
6665 }
6666
6667#ifdef PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
6668    /** @todo Compare to the number of successful runs; add some aging algorithm and determine the best time to disable the patch. */
6669 if (pPatch->patch.cTraps > MAX_PATCH_TRAPS)
6670 {
6671 Log(("Disabling patch at location %RRv due to too many traps inside patch code\n", pPatch->patch.pPrivInstrGC));
6672        // We are only wasting time; back out the patch.
6673 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6674 pTrapRec->pNextPatchInstr = 0;
6675 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6676 return VERR_PATCH_DISABLED;
6677 }
6678#endif
6679
6680 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6681 return VINF_SUCCESS;
6682}
6683
6684
6685/**
6686 * Handle page-fault in monitored page
6687 *
6688 * @returns VBox status code.
6689 * @param pVM Pointer to the VM.
6690 */
6691VMMR3_INT_DECL(int) PATMR3HandleMonitoredPage(PVM pVM)
6692{
6693 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
6694 PVMCPU pVCpu = VMMGetCpu0(pVM);
6695
6696 RTRCPTR addr = pVM->patm.s.pvFaultMonitor;
6697 addr &= PAGE_BASE_GC_MASK;
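    /* Work on the whole monitored page; every enabled patch whose privileged
       instruction lives in this page is refreshed by disabling and re-enabling it below. */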
6698
6699 int rc = PGMHandlerVirtualDeregister(pVM, pVCpu, addr, false /*fHypervisor*/);
6700 AssertRC(rc); NOREF(rc);
6701
6702 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, false);
6703 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) == PAGE_ADDRESS(addr))
6704 {
6705 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6706 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6707 rc = PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6708 if (rc == VWRN_PATCH_REMOVED)
6709 return VINF_SUCCESS;
6710
6711 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6712
6713 if (addr == pPatchRec->patch.pPrivInstrGC)
6714 addr++;
6715 }
6716
6717 for(;;)
6718 {
6719 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, true);
6720
6721 if (!pPatchRec || PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) != PAGE_ADDRESS(addr))
6722 break;
6723
6724 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
6725 {
6726 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6727 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6728 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6729 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6730 }
6731 addr = pPatchRec->patch.pPrivInstrGC + 1;
6732 }
6733
6734 pVM->patm.s.pvFaultMonitor = 0;
6735 return VINF_SUCCESS;
6736}
6737
6738
6739#ifdef VBOX_WITH_STATISTICS
6740
6741static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch)
6742{
6743 if (pPatch->flags & PATMFL_SYSENTER)
6744 {
6745 return "SYSENT";
6746 }
6747 else
6748 if (pPatch->flags & (PATMFL_TRAPHANDLER|PATMFL_INTHANDLER))
6749 {
6750 static char szTrap[16];
6751 uint32_t iGate;
6752
6753 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
6754 if (iGate < 256)
6755 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-%2X" : "TRAP-%2X", iGate);
6756 else
6757 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-??" : "TRAP-??");
6758 return szTrap;
6759 }
6760 else
6761 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
6762 return "DUPFUNC";
6763 else
6764 if (pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL)
6765 return "FUNCCALL";
6766 else
6767 if (pPatch->flags & PATMFL_TRAMPOLINE)
6768 return "TRAMP";
6769 else
6770 return patmGetInstructionString(pPatch->opcode, pPatch->flags);
6771}
6772
6773static const char *PATMPatchState(PVM pVM, PPATCHINFO pPatch)
6774{
6775 NOREF(pVM);
6776 switch(pPatch->uState)
6777 {
6778 case PATCH_ENABLED:
6779 return "ENA";
6780 case PATCH_DISABLED:
6781 return "DIS";
6782 case PATCH_DIRTY:
6783 return "DIR";
6784 case PATCH_UNUSABLE:
6785 return "UNU";
6786 case PATCH_REFUSED:
6787 return "REF";
6788 case PATCH_DISABLE_PENDING:
6789 return "DIP";
6790 default:
6791 AssertFailed();
6792 return " ";
6793 }
6794}
6795
6796/**
6797 * Resets the sample.
6798 * @param pVM Pointer to the VM.
6799 * @param pvSample The sample registered using STAMR3RegisterCallback.
6800 */
6801static void patmResetStat(PVM pVM, void *pvSample)
6802{
6803 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6804 Assert(pPatch);
6805
6806 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A = 0;
6807 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B = 0;
6808}
6809
6810/**
6811 * Prints the sample into the buffer.
6812 *
6813 * @param pVM Pointer to the VM.
6814 * @param pvSample The sample registered using STAMR3RegisterCallback.
6815 * @param pszBuf The buffer to print into.
6816 * @param cchBuf The size of the buffer.
6817 */
6818static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf)
6819{
6820 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6821 Assert(pPatch);
6822
6823 Assert(pPatch->uState != PATCH_REFUSED);
6824 Assert(!(pPatch->flags & (PATMFL_REPLACE_FUNCTION_CALL|PATMFL_MMIO_ACCESS)));
6825
6826 RTStrPrintf(pszBuf, cchBuf, "size %04x ->%3s %8s - %08d - %08d",
6827 pPatch->cbPatchBlockSize, PATMPatchState(pVM, pPatch), PATMPatchType(pVM, pPatch),
6828 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A, pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B);
6829}
6830
6831/**
6832 * Returns the GC address of the corresponding patch statistics counter
6833 *
6834 * @returns Stat address
6835 * @param pVM Pointer to the VM.
6836 * @param pPatch Patch structure
6837 */
6838RTRCPTR patmPatchQueryStatAddress(PVM pVM, PPATCHINFO pPatch)
6839{
6840 Assert(pPatch->uPatchIdx != PATM_STAT_INDEX_NONE);
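    /* Each patch owns one STAMRATIOU32 slot in the raw-mode statistics array; return
       the guest context address of its u32A member. */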
6841 return pVM->patm.s.pStatsGC + sizeof(STAMRATIOU32) * pPatch->uPatchIdx + RT_OFFSETOF(STAMRATIOU32, u32A);
6842}
6843
6844#endif /* VBOX_WITH_STATISTICS */
6845#ifdef VBOX_WITH_DEBUGGER
6846
6847/**
6848 * The '.patmoff' command.
6849 *
6850 * @returns VBox status.
6851 * @param pCmd Pointer to the command descriptor (as registered).
6852 * @param pCmdHlp Pointer to command helper functions.
6853 * @param pUVM The user mode VM handle.
6854 * @param paArgs Pointer to (readonly) array of arguments.
6855 * @param cArgs Number of arguments in the array.
6856 */
6857static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
6858{
6859 /*
6860 * Validate input.
6861 */
6862 NOREF(cArgs); NOREF(paArgs);
6863 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
6864 PVM pVM = pUVM->pVM;
6865 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
6866
6867 if (HMIsEnabled(pVM))
6868 return DBGCCmdHlpPrintf(pCmdHlp, "PATM is permanently disabled by HM.\n");
6869
6870 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, DisableAllPatches, pVM);
6871 PATMR3AllowPatching(pVM->pUVM, false);
6872 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching disabled\n");
6873}
6874
6875/**
6876 * The '.patmon' command.
6877 *
6878 * @returns VBox status.
6879 * @param pCmd Pointer to the command descriptor (as registered).
6880 * @param pCmdHlp Pointer to command helper functions.
6881 * @param pUVM The user mode VM handle.
6882 * @param paArgs Pointer to (readonly) array of arguments.
6883 * @param cArgs Number of arguments in the array.
6884 */
6885static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
6886{
6887 /*
6888 * Validate input.
6889 */
6890 NOREF(cArgs); NOREF(paArgs);
6891 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
6892 PVM pVM = pUVM->pVM;
6893 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
6894
6895 if (HMIsEnabled(pVM))
6896 return DBGCCmdHlpPrintf(pCmdHlp, "PATM is permanently disabled by HM.\n");
6897
6898 PATMR3AllowPatching(pVM->pUVM, true);
6899 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, EnableAllPatches, pVM);
6900 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching enabled\n");
6901}
6902
6903#endif /* VBOX_WITH_DEBUGGER */