VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PATM.cpp@ 55889

Last change on this file since 55889 was 55889, checked in by vboxsync, 10 years ago

VMM: Split up virtual handlers just like the physical ones, such that the kind+callbacks are stored separately from the actual handler registration. This should hopefully save a byte or two when adding more callbacks. Implemented the pvUser for ring-3 callbacks.

1/* $Id: PATM.cpp 55889 2015-05-17 18:01:37Z vboxsync $ */
2/** @file
3 * PATM - Dynamic Guest OS Patching Manager
4 *
5 * @note Never ever reuse patch memory!!
6 */
7
8/*
9 * Copyright (C) 2006-2014 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_PATM
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/stam.h>
26#include <VBox/vmm/pdmapi.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/cpum.h>
29#include <VBox/vmm/cpumdis.h>
30#include <VBox/vmm/iom.h>
31#include <VBox/vmm/mm.h>
32#include <VBox/vmm/em.h>
33#include <VBox/vmm/hm.h>
34#include <VBox/vmm/ssm.h>
35#include <VBox/vmm/trpm.h>
36#include <VBox/vmm/cfgm.h>
37#include <VBox/param.h>
38#include <VBox/vmm/selm.h>
39#include <VBox/vmm/csam.h>
40#include <iprt/avl.h>
41#include "PATMInternal.h"
42#include "PATMPatch.h"
43#include <VBox/vmm/vm.h>
44#include <VBox/vmm/uvm.h>
45#include <VBox/dbg.h>
46#include <VBox/err.h>
47#include <VBox/log.h>
48#include <iprt/assert.h>
49#include <iprt/asm.h>
50#include <VBox/dis.h>
51#include <VBox/disopcode.h>
52#include "internal/pgm.h"
53
54#include <iprt/string.h>
55#include "PATMA.h"
56
57//#define PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
58//#define PATM_DISABLE_ALL
59
60/**
61 * Refresh trampoline patch state.
62 */
63typedef struct PATMREFRESHPATCH
64{
65 /** Pointer to the VM structure. */
66 PVM pVM;
67 /** The trampoline patch record. */
68 PPATCHINFO pPatchTrampoline;
69 /** The new patch we want to jump to. */
70 PPATCHINFO pPatchRec;
71} PATMREFRESHPATCH, *PPATMREFRESHPATCH;
72
73
74#define PATMREAD_RAWCODE 1 /* read code as-is */
75#define PATMREAD_ORGCODE 2 /* read original guest opcode bytes; not the patched bytes */
76#define PATMREAD_NOCHECK 4 /* don't check for patch conflicts */
77
78/*
79 * Private structure used during disassembly
80 */
81typedef struct
82{
83 PVM pVM;
84 PPATCHINFO pPatchInfo;
85 R3PTRTYPE(uint8_t *) pbInstrHC;
86 RTRCPTR pInstrGC;
87 uint32_t fReadFlags;
88} PATMDISASM, *PPATMDISASM;
89
90
91/*******************************************************************************
92* Internal Functions *
93*******************************************************************************/
94
95static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pPatch);
96static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
97static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
98
99#ifdef LOG_ENABLED // keep gcc quiet
100static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC);
101#endif
102#ifdef VBOX_WITH_STATISTICS
103static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch);
104static void patmResetStat(PVM pVM, void *pvSample);
105static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf);
106#endif
107
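/* Patch memory is mapped both in ring-3 (HC) and in the raw-mode context (GC);
 * the two macros below convert between the mappings by rebasing the offset
 * from one base address onto the other. */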
108#define patmPatchHCPtr2PatchGCPtr(pVM, pHC) (pVM->patm.s.pPatchMemGC + (pHC - pVM->patm.s.pPatchMemHC))
109#define patmPatchGCPtr2PatchHCPtr(pVM, pGC) (pVM->patm.s.pPatchMemHC + (pGC - pVM->patm.s.pPatchMemGC))
110
111static int patmReinit(PVM pVM);
112static DECLCALLBACK(int) patmR3RelocatePatches(PAVLOU32NODECORE pNode, void *pParam);
113static RTRCPTR patmR3GuestGCPtrToPatchGCPtrSimple(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC);
114static int patmR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch);
115
116#ifdef VBOX_WITH_DEBUGGER
117static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM);
118static FNDBGCCMD patmr3CmdOn;
119static FNDBGCCMD patmr3CmdOff;
120
121/** Command descriptors. */
122static const DBGCCMD g_aCmds[] =
123{
124 /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, fFlags, pfnHandler, pszSyntax, ..., pszDescription */
125 { "patmon", 0, 0, NULL, 0, 0, patmr3CmdOn, "", "Enable patching." },
126 { "patmoff", 0, 0, NULL, 0, 0, patmr3CmdOff, "", "Disable patching." },
127};
128#endif
129
130/* Don't want to break saved states, so put it here as a global variable. */
131static unsigned int cIDTHandlersDisabled = 0;
132
133/**
134 * Initializes the PATM.
135 *
136 * @returns VBox status code.
137 * @param pVM Pointer to the VM.
138 */
139VMMR3_INT_DECL(int) PATMR3Init(PVM pVM)
140{
141 int rc;
142
143 /*
144 * We only need a saved state dummy loader if HM is enabled.
145 */
146 if (HMIsEnabled(pVM))
147 {
148 pVM->fPATMEnabled = false;
149 return SSMR3RegisterStub(pVM, "PATM", 0);
150 }
151
152 /*
153 * Raw-mode.
154 */
155 Log(("PATMR3Init: Patch record size %d\n", sizeof(PATCHINFO)));
156
157 /* These values can't change as they are hardcoded in patch code (old saved states!) */
158 AssertCompile(VMCPU_FF_TIMER == RT_BIT_32(2));
159 AssertCompile(VM_FF_REQUEST == VMCPU_FF_REQUEST);
160 AssertCompile(VMCPU_FF_INTERRUPT_APIC == RT_BIT_32(0));
161 AssertCompile(VMCPU_FF_INTERRUPT_PIC == RT_BIT_32(1));
162
163 AssertReleaseMsg(g_fPatmInterruptFlag == (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST),
164 ("Interrupt flags out of sync!! g_fPatmInterruptFlag=%#x expected %#x. broken assembler?\n", g_fPatmInterruptFlag, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST));
165
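 /* Rough layout of the single hypervisor allocation below (a sketch derived
 * from the offsets used later in this function):
 *   +0                                patch code   (PATCH_MEMORY_SIZE)
 *   +PATCH_MEMORY_SIZE                spare page   (PAGE_SIZE, safety margin)
 *   + ... + PAGE_SIZE                 PATM stack   (PATM_STACK_TOTAL_SIZE)
 *   + ... + PATM_STACK_TOTAL_SIZE     GC state     (one page, PATMGCSTATE)
 *   + ... + PAGE_SIZE                 statistics   (PATM_STAT_MEMSIZE)
 */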
166 /* Allocate patch memory and GC patch state memory. */
167 pVM->patm.s.cbPatchMem = PATCH_MEMORY_SIZE;
168 /* Add another page in case the generated code is much larger than expected. */
169 /** @todo bad safety precaution */
170 rc = MMR3HyperAllocOnceNoRel(pVM, PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE + PATM_STAT_MEMSIZE, PAGE_SIZE, MM_TAG_PATM, (void **)&pVM->patm.s.pPatchMemHC);
171 if (RT_FAILURE(rc))
172 {
173 Log(("MMHyperAlloc failed with %Rrc\n", rc));
174 return rc;
175 }
176 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
177
178 /* PATM stack page for call instruction execution. (2 parts: one for our private stack and one to store the original return address) */
179 pVM->patm.s.pGCStackHC = (RTRCPTR *)(pVM->patm.s.pPatchMemHC + PATCH_MEMORY_SIZE + PAGE_SIZE);
180 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
181
182 patmR3DbgInit(pVM);
183
184 /*
185 * Hypervisor memory for GC status data (read/write)
186 *
187 * Note1: This is non-critical data; if trashed by the guest, then it will only cause problems for itself
188 * Note2: This doesn't really belong here, but we need access to it for relocation purposes
189 *
190 */
191 Assert(sizeof(PATMGCSTATE) < PAGE_SIZE); /* Note: hardcoded dependencies on this exist. */
192 pVM->patm.s.pGCStateHC = (PPATMGCSTATE)((uint8_t *)pVM->patm.s.pGCStackHC + PATM_STACK_TOTAL_SIZE);
193 pVM->patm.s.pGCStateGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
194
195 /* Hypervisor memory for patch statistics */
196 pVM->patm.s.pStatsHC = (PSTAMRATIOU32)((uint8_t *)pVM->patm.s.pGCStateHC + PAGE_SIZE);
197 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
198
199 /* Memory for patch lookup trees. */
200 rc = MMHyperAlloc(pVM, sizeof(*pVM->patm.s.PatchLookupTreeHC), 0, MM_TAG_PATM, (void **)&pVM->patm.s.PatchLookupTreeHC);
201 AssertRCReturn(rc, rc);
202 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
203
204#ifdef RT_ARCH_AMD64 /* see patmReinit(). */
205 /* Check CFGM option. */
206 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "PATMEnabled", &pVM->fPATMEnabled);
207 if (RT_FAILURE(rc))
208# ifdef PATM_DISABLE_ALL
209 pVM->fPATMEnabled = false;
210# else
211 pVM->fPATMEnabled = true;
212# endif
213#endif
214
215 rc = patmReinit(pVM);
216 AssertRC(rc);
217 if (RT_FAILURE(rc))
218 return rc;
219
220 /*
221 * Register save and load state notifiers.
222 */
223 rc = SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SAVED_STATE_VERSION, sizeof(pVM->patm.s) + PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE,
224 NULL, NULL, NULL,
225 NULL, patmR3Save, NULL,
226 NULL, patmR3Load, NULL);
227 AssertRCReturn(rc, rc);
228
229#ifdef VBOX_WITH_DEBUGGER
230 /*
231 * Debugger commands.
232 */
233 static bool s_fRegisteredCmds = false;
234 if (!s_fRegisteredCmds)
235 {
236 int rc2 = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
237 if (RT_SUCCESS(rc2))
238 s_fRegisteredCmds = true;
239 }
240#endif
241
242#ifdef VBOX_WITH_STATISTICS
243 STAM_REG(pVM, &pVM->patm.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/PATM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
244 STAM_REG(pVM, &pVM->patm.s.StatPATMMemoryUsed,STAMTYPE_COUNTER, "/PATM/MemoryUsed", STAMUNIT_OCCURENCES, "The amount of hypervisor heap used for patches.");
245 STAM_REG(pVM, &pVM->patm.s.StatDisabled, STAMTYPE_COUNTER, "/PATM/Patch/Disabled", STAMUNIT_OCCURENCES, "Number of times patches were disabled.");
246 STAM_REG(pVM, &pVM->patm.s.StatEnabled, STAMTYPE_COUNTER, "/PATM/Patch/Enabled", STAMUNIT_OCCURENCES, "Number of times patches were enabled.");
247 STAM_REG(pVM, &pVM->patm.s.StatDirty, STAMTYPE_COUNTER, "/PATM/Patch/Dirty", STAMUNIT_OCCURENCES, "Number of times patches were marked dirty.");
248 STAM_REG(pVM, &pVM->patm.s.StatUnusable, STAMTYPE_COUNTER, "/PATM/Patch/Unusable", STAMUNIT_OCCURENCES, "Number of unusable patches (conflicts).");
249 STAM_REG(pVM, &pVM->patm.s.StatInstalled, STAMTYPE_COUNTER, "/PATM/Patch/Installed", STAMUNIT_OCCURENCES, "Number of installed patches.");
250 STAM_REG(pVM, &pVM->patm.s.StatInt3Callable, STAMTYPE_COUNTER, "/PATM/Patch/Int3Callable", STAMUNIT_OCCURENCES, "Number of cli patches turned into int3 patches.");
251
252 STAM_REG(pVM, &pVM->patm.s.StatInt3BlockRun, STAMTYPE_COUNTER, "/PATM/Patch/Run/Int3", STAMUNIT_OCCURENCES, "Number of times an int3 block patch was executed.");
253 STAMR3RegisterF(pVM, &pVM->patm.s.pGCStateHC->uPatchCalls, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Patch/Run/Normal");
254
255 STAM_REG(pVM, &pVM->patm.s.StatInstalledFunctionPatches, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Function", STAMUNIT_OCCURENCES, "Number of installed function duplication patches.");
256 STAM_REG(pVM, &pVM->patm.s.StatInstalledTrampoline, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Trampoline", STAMUNIT_OCCURENCES, "Number of installed trampoline patches.");
257 STAM_REG(pVM, &pVM->patm.s.StatInstalledJump, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Jump", STAMUNIT_OCCURENCES, "Number of installed jump patches.");
258
259 STAM_REG(pVM, &pVM->patm.s.StatOverwritten, STAMTYPE_COUNTER, "/PATM/Patch/Overwritten", STAMUNIT_OCCURENCES, "Number of overwritten patches.");
260 STAM_REG(pVM, &pVM->patm.s.StatFixedConflicts,STAMTYPE_COUNTER, "/PATM/Patch/ConflictFixed", STAMUNIT_OCCURENCES, "Number of fixed conflicts.");
261 STAM_REG(pVM, &pVM->patm.s.StatFlushed, STAMTYPE_COUNTER, "/PATM/Patch/Flushed", STAMUNIT_OCCURENCES, "Number of flushes of pages with patch jumps.");
262 STAM_REG(pVM, &pVM->patm.s.StatMonitored, STAMTYPE_COUNTER, "/PATM/Patch/Monitored", STAMUNIT_OCCURENCES, "Number of patches in monitored patch pages.");
263 STAM_REG(pVM, &pVM->patm.s.StatPageBoundaryCrossed, STAMTYPE_COUNTER, "/PATM/Patch/BoundaryCross", STAMUNIT_OCCURENCES, "Number of refused patches due to patch jump crossing page boundary.");
264
265 STAM_REG(pVM, &pVM->patm.s.StatHandleTrap, STAMTYPE_PROFILE, "/PATM/HandleTrap", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3HandleTrap");
266 STAM_REG(pVM, &pVM->patm.s.StatPushTrap, STAMTYPE_COUNTER, "/PATM/HandleTrap/PushWP", STAMUNIT_OCCURENCES, "Number of traps due to monitored stack pages.");
267
268 STAM_REG(pVM, &pVM->patm.s.StatSwitchBack, STAMTYPE_COUNTER, "/PATM/SwitchBack", STAMUNIT_OCCURENCES, "Switch back to original guest code when IF=1 & executing PATM instructions");
269 STAM_REG(pVM, &pVM->patm.s.StatSwitchBackFail,STAMTYPE_COUNTER, "/PATM/SwitchBackFail", STAMUNIT_OCCURENCES, "Failed switch back to original guest code when IF=1 & executing PATM instructions");
270
271 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQFailed, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Failed", STAMUNIT_OCCURENCES, "Nr of failed PATMR3DuplicateFunctionRequest calls");
272 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQSuccess, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Success", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls");
273 STAM_REG(pVM, &pVM->patm.s.StatDuplicateUseExisting,STAMTYPE_COUNTER, "/PATM/Function/DupREQ/UseExist", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls when using an existing patch");
274
275 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupInsert, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Insert", STAMUNIT_OCCURENCES, "Nr of successful function address insertions");
276 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupReplace, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Replace", STAMUNIT_OCCURENCES, "Nr of successful function address replacements");
277 STAM_REG(pVM, &pVM->patm.s.StatU32FunctionMaxSlotsUsed, STAMTYPE_U32_RESET,"/PATM/Function/Lookup/MaxSlots", STAMUNIT_OCCURENCES, "Maximum nr of lookup slots used in all call patches");
278
279 STAM_REG(pVM, &pVM->patm.s.StatFunctionFound, STAMTYPE_COUNTER, "/PATM/Function/Found", STAMUNIT_OCCURENCES, "Nr of successful function patch lookups in GC");
280 STAM_REG(pVM, &pVM->patm.s.StatFunctionNotFound, STAMTYPE_COUNTER, "/PATM/Function/NotFound", STAMUNIT_OCCURENCES, "Nr of failed function patch lookups in GC");
281
282 STAM_REG(pVM, &pVM->patm.s.StatPatchWrite, STAMTYPE_PROFILE, "/PATM/Write/Handle", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3PatchWrite");
283 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteDetect, STAMTYPE_PROFILE, "/PATM/Write/Detect", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMIsWriteToPatchPage");
284 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpreted, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Success", STAMUNIT_OCCURENCES, "Nr of interpreted patch writes.");
285 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpretedFailed, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Failed", STAMUNIT_OCCURENCES, "Nr of failed interpreted patch writes.");
286
287 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshSuccess, STAMTYPE_COUNTER, "/PATM/Refresh/Success", STAMUNIT_OCCURENCES, "Successful patch refreshes");
288 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshFailed, STAMTYPE_COUNTER, "/PATM/Refresh/Failure", STAMUNIT_OCCURENCES, "Failed patch refreshes");
289
290 STAM_REG(pVM, &pVM->patm.s.StatPatchPageInserted, STAMTYPE_COUNTER, "/PATM/Page/Inserted", STAMUNIT_OCCURENCES, "Nr of inserted guest pages that were patched");
291 STAM_REG(pVM, &pVM->patm.s.StatPatchPageRemoved, STAMTYPE_COUNTER, "/PATM/Page/Removed", STAMUNIT_OCCURENCES, "Nr of removed guest pages that were patched");
292
293 STAM_REG(pVM, &pVM->patm.s.StatInstrDirty, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Detected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty.");
294 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyGood, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Corrected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and corrected later on.");
295 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyBad, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Failed", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and we were not able to correct them.");
296
297 STAM_REG(pVM, &pVM->patm.s.StatSysEnter, STAMTYPE_COUNTER, "/PATM/Emul/SysEnter", STAMUNIT_OCCURENCES, "Number of times sysenter was emulated.");
298 STAM_REG(pVM, &pVM->patm.s.StatSysExit, STAMTYPE_COUNTER, "/PATM/Emul/SysExit" , STAMUNIT_OCCURENCES, "Number of times sysexit was emulated.");
299 STAM_REG(pVM, &pVM->patm.s.StatEmulIret, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Success", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
300 STAM_REG(pVM, &pVM->patm.s.StatEmulIretFailed, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Failed", STAMUNIT_OCCURENCES, "Number of times iret emulation failed.");
301
302 STAM_REG(pVM, &pVM->patm.s.StatGenRet, STAMTYPE_COUNTER, "/PATM/Gen/Ret" , STAMUNIT_OCCURENCES, "Number of generated ret instructions.");
303 STAM_REG(pVM, &pVM->patm.s.StatGenRetReused, STAMTYPE_COUNTER, "/PATM/Gen/RetReused" , STAMUNIT_OCCURENCES, "Number of reused ret instructions.");
304 STAM_REG(pVM, &pVM->patm.s.StatGenCall, STAMTYPE_COUNTER, "/PATM/Gen/Call", STAMUNIT_OCCURENCES, "Number of generated call instructions.");
305 STAM_REG(pVM, &pVM->patm.s.StatGenJump, STAMTYPE_COUNTER, "/PATM/Gen/Jmp" , STAMUNIT_OCCURENCES, "Number of generated indirect jump instructions.");
306 STAM_REG(pVM, &pVM->patm.s.StatGenPopf, STAMTYPE_COUNTER, "/PATM/Gen/Popf" , STAMUNIT_OCCURENCES, "Number of generated popf instructions.");
307
308 STAM_REG(pVM, &pVM->patm.s.StatCheckPendingIRQ, STAMTYPE_COUNTER, "/PATM/GC/CheckIRQ" , STAMUNIT_OCCURENCES, "Number of traps that ask to check for pending irqs.");
309#endif /* VBOX_WITH_STATISTICS */
310
311 Log(("g_patmCallRecord.cbFunction %u\n", g_patmCallRecord.cbFunction));
312 Log(("g_patmCallIndirectRecord.cbFunction %u\n", g_patmCallIndirectRecord.cbFunction));
313 Log(("g_patmRetRecord.cbFunction %u\n", g_patmRetRecord.cbFunction));
314 Log(("g_patmJumpIndirectRecord.cbFunction %u\n", g_patmJumpIndirectRecord.cbFunction));
315 Log(("g_patmPopf32Record.cbFunction %u\n", g_patmPopf32Record.cbFunction));
316 Log(("g_patmIretRecord.cbFunction %u\n", g_patmIretRecord.cbFunction));
317 Log(("g_patmStiRecord.cbFunction %u\n", g_patmStiRecord.cbFunction));
318 Log(("g_patmCheckIFRecord.cbFunction %u\n", g_patmCheckIFRecord.cbFunction));
319
320 return rc;
321}
322
323/**
324 * Finalizes HMA page attributes.
325 *
326 * @returns VBox status code.
327 * @param pVM Pointer to the VM.
328 */
329VMMR3_INT_DECL(int) PATMR3InitFinalize(PVM pVM)
330{
331 if (HMIsEnabled(pVM))
332 return VINF_SUCCESS;
333
334 /*
335 * The GC state, stack and statistics must be read/write for the guest
336 * (supervisor only of course).
337 *
338 * Remember, we run guest code at ring-1 and ring-2 levels, which are
339 * considered supervisor levels by the paging structures. We run the VMM
340 * in ring-0 with CR0.WP=0 and mapping all VMM structures as read-only
341 * pages. The following structures are exceptions and must be mapped with
342 * write access so the ring-1 and ring-2 code can modify them.
343 */
344 int rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStateGC, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
345 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to make the GCState accessible to ring-1 and ring-2 code: %Rrc\n", rc), rc);
346
347 rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStackGC, PATM_STACK_TOTAL_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
348 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to make the GCStack accessible to ring-1 and ring-2 code: %Rrc\n", rc), rc);
349
350 rc = PGMMapSetPage(pVM, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
351 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to make the stats struct accessible to ring-1 and ring-2 code: %Rrc\n", rc), rc);
352
353 /*
354 * Find the patch helper segment so we can identify code running there as patch code.
355 */
356 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_PatchHlpBegin", &pVM->patm.s.pbPatchHelpersRC);
357 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to resolve g_PatchHlpBegin: %Rrc\n", rc), rc);
358 pVM->patm.s.pbPatchHelpersR3 = (uint8_t *)MMHyperRCToR3(pVM, pVM->patm.s.pbPatchHelpersRC);
359 AssertLogRelReturn(pVM->patm.s.pbPatchHelpersR3 != NULL, VERR_INTERNAL_ERROR_3);
360
361 RTRCPTR RCPtrEnd;
362 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_PatchHlpEnd", &RCPtrEnd);
363 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("Failed to resolve g_PatchHlpEnd: %Rrc\n", rc), rc);
364
365 pVM->patm.s.cbPatchHelpers = RCPtrEnd - pVM->patm.s.pbPatchHelpersRC;
366 AssertLogRelMsgReturn(pVM->patm.s.cbPatchHelpers < _128K,
367 ("%RRv-%RRv => %#x\n", pVM->patm.s.pbPatchHelpersRC, RCPtrEnd, pVM->patm.s.cbPatchHelpers),
368 VERR_INTERNAL_ERROR_4);
369
370
371 /*
372 * Register the virtual page access handler type.
373 */
374 rc = PGMR3HandlerVirtualTypeRegister(pVM, PGMVIRTHANDLERKIND_ALL, false /*fRelocUserRC*/,
375 NULL /*pfnInvalidateR3*/,
376 patmVirtPageHandler,
377 "PATMGCMonitorPage", NULL /*pszModRC*/,
378 "PATMMonitorPatchJump", &pVM->patm.s.hMonitorPageType);
379 AssertRCReturn(rc, rc);
380
381 return VINF_SUCCESS;
382}
383
384/**
385 * (Re)initializes PATM
386 *
387 * @param pVM The VM.
388 */
389static int patmReinit(PVM pVM)
390{
391 int rc;
392
393 /*
394 * Assert alignment and sizes.
395 */
396 AssertRelease(!(RT_OFFSETOF(VM, patm.s) & 31));
397 AssertRelease(sizeof(pVM->patm.s) <= sizeof(pVM->patm.padding));
398
399 /*
400 * Setup any fixed pointers and offsets.
401 */
402 pVM->patm.s.offVM = RT_OFFSETOF(VM, patm);
403
404#ifndef RT_ARCH_AMD64 /* would be nice if this was changed everywhere. was driving me crazy on AMD64. */
405#ifndef PATM_DISABLE_ALL
406 pVM->fPATMEnabled = true;
407#endif
408#endif
409
410 Assert(pVM->patm.s.pGCStateHC);
411 memset(pVM->patm.s.pGCStateHC, 0, PAGE_SIZE);
412 AssertReleaseMsg(pVM->patm.s.pGCStateGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStateGC));
413
414 Log(("Patch memory allocated at %p - %RRv\n", pVM->patm.s.pPatchMemHC, pVM->patm.s.pPatchMemGC));
415 pVM->patm.s.pGCStateHC->uVMFlags = X86_EFL_IF;
416
417 Assert(pVM->patm.s.pGCStackHC);
418 memset(pVM->patm.s.pGCStackHC, 0, PAGE_SIZE);
419 AssertReleaseMsg(pVM->patm.s.pGCStackGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStackGC));
420 pVM->patm.s.pGCStateHC->Psp = PATM_STACK_SIZE;
421 pVM->patm.s.pGCStateHC->fPIF = 1; /* PATM Interrupt Flag */
422
423 Assert(pVM->patm.s.pStatsHC);
424 memset(pVM->patm.s.pStatsHC, 0, PATM_STAT_MEMSIZE);
425 AssertReleaseMsg(pVM->patm.s.pStatsGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pStatsGC));
426
427 Assert(pVM->patm.s.pPatchMemHC);
428 Assert(pVM->patm.s.pPatchMemGC == MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC));
429 memset(pVM->patm.s.pPatchMemHC, 0, PATCH_MEMORY_SIZE);
430 AssertReleaseMsg(pVM->patm.s.pPatchMemGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pPatchMemHC));
431
432 /* Needed for future patching of sldt/sgdt/sidt/str etc. */
433 pVM->patm.s.pCPUMCtxGC = VM_RC_ADDR(pVM, CPUMQueryGuestCtxPtr(VMMGetCpu(pVM)));
434
435 Assert(pVM->patm.s.PatchLookupTreeHC);
436 Assert(pVM->patm.s.PatchLookupTreeGC == MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC));
437
438 /*
439 * (Re)Initialize PATM structure
440 */
441 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
442 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr);
443 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
444 pVM->patm.s.offPatchMem = 16; /* don't start with zero here */
445 pVM->patm.s.uCurrentPatchIdx = 1; /* Index zero is a dummy */
446 pVM->patm.s.pvFaultMonitor = 0;
447 pVM->patm.s.deltaReloc = 0;
448
449 /* Lowest and highest patched instruction */
450 pVM->patm.s.pPatchedInstrGCLowest = ~0;
451 pVM->patm.s.pPatchedInstrGCHighest = 0;
452
453 pVM->patm.s.PatchLookupTreeHC->PatchTree = 0;
454 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
455 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
456
457 pVM->patm.s.pfnSysEnterPatchGC = 0;
458 pVM->patm.s.pfnSysEnterGC = 0;
459
460 pVM->patm.s.fOutOfMemory = false;
461
462 pVM->patm.s.pfnHelperCallGC = 0;
463 patmR3DbgReset(pVM);
464
465 /* Generate all global functions to be used by future patches. */
466 /* We generate a fake patch in order to use the existing code for relocation. */
467 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pVM->patm.s.pGlobalPatchRec);
468 if (RT_FAILURE(rc))
469 {
470 Log(("Out of memory!!!!\n"));
471 return VERR_NO_MEMORY;
472 }
473 pVM->patm.s.pGlobalPatchRec->patch.flags = PATMFL_GLOBAL_FUNCTIONS;
474 pVM->patm.s.pGlobalPatchRec->patch.uState = PATCH_ENABLED;
475 pVM->patm.s.pGlobalPatchRec->patch.pPatchBlockOffset = pVM->patm.s.offPatchMem;
476
477 rc = patmPatchGenGlobalFunctions(pVM, &pVM->patm.s.pGlobalPatchRec->patch);
478 AssertRC(rc);
479
480 /* Update free pointer in patch memory. */
481 pVM->patm.s.offPatchMem += pVM->patm.s.pGlobalPatchRec->patch.uCurPatchOffset;
482 /* Round to next 8 byte boundary. */
483 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
484
485
486 return rc;
487}
488
489
490/**
491 * Applies relocations to data and code managed by this
492 * component. This function will be called at init and
493 * whenever the VMM needs to relocate itself inside the GC.
494 *
495 * The PATM will update the addresses used by the switcher.
496 *
497 * @param pVM The VM.
498 * @param offDelta The relocation delta.
499 */
500VMMR3_INT_DECL(void) PATMR3Relocate(PVM pVM, RTRCINTPTR offDelta)
501{
502 if (HMIsEnabled(pVM))
503 return;
504
505 RTRCPTR GCPtrNew = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
506 Assert((RTRCINTPTR)(GCPtrNew - pVM->patm.s.pGCStateGC) == offDelta);
507
508 Log(("PATMR3Relocate from %RRv to %RRv - delta %08X\n", pVM->patm.s.pGCStateGC, GCPtrNew, offDelta));
509 if (offDelta)
510 {
511 PCPUMCTX pCtx;
512
513 /* Update CPUMCTX guest context pointer. */
514 pVM->patm.s.pCPUMCtxGC += offDelta;
515
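 /* Fix up all patch records first (patmR3RelocatePatches reads deltaReloc);
 the cached GC pointers are rebased afterwards. */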
516 pVM->patm.s.deltaReloc = offDelta;
517 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmR3RelocatePatches, (void *)pVM);
518
519 pVM->patm.s.pGCStateGC = GCPtrNew;
520 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
521 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
522 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
523 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
524
525 if (pVM->patm.s.pfnSysEnterPatchGC)
526 pVM->patm.s.pfnSysEnterPatchGC += offDelta;
527
528 /* If we are running patch code right now, then also adjust EIP. */
529 pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));
530 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
531 pCtx->eip += offDelta;
532
533 /* Deal with the global patch functions. */
534 pVM->patm.s.pfnHelperCallGC += offDelta;
535 pVM->patm.s.pfnHelperRetGC += offDelta;
536 pVM->patm.s.pfnHelperIretGC += offDelta;
537 pVM->patm.s.pfnHelperJumpGC += offDelta;
538
539 pVM->patm.s.pbPatchHelpersRC += offDelta;
540
541 patmR3RelocatePatches(&pVM->patm.s.pGlobalPatchRec->Core, (void *)pVM);
542 }
543}
544
545
546/**
547 * Terminates the PATM.
548 *
549 * Termination means cleaning up and freeing all resources;
550 * the VM itself is at this point powered off or suspended.
551 *
552 * @returns VBox status code.
553 * @param pVM Pointer to the VM.
554 */
555VMMR3_INT_DECL(int) PATMR3Term(PVM pVM)
556{
557 if (HMIsEnabled(pVM))
558 return VINF_SUCCESS;
559
560 patmR3DbgTerm(pVM);
561
562 /* Memory was all allocated from the two MM heaps and requires no freeing. */
563 return VINF_SUCCESS;
564}
565
566
567/**
568 * PATM reset callback.
569 *
570 * @returns VBox status code.
571 * @param pVM The VM which is reset.
572 */
573VMMR3_INT_DECL(int) PATMR3Reset(PVM pVM)
574{
575 Log(("PATMR3Reset\n"));
576 if (HMIsEnabled(pVM))
577 return VINF_SUCCESS;
578
579 /* Free all patches. */
580 for (;;)
581 {
582 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32RemoveBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, 0, true);
583 if (pPatchRec)
584 patmR3RemovePatch(pVM, pPatchRec, true);
585 else
586 break;
587 }
588 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
589 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
590 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
591 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
592
593 int rc = patmReinit(pVM);
594 if (RT_SUCCESS(rc))
595 rc = PATMR3InitFinalize(pVM); /* paranoia */
596
597 return rc;
598}
599
600/**
601 * @callback_method_impl{FNDISREADBYTES}
602 */
603static DECLCALLBACK(int) patmReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
604{
605 PATMDISASM *pDisInfo = (PATMDISASM *)pDis->pvUser;
606
607/** @todo change this to read more! */
608 /*
609 * Trap/interrupt handlers typically call common code on entry, which might already have patches inserted.
610 * As we currently don't support calling patch code from patch code, we'll let it read the original opcode bytes instead.
611 */
612 /** @todo could change in the future! */
613 if (pDisInfo->fReadFlags & PATMREAD_ORGCODE)
614 {
615 size_t cbRead = cbMaxRead;
616 RTUINTPTR uSrcAddr = pDis->uInstrAddr + offInstr;
617 int rc = PATMR3ReadOrgInstr(pDisInfo->pVM, pDis->uInstrAddr + offInstr, &pDis->abInstr[offInstr], cbRead, &cbRead);
618 if (RT_SUCCESS(rc))
619 {
620 if (cbRead >= cbMinRead)
621 {
622 pDis->cbCachedInstr = offInstr + (uint8_t)cbRead;
623 return VINF_SUCCESS;
624 }
625
626 cbMinRead -= (uint8_t)cbRead;
627 cbMaxRead -= (uint8_t)cbRead;
628 offInstr += (uint8_t)cbRead;
629 uSrcAddr += cbRead;
630 }
631
632#ifdef VBOX_STRICT
633 if ( !(pDisInfo->pPatchInfo->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER))
634 && !(pDisInfo->fReadFlags & PATMREAD_NOCHECK))
635 {
636 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pDis->uInstrAddr + offInstr, NULL) == false);
637 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pDis->uInstrAddr + offInstr + cbMinRead-1, NULL) == false);
638 }
639#endif
640 }
641
642 int rc = VINF_SUCCESS;
643 RTGCPTR32 uSrcAddr = (RTGCPTR32)pDis->uInstrAddr + offInstr;
644 if ( !pDisInfo->pbInstrHC
645 || ( PAGE_ADDRESS(pDisInfo->pInstrGC) != PAGE_ADDRESS(uSrcAddr + cbMinRead - 1)
646 && !PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr)))
647 {
648 Assert(!PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr));
649 rc = PGMPhysSimpleReadGCPtr(&pDisInfo->pVM->aCpus[0], &pDis->abInstr[offInstr], uSrcAddr, cbMinRead);
650 offInstr += cbMinRead;
651 }
652 else
653 {
654 /*
655 * pbInstrHC is the base address; adjust according to the GC pointer.
656 *
657 * Try to read the maximum number of bytes here. Since the disassembler only
658 * ever uses these bytes for the current instruction, it doesn't matter
659 * much if we accidentally read the start of the next instruction even
660 * if it happens to be a patch jump or int3.
661 */
662 uint8_t const *pbInstrHC = pDisInfo->pbInstrHC; AssertPtr(pbInstrHC);
663 pbInstrHC += uSrcAddr - pDisInfo->pInstrGC;
664
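 /* cbMaxRead1 = bytes left on the guest page, cbMaxRead2 = bytes left on the
 host mapping page. Read at most to the end of the guest page, crossing the
 host page only when needed to satisfy cbMinRead (the RT_MAX below), and
 never more than cbMaxRead. */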
665 size_t cbMaxRead1 = PAGE_SIZE - (uSrcAddr & PAGE_OFFSET_MASK);
666 size_t cbMaxRead2 = PAGE_SIZE - ((uintptr_t)pbInstrHC & PAGE_OFFSET_MASK);
667 size_t cbToRead = RT_MIN(cbMaxRead1, RT_MAX(cbMaxRead2, cbMinRead));
668 if (cbToRead > cbMaxRead)
669 cbToRead = cbMaxRead;
670
671 memcpy(&pDis->abInstr[offInstr], pbInstrHC, cbToRead);
672 offInstr += (uint8_t)cbToRead;
673 }
674
675 pDis->cbCachedInstr = offInstr;
676 return rc;
677}
678
679
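/* The three wrappers below set up a PATMDISASM context and disassemble one
 * instruction via patmReadBytes: patmR3DisInstrToStr also formats the
 * instruction as text, patmR3DisInstr does not, and patmR3DisInstrNoStrOpMode
 * uses the operand mode recorded in the patch (pPatch->uOpMode) instead of
 * deriving it from PATMFL_CODE32. */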
680DECLINLINE(bool) patmR3DisInstrToStr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
681 PDISCPUSTATE pCpu, uint32_t *pcbInstr, char *pszOutput, size_t cbOutput)
682{
683 PATMDISASM disinfo;
684 disinfo.pVM = pVM;
685 disinfo.pPatchInfo = pPatch;
686 disinfo.pbInstrHC = pbInstrHC;
687 disinfo.pInstrGC = InstrGCPtr32;
688 disinfo.fReadFlags = fReadFlags;
689 return RT_SUCCESS(DISInstrToStrWithReader(InstrGCPtr32,
690 (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
691 patmReadBytes, &disinfo,
692 pCpu, pcbInstr, pszOutput, cbOutput));
693}
694
695
696DECLINLINE(bool) patmR3DisInstr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
697 PDISCPUSTATE pCpu, uint32_t *pcbInstr)
698{
699 PATMDISASM disinfo;
700 disinfo.pVM = pVM;
701 disinfo.pPatchInfo = pPatch;
702 disinfo.pbInstrHC = pbInstrHC;
703 disinfo.pInstrGC = InstrGCPtr32;
704 disinfo.fReadFlags = fReadFlags;
705 return RT_SUCCESS(DISInstrWithReader(InstrGCPtr32,
706 (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
707 patmReadBytes, &disinfo,
708 pCpu, pcbInstr));
709}
710
711
712DECLINLINE(bool) patmR3DisInstrNoStrOpMode(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC,
713 uint32_t fReadFlags,
714 PDISCPUSTATE pCpu, uint32_t *pcbInstr)
715{
716 PATMDISASM disinfo;
717 disinfo.pVM = pVM;
718 disinfo.pPatchInfo = pPatch;
719 disinfo.pbInstrHC = pbInstrHC;
720 disinfo.pInstrGC = InstrGCPtr32;
721 disinfo.fReadFlags = fReadFlags;
722 return RT_SUCCESS(DISInstrWithReader(InstrGCPtr32, pPatch->uOpMode, patmReadBytes, &disinfo,
723 pCpu, pcbInstr));
724}
725
726#ifdef LOG_ENABLED
727# define PATM_LOG_ORG_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) \
728 PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, PATMREAD_ORGCODE, a_szComment, " patch:")
729# define PATM_LOG_RAW_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) \
730 PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, PATMREAD_RAWCODE, a_szComment, " patch:")
731
732# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) \
733 do { \
734 if (LogIsEnabled()) \
735 patmLogRawPatchInstr(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2); \
736 } while (0)
737
738static void patmLogRawPatchInstr(PVM pVM, PPATCHINFO pPatch, uint32_t fFlags,
739 const char *pszComment1, const char *pszComment2)
740{
741 DISCPUSTATE DisState;
742 char szOutput[128];
743 szOutput[0] = '\0';
744 patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC, NULL, fFlags,
745 &DisState, NULL, szOutput, sizeof(szOutput));
746 Log(("%s%s %s", pszComment1, pszComment2, szOutput));
747}
748
749#else
750# define PATM_LOG_ORG_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) do { } while (0)
751# define PATM_LOG_RAW_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) do { } while (0)
752# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) do { } while (0)
753#endif
754
755
756/**
757 * Callback function for RTAvloU32DoWithAll
758 *
759 * Updates all fixups in the patches
760 *
761 * @returns VBox status code.
762 * @param pNode Current node
763 * @param pParam Pointer to the VM.
764 */
765static DECLCALLBACK(int) patmR3RelocatePatches(PAVLOU32NODECORE pNode, void *pParam)
766{
767 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
768 PVM pVM = (PVM)pParam;
769 RTRCINTPTR delta;
770 int rc;
771
772 /* Nothing to do if the patch is not active. */
773 if (pPatch->patch.uState == PATCH_REFUSED)
774 return 0;
775
776 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
777 PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Org patch jump:", "");
778
779 Log(("Nr of fixups %d\n", pPatch->patch.nrFixups));
780 delta = (RTRCINTPTR)pVM->patm.s.deltaReloc;
781
782 /*
783 * Apply fixups.
784 */
785 AVLPVKEY key = NULL;
786 for (;;)
787 {
788 /* Get the record that's closest from above (after or equal to key). */
789 PRELOCREC pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true);
790 if (!pRec)
791 break;
792
793 key = (uint8_t *)pRec->Core.Key + 1; /* search for the next record during the next round. */
794
795 switch (pRec->uType)
796 {
797 case FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL:
798 Assert(pRec->pDest == pRec->pSource); Assert(PATM_IS_ASMFIX(pRec->pSource));
799 Log(("Absolute patch template fixup type %#x at %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
800 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
801 break;
802
803 case FIXUP_ABSOLUTE:
804 Log(("Absolute fixup at %RRv %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
805 if ( !pRec->pSource
806 || PATMIsPatchGCAddr(pVM, pRec->pSource))
807 {
808 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
809 }
810 else
811 {
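 /* The absolute address lives in guest code (the last 4 bytes of the
 patched instruction): re-read the guest bytes, make sure they still
 match what we wrote earlier, and only then write back the relocated
 target. If the page is not present, register a monitor and check later. */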
812 uint8_t curInstr[15];
813 uint8_t oldInstr[15];
814 Assert(pRec->pSource && pPatch->patch.cbPrivInstr <= 15);
815
816 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
817
818 memcpy(oldInstr, pPatch->patch.aPrivInstr, pPatch->patch.cbPrivInstr);
819 *(RTRCPTR *)&oldInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
820
821 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), curInstr, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPrivInstr);
822 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
823
824 pRec->pDest = (RTRCPTR)((RTRCUINTPTR)pRec->pDest + delta);
825
826 if ( rc == VERR_PAGE_NOT_PRESENT
827 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
828 {
829 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
830
831 Log(("PATM: Patch page not present -> check later!\n"));
832 rc = PGMR3HandlerVirtualRegister(pVM, VMMGetCpu(pVM), pVM->patm.s.hMonitorPageType,
833 pPage,
834 pPage + (PAGE_SIZE - 1) /* inclusive! */,
835 (void *)(uintptr_t)pPage, pPage, NULL /*pszDesc*/);
836 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
837 }
838 else
839 if (memcmp(curInstr, oldInstr, pPatch->patch.cbPrivInstr))
840 {
841 Log(("PATM: Patch was overwritten -> disabling patch!!\n"));
842 /*
843 * Disable patch; this is not a good solution
844 */
845 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
846 pPatch->patch.uState = PATCH_DISABLED;
847 }
848 else
849 if (RT_SUCCESS(rc))
850 {
851 *(RTRCPTR *)&curInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
852 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pRec->pSource, curInstr, pPatch->patch.cbPrivInstr);
853 AssertRC(rc);
854 }
855 }
856 break;
857
858 case FIXUP_REL_JMPTOPATCH:
859 {
860 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
861
862 if ( pPatch->patch.uState == PATCH_ENABLED
863 && (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE))
864 {
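 /* The guest code contains a jump into patch memory; recompute the
 displacement for the relocated target and rewrite it in guest memory,
 but only after verifying that the previously written jump is still intact. */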
865 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
866 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
867 RTRCPTR pJumpOffGC;
868 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
869 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
870
871#if 0 /** @todo '*(int32_t*)pRec->pRelocPos' crashes on restore of an XP VM here. pRelocPos=0x8000dbe2180a (bird) */
872 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
873#else
874 Log(("Relative fixup (g2p) ???????? -> %08X at %08X (source=%08x, target=%08x)\n", displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
875#endif
876
877 Assert(pRec->pSource - pPatch->patch.cbPatchJump == pPatch->patch.pPrivInstrGC);
878#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
879 if (pPatch->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
880 {
881 Assert(pPatch->patch.flags & PATMFL_JUMP_CONFLICT);
882
883 pJumpOffGC = pPatch->patch.pPrivInstrGC + 2; //two byte opcode
884 oldJump[0] = pPatch->patch.aPrivInstr[0];
885 oldJump[1] = pPatch->patch.aPrivInstr[1];
886 *(RTRCUINTPTR *)&oldJump[2] = displOld;
887 }
888 else
889#endif
890 if (pPatch->patch.cbPatchJump == SIZEOF_NEARJUMP32)
891 {
892 pJumpOffGC = pPatch->patch.pPrivInstrGC + 1; //one byte opcode
893 oldJump[0] = 0xE9;
894 *(RTRCUINTPTR *)&oldJump[1] = displOld;
895 }
896 else
897 {
898 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->patch.cbPatchJump));
899 continue; //this should never happen!!
900 }
901 Assert(pPatch->patch.cbPatchJump <= sizeof(temp));
902
903 /*
904 * Read old patch jump and compare it to the one we previously installed
905 */
906 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPatchJump);
907 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
908
909 if ( rc == VERR_PAGE_NOT_PRESENT
910 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
911 {
912 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
913 Log(("PATM: Patch page not present -> check later!\n"));
914 rc = PGMR3HandlerVirtualRegister(pVM, VMMGetCpu(pVM), pVM->patm.s.hMonitorPageType,
915 pPage,
916 pPage + (PAGE_SIZE - 1) /* inclusive! */,
917 (void *)(uintptr_t)pPage, pPage, NULL /*pszDesc*/);
918 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
919 }
920 else
921 if (memcmp(temp, oldJump, pPatch->patch.cbPatchJump))
922 {
923 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
924 /*
925 * Disable patch; this is not a good solution
926 */
927 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
928 pPatch->patch.uState = PATCH_DISABLED;
929 }
930 else
931 if (RT_SUCCESS(rc))
932 {
933 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
934 AssertRC(rc);
935 }
936 else
938 AssertMsgFailed(("Unexpected error %d from PGMPhysSimpleReadGCPtr\n", rc));
938 }
939 else
940 Log(("Skip the guest jump to patch code for this disabled patch %RGv - %08X\n", pPatch->patch.pPrivInstrGC, pRec->pRelocPos));
941
942 pRec->pDest = pTarget;
943 break;
944 }
945
946 case FIXUP_REL_JMPTOGUEST:
947 {
948 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
949 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
950
951 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
952 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
953 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
954 pRec->pSource = pSource;
955 break;
956 }
957
958 case FIXUP_REL_HELPER_IN_PATCH_ASM_TMPL:
959 case FIXUP_CONSTANT_IN_PATCH_ASM_TMPL:
960 /* Only applicable when loading state. */
961 Assert(pRec->pDest == pRec->pSource);
962 Assert(PATM_IS_ASMFIX(pRec->pSource));
963 break;
964
965 default:
966 AssertMsg(0, ("Invalid fixup type!!\n"));
967 return VERR_INVALID_PARAMETER;
968 }
969 }
970
971 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
972 PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Rel patch jump:", "");
973 return 0;
974}
975
976/**
977 * \#PF Handler callback for virtual access handler ranges.
978 *
979 * Important to realize that a physical page in a range can have aliases, and
980 * for ALL and WRITE handlers these will also trigger.
981 *
982 * @returns VINF_SUCCESS if the handler has carried out the operation.
983 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
984 * @param pVM Pointer to the VM.
985 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
986 * @param pvPtr The HC mapping of that address.
987 * @param pvBuf What the guest is reading/writing.
988 * @param cbBuf How much it's reading/writing.
989 * @param enmAccessType The access type.
990 * @param pvUser User argument.
991 */
992DECLCALLBACK(int) patmVirtPageHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
993 PGMACCESSTYPE enmAccessType, void *pvUser)
994{
995 Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
996 NOREF(pvPtr); NOREF(pvBuf); NOREF(cbBuf); NOREF(pvUser);
997
998 /** @todo could be the wrong virtual address (alias) */
999 pVM->patm.s.pvFaultMonitor = GCPtr;
1000 PATMR3HandleMonitoredPage(pVM);
1001 return VINF_PGM_HANDLER_DO_DEFAULT;
1002}
1003
1004#ifdef VBOX_WITH_DEBUGGER
1005
1006/**
1007 * Callback function for RTAvloU32DoWithAll
1008 *
1009 * Enables the patch that's being enumerated
1010 *
1011 * @returns 0 (continue enumeration).
1012 * @param pNode Current node
1013 * @param pVM Pointer to the VM.
1014 */
1015static DECLCALLBACK(int) EnableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
1016{
1017 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
1018
1019 PATMR3EnablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
1020 return 0;
1021}
1022
1023
1024/**
1025 * Callback function for RTAvloU32DoWithAll
1026 *
1027 * Disables the patch that's being enumerated
1028 *
1029 * @returns 0 (continue enumeration).
1030 * @param pNode Current node
1031 * @param pVM Pointer to the VM.
1032 */
1033static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
1034{
1035 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
1036
1037 PATMR3DisablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
1038 return 0;
1039}
1040
1041#endif /* VBOX_WITH_DEBUGGER */
1042
1043/**
1044 * Returns the host context pointer to the GC state structure.
1045 *
1046 * @returns Host context pointer to the PATM GC state, or NULL when HM is enabled.
1047 * @param pVM Pointer to the VM.
1048 */
1049VMMR3_INT_DECL(PPATMGCSTATE) PATMR3QueryGCStateHC(PVM pVM)
1050{
1051 AssertReturn(!HMIsEnabled(pVM), NULL);
1052 return pVM->patm.s.pGCStateHC;
1053}
1054
1055
1056/**
1057 * Allows or disallows patching of privileged instructions executed by the guest OS.
1058 *
1059 * @returns VBox status code.
1060 * @param pUVM The user mode VM handle.
1061 * @param fAllowPatching Allow/disallow patching
1062 */
1063VMMR3DECL(int) PATMR3AllowPatching(PUVM pUVM, bool fAllowPatching)
1064{
1065 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1066 PVM pVM = pUVM->pVM;
1067 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1068
1069 if (!HMIsEnabled(pVM))
1070 pVM->fPATMEnabled = fAllowPatching;
1071 else
1072 Assert(!pVM->fPATMEnabled);
1073 return VINF_SUCCESS;
1074}
1075
1076
1077/**
1078 * Checks if the patch manager is enabled or not.
1079 *
1080 * @returns true if enabled, false if not (or if invalid handle).
1081 * @param pUVM The user mode VM handle.
1082 */
1083VMMR3DECL(bool) PATMR3IsEnabled(PUVM pUVM)
1084{
1085 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
1086 PVM pVM = pUVM->pVM;
1087 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
1088 return PATMIsEnabled(pVM);
1089}
1090
1091
1092/**
1093 * Convert a GC patch block pointer to a HC patch pointer
1094 *
1095 * @returns HC pointer or NULL if it's not a GC patch pointer
1096 * @param pVM Pointer to the VM.
1097 * @param pAddrGC GC pointer
1098 */
1099VMMR3_INT_DECL(void *) PATMR3GCPtrToHCPtr(PVM pVM, RTRCPTR pAddrGC)
1100{
1101 AssertReturn(!HMIsEnabled(pVM), NULL);
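 /* Check the main patch memory range first, then fall back to the patch
 helper segment; anything outside both ranges is not patch code and
 yields NULL. */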
1102 RTRCUINTPTR offPatch = (RTRCUINTPTR)pAddrGC - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC;
1103 if (offPatch >= pVM->patm.s.cbPatchMem)
1104 {
1105 offPatch = (RTRCUINTPTR)pAddrGC - (RTRCUINTPTR)pVM->patm.s.pbPatchHelpersRC;
1106 if (offPatch >= pVM->patm.s.cbPatchHelpers)
1107 return NULL;
1108 return pVM->patm.s.pbPatchHelpersR3 + offPatch;
1109 }
1110 return pVM->patm.s.pPatchMemHC + offPatch;
1111}
1112
1113
1114/**
1115 * Convert guest context address to host context pointer
1116 *
1117 * @returns Host context pointer or NULL in case of an error.
1118 * @param pVM Pointer to the VM.
1119 * @param pCacheRec Address conversion cache record
1120 * @param pGCPtr Guest context pointer
1123 *
1124 */
1125R3PTRTYPE(uint8_t *) patmR3GCVirtToHCVirt(PVM pVM, PPATMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr)
1126{
1127 int rc;
1128 R3PTRTYPE(uint8_t *) pHCPtr;
1129 uint32_t offset;
1130
1131 offset = (RTRCUINTPTR)pGCPtr - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC;
1132 if (offset < pVM->patm.s.cbPatchMem)
1133 {
1134#ifdef VBOX_STRICT
1135 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1136 Assert(pPatch); Assert(offset - pPatch->pPatchBlockOffset < pPatch->cbPatchBlockSize);
1137#endif
1138 return pVM->patm.s.pPatchMemHC + offset;
1139 }
1140 /* Note! We're _not_ including the patch helpers here. */
1141
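 /* Not patch memory: use the one-page translation cache. If the GC address
 is on the same guest page as the cached one, reuse the existing ring-3
 mapping; otherwise drop the old page lock and map the new page read-only. */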
1142 offset = pGCPtr & PAGE_OFFSET_MASK;
1143 if (pCacheRec->pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
1144 return pCacheRec->pPageLocStartHC + offset;
1145
1146 /* Release previous lock if any. */
1147 if (pCacheRec->Lock.pvMap)
1148 {
1149 PGMPhysReleasePageMappingLock(pVM, &pCacheRec->Lock);
1150 pCacheRec->Lock.pvMap = NULL;
1151 }
1152
1153 rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pGCPtr, (const void **)&pHCPtr, &pCacheRec->Lock);
1154 if (rc != VINF_SUCCESS)
1155 {
1156 AssertMsg(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("PGMPhysGCPtr2CCPtrReadOnly failed for %08X\n", pGCPtr));
1157 return NULL;
1158 }
1159 pCacheRec->pPageLocStartHC = (R3PTRTYPE(uint8_t*))((RTHCUINTPTR)pHCPtr & PAGE_BASE_HC_MASK);
1160 pCacheRec->pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
1161 return pHCPtr;
1162}
1163
1164
1165/**
1166 * Calculates and fills in all branch targets
1167 *
1168 * @returns VBox status code.
1169 * @param pVM Pointer to the VM.
1170 * @param pPatch Current patch block pointer
1171 *
1172 */
1173static int patmr3SetBranchTargets(PVM pVM, PPATCHINFO pPatch)
1174{
1175 int32_t displ;
1176
1177 PJUMPREC pRec = 0;
1178 unsigned nrJumpRecs = 0;
1179
1180 /*
1181 * Set all branch targets inside the patch block.
1182 * We remove all jump records as they are no longer needed afterwards.
1183 */
1184 while (true)
1185 {
1186 RCPTRTYPE(uint8_t *) pInstrGC;
1187 RCPTRTYPE(uint8_t *) pBranchTargetGC = 0;
1188
1189 pRec = (PJUMPREC)RTAvlPVRemoveBestFit(&pPatch->JumpTree, 0, true);
1190 if (pRec == 0)
1191 break;
1192
1193 nrJumpRecs++;
1194
1195 /* HC in patch block to GC in patch block. */
1196 pInstrGC = patmPatchHCPtr2PatchGCPtr(pVM, pRec->pJumpHC);
1197
1198 if (pRec->opcode == OP_CALL)
1199 {
1200 /* Special case: call function replacement patch from this patch block.
1201 */
1202 PPATMPATCHREC pFunctionRec = patmQueryFunctionPatch(pVM, pRec->pTargetGC);
1203 if (!pFunctionRec)
1204 {
1205 int rc;
1206
1207 if (PATMR3HasBeenPatched(pVM, pRec->pTargetGC) == false)
1208 rc = PATMR3InstallPatch(pVM, pRec->pTargetGC, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
1209 else
1210 rc = VERR_PATCHING_REFUSED; /* exists as a normal patch; can't use it */
1211
1212 if (RT_FAILURE(rc))
1213 {
1214 uint8_t *pPatchHC;
1215 RTRCPTR pPatchGC;
1216 RTRCPTR pOrgInstrGC;
1217
1218 pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pInstrGC, 0);
1219 Assert(pOrgInstrGC);
1220
1221 /* Failure for some reason -> mark exit point with int 3. */
1222 Log(("Failed to install function replacement patch (at %x) for reason %Rrc\n", pOrgInstrGC, rc));
1223
1224 pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pOrgInstrGC);
1225 Assert(pPatchGC);
1226
1227 pPatchHC = pVM->patm.s.pPatchMemHC + (pPatchGC - pVM->patm.s.pPatchMemGC);
1228
1229 /* Set a breakpoint at the very beginning of the recompiled instruction */
1230 *pPatchHC = 0xCC;
1231
1232 continue;
1233 }
1234 }
1235 else
1236 {
1237 Log(("Patch block %RRv called as function\n", pFunctionRec->patch.pPrivInstrGC));
1238 pFunctionRec->patch.flags |= PATMFL_CODE_REFERENCED;
1239 }
1240
1241 pBranchTargetGC = PATMR3QueryPatchGCPtr(pVM, pRec->pTargetGC);
1242 }
1243 else
1244 pBranchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pRec->pTargetGC);
1245
1246 if (pBranchTargetGC == 0)
1247 {
1248 AssertMsgFailed(("patmr3SetBranchTargets: patmGuestGCPtrToPatchGCPtr failed for %08X\n", pRec->pTargetGC));
1249 return VERR_PATCHING_REFUSED;
1250 }
1251 /* Our jumps *always* have a dword displacement (to make things easier). */
1252 Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
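 /* The branch displacement is relative to the byte following the 32-bit
 displacement field, hence the '+ pRec->offDispl + sizeof(RTRCPTR)' when
 rebasing from the instruction start. */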
1253 displ = pBranchTargetGC - (pInstrGC + pRec->offDispl + sizeof(RTRCPTR));
1254 *(RTRCPTR *)(pRec->pJumpHC + pRec->offDispl) = displ;
1255 Log(("Set branch target %d to %08X : %08x - (%08x + %d + %d)\n", nrJumpRecs, displ, pBranchTargetGC, pInstrGC, pRec->offDispl, sizeof(RTRCPTR)));
1256 }
1257 Assert(nrJumpRecs == pPatch->nrJumpRecs);
1258 Assert(pPatch->JumpTree == 0);
1259 return VINF_SUCCESS;
1260}
1261
1262/**
1263 * Add an illegal instruction record
1264 *
1265 * @param pVM Pointer to the VM.
1266 * @param pPatch Patch structure ptr
1267 * @param pInstrGC Guest context pointer to privileged instruction
1268 *
1269 */
1270static void patmAddIllegalInstrRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1271{
1272 PAVLPVNODECORE pRec;
1273
1274 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1275 Assert(pRec);
1276 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
1277
1278 bool ret = RTAvlPVInsert(&pPatch->pTempInfo->IllegalInstrTree, pRec);
1279 Assert(ret); NOREF(ret);
1280 pPatch->pTempInfo->nrIllegalInstr++;
1281}
1282
1283static bool patmIsIllegalInstr(PPATCHINFO pPatch, RTRCPTR pInstrGC)
1284{
1285 PAVLPVNODECORE pRec;
1286
1287 pRec = RTAvlPVGet(&pPatch->pTempInfo->IllegalInstrTree, (AVLPVKEY)(uintptr_t)pInstrGC);
1288 if (pRec)
1289 return true;
1290 else
1291 return false;
1292}
1293
1294/**
1295 * Add a patch to guest lookup record
1296 *
1297 * @param pVM Pointer to the VM.
1298 * @param pPatch Patch structure ptr
1299 * @param pPatchInstrHC Host context pointer to the patch instruction in the patch block
1300 * @param pInstrGC Guest context pointer to privileged instruction
1301 * @param enmType Lookup type
1302 * @param fDirty Dirty flag
1303 *
1304 * @note Be extremely careful with this function. Make absolutely sure the guest
1305 * address is correct! (to avoid executing instructions twice!)
1306 */
1307void patmR3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC, PATM_LOOKUP_TYPE enmType, bool fDirty)
1308{
1309 bool ret;
1310 PRECPATCHTOGUEST pPatchToGuestRec;
1311 PRECGUESTTOPATCH pGuestToPatchRec;
1312 uint32_t PatchOffset = pPatchInstrHC - pVM->patm.s.pPatchMemHC; /* Offset in memory reserved for PATM. */
1313
1314 LogFlowFunc(("pVM=%#p pPatch=%#p pPatchInstrHC=%#p pInstrGC=%#x enmType=%d fDirty=%RTbool\n",
1315 pVM, pPatch, pPatchInstrHC, pInstrGC, enmType, fDirty));
1316
1317 if (enmType == PATM_LOOKUP_PATCH2GUEST)
1318 {
1319 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1320 if (pPatchToGuestRec && pPatchToGuestRec->Core.Key == PatchOffset)
1321 return; /* already there */
1322
1323 Assert(!pPatchToGuestRec);
1324 }
1325#ifdef VBOX_STRICT
1326 else
1327 {
1328 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1329 Assert(!pPatchToGuestRec);
1330 }
1331#endif
1332
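 /* A single heap block holds both lookup records: the guest-to-patch record
 is placed immediately after the patch-to-guest one (see the
 (pPatchToGuestRec + 1) cast further down). */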
1333 pPatchToGuestRec = (PRECPATCHTOGUEST)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(RECPATCHTOGUEST) + sizeof(RECGUESTTOPATCH));
1334 Assert(pPatchToGuestRec);
1335 pPatchToGuestRec->Core.Key = PatchOffset;
1336 pPatchToGuestRec->pOrgInstrGC = pInstrGC;
1337 pPatchToGuestRec->enmType = enmType;
1338 pPatchToGuestRec->fDirty = fDirty;
1339
1340 ret = RTAvlU32Insert(&pPatch->Patch2GuestAddrTree, &pPatchToGuestRec->Core);
1341 Assert(ret);
1342
1343 /* GC to patch address */
1344 if (enmType == PATM_LOOKUP_BOTHDIR)
1345 {
1346 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
1347 if (!pGuestToPatchRec)
1348 {
1349 pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1350 pGuestToPatchRec->Core.Key = pInstrGC;
1351 pGuestToPatchRec->PatchOffset = PatchOffset;
1352
1353 ret = RTAvlU32Insert(&pPatch->Guest2PatchAddrTree, &pGuestToPatchRec->Core);
1354 Assert(ret);
1355 }
1356 }
1357
1358 pPatch->nrPatch2GuestRecs++;
1359}
1360
1361
1362/**
1363 * Removes a patch to guest lookup record
1364 *
1365 * @param pVM Pointer to the VM.
1366 * @param pPatch Patch structure ptr
1367 * @param pPatchInstrGC Guest context pointer to patch block
1368 */
1369void patmr3RemoveP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pPatchInstrGC)
1370{
1371 PAVLU32NODECORE pNode;
1372 PAVLU32NODECORE pNode2;
1373 PRECPATCHTOGUEST pPatchToGuestRec;
1374 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
1375
1376 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1377 Assert(pPatchToGuestRec);
1378 if (pPatchToGuestRec)
1379 {
1380 if (pPatchToGuestRec->enmType == PATM_LOOKUP_BOTHDIR)
1381 {
1382 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1383
1384 Assert(pGuestToPatchRec->Core.Key);
1385 pNode2 = RTAvlU32Remove(&pPatch->Guest2PatchAddrTree, pGuestToPatchRec->Core.Key);
1386 Assert(pNode2);
1387 }
1388 pNode = RTAvlU32Remove(&pPatch->Patch2GuestAddrTree, pPatchToGuestRec->Core.Key);
1389 Assert(pNode);
1390
1391 MMR3HeapFree(pPatchToGuestRec);
1392 pPatch->nrPatch2GuestRecs--;
1393 }
1394}
1395
1396
1397/**
1398 * RTAvlPVDestroy callback.
1399 */
1400static DECLCALLBACK(int) patmEmptyTreePVCallback(PAVLPVNODECORE pNode, void *)
1401{
1402 MMR3HeapFree(pNode);
1403 return 0;
1404}
1405
1406/**
1407 * Empty the specified tree (PV tree, MMR3 heap)
1408 *
1409 * @param pVM Pointer to the VM.
1410 * @param ppTree Tree to empty
1411 */
1412static void patmEmptyTree(PVM pVM, PAVLPVNODECORE *ppTree)
1413{
1414 NOREF(pVM);
1415 RTAvlPVDestroy(ppTree, patmEmptyTreePVCallback, NULL);
1416}
1417
1418
1419/**
1420 * RTAvlU32Destroy callback.
1421 */
1422static DECLCALLBACK(int) patmEmptyTreeU32Callback(PAVLU32NODECORE pNode, void *)
1423{
1424 MMR3HeapFree(pNode);
1425 return 0;
1426}
1427
1428/**
1429 * Empty the specified tree (U32 tree, MMR3 heap)
1430 *
1431 * @param pVM Pointer to the VM.
1432 * @param ppTree Tree to empty
1433 */
1434static void patmEmptyTreeU32(PVM pVM, PPAVLU32NODECORE ppTree)
1435{
1436 NOREF(pVM);
1437 RTAvlU32Destroy(ppTree, patmEmptyTreeU32Callback, NULL);
1438}
1439
1440
1441/**
1442 * Analyses the instructions following the cli for compliance with our heuristics for cli & pushf
1443 *
1444 * @returns VBox status code.
1445 * @param pVM Pointer to the VM.
1446 * @param pCpu CPU disassembly state
1447 * @param pInstrGC Guest context pointer to privileged instruction
1448 * @param pCurInstrGC Guest context pointer to the current instruction
1449 * @param pCacheRec Cache record ptr
1450 *
1451 */
1452static int patmAnalyseBlockCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1453{
1454 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1455 bool fIllegalInstr = false;
1456
1457 /*
1458 * Preliminary heuristics:
1459 * - no call instructions without a fixed displacement between cli and sti/popf
1460 * - no jumps in the instructions following cli (4+ bytes; enough for the replacement jump (5 bytes))
1461 * - no nested pushf/cli
1462 * - sti/popf should be the (eventual) target of all branches
1463 * - no near or far returns; no int xx, no into
1464 *
1465 * Note: Later on we can impose less strict guidelines if the need arises
1466 */
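/* For reference: SIZEOF_NEARJUMP32 is the size of the jump PATM writes over the guest code
 * (0xE9 + 32-bit displacement = 5 bytes). Any instruction boundary that falls inside those
 * 5 bytes after the privileged instruction risks being clobbered, which is what several of
 * the checks below guard against. */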
1467
1468 /* Bail out if the patch gets too big. */
1469 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1470 {
1471 Log(("Code block too big (%x) for patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1472 fIllegalInstr = true;
1473 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1474 }
1475 else
1476 {
1477 /* No unconditional jumps or calls without fixed displacements. */
1478 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1479 && (pCpu->pCurInstr->uOpcode == OP_JMP || pCpu->pCurInstr->uOpcode == OP_CALL)
1480 )
1481 {
1482 Assert(pCpu->Param1.cb <= 4 || pCpu->Param1.cb == 6);
1483 if ( pCpu->Param1.cb == 6 /* far call/jmp */
1484 || (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1485 || (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1486 )
1487 {
1488 fIllegalInstr = true;
1489 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1490 }
1491 }
1492
1493 /* An unconditional (short) jump right after a cli is a potential problem; we would overwrite whatever code comes afterwards. */
1494 if (pPatch->opcode == OP_CLI && pCpu->pCurInstr->uOpcode == OP_JMP)
1495 {
1496 if ( pCurInstrGC > pPatch->pPrivInstrGC
1497 && pCurInstrGC + pCpu->cbInstr < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1498 {
1499 Log(("Dangerous unconditional jump ends in our generated patch jump!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1500 /* We turn this one into a int 3 callable patch. */
1501 pPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
1502 }
1503 }
1504 else
1505 /* no nested pushfs just yet; nested cli is allowed for cli patches though. */
1506 if (pPatch->opcode == OP_PUSHF)
1507 {
1508 if (pCurInstrGC != pInstrGC && pCpu->pCurInstr->uOpcode == OP_PUSHF)
1509 {
1510 fIllegalInstr = true;
1511 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1512 }
1513 }
1514
1515 /* no far returns */
1516 if (pCpu->pCurInstr->uOpcode == OP_RETF)
1517 {
1518 pPatch->pTempInfo->nrRetInstr++;
1519 fIllegalInstr = true;
1520 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1521 }
1522 else if ( pCpu->pCurInstr->uOpcode == OP_INT3
1523 || pCpu->pCurInstr->uOpcode == OP_INT
1524 || pCpu->pCurInstr->uOpcode == OP_INTO)
1525 {
1526 /* No int xx or into either. */
1527 fIllegalInstr = true;
1528 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1529 }
1530 }
1531
1532 pPatch->cbPatchBlockSize += pCpu->cbInstr;
1533
1534 /* Illegal instruction -> end of analysis phase for this code block */
1535 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1536 return VINF_SUCCESS;
1537
1538 /* Check for exit points. */
1539 switch (pCpu->pCurInstr->uOpcode)
1540 {
1541 case OP_SYSEXIT:
1542 return VINF_SUCCESS; /* duplicate it; will fault or be emulated in GC. */
1543
1544 case OP_SYSENTER:
1545 case OP_ILLUD2:
1546 /* This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further. */
1547 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1548 return VINF_SUCCESS;
1549
1550 case OP_STI:
1551 case OP_POPF:
1552 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION)));
1553 /* If our exit point lies within the generated patch jump, then we have to refuse!! */
1554 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1555 {
1556 Log(("Exit point within patch jump itself!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1557 return VERR_PATCHING_REFUSED;
1558 }
1559 if (pPatch->opcode == OP_PUSHF)
1560 {
1561 if (pCpu->pCurInstr->uOpcode == OP_POPF)
1562 {
1563 if (pPatch->cbPatchBlockSize >= SIZEOF_NEARJUMP32)
1564 return VINF_SUCCESS;
1565
1566 /* Or else we need to duplicate more instructions, because we can't jump back yet! */
1567 Log(("WARNING: End of block reached, but we need to duplicate some extra instruction to avoid a conflict with the patch jump\n"));
1568 pPatch->flags |= PATMFL_CHECK_SIZE;
1569 }
1570 break; /* sti doesn't mark the end of a pushf block; only popf does. */
1571 }
1572 /* else: fall through. */
1573 case OP_RETN: /* exit point for function replacement */
1574 return VINF_SUCCESS;
1575
1576 case OP_IRET:
1577 return VINF_SUCCESS; /* exitpoint */
1578
1579 case OP_CPUID:
1580 case OP_CALL:
1581 case OP_JMP:
1582 break;
1583
1584#ifdef VBOX_WITH_SAFE_STR /** @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table */
1585 case OP_STR:
1586 break;
1587#endif
1588
1589 default:
1590 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
1591 {
1592 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1593 return VINF_SUCCESS; /* exit point */
1594 }
1595 break;
1596 }
1597
1598 /* If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump. */
1599 if ((pPatch->flags & PATMFL_CHECK_SIZE) && pPatch->cbPatchBlockSize > SIZEOF_NEARJUMP32 && !(pCpu->pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW))
1600 {
1601 /* The end marker for this kind of patch is any instruction at a location outside our patch jump. */
1602 Log(("End of block at %RRv size %d\n", pCurInstrGC, pCpu->cbInstr));
1603 return VINF_SUCCESS;
1604 }
1605
1606 return VWRN_CONTINUE_ANALYSIS;
1607}
1608
1609/**
1610 * Analyses the instructions inside a function for compliance
1611 *
1612 * @returns VBox status code.
1613 * @param pVM Pointer to the VM.
1614 * @param pCpu CPU disassembly state
1615 * @param pInstrGC Guest context pointer to privileged instruction
1616 * @param pCurInstrGC Guest context pointer to the current instruction
1617 * @param pCacheRec Cache record ptr
1618 *
1619 */
1620static int patmAnalyseFunctionCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1621{
1622 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1623 bool fIllegalInstr = false;
1624 NOREF(pInstrGC);
1625
1626 // Preliminary heuristics:
1627 // - no call instructions
1628 // - ret ends a block
1629
1630 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
1631
1632 // bail out if the patch gets too big
1633 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1634 {
1635 Log(("Code block too big (%x) for function patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1636 fIllegalInstr = true;
1637 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1638 }
1639 else
1640 {
1641 // no unconditional jumps or calls without fixed displacements
1642 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1643 && (pCpu->pCurInstr->uOpcode == OP_JMP || pCpu->pCurInstr->uOpcode == OP_CALL)
1644 )
1645 {
1646 Assert(pCpu->Param1.cb <= 4 || pCpu->Param1.cb == 6);
1647 if ( pCpu->Param1.cb == 6 /* far call/jmp */
1648 || (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1649 || (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1650 )
1651 {
1652 fIllegalInstr = true;
1653 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1654 }
1655 }
1656 else /* no far returns */
1657 if (pCpu->pCurInstr->uOpcode == OP_RETF)
1658 {
1659 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1660 fIllegalInstr = true;
1661 }
1662 else /* no int xx or into either */
1663 if (pCpu->pCurInstr->uOpcode == OP_INT3 || pCpu->pCurInstr->uOpcode == OP_INT || pCpu->pCurInstr->uOpcode == OP_INTO)
1664 {
1665 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1666 fIllegalInstr = true;
1667 }
1668
1669 #if 0
1670 ///@todo we can handle certain in/out and privileged instructions in the guest context
1671 if (pCpu->pCurInstr->fOpType & DISOPTYPE_PRIVILEGED && pCpu->pCurInstr->uOpcode != OP_STI)
1672 {
1673 Log(("Illegal instructions for function patch!!\n"));
1674 return VERR_PATCHING_REFUSED;
1675 }
1676 #endif
1677 }
1678
1679 pPatch->cbPatchBlockSize += pCpu->cbInstr;
1680
1681 /* Illegal instruction -> end of analysis phase for this code block */
1682 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1683 {
1684 return VINF_SUCCESS;
1685 }
1686
1687 // Check for exit points
1688 switch (pCpu->pCurInstr->uOpcode)
1689 {
1690 case OP_ILLUD2:
1691 // This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further
1692 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1693 return VINF_SUCCESS;
1694
1695 case OP_IRET:
1696 case OP_SYSEXIT: /* will fault or emulated in GC */
1697 case OP_RETN:
1698 return VINF_SUCCESS;
1699
1700#ifdef VBOX_WITH_SAFE_STR /** @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table */
1701 case OP_STR:
1702 break;
1703#endif
1704
1705 case OP_POPF:
1706 case OP_STI:
1707 return VWRN_CONTINUE_ANALYSIS;
1708 default:
1709 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
1710 {
1711 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1712 return VINF_SUCCESS; /* exit point */
1713 }
1714 return VWRN_CONTINUE_ANALYSIS;
1715 }
1716
1717 return VWRN_CONTINUE_ANALYSIS;
1718}
1719
1720/**
1721 * Recompiles the instructions in a code block
1722 *
1723 * @returns VBox status code.
1724 * @param pVM Pointer to the VM.
1725 * @param pCpu CPU disassembly state
1726 * @param pInstrGC Guest context pointer to privileged instruction
1727 * @param pCurInstrGC Guest context pointer to the current instruction
1728 * @param pCacheRec Cache record ptr
1729 *
1730 */
1731static int patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1732{
1733 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1734 int rc = VINF_SUCCESS;
1735 bool fInhibitIRQInstr = false; /* did the instruction cause PATMFL_INHIBITIRQS to be set? */
1736
1737 LogFlow(("patmRecompileCallback %RRv %RRv\n", pInstrGC, pCurInstrGC));
1738
1739 if ( patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pCurInstrGC) != 0
1740 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT)) /* do not do this when the next instruction *must* be executed! */
1741 {
1742 /*
1743 * Been there, done that; so insert a jump (we don't want to duplicate code)
1744 * no need to record this instruction as it's glue code that never crashes (it had better not!)
1745 */
1746 Log(("patmRecompileCallback: jump to code we've recompiled before %RRv!\n", pCurInstrGC));
1747 return patmPatchGenRelJump(pVM, pPatch, pCurInstrGC, OP_JMP, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1748 }
1749
1750 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1751 {
1752 rc = patmAnalyseFunctionCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1753 }
1754 else
1755 rc = patmAnalyseBlockCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1756
1757 if (RT_FAILURE(rc))
1758 return rc;
1759
1760 /* Note: Never do a direct return unless a failure is encountered! */
1761
1762 /* Clear recompilation of next instruction flag; we are doing that right here. */
1763 if (pPatch->flags & PATMFL_RECOMPILE_NEXT)
1764 pPatch->flags &= ~PATMFL_RECOMPILE_NEXT;
1765
1766 /* Add lookup record for patch to guest address translation */
1767 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
1768
1769 /* Update lowest and highest instruction address for this patch */
1770 if (pCurInstrGC < pPatch->pInstrGCLowest)
1771 pPatch->pInstrGCLowest = pCurInstrGC;
1772 else
1773 if (pCurInstrGC > pPatch->pInstrGCHighest)
1774 pPatch->pInstrGCHighest = pCurInstrGC + pCpu->cbInstr;
1775
1776 /* Illegal instruction -> end of recompile phase for this code block. */
1777 if (patmIsIllegalInstr(pPatch, pCurInstrGC))
1778 {
1779 Log(("Illegal instruction at %RRv -> mark with int 3\n", pCurInstrGC));
1780 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1781 goto end;
1782 }
1783
1784 /* For our first attempt, we'll handle only simple relative jumps (immediate offset coded in instruction).
1785 * Indirect calls are handled below.
1786 */
1787 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1788 && (pCpu->pCurInstr->uOpcode != OP_CALL || (pPatch->flags & PATMFL_SUPPORT_CALLS))
1789 && (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) == OP_PARM_J))
1790 {
1791 RCPTRTYPE(uint8_t *) pTargetGC = PATMResolveBranch(pCpu, pCurInstrGC);
1792 if (pTargetGC == 0)
1793 {
1794 Log(("We don't support far jumps here!! (%08X)\n", pCpu->Param1.fUse));
1795 return VERR_PATCHING_REFUSED;
1796 }
1797
1798 if (pCpu->pCurInstr->uOpcode == OP_CALL)
1799 {
1800 Assert(!PATMIsPatchGCAddr(pVM, pTargetGC));
1801 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, pTargetGC, false);
1802 if (RT_FAILURE(rc))
1803 goto end;
1804 }
1805 else
1806 rc = patmPatchGenRelJump(pVM, pPatch, pTargetGC, pCpu->pCurInstr->uOpcode, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1807
1808 if (RT_SUCCESS(rc))
1809 rc = VWRN_CONTINUE_RECOMPILE;
1810
1811 goto end;
1812 }
1813
1814 switch (pCpu->pCurInstr->uOpcode)
1815 {
1816 case OP_CLI:
1817 {
1818 /* If a cli is found while duplicating instructions for another patch, then it's of vital importance to continue
1819 * until we've found the proper exit point(s).
1820 */
1821 if ( pCurInstrGC != pInstrGC
1822 && !(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1823 )
1824 {
1825 Log(("cli instruction found in other instruction patch block; force it to continue & find an exit point\n"));
1826 pPatch->flags &= ~(PATMFL_CHECK_SIZE | PATMFL_SINGLE_INSTRUCTION);
1827 }
1828 /* Set by irq inhibition; no longer valid now. */
1829 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1830
1831 rc = patmPatchGenCli(pVM, pPatch);
1832 if (RT_SUCCESS(rc))
1833 rc = VWRN_CONTINUE_RECOMPILE;
1834 break;
1835 }
1836
1837 case OP_MOV:
1838 if (pCpu->pCurInstr->fOpType & DISOPTYPE_POTENTIALLY_DANGEROUS)
1839 {
1840 /* mov ss, src? */
1841 if ( (pCpu->Param1.fUse & DISUSE_REG_SEG)
1842 && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS))
1843 {
1844 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1845 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1846 /** @todo this could cause a fault (ring 0 selector being loaded in ring 1) */
1847 }
1848#if 0 /* necessary for Haiku */
1849 else
1850 if ( (pCpu->Param2.fUse & DISUSE_REG_SEG)
1851 && (pCpu->Param2.Base.idxSegReg == USE_REG_SS)
1852 && (pCpu->Param1.fUse & (DISUSE_REG_GEN32|DISUSE_REG_GEN16))) /** @todo memory operand must in theory be handled too */
1853 {
1854 /* mov GPR, ss */
1855 rc = patmPatchGenMovFromSS(pVM, pPatch, pCpu, pCurInstrGC);
1856 if (RT_SUCCESS(rc))
1857 rc = VWRN_CONTINUE_RECOMPILE;
1858 break;
1859 }
1860#endif
1861 }
1862 goto duplicate_instr;
1863
1864 case OP_POP:
1865 /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS)) */
1866 if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_SS)
1867 {
1868 Assert(pCpu->pCurInstr->fOpType & DISOPTYPE_INHIBIT_IRQS);
1869
1870 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1871 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1872 }
1873 goto duplicate_instr;
1874
1875 case OP_STI:
1876 {
1877 RTRCPTR pNextInstrGC = 0; /* by default no inhibit irq */
1878
1879 /* In a sequence of instructions that inhibit irqs, only the first one actually inhibits irqs. */
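/* (On x86 an sti opens the interrupt window only after the instruction that follows it has
 * executed, which is why the next instruction is located and checked below before the patch
 * is allowed to exit.) */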
1880 if (!(pPatch->flags & PATMFL_INHIBIT_IRQS))
1881 {
1882 pPatch->flags |= PATMFL_INHIBIT_IRQS | PATMFL_GENERATE_JUMPTOGUEST;
1883 fInhibitIRQInstr = true;
1884 pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
1885 Log(("Inhibit irqs for instruction OP_STI at %RRv\n", pCurInstrGC));
1886 }
1887 rc = patmPatchGenSti(pVM, pPatch, pCurInstrGC, pNextInstrGC);
1888
1889 if (RT_SUCCESS(rc))
1890 {
1891 DISCPUSTATE cpu = *pCpu;
1892 unsigned cbInstr;
1893 int disret;
1894 RCPTRTYPE(uint8_t *) pReturnInstrGC;
1895
1896 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1897
1898 pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
1899 { /* Force pNextInstrHC out of scope after using it */
1900 uint8_t *pNextInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
1901 if (pNextInstrHC == NULL)
1902 {
1903 AssertFailed();
1904 return VERR_PATCHING_REFUSED;
1905 }
1906
1907 // Disassemble the next instruction
1908 disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
1909 }
1910 if (disret == false)
1911 {
1912 AssertMsgFailed(("STI: Disassembly failed (probably page not present) -> return to caller\n"));
1913 return VERR_PATCHING_REFUSED;
1914 }
1915 pReturnInstrGC = pNextInstrGC + cbInstr;
1916
1917 if ( (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1918 || pReturnInstrGC <= pInstrGC
1919 || pReturnInstrGC - pInstrGC >= SIZEOF_NEARJUMP32
1920 )
1921 {
1922 /* Not an exit point for function duplication patches */
1923 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
1924 && RT_SUCCESS(rc))
1925 {
1926 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST; /* Don't generate a jump back */
1927 rc = VWRN_CONTINUE_RECOMPILE;
1928 }
1929 else
1930 rc = VINF_SUCCESS; //exit point
1931 }
1932 else {
1933 Log(("PATM: sti occurred too soon; refusing patch!\n"));
1934 rc = VERR_PATCHING_REFUSED; //not allowed!!
1935 }
1936 }
1937 break;
1938 }
1939
1940 case OP_POPF:
1941 {
1942 bool fGenerateJmpBack = (pCurInstrGC + pCpu->cbInstr - pInstrGC >= SIZEOF_NEARJUMP32);
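/* I.e. only generate a jump back to guest code once the popf ends beyond the 5 bytes that the
 * patch jump overwrites; a return inside that range would land in the middle of our own jump. */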
1943
1944 /* Not an exit point for IDT handler or function replacement patches */
1945 /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
1946 if (pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_DUPLICATE_FUNCTION))
1947 fGenerateJmpBack = false;
1948
1949 rc = patmPatchGenPopf(pVM, pPatch, pCurInstrGC + pCpu->cbInstr, !!(pCpu->fPrefix & DISPREFIX_OPSIZE), fGenerateJmpBack);
1950 if (RT_SUCCESS(rc))
1951 {
1952 if (fGenerateJmpBack == false)
1953 {
1954 /* Not an exit point for IDT handler or function replacement patches */
1955 rc = VWRN_CONTINUE_RECOMPILE;
1956 }
1957 else
1958 {
1959 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1960 rc = VINF_SUCCESS; /* exit point! */
1961 }
1962 }
1963 break;
1964 }
1965
1966 case OP_PUSHF:
1967 rc = patmPatchGenPushf(pVM, pPatch, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1968 if (RT_SUCCESS(rc))
1969 rc = VWRN_CONTINUE_RECOMPILE;
1970 break;
1971
1972 case OP_PUSH:
1973 /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_CS)) */
1974 if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_CS)
1975 {
1976 rc = patmPatchGenPushCS(pVM, pPatch);
1977 if (RT_SUCCESS(rc))
1978 rc = VWRN_CONTINUE_RECOMPILE;
1979 break;
1980 }
1981 goto duplicate_instr;
1982
1983 case OP_IRET:
1984 Log(("IRET at %RRv\n", pCurInstrGC));
1985 rc = patmPatchGenIret(pVM, pPatch, pCurInstrGC, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1986 if (RT_SUCCESS(rc))
1987 {
1988 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1989 rc = VINF_SUCCESS; /* exit point by definition */
1990 }
1991 break;
1992
1993 case OP_ILLUD2:
1994 /* This appears to be some kind of kernel panic in Linux 2.4; no point in continuing. */
1995 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1996 if (RT_SUCCESS(rc))
1997 rc = VINF_SUCCESS; /* exit point by definition */
1998 Log(("Illegal opcode (0xf 0xb)\n"));
1999 break;
2000
2001 case OP_CPUID:
2002 rc = patmPatchGenCpuid(pVM, pPatch, pCurInstrGC);
2003 if (RT_SUCCESS(rc))
2004 rc = VWRN_CONTINUE_RECOMPILE;
2005 break;
2006
2007 case OP_STR:
2008#ifdef VBOX_WITH_SAFE_STR /* @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table and move OP_STR into #ifndef */
2009 /* Now safe because our shadow TR entry is identical to the guest's. */
2010 goto duplicate_instr;
2011#endif
2012 case OP_SLDT:
2013 rc = patmPatchGenSldtStr(pVM, pPatch, pCpu, pCurInstrGC);
2014 if (RT_SUCCESS(rc))
2015 rc = VWRN_CONTINUE_RECOMPILE;
2016 break;
2017
2018 case OP_SGDT:
2019 case OP_SIDT:
2020 rc = patmPatchGenSxDT(pVM, pPatch, pCpu, pCurInstrGC);
2021 if (RT_SUCCESS(rc))
2022 rc = VWRN_CONTINUE_RECOMPILE;
2023 break;
2024
2025 case OP_RETN:
2026 /* retn is an exit point for function patches */
2027 rc = patmPatchGenRet(pVM, pPatch, pCpu, pCurInstrGC);
2028 if (RT_SUCCESS(rc))
2029 rc = VINF_SUCCESS; /* exit point by definition */
2030 break;
2031
2032 case OP_SYSEXIT:
2033 /* Duplicate it, so it can be emulated in GC (or fault). */
2034 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
2035 if (RT_SUCCESS(rc))
2036 rc = VINF_SUCCESS; /* exit point by definition */
2037 break;
2038
2039 case OP_CALL:
2040 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
2041 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
2042 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
2043 */
2044 Assert(pCpu->Param1.cb == 4 || pCpu->Param1.cb == 6);
2045 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->Param1.cb == 4 /* no far calls! */)
2046 {
2047 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, (RTRCPTR)0xDEADBEEF, true);
2048 if (RT_SUCCESS(rc))
2049 {
2050 rc = VWRN_CONTINUE_RECOMPILE;
2051 }
2052 break;
2053 }
2054 goto gen_illegal_instr;
2055
2056 case OP_JMP:
2057 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
2058 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
2059 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
2060 */
2061 Assert(pCpu->Param1.cb == 4 || pCpu->Param1.cb == 6);
2062 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->Param1.cb == 4 /* no far jumps! */)
2063 {
2064 rc = patmPatchGenJump(pVM, pPatch, pCpu, pCurInstrGC);
2065 if (RT_SUCCESS(rc))
2066 rc = VINF_SUCCESS; /* end of branch */
2067 break;
2068 }
2069 goto gen_illegal_instr;
2070
2071 case OP_INT3:
2072 case OP_INT:
2073 case OP_INTO:
2074 goto gen_illegal_instr;
2075
2076 case OP_MOV_DR:
2077 /* Note: currently we let DRx writes cause a trap 0d (#GP); our trap handler will decide whether to interpret it or not. */
2078 if (pCpu->pCurInstr->fParam2 == OP_PARM_Dd)
2079 {
2080 rc = patmPatchGenMovDebug(pVM, pPatch, pCpu);
2081 if (RT_SUCCESS(rc))
2082 rc = VWRN_CONTINUE_RECOMPILE;
2083 break;
2084 }
2085 goto duplicate_instr;
2086
2087 case OP_MOV_CR:
2088 /* Note: currently we let CRx writes cause a trap 0d (#GP); our trap handler will decide whether to interpret it or not. */
2089 if (pCpu->pCurInstr->fParam2 == OP_PARM_Cd)
2090 {
2091 rc = patmPatchGenMovControl(pVM, pPatch, pCpu);
2092 if (RT_SUCCESS(rc))
2093 rc = VWRN_CONTINUE_RECOMPILE;
2094 break;
2095 }
2096 goto duplicate_instr;
2097
2098 default:
2099 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_CONTROLFLOW | DISOPTYPE_PRIVILEGED_NOTRAP))
2100 {
2101gen_illegal_instr:
2102 rc = patmPatchGenIllegalInstr(pVM, pPatch);
2103 if (RT_SUCCESS(rc))
2104 rc = VINF_SUCCESS; /* exit point by definition */
2105 }
2106 else
2107 {
2108duplicate_instr:
2109 Log(("patmPatchGenDuplicate\n"));
2110 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
2111 if (RT_SUCCESS(rc))
2112 rc = VWRN_CONTINUE_RECOMPILE;
2113 }
2114 break;
2115 }
2116
2117end:
2118
2119 if ( !fInhibitIRQInstr
2120 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2121 {
2122 int rc2;
2123 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
2124
2125 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2126 Log(("Clear inhibit IRQ flag at %RRv\n", pCurInstrGC));
2127 if (pPatch->flags & PATMFL_GENERATE_JUMPTOGUEST)
2128 {
2129 Log(("patmRecompileCallback: generate jump back to guest (%RRv) after fused instruction\n", pNextInstrGC));
2130
2131 rc2 = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2132 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
2133 rc = VINF_SUCCESS; /* end of the line */
2134 }
2135 else
2136 {
2137 rc2 = patmPatchGenClearInhibitIRQ(pVM, pPatch, pNextInstrGC);
2138 }
2139 if (RT_FAILURE(rc2))
2140 rc = rc2;
2141 }
2142
2143 if (RT_SUCCESS(rc))
2144 {
2145 // If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
2146 if ( (pPatch->flags & PATMFL_CHECK_SIZE)
2147 && pCurInstrGC + pCpu->cbInstr - pInstrGC >= SIZEOF_NEARJUMP32
2148 && !(pCpu->pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW)
2149 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT) /* do not do this when the next instruction *must* be executed! */
2150 )
2151 {
2152 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
2153
2154 // The end marker for this kind of patch is any instruction at a location outside our patch jump
2155 Log(("patmRecompileCallback: end found for single instruction patch at %RRv cbInstr %d\n", pNextInstrGC, pCpu->cbInstr));
2156
2157 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC);
2158 AssertRC(rc);
2159 }
2160 }
2161 return rc;
2162}
2163
2164
2165#ifdef LOG_ENABLED
2166
2167/**
2168 * Add a disasm jump record (temporary, to prevent duplicate analysis of the same jump target)
2169 *
2170 * @param pVM Pointer to the VM.
2171 * @param pPatch Patch structure ptr
2172 * @param pInstrGC Guest context pointer to privileged instruction
2173 *
2174 */
2175static void patmPatchAddDisasmJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
2176{
2177 PAVLPVNODECORE pRec;
2178
2179 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
2180 Assert(pRec);
2181 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
2182
2183 int ret = RTAvlPVInsert(&pPatch->pTempInfo->DisasmJumpTree, pRec);
2184 Assert(ret);
2185}
2186
2187/**
2188 * Checks whether the jump target has been analysed before.
2189 *
2190 * @returns true if the jump target was analysed before, false otherwise.
2191 * @param pPatch Patch struct
2192 * @param pInstrGC Jump target
2193 *
2194 */
2195static bool patmIsKnownDisasmJump(PPATCHINFO pPatch, RTRCPTR pInstrGC)
2196{
2197 PAVLPVNODECORE pRec;
2198
2199 pRec = RTAvlPVGet(&pPatch->pTempInfo->DisasmJumpTree, (AVLPVKEY)(uintptr_t)pInstrGC);
2200 if (pRec)
2201 return true;
2202 return false;
2203}
2204
2205/**
2206 * For proper disassembly of the final patch block
2207 *
2208 * @returns VBox status code.
2209 * @param pVM Pointer to the VM.
2210 * @param pCpu CPU disassembly state
2211 * @param pInstrGC Guest context pointer to privileged instruction
2212 * @param pCurInstrGC Guest context pointer to the current instruction
2213 * @param pCacheRec Cache record ptr
2214 *
2215 */
2216int patmr3DisasmCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
2217{
2218 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2219 NOREF(pInstrGC);
2220
2221 if (pCpu->pCurInstr->uOpcode == OP_INT3)
2222 {
2223 /* Could be an int3 inserted in a call patch. Check to be sure */
2224 DISCPUSTATE cpu;
2225 RTRCPTR pOrgJumpGC;
2226
2227 pOrgJumpGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2228
2229 { /* Force pOrgJumpHC out of scope after using it */
2230 uint8_t *pOrgJumpHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pOrgJumpGC);
2231
2232 bool disret = patmR3DisInstr(pVM, pPatch, pOrgJumpGC, pOrgJumpHC, PATMREAD_ORGCODE, &cpu, NULL);
2233 if (!disret || cpu.pCurInstr->uOpcode != OP_CALL || cpu.Param1.cb != 4 /* only near calls */)
2234 return VINF_SUCCESS;
2235 }
2236 return VWRN_CONTINUE_ANALYSIS;
2237 }
2238
2239 if ( pCpu->pCurInstr->uOpcode == OP_ILLUD2
2240 && PATMIsPatchGCAddr(pVM, pCurInstrGC))
2241 {
2242 /* the indirect call patch contains an 0xF/0xB illegal instr to call for assistance; check for this and continue */
2243 return VWRN_CONTINUE_ANALYSIS;
2244 }
2245
2246 if ( (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
2247 || pCpu->pCurInstr->uOpcode == OP_INT
2248 || pCpu->pCurInstr->uOpcode == OP_IRET
2249 || pCpu->pCurInstr->uOpcode == OP_RETN
2250 || pCpu->pCurInstr->uOpcode == OP_RETF
2251 )
2252 {
2253 return VINF_SUCCESS;
2254 }
2255
2256 if (pCpu->pCurInstr->uOpcode == OP_ILLUD2)
2257 return VINF_SUCCESS;
2258
2259 return VWRN_CONTINUE_ANALYSIS;
2260}
2261
2262
2263/**
2264 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2265 *
2266 * @returns VBox status code.
2267 * @param pVM Pointer to the VM.
2268 * @param pInstrGC Guest context pointer to the initial privileged instruction
2269 * @param pCurInstrGC Guest context pointer to the current instruction
2270 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2271 * @param pCacheRec Cache record ptr
2272 *
2273 */
2274int patmr3DisasmCode(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2275{
2276 DISCPUSTATE cpu;
2277 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2278 int rc = VWRN_CONTINUE_ANALYSIS;
2279 uint32_t cbInstr, delta;
2280 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2281 bool disret;
2282 char szOutput[256];
2283
2284 Assert(pCurInstrHC != PATCHCODE_PTR_HC(pPatch) || pPatch->pTempInfo->DisasmJumpTree == 0);
2285
2286 /* We need this to determine branch targets (and for disassembling). */
2287 delta = pVM->patm.s.pPatchMemGC - (uintptr_t)pVM->patm.s.pPatchMemHC;
2288
2289 while (rc == VWRN_CONTINUE_ANALYSIS)
2290 {
2291 pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2292 if (pCurInstrHC == NULL)
2293 {
2294 rc = VERR_PATCHING_REFUSED;
2295 goto end;
2296 }
2297
2298 disret = patmR3DisInstrToStr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_RAWCODE,
2299 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2300 if (PATMIsPatchGCAddr(pVM, pCurInstrGC))
2301 {
2302 RTRCPTR pOrgInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2303
2304 if (pOrgInstrGC != pPatch->pTempInfo->pLastDisasmInstrGC)
2305 Log(("DIS %RRv<-%s", pOrgInstrGC, szOutput));
2306 else
2307 Log(("DIS %s", szOutput));
2308
2309 pPatch->pTempInfo->pLastDisasmInstrGC = pOrgInstrGC;
2310 if (patmIsIllegalInstr(pPatch, pOrgInstrGC))
2311 {
2312 rc = VINF_SUCCESS;
2313 goto end;
2314 }
2315 }
2316 else
2317 Log(("DIS: %s", szOutput));
2318
2319 if (disret == false)
2320 {
2321 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2322 rc = VINF_SUCCESS;
2323 goto end;
2324 }
2325
2326 rc = pfnPATMR3Disasm(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2327 if (rc != VWRN_CONTINUE_ANALYSIS) {
2328 break; //done!
2329 }
2330
2331 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction) */
2332 if ( (cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2333 && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J)
2334 && cpu.pCurInstr->uOpcode != OP_CALL /* complete functions are replaced; don't bother here. */
2335 )
2336 {
2337 RTRCPTR pTargetGC = PATMResolveBranch(&cpu, pCurInstrGC);
2338 RTRCPTR pOrgTargetGC;
2339
2340 if (pTargetGC == 0)
2341 {
2342 Log(("We don't support far jumps here!! (%08X)\n", cpu.Param1.fUse));
2343 rc = VERR_PATCHING_REFUSED;
2344 break;
2345 }
2346
2347 if (!PATMIsPatchGCAddr(pVM, pTargetGC))
2348 {
2349 //jump back to guest code
2350 rc = VINF_SUCCESS;
2351 goto end;
2352 }
2353 pOrgTargetGC = PATMR3PatchToGCPtr(pVM, pTargetGC, 0);
2354
2355 if (patmIsCommonIDTHandlerPatch(pVM, pOrgTargetGC))
2356 {
2357 rc = VINF_SUCCESS;
2358 goto end;
2359 }
2360
2361 if (patmIsKnownDisasmJump(pPatch, pTargetGC) == false)
2362 {
2363 /* New jump, let's check it. */
2364 patmPatchAddDisasmJump(pVM, pPatch, pTargetGC);
2365
2366 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2367 rc = patmr3DisasmCode(pVM, pInstrGC, pTargetGC, pfnPATMR3Disasm, pCacheRec);
2368 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2369
2370 if (rc != VINF_SUCCESS) {
2371 break; //done!
2372 }
2373 }
2374 if (cpu.pCurInstr->uOpcode == OP_JMP)
2375 {
2376 /* Unconditional jump; return to caller. */
2377 rc = VINF_SUCCESS;
2378 goto end;
2379 }
2380
2381 rc = VWRN_CONTINUE_ANALYSIS;
2382 }
2383 pCurInstrGC += cbInstr;
2384 }
2385end:
2386 return rc;
2387}
2388
2389/**
2390 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2391 *
2392 * @returns VBox status code.
2393 * @param pVM Pointer to the VM.
2394 * @param pInstrGC Guest context pointer to the initial privileged instruction
2395 * @param pCurInstrGC Guest context pointer to the current instruction
2396 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2397 * @param pCacheRec Cache record ptr
2398 *
2399 */
2400int patmr3DisasmCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2401{
2402 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2403
2404 int rc = patmr3DisasmCode(pVM, pInstrGC, pCurInstrGC, pfnPATMR3Disasm, pCacheRec);
2405 /* Free all disasm jump records. */
2406 patmEmptyTree(pVM, &pPatch->pTempInfo->DisasmJumpTree);
2407 return rc;
2408}
2409
2410#endif /* LOG_ENABLED */
2411
2412/**
2413 * Detects whether the specified address falls within the 5 byte jump generated for an active patch.
2414 * If so, that patch is permanently disabled.
2415 *
2416 * @param pVM Pointer to the VM.
2417 * @param pInstrGC Guest context pointer to instruction
2418 * @param pConflictGC Guest context pointer to check
2419 *
2420 * @note also checks for patch hints to make sure they can never be enabled if a conflict is present.
2421 *
2422 */
2423VMMR3_INT_DECL(int) PATMR3DetectConflict(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictGC)
2424{
2425 AssertReturn(!HMIsEnabled(pVM), VERR_PATCH_NO_CONFLICT);
2426 PPATCHINFO pTargetPatch = patmFindActivePatchByEntrypoint(pVM, pConflictGC, true /* include patch hints */);
2427 if (pTargetPatch)
2428 return patmDisableUnusablePatch(pVM, pInstrGC, pConflictGC, pTargetPatch);
2429 return VERR_PATCH_NO_CONFLICT;
2430}
2431
2432/**
2433 * Recompile the code stream until the callback function detects a failure or decides everything is acceptable
2434 *
2435 * @returns VBox status code.
2436 * @param pVM Pointer to the VM.
2437 * @param pInstrGC Guest context pointer to privileged instruction
2438 * @param pCurInstrGC Guest context pointer to the current instruction
2439 * @param pfnPATMR3Recompile Callback for testing the disassembled instruction
2440 * @param pCacheRec Cache record ptr
2441 *
2442 */
2443static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Recompile, PPATMP2GLOOKUPREC pCacheRec)
2444{
2445 DISCPUSTATE cpu;
2446 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2447 int rc = VWRN_CONTINUE_ANALYSIS;
2448 uint32_t cbInstr;
2449 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2450 bool disret;
2451#ifdef LOG_ENABLED
2452 char szOutput[256];
2453#endif
2454
2455 while (rc == VWRN_CONTINUE_RECOMPILE)
2456 {
2457 pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2458 if (pCurInstrHC == NULL)
2459 {
2460 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2461 goto end;
2462 }
2463#ifdef LOG_ENABLED
2464 disret = patmR3DisInstrToStr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE,
2465 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2466 Log(("Recompile: %s", szOutput));
2467#else
2468 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
2469#endif
2470 if (disret == false)
2471 {
2472 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2473
2474 /* Add lookup record for patch to guest address translation */
2475 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
2476 patmPatchGenIllegalInstr(pVM, pPatch);
2477 rc = VINF_SUCCESS; /* Note: don't fail here; we might refuse an important patch!! */
2478 goto end;
2479 }
2480
2481 rc = pfnPATMR3Recompile(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2482 if (rc != VWRN_CONTINUE_RECOMPILE)
2483 {
2484 /* If irqs are inhibited because of the current instruction, then we must make sure the next one is executed! */
2485 if ( rc == VINF_SUCCESS
2486 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2487 {
2488 DISCPUSTATE cpunext;
2489 uint32_t opsizenext;
2490 uint8_t *pNextInstrHC;
2491 RTRCPTR pNextInstrGC = pCurInstrGC + cbInstr;
2492
2493 Log(("patmRecompileCodeStream: irqs inhibited by instruction %RRv\n", pNextInstrGC));
2494
2495 /* Certain instructions (e.g. sti) force the next instruction to be executed before any interrupts can occur.
2496 * Recompile the next instruction as well
2497 */
2498 pNextInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
2499 if (pNextInstrHC == NULL)
2500 {
2501 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2502 goto end;
2503 }
2504 disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpunext, &opsizenext);
2505 if (disret == false)
2506 {
2507 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2508 goto end;
2509 }
2510 switch(cpunext.pCurInstr->uOpcode)
2511 {
2512 case OP_IRET: /* inhibit cleared in generated code */
2513 case OP_SYSEXIT: /* faults; inhibit should be cleared in HC handling */
2514 case OP_HLT:
2515 break; /* recompile these */
2516
2517 default:
2518 if (cpunext.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2519 {
2520 Log(("Unexpected control flow instruction after inhibit irq instruction\n"));
2521
2522 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2523 AssertRC(rc);
2524 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2525 goto end; /** @todo should be ok to ignore instruction fusing in this case */
2526 }
2527 break;
2528 }
2529
2530 /* Note: after a cli we must continue to a proper exit point */
2531 if (cpunext.pCurInstr->uOpcode != OP_CLI)
2532 {
2533 rc = pfnPATMR3Recompile(pVM, &cpunext, pInstrGC, pNextInstrGC, pCacheRec);
2534 if (RT_SUCCESS(rc))
2535 {
2536 rc = VINF_SUCCESS;
2537 goto end;
2538 }
2539 break;
2540 }
2541 else
2542 rc = VWRN_CONTINUE_RECOMPILE;
2543 }
2544 else
2545 break; /* done! */
2546 }
2547
2548 /** @todo continue with the instructions following the jump and then recompile the jump target code */
2549
2550
2551 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction). */
2552 if ( (cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2553 && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J)
2554 && cpu.pCurInstr->uOpcode != OP_CALL /* complete functions are replaced; don't bother here. */
2555 )
2556 {
2557 RCPTRTYPE(uint8_t *) addr = PATMResolveBranch(&cpu, pCurInstrGC);
2558 if (addr == 0)
2559 {
2560 Log(("We don't support far jumps here!! (%08X)\n", cpu.Param1.fUse));
2561 rc = VERR_PATCHING_REFUSED;
2562 break;
2563 }
2564
2565 Log(("Jump encountered target %RRv\n", addr));
2566
2567 /* We don't check if the branch target lies in a valid page as we've already done that in the analysis phase. */
2568 if (!(cpu.pCurInstr->fOpType & DISOPTYPE_UNCOND_CONTROLFLOW))
2569 {
2570 Log(("patmRecompileCodeStream continue passed conditional jump\n"));
2571 /* First we need to finish this linear code stream until the next exit point. */
2572 rc = patmRecompileCodeStream(pVM, pInstrGC, pCurInstrGC+cbInstr, pfnPATMR3Recompile, pCacheRec);
2573 if (RT_FAILURE(rc))
2574 {
2575 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2576 break; //fatal error
2577 }
2578 }
2579
2580 if (patmGuestGCPtrToPatchGCPtr(pVM, pPatch, addr) == 0)
2581 {
2582 /* New code; let's recompile it. */
2583 Log(("patmRecompileCodeStream continue with jump\n"));
2584
2585 /*
2586 * If we are jumping to an existing patch (or within 5 bytes of its entrypoint), then we must temporarily
2587 * disable that patch so we can continue our analysis.
2588 *
2589 * We rely on CSAM to detect and resolve conflicts
2590 */
2591 PPATCHINFO pTargetPatch = patmFindActivePatchByEntrypoint(pVM, addr);
2592 if(pTargetPatch)
2593 {
2594 Log(("Found active patch at target %RRv (%RRv) -> temporarily disabling it!!\n", addr, pTargetPatch->pPrivInstrGC));
2595 PATMR3DisablePatch(pVM, pTargetPatch->pPrivInstrGC);
2596 }
2597
2598 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2599 rc = patmRecompileCodeStream(pVM, pInstrGC, addr, pfnPATMR3Recompile, pCacheRec);
2600 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2601
2602 if(pTargetPatch)
2603 PATMR3EnablePatch(pVM, pTargetPatch->pPrivInstrGC);
2604
2605 if (RT_FAILURE(rc))
2606 {
2607 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2608 break; //done!
2609 }
2610 }
2611 /* Always return to caller here; we're done! */
2612 rc = VINF_SUCCESS;
2613 goto end;
2614 }
2615 else
2616 if (cpu.pCurInstr->fOpType & DISOPTYPE_UNCOND_CONTROLFLOW)
2617 {
2618 rc = VINF_SUCCESS;
2619 goto end;
2620 }
2621 pCurInstrGC += cbInstr;
2622 }
2623end:
2624 Assert(!(pPatch->flags & PATMFL_RECOMPILE_NEXT));
2625 return rc;
2626}
2627
2628
2629/**
2630 * Generate the jump from guest to patch code
2631 *
2632 * @returns VBox status code.
2633 * @param pVM Pointer to the VM.
2634 * @param pPatch Patch record
2635 * @param pCacheRec Guest translation lookup cache record
2636 */
2637static int patmGenJumpToPatch(PVM pVM, PPATCHINFO pPatch, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2638{
2639 uint8_t temp[8];
2640 uint8_t *pPB;
2641 int rc;
2642
2643 Assert(pPatch->cbPatchJump <= sizeof(temp));
2644 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
2645
2646 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2647 Assert(pPB);
2648
2649#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
2650 if (pPatch->flags & PATMFL_JUMP_CONFLICT)
2651 {
2652 Assert(pPatch->pPatchJumpDestGC);
2653
2654 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
2655 {
2656 // jmp [PatchCode]
2657 if (fAddFixup)
2658 {
2659 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump,
2660 pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2661 {
2662 Log(("Relocation failed for the jump in the guest code!!\n"));
2663 return VERR_PATCHING_REFUSED;
2664 }
2665 }
2666
2667 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2668 *(uint32_t *)&temp[1] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
2669 }
2670 else
2671 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
2672 {
2673 // jmp [PatchCode]
2674 if (fAddFixup)
2675 {
2676 if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump,
2677 pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2678 {
2679 Log(("Relocation failed for the jump in the guest code!!\n"));
2680 return VERR_PATCHING_REFUSED;
2681 }
2682 }
2683
2684 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2685 temp[1] = pPatch->aPrivInstr[1]; //jump opcode copied from original instruction
2686 *(uint32_t *)&temp[2] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
2687 }
2688 else
2689 {
2690 Assert(0);
2691 return VERR_PATCHING_REFUSED;
2692 }
2693 }
2694 else
2695#endif
2696 {
2697 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2698
2699 // jmp [PatchCode]
2700 if (fAddFixup)
2701 {
2702 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32,
2703 PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
2704 {
2705 Log(("Relocation failed for the jump in the guest code!!\n"));
2706 return VERR_PATCHING_REFUSED;
2707 }
2708 }
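/* Illustration of the bytes written below, with made-up example addresses: if the privileged
 * instruction is at 0x80001000 and the patch code at 0xA0000000, the guest gets
 * E9 FB EF FF 1F, i.e. jmp rel32 with rel32 = 0xA0000000 - (0x80001000 + 5) = 0x1FFFEFFB. */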
2709 temp[0] = 0xE9; //jmp
2710 *(uint32_t *)&temp[1] = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
2711 }
2712 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2713 AssertRC(rc);
2714
2715 if (rc == VINF_SUCCESS)
2716 pPatch->flags |= PATMFL_PATCHED_GUEST_CODE;
2717
2718 return rc;
2719}
2720
2721/**
2722 * Remove the jump from guest to patch code
2723 *
2724 * @returns VBox status code.
2725 * @param pVM Pointer to the VM.
2726 * @param pPatch Patch record
2727 */
2728static int patmRemoveJumpToPatch(PVM pVM, PPATCHINFO pPatch)
2729{
2730#ifdef DEBUG
2731 DISCPUSTATE cpu;
2732 char szOutput[256];
2733 uint32_t cbInstr, i = 0;
2734 bool disret;
2735
2736 while (i < pPatch->cbPrivInstr)
2737 {
2738 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
2739 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2740 if (disret == false)
2741 break;
2742
2743 Log(("Org patch jump: %s", szOutput));
2744 Assert(cbInstr);
2745 i += cbInstr;
2746 }
2747#endif
2748
2749 /* Restore original code (privileged instruction + following instructions that were overwritten because of the 5/6 byte jmp). */
2750 int rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, pPatch->cbPatchJump);
2751#ifdef DEBUG
2752 if (rc == VINF_SUCCESS)
2753 {
2754 i = 0;
2755 while (i < pPatch->cbPrivInstr)
2756 {
2757 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
2758 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2759 if (disret == false)
2760 break;
2761
2762 Log(("Org instr: %s", szOutput));
2763 Assert(cbInstr);
2764 i += cbInstr;
2765 }
2766 }
2767#endif
2768 pPatch->flags &= ~PATMFL_PATCHED_GUEST_CODE;
2769 return rc;
2770}
2771
2772/**
2773 * Generate the call from guest to patch code
2774 *
2775 * @returns VBox status code.
2776 * @param pVM Pointer to the VM.
2777 * @param pPatch Patch record
2778 * @param pTargetGC Guest context address of the branch target (the patch code to call/jump to)
2779 * @param pCacheRec Guest translation cache record
2780 */
2781static int patmGenCallToPatch(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTargetGC, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2782{
2783 uint8_t temp[8];
2784 uint8_t *pPB;
2785 int rc;
2786
2787 Assert(pPatch->cbPatchJump <= sizeof(temp));
2788
2789 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2790 Assert(pPB);
2791
2792 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2793
2794 // jmp [PatchCode]
2795 if (fAddFixup)
2796 {
2797 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH,
2798 pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
2799 {
2800 Log(("Relocation failed for the jump in the guest code!!\n"));
2801 return VERR_PATCHING_REFUSED;
2802 }
2803 }
2804
2805 Assert(pPatch->aPrivInstr[0] == 0xE8 || pPatch->aPrivInstr[0] == 0xE9); /* call or jmp */
2806 temp[0] = pPatch->aPrivInstr[0];
2807 *(uint32_t *)&temp[1] = (uint32_t)pTargetGC - ((uint32_t)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
2808
2809 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2810 AssertRC(rc);
2811
2812 return rc;
2813}
2814
2815
2816/**
2817 * Patch cli/sti pushf/popf instruction block at specified location
2818 *
2819 * @returns VBox status code.
2820 * @param pVM Pointer to the VM.
2821 * @param pInstrGC Guest context pointer to the privileged instruction
2822 * @param pInstrHC Host context pointer to the privileged instruction
2823 * @param uOpcode Instruction opcode
2824 * @param uOpSize Size of starting instruction
2825 * @param pPatchRec Patch record
2826 *
2827 * @note Returns failure if patching is not allowed or not possible.
2828 *
2829 */
2830static int patmR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2831 uint32_t uOpcode, uint32_t uOpSize, PPATMPATCHREC pPatchRec)
2832{
2833 PPATCHINFO pPatch = &pPatchRec->patch;
2834 int rc = VERR_PATCHING_REFUSED;
2835 uint32_t orgOffsetPatchMem = ~0;
2836 RTRCPTR pInstrStart;
2837 bool fInserted;
2838 NOREF(pInstrHC); NOREF(uOpSize);
2839
2840 /* Save original offset (in case of failures later on) */
2841 /** @todo use the hypervisor heap (that has quite a few consequences for save/restore though) */
2842 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2843
2844 Assert(!(pPatch->flags & (PATMFL_GUEST_SPECIFIC|PATMFL_USER_MODE|PATMFL_TRAPHANDLER)));
2845 switch (uOpcode)
2846 {
2847 case OP_MOV:
2848 break;
2849
2850 case OP_CLI:
2851 case OP_PUSHF:
2852 /* We can 'call' a cli or pushf patch. It will either return to the original guest code when IF is set again, or fault. */
2853 /* Note: special precautions are taken when disabling and enabling such patches. */
2854 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
2855 break;
2856
2857 default:
2858 if (!(pPatch->flags & PATMFL_IDTHANDLER))
2859 {
2860 AssertMsg(0, ("patmR3PatchBlock: Invalid opcode %x\n", uOpcode));
2861 return VERR_INVALID_PARAMETER;
2862 }
2863 }
2864
2865 if (!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)))
2866 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
2867
2868 /* If we're going to insert a patch jump, then the jump itself is not allowed to cross a page boundary. */
2869 if ( (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2870 && PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + SIZEOF_NEARJUMP32)
2871 )
2872 {
2873 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
2874 Log(("Patch jump would cross page boundary -> refuse!!\n"));
2875 rc = VERR_PATCHING_REFUSED;
2876 goto failure;
2877 }
2878
2879 pPatch->nrPatch2GuestRecs = 0;
2880 pInstrStart = pInstrGC;
2881
2882#ifdef PATM_ENABLE_CALL
2883 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
2884#endif
2885
2886 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2887 pPatch->uCurPatchOffset = 0;
2888
2889 if ((pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER)) == PATMFL_IDTHANDLER)
2890 {
2891 Assert(pPatch->flags & PATMFL_INTHANDLER);
2892
2893 /* Install fake cli patch (to clear the virtual IF and check int xx parameters) */
2894 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2895 if (RT_FAILURE(rc))
2896 goto failure;
2897 }
2898
2899 /***************************************************************************************************************************/
2900 /* Note: We can't insert *any* code before a sysenter handler; some linux guests have an invalid stack at this point!!!!! */
2901 /***************************************************************************************************************************/
2902#ifdef VBOX_WITH_STATISTICS
2903 if (!(pPatch->flags & PATMFL_SYSENTER))
2904 {
2905 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2906 if (RT_FAILURE(rc))
2907 goto failure;
2908 }
2909#endif
2910
2911 PATMP2GLOOKUPREC cacheRec;
2912 RT_ZERO(cacheRec);
2913 cacheRec.pPatch = pPatch;
2914
2915 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, &cacheRec);
2916 /* Free leftover lock if any. */
2917 if (cacheRec.Lock.pvMap)
2918 {
2919 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2920 cacheRec.Lock.pvMap = NULL;
2921 }
2922 if (rc != VINF_SUCCESS)
2923 {
2924 Log(("PATMR3PatchCli: patmRecompileCodeStream failed with %d\n", rc));
2925 goto failure;
2926 }
2927
2928 /* Calculated during analysis. */
2929 if (pPatch->cbPatchBlockSize < SIZEOF_NEARJUMP32)
2930 {
2931 /* Most likely cause: we encountered an illegal instruction very early on. */
2932 /** @todo could turn it into an int3 callable patch. */
2933 Log(("patmR3PatchBlock: patch block too small -> refuse\n"));
2934 rc = VERR_PATCHING_REFUSED;
2935 goto failure;
2936 }
2937
2938 /* size of patch block */
2939 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2940
2941
2942 /* Update free pointer in patch memory. */
2943 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2944 /* Round to next 8 byte boundary. */
2945 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
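/* (RT_ALIGN_32 rounds up to the requested power-of-two boundary, e.g. 0x1233 becomes 0x1238,
 * so the next patch block starts 8-byte aligned.) */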
2946
2947 /*
2948 * Insert into patch to guest lookup tree
2949 */
2950 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2951 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2952 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2953 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
2954 if (!fInserted)
2955 {
2956 rc = VERR_PATCHING_REFUSED;
2957 goto failure;
2958 }
2959
2960 /* Note that patmr3SetBranchTargets can install additional patches!! */
2961 rc = patmr3SetBranchTargets(pVM, pPatch);
2962 if (rc != VINF_SUCCESS)
2963 {
2964 Log(("PATMR3PatchCli: patmr3SetBranchTargets failed with %d\n", rc));
2965 goto failure;
2966 }
2967
2968#ifdef LOG_ENABLED
2969 Log(("Patch code ----------------------------------------------------------\n"));
2970 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, &cacheRec);
2971 /* Free leftover lock if any. */
2972 if (cacheRec.Lock.pvMap)
2973 {
2974 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2975 cacheRec.Lock.pvMap = NULL;
2976 }
2977 Log(("Patch code ends -----------------------------------------------------\n"));
2978#endif
2979
2980 /* make a copy of the guest code bytes that will be overwritten */
2981 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
2982
2983 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
2984 AssertRC(rc);
2985
2986 if (pPatch->flags & PATMFL_INT3_REPLACEMENT_BLOCK)
2987 {
2988 /*uint8_t bASMInt3 = 0xCC; - unused */
2989
2990 Log(("patmR3PatchBlock %RRv -> int 3 callable patch.\n", pPatch->pPrivInstrGC));
2991 /* Replace first opcode byte with 'int 3'. */
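/* (int 3 is a single-byte opcode, 0xCC, so unlike the 5-byte patch jump it can be written over
 * any instruction without touching the bytes that follow it.) */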
2992 rc = patmActivateInt3Patch(pVM, pPatch);
2993 if (RT_FAILURE(rc))
2994 goto failure;
2995
2996 /* normal patch can be turned into an int3 patch -> clear patch jump installation flag. */
2997 pPatch->flags &= ~PATMFL_MUST_INSTALL_PATCHJMP;
2998
2999 pPatch->flags &= ~PATMFL_INSTR_HINT;
3000 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
3001 }
3002 else
3003 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
3004 {
3005 Assert(!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)));
3006 /* now insert a jump in the guest code */
3007 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
3008 AssertRC(rc);
3009 if (RT_FAILURE(rc))
3010 goto failure;
3011
3012 }
3013
3014 patmR3DbgAddPatch(pVM, pPatchRec);
3015
3016 PATM_LOG_RAW_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
3017
3018 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3019 pPatch->pTempInfo->nrIllegalInstr = 0;
3020
3021 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
3022
3023 pPatch->uState = PATCH_ENABLED;
3024 return VINF_SUCCESS;
3025
3026failure:
3027 if (pPatchRec->CoreOffset.Key)
3028 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3029
3030 patmEmptyTree(pVM, &pPatch->FixupTree);
3031 pPatch->nrFixups = 0;
3032
3033 patmEmptyTree(pVM, &pPatch->JumpTree);
3034 pPatch->nrJumpRecs = 0;
3035
3036 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3037 pPatch->pTempInfo->nrIllegalInstr = 0;
3038
3039 /* Turn this cli patch into a dummy. */
3040 pPatch->uState = PATCH_REFUSED;
3041 pPatch->pPatchBlockOffset = 0;
3042
3043 // Give back the patch memory we no longer need
3044 Assert(orgOffsetPatchMem != (uint32_t)~0);
3045 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3046
3047 return rc;
3048}
3049
3050/**
3051 * Patch IDT handler
3052 *
3053 * @returns VBox status code.
3054 * @param pVM Pointer to the VM.
3055 * @param pInstrGC Guest context pointer to privileged instruction
3056 * @param uOpSize Size of starting instruction
3057 * @param pPatchRec Patch record
3058 * @param pCacheRec Cache record ptr
3059 *
3060 * @note returns failure if patching is not allowed or not possible
3061 *
3062 */
3063static int patmIdtHandler(PVM pVM, RTRCPTR pInstrGC, uint32_t uOpSize, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3064{
3065 PPATCHINFO pPatch = &pPatchRec->patch;
3066 bool disret;
3067 DISCPUSTATE cpuPush, cpuJmp;
3068 uint32_t cbInstr;
3069 RTRCPTR pCurInstrGC = pInstrGC;
3070 uint8_t *pCurInstrHC, *pInstrHC;
3071 uint32_t orgOffsetPatchMem = ~0;
3072
3073 pInstrHC = pCurInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
3074 AssertReturn(pCurInstrHC, VERR_PAGE_NOT_PRESENT);
3075
3076 /*
3077 * In Linux it's often the case that many interrupt handlers push a predefined value onto the stack
3078 * and then jump to a common entrypoint. In order not to waste a lot of memory, we will check for this
3079 * condition here and only patch the common entrypoint once.
3080 */
3081 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuPush, &cbInstr);
3082 Assert(disret);
3083 if (disret && cpuPush.pCurInstr->uOpcode == OP_PUSH)
3084 {
3085 RTRCPTR pJmpInstrGC;
3086 int rc;
3087 pCurInstrGC += cbInstr;
3088
3089 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuJmp, &cbInstr);
3090 if ( disret
3091 && cpuJmp.pCurInstr->uOpcode == OP_JMP
3092 && (pJmpInstrGC = PATMResolveBranch(&cpuJmp, pCurInstrGC))
3093 )
3094 {
3095 bool fInserted;
3096 PPATMPATCHREC pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
3097 if (pJmpPatch == 0)
3098 {
3099 /* Patch it first! */
3100 rc = PATMR3InstallPatch(pVM, pJmpInstrGC, pPatch->flags | PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT);
3101 if (rc != VINF_SUCCESS)
3102 goto failure;
3103 pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
3104 Assert(pJmpPatch);
3105 }
3106 if (pJmpPatch->patch.uState != PATCH_ENABLED)
3107 goto failure;
3108
3109 /* save original offset (in case of failures later on) */
3110 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3111
3112 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3113 pPatch->uCurPatchOffset = 0;
3114 pPatch->nrPatch2GuestRecs = 0;
3115
3116#ifdef VBOX_WITH_STATISTICS
3117 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3118 if (RT_FAILURE(rc))
3119 goto failure;
3120#endif
3121
3122 /* Install fake cli patch (to clear the virtual IF) */
3123 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
3124 if (RT_FAILURE(rc))
3125 goto failure;
3126
3127 /* Add lookup record for patch to guest address translation (for the push) */
3128 patmR3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pInstrGC, PATM_LOOKUP_BOTHDIR);
3129
3130 /* Duplicate push. */
3131 rc = patmPatchGenDuplicate(pVM, pPatch, &cpuPush, pInstrGC);
3132 if (RT_FAILURE(rc))
3133 goto failure;
3134
3135 /* Generate jump to common entrypoint. */
3136 rc = patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, PATCHCODE_PTR_GC(&pJmpPatch->patch));
3137 if (RT_FAILURE(rc))
3138 goto failure;
3139
3140 /* size of patch block */
3141 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3142
3143 /* Update free pointer in patch memory. */
3144 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3145 /* Round to next 8 byte boundary */
3146 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3147
3148 /* There's no jump from guest to patch code. */
3149 pPatch->cbPatchJump = 0;
3150
3151
3152#ifdef LOG_ENABLED
3153 Log(("Patch code ----------------------------------------------------------\n"));
3154 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3155 Log(("Patch code ends -----------------------------------------------------\n"));
3156#endif
3157 Log(("Successfully installed IDT handler patch at %RRv\n", pInstrGC));
3158
3159 /*
3160 * Insert into patch to guest lookup tree
3161 */
3162 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3163 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3164 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3165 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3166 patmR3DbgAddPatch(pVM, pPatchRec);
3167
3168 pPatch->uState = PATCH_ENABLED;
3169
3170 return VINF_SUCCESS;
3171 }
3172 }
3173failure:
3174 /* Give back the patch memory we no longer need */
3175 if (orgOffsetPatchMem != (uint32_t)~0)
3176 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3177
3178 return patmR3PatchBlock(pVM, pInstrGC, pInstrHC, OP_CLI, uOpSize, pPatchRec);
3179}
3180
3181/**
3182 * Install a trampoline to call a guest trap handler directly
3183 *
3184 * @returns VBox status code.
3185 * @param pVM Pointer to the VM.
3186 * @param pInstrGC Guest context pointer to privileged instruction
3187 * @param pPatchRec Patch record
3188 * @param pCacheRec Cache record ptr
3189 *
3190 */
3191static int patmInstallTrapTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3192{
3193 PPATCHINFO pPatch = &pPatchRec->patch;
3194 int rc = VERR_PATCHING_REFUSED;
3195 uint32_t orgOffsetPatchMem = ~0;
3196 bool fInserted;
3197
3198 // save original offset (in case of failures later on)
3199 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3200
3201 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3202 pPatch->uCurPatchOffset = 0;
3203 pPatch->nrPatch2GuestRecs = 0;
3204
3205#ifdef VBOX_WITH_STATISTICS
3206 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3207 if (RT_FAILURE(rc))
3208 goto failure;
3209#endif
3210
3211 rc = patmPatchGenTrapEntry(pVM, pPatch, pInstrGC);
3212 if (RT_FAILURE(rc))
3213 goto failure;
3214
3215 /* size of patch block */
3216 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3217
3218 /* Update free pointer in patch memory. */
3219 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3220 /* Round to next 8 byte boundary */
3221 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3222
3223 /* There's no jump from guest to patch code. */
3224 pPatch->cbPatchJump = 0;
3225
3226#ifdef LOG_ENABLED
3227 Log(("Patch code ----------------------------------------------------------\n"));
3228 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3229 Log(("Patch code ends -----------------------------------------------------\n"));
3230#endif
3231 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "TRAP handler");
3232 Log(("Successfully installed Trap Trampoline patch at %RRv\n", pInstrGC));
3233
3234 /*
3235 * Insert into patch to guest lookup tree
3236 */
3237 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3238 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3239 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3240 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3241 patmR3DbgAddPatch(pVM, pPatchRec);
3242
3243 pPatch->uState = PATCH_ENABLED;
3244 return VINF_SUCCESS;
3245
3246failure:
3247 AssertMsgFailed(("Failed to install trap handler trampoline!!\n"));
3248
3249 /* Turn this cli patch into a dummy. */
3250 pPatch->uState = PATCH_REFUSED;
3251 pPatch->pPatchBlockOffset = 0;
3252
3253 /* Give back the patch memory we no longer need */
3254 Assert(orgOffsetPatchMem != (uint32_t)~0);
3255 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3256
3257 return rc;
3258}
3259
3260
3261#ifdef LOG_ENABLED
3262/**
3263 * Check if the instruction is patched as a common idt handler
3264 *
3265 * @returns true or false
3266 * @param pVM Pointer to the VM.
3267 * @param pInstrGC Guest context pointer to the instruction
3268 *
3269 */
3270static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC)
3271{
3272 PPATMPATCHREC pRec;
3273
3274 pRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
3275 if (pRec && pRec->patch.flags & PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT)
3276 return true;
3277 return false;
3278}
3279#endif /* LOG_ENABLED */
3280
3281
3282/**
3283 * Duplicates a complete function
3284 *
3285 * @returns VBox status code.
3286 * @param pVM Pointer to the VM.
3287 * @param pInstrGC Guest context pointer to privileged instruction
3288 * @param pPatchRec Patch record
3289 * @param pCacheRec Cache record ptr
3290 *
3291 */
3292static int patmDuplicateFunction(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3293{
3294 PPATCHINFO pPatch = &pPatchRec->patch;
3295 int rc = VERR_PATCHING_REFUSED;
3296 uint32_t orgOffsetPatchMem = ~0;
3297 bool fInserted;
3298
3299 Log(("patmDuplicateFunction %RRv\n", pInstrGC));
3300 /* Save original offset (in case of failures later on). */
3301 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3302
3303 /* We will not go on indefinitely with call instruction handling. */
3304 if (pVM->patm.s.ulCallDepth > PATM_MAX_CALL_DEPTH)
3305 {
3306 Log(("patmDuplicateFunction: maximum call depth reached!!\n"));
3307 return VERR_PATCHING_REFUSED;
3308 }
3309
3310 pVM->patm.s.ulCallDepth++;
3311
3312#ifdef PATM_ENABLE_CALL
3313 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
3314#endif
3315
3316 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
3317
3318 pPatch->nrPatch2GuestRecs = 0;
3319 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3320 pPatch->uCurPatchOffset = 0;
3321
3322 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3323 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3324 if (RT_FAILURE(rc))
3325 goto failure;
3326
3327#ifdef VBOX_WITH_STATISTICS
3328 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3329 if (RT_FAILURE(rc))
3330 goto failure;
3331#endif
3332
3333 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pCacheRec);
3334 if (rc != VINF_SUCCESS)
3335 {
3336 Log(("PATMR3PatchCli: patmRecompileCodeStream failed with %d\n", rc));
3337 goto failure;
3338 }
3339
3340 //size of patch block
3341 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3342
3343 //update free pointer in patch memory
3344 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3345 /* Round to next 8 byte boundary. */
3346 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3347
3348 pPatch->uState = PATCH_ENABLED;
3349
3350 /*
3351 * Insert into patch to guest lookup tree
3352 */
3353 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3354 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3355 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3356 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3357 if (!fInserted)
3358 {
3359 rc = VERR_PATCHING_REFUSED;
3360 goto failure;
3361 }
3362
3363 /* Note that patmr3SetBranchTargets can install additional patches!! */
3364 rc = patmr3SetBranchTargets(pVM, pPatch);
3365 if (rc != VINF_SUCCESS)
3366 {
3367 Log(("PATMR3PatchCli: patmr3SetBranchTargets failed with %d\n", rc));
3368 goto failure;
3369 }
3370
3371 patmR3DbgAddPatch(pVM, pPatchRec);
3372
3373#ifdef LOG_ENABLED
3374 Log(("Patch code ----------------------------------------------------------\n"));
3375 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3376 Log(("Patch code ends -----------------------------------------------------\n"));
3377#endif
3378
3379 Log(("Successfully installed function duplication patch at %RRv\n", pInstrGC));
3380
3381 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3382 pPatch->pTempInfo->nrIllegalInstr = 0;
3383
3384 pVM->patm.s.ulCallDepth--;
3385 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledFunctionPatches);
3386 return VINF_SUCCESS;
3387
3388failure:
3389 if (pPatchRec->CoreOffset.Key)
3390 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3391
3392 patmEmptyTree(pVM, &pPatch->FixupTree);
3393 pPatch->nrFixups = 0;
3394
3395 patmEmptyTree(pVM, &pPatch->JumpTree);
3396 pPatch->nrJumpRecs = 0;
3397
3398 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3399 pPatch->pTempInfo->nrIllegalInstr = 0;
3400
3401 /* Turn this cli patch into a dummy. */
3402 pPatch->uState = PATCH_REFUSED;
3403 pPatch->pPatchBlockOffset = 0;
3404
3405 // Give back the patch memory we no longer need
3406 Assert(orgOffsetPatchMem != (uint32_t)~0);
3407 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3408
3409 pVM->patm.s.ulCallDepth--;
3410 Log(("patmDuplicateFunction %RRv failed!!\n", pInstrGC));
3411 return rc;
3412}
3413
3414/**
3415 * Creates trampoline code to jump inside an existing patch
3416 *
3417 * @returns VBox status code.
3418 * @param pVM Pointer to the VM.
3419 * @param pInstrGC Guest context pointer to privileged instruction
3420 * @param pPatchRec Patch record
3421 *
3422 */
3423static int patmCreateTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3424{
3425 PPATCHINFO pPatch = &pPatchRec->patch;
3426 RTRCPTR pPage, pPatchTargetGC = 0;
3427 uint32_t orgOffsetPatchMem = ~0;
3428 int rc = VERR_PATCHING_REFUSED;
3429 PPATCHINFO pPatchToJmp = NULL; /**< Patch the trampoline jumps to. */
3430 PTRAMPREC pTrampRec = NULL; /**< Trampoline record used to find the patch. */
3431 bool fInserted = false;
3432
3433 Log(("patmCreateTrampoline %RRv\n", pInstrGC));
3434 /* Save original offset (in case of failures later on). */
3435 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3436
3437 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3438 /** @todo we already checked this before */
3439 pPage = pInstrGC & PAGE_BASE_GC_MASK;
3440
3441 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3442 if (pPatchPage)
3443 {
3444 uint32_t i;
3445
3446 for (i=0;i<pPatchPage->cCount;i++)
3447 {
3448 if (pPatchPage->papPatch[i])
3449 {
3450 pPatchToJmp = pPatchPage->papPatch[i];
3451
3452 if ( (pPatchToJmp->flags & PATMFL_DUPLICATE_FUNCTION)
3453 && pPatchToJmp->uState == PATCH_ENABLED)
3454 {
3455 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatchToJmp, pInstrGC);
3456 if (pPatchTargetGC)
3457 {
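 /* Locate the patch-to-guest record for the target offset so it can be flagged
    as an external jump target below. */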
3458 uint32_t offsetPatch = pPatchTargetGC - pVM->patm.s.pPatchMemGC;
3459 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatchToJmp->Patch2GuestAddrTree, offsetPatch, false);
3460 Assert(pPatchToGuestRec);
3461
3462 pPatchToGuestRec->fJumpTarget = true;
3463 Assert(pPatchTargetGC != pPatchToJmp->pPrivInstrGC);
3464 Log(("patmCreateTrampoline: generating jump to code inside patch at %RRv (patch target %RRv)\n", pPatchToJmp->pPrivInstrGC, pPatchTargetGC));
3465 break;
3466 }
3467 }
3468 }
3469 }
3470 }
3471 AssertReturn(pPatchPage && pPatchTargetGC && pPatchToJmp, VERR_PATCHING_REFUSED);
3472
3473 /*
3474 * Only record the trampoline patch if this is the first patch to the target
3475 * or we recorded other patches already.
3476 * The goal is to refuse refreshing function duplicates if the guest
3477 * modifies code after a saved state was loaded because it is not possible
3478 * to save the relation between trampoline and target without changing the
3479 * saved state version.
3480 */
3481 if ( !(pPatchToJmp->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
3482 || pPatchToJmp->pTrampolinePatchesHead)
3483 {
3484 pPatchToJmp->flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
3485 pTrampRec = (PTRAMPREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pTrampRec));
3486 if (!pTrampRec)
3487 return VERR_NO_MEMORY; /* or better return VERR_PATCHING_REFUSED to let the VM continue? */
3488
3489 pTrampRec->pPatchTrampoline = pPatchRec;
3490 }
3491
3492 pPatch->nrPatch2GuestRecs = 0;
3493 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3494 pPatch->uCurPatchOffset = 0;
3495
3496 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3497 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3498 if (RT_FAILURE(rc))
3499 goto failure;
3500
3501#ifdef VBOX_WITH_STATISTICS
3502 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3503 if (RT_FAILURE(rc))
3504 goto failure;
3505#endif
3506
3507 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC);
3508 if (RT_FAILURE(rc))
3509 goto failure;
3510
3511 /*
3512 * Insert into patch to guest lookup tree
3513 */
3514 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3515 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3516 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3517 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3518 if (!fInserted)
3519 {
3520 rc = VERR_PATCHING_REFUSED;
3521 goto failure;
3522 }
3523 patmR3DbgAddPatch(pVM, pPatchRec);
3524
3525 /* size of patch block */
3526 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3527
3528 /* Update free pointer in patch memory. */
3529 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3530 /* Round to next 8 byte boundary */
3531 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3532
3533 /* There's no jump from guest to patch code. */
3534 pPatch->cbPatchJump = 0;
3535
3536 /* Enable the patch. */
3537 pPatch->uState = PATCH_ENABLED;
3538 /* We allow this patch to be called as a function. */
3539 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
3540
3541 if (pTrampRec)
3542 {
3543 pTrampRec->pNext = pPatchToJmp->pTrampolinePatchesHead;
3544 pPatchToJmp->pTrampolinePatchesHead = pTrampRec;
3545 }
3546 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledTrampoline);
3547 return VINF_SUCCESS;
3548
3549failure:
3550 if (pPatchRec->CoreOffset.Key)
3551 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3552
3553 patmEmptyTree(pVM, &pPatch->FixupTree);
3554 pPatch->nrFixups = 0;
3555
3556 patmEmptyTree(pVM, &pPatch->JumpTree);
3557 pPatch->nrJumpRecs = 0;
3558
3559 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3560 pPatch->pTempInfo->nrIllegalInstr = 0;
3561
3562 /* Turn this cli patch into a dummy. */
3563 pPatch->uState = PATCH_REFUSED;
3564 pPatch->pPatchBlockOffset = 0;
3565
3566 // Give back the patch memory we no longer need
3567 Assert(orgOffsetPatchMem != (uint32_t)~0);
3568 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3569
3570 if (pTrampRec)
3571 MMR3HeapFree(pTrampRec);
3572
3573 return rc;
3574}
3575
3576
3577/**
3578 * Patch branch target function for call/jump at specified location.
3579 * (in response to a VINF_PATM_DUPLICATE_FUNCTION GC exit reason)
3580 *
3581 * @returns VBox status code.
3582 * @param pVM Pointer to the VM.
3583 * @param pCtx Pointer to the guest CPU context.
3584 *
3585 */
3586VMMR3_INT_DECL(int) PATMR3DuplicateFunctionRequest(PVM pVM, PCPUMCTX pCtx)
3587{
3588 RTRCPTR pBranchTarget, pPage;
3589 int rc;
3590 RTRCPTR pPatchTargetGC = 0;
3591 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
3592
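 /* Register convention for this request (set up by the patch code that raised it):
    EDX holds the branch target, EDI the address of the lookup cache slot, and the
    result is returned in EAX below. */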
3593 pBranchTarget = pCtx->edx;
3594 pBranchTarget = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pBranchTarget);
3595
3596 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3597 pPage = pBranchTarget & PAGE_BASE_GC_MASK;
3598
3599 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3600 if (pPatchPage)
3601 {
3602 uint32_t i;
3603
3604 for (i=0;i<pPatchPage->cCount;i++)
3605 {
3606 if (pPatchPage->papPatch[i])
3607 {
3608 PPATCHINFO pPatch = pPatchPage->papPatch[i];
3609
3610 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
3611 && pPatch->uState == PATCH_ENABLED)
3612 {
3613 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pBranchTarget);
3614 if (pPatchTargetGC)
3615 {
3616 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateUseExisting);
3617 break;
3618 }
3619 }
3620 }
3621 }
3622 }
3623
3624 if (pPatchTargetGC)
3625 {
3626 /* Create a trampoline that also sets PATM_ASMFIX_INTERRUPTFLAG. */
3627 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_TRAMPOLINE);
3628 }
3629 else
3630 {
3631 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
3632 }
3633
3634 if (rc == VINF_SUCCESS)
3635 {
3636 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pBranchTarget);
3637 Assert(pPatchTargetGC);
3638 }
3639
3640 if (pPatchTargetGC)
3641 {
3642 pCtx->eax = pPatchTargetGC;
3643 pCtx->eax = pCtx->eax - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC; /* make it relative */
3644 }
3645 else
3646 {
3647 /* We add a dummy entry into the lookup cache so we won't get bombarded with the same requests over and over again. */
3648 pCtx->eax = 0;
3649 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQFailed);
3650 }
3651 Assert(PATMIsPatchGCAddr(pVM, pCtx->edi));
3652 rc = patmAddBranchToLookupCache(pVM, pCtx->edi, pBranchTarget, pCtx->eax);
3653 AssertRC(rc);
3654
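 /* Skip the special (illegal) instruction that raised this request and resume. */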
3655 pCtx->eip += PATM_ILLEGAL_INSTR_SIZE;
3656 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQSuccess);
3657 return VINF_SUCCESS;
3658}
3659
3660/**
3661 * Replaces a function call by a call to an existing function duplicate (or jmp -> jmp)
3662 *
3663 * @returns VBox status code.
3664 * @param pVM Pointer to the VM.
3665 * @param pCpu Disassembly CPU structure ptr
3666 * @param pInstrGC Guest context pointer to privileged instruction
3667 * @param pCacheRec Cache record ptr
3668 *
3669 */
3670static int patmReplaceFunctionCall(PVM pVM, DISCPUSTATE *pCpu, RTRCPTR pInstrGC, PPATMP2GLOOKUPREC pCacheRec)
3671{
3672 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3673 int rc = VERR_PATCHING_REFUSED;
3674 DISCPUSTATE cpu;
3675 RTRCPTR pTargetGC;
3676 PPATMPATCHREC pPatchFunction;
3677 uint32_t cbInstr;
3678 bool disret;
3679
3680 Assert(pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL);
3681 Assert((pCpu->pCurInstr->uOpcode == OP_CALL || pCpu->pCurInstr->uOpcode == OP_JMP) && pCpu->cbInstr == SIZEOF_NEARJUMP32);
3682
3683 if ((pCpu->pCurInstr->uOpcode != OP_CALL && pCpu->pCurInstr->uOpcode != OP_JMP) || pCpu->cbInstr != SIZEOF_NEARJUMP32)
3684 {
3685 rc = VERR_PATCHING_REFUSED;
3686 goto failure;
3687 }
3688
3689 pTargetGC = PATMResolveBranch(pCpu, pInstrGC);
3690 if (pTargetGC == 0)
3691 {
3692 Log(("We don't support far jumps here!! (%08X)\n", pCpu->Param1.fUse));
3693 rc = VERR_PATCHING_REFUSED;
3694 goto failure;
3695 }
3696
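 /* Look up an existing function duplicate for the call target; if there is none,
    try to follow a possible call -> jmp indirection below. */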
3697 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3698 if (pPatchFunction == NULL)
3699 {
3700 for(;;)
3701 {
3702 /* It could be an indirect call (call -> jmp dest).
3703 * Note that it's dangerous to assume the jump will never change...
3704 */
3705 uint8_t *pTmpInstrHC;
3706
3707 pTmpInstrHC = patmR3GCVirtToHCVirt(pVM, pCacheRec, pTargetGC);
3708 Assert(pTmpInstrHC);
3709 if (pTmpInstrHC == 0)
3710 break;
3711
3712 disret = patmR3DisInstr(pVM, pPatch, pTargetGC, pTmpInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
3713 if (disret == false || cpu.pCurInstr->uOpcode != OP_JMP)
3714 break;
3715
3716 pTargetGC = PATMResolveBranch(&cpu, pTargetGC);
3717 if (pTargetGC == 0)
3718 {
3719 break;
3720 }
3721
3722 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3723 break;
3724 }
3725 if (pPatchFunction == 0)
3726 {
3727 AssertMsgFailed(("Unable to find duplicate function %RRv\n", pTargetGC));
3728 rc = VERR_PATCHING_REFUSED;
3729 goto failure;
3730 }
3731 }
3732
3733 // make a copy of the guest code bytes that will be overwritten
3734 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
3735
3736 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3737 AssertRC(rc);
3738
3739 /* Now replace the original call in the guest code */
3740 rc = patmGenCallToPatch(pVM, pPatch, PATCHCODE_PTR_GC(&pPatchFunction->patch), pCacheRec, true);
3741 AssertRC(rc);
3742 if (RT_FAILURE(rc))
3743 goto failure;
3744
3745 /* Lowest and highest address for write monitoring. */
3746 pPatch->pInstrGCLowest = pInstrGC;
3747 pPatch->pInstrGCHighest = pInstrGC + pCpu->cbInstr;
3748 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "Call");
3749
3750 Log(("Successfully installed function replacement patch at %RRv\n", pInstrGC));
3751
3752 pPatch->uState = PATCH_ENABLED;
3753 return VINF_SUCCESS;
3754
3755failure:
3756 /* Turn this patch into a dummy. */
3757 pPatch->uState = PATCH_REFUSED;
3758
3759 return rc;
3760}
3761
3762/**
3763 * Replace the address in an MMIO instruction with the cached version.
3764 *
3765 * @returns VBox status code.
3766 * @param pVM Pointer to the VM.
3767 * @param pInstrGC Guest context pointer to privileged instruction
3768 * @param pCpu Disassembly CPU structure ptr
3769 * @param pCacheRec Cache record ptr
3770 *
3771 * @note returns failure if patching is not allowed or not possible
3772 *
3773 */
3774static int patmPatchMMIOInstr(PVM pVM, RTRCPTR pInstrGC, DISCPUSTATE *pCpu, PPATMP2GLOOKUPREC pCacheRec)
3775{
3776 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3777 uint8_t *pPB;
3778 int rc = VERR_PATCHING_REFUSED;
3779
3780 Assert(pVM->patm.s.mmio.pCachedData);
3781 if (!pVM->patm.s.mmio.pCachedData)
3782 goto failure;
3783
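 /* Only instructions whose second operand is a 32-bit displacement (the MMIO address
    itself) can be handled here; the displacement is replaced with the cached data address below. */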
3784 if (pCpu->Param2.fUse != DISUSE_DISPLACEMENT32)
3785 goto failure;
3786
3787 pPB = patmR3GCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
3788 if (pPB == 0)
3789 goto failure;
3790
3791 /* Add relocation record for cached data access. */
3792 if (patmPatchAddReloc32(pVM, pPatch, &pPB[pCpu->cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE, pPatch->pPrivInstrGC,
3793 pVM->patm.s.mmio.pCachedData) != VINF_SUCCESS)
3794 {
3795 Log(("Relocation failed for cached mmio address!!\n"));
3796 return VERR_PATCHING_REFUSED;
3797 }
3798 PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "MMIO patch old instruction:", "");
3799
3800 /* Save original instruction. */
3801 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3802 AssertRC(rc);
3803
3804 pPatch->cbPatchJump = pPatch->cbPrivInstr; /* bit of a misnomer in this case; size of replacement instruction. */
3805
3806 /* Replace address with that of the cached item. */
3807 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pInstrGC + pCpu->cbInstr - sizeof(RTRCPTR),
3808 &pVM->patm.s.mmio.pCachedData, sizeof(RTRCPTR));
3809 AssertRC(rc);
3810 if (RT_FAILURE(rc))
3811 {
3812 goto failure;
3813 }
3814
3815 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "MMIO");
3816 pVM->patm.s.mmio.pCachedData = 0;
3817 pVM->patm.s.mmio.GCPhys = 0;
3818 pPatch->uState = PATCH_ENABLED;
3819 return VINF_SUCCESS;
3820
3821failure:
3822 /* Turn this patch into a dummy. */
3823 pPatch->uState = PATCH_REFUSED;
3824
3825 return rc;
3826}
3827
3828
3829/**
3830 * Replace the address in an MMIO instruction with the cached version. (instruction is part of an existing patch)
3831 *
3832 * @returns VBox status code.
3833 * @param pVM Pointer to the VM.
3834 * @param pInstrGC Guest context pointer to privileged instruction
3835 * @param pPatch Patch record
3836 *
3837 * @note returns failure if patching is not allowed or not possible
3838 *
3839 */
3840static int patmPatchPATMMMIOInstr(PVM pVM, RTRCPTR pInstrGC, PPATCHINFO pPatch)
3841{
3842 DISCPUSTATE cpu;
3843 uint32_t cbInstr;
3844 bool disret;
3845 uint8_t *pInstrHC;
3846
3847 AssertReturn(pVM->patm.s.mmio.pCachedData, VERR_INVALID_PARAMETER);
3848
3849 /* Convert GC to HC address. */
3850 pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pInstrGC);
3851 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
3852
3853 /* Disassemble mmio instruction. */
3854 disret = patmR3DisInstrNoStrOpMode(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE,
3855 &cpu, &cbInstr);
3856 if (disret == false)
3857 {
3858 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
3859 return VERR_PATCHING_REFUSED;
3860 }
3861
3862 AssertMsg(cbInstr <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", cbInstr));
3863 if (cbInstr > MAX_INSTR_SIZE)
3864 return VERR_PATCHING_REFUSED;
3865 if (cpu.Param2.fUse != DISUSE_DISPLACEMENT32)
3866 return VERR_PATCHING_REFUSED;
3867
3868 /* Add relocation record for cached data access. */
3869 if (patmPatchAddReloc32(pVM, pPatch, &pInstrHC[cpu.cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE) != VINF_SUCCESS)
3870 {
3871 Log(("Relocation failed for cached mmio address!!\n"));
3872 return VERR_PATCHING_REFUSED;
3873 }
3874 /* Replace address with that of the cached item. */
3875 *(RTRCPTR *)&pInstrHC[cpu.cbInstr - sizeof(RTRCPTR)] = pVM->patm.s.mmio.pCachedData;
3876
3877 /* Lowest and highest address for write monitoring. */
3878 pPatch->pInstrGCLowest = pInstrGC;
3879 pPatch->pInstrGCHighest = pInstrGC + cpu.cbInstr;
3880
3881 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "MMIO");
3882 pVM->patm.s.mmio.pCachedData = 0;
3883 pVM->patm.s.mmio.GCPhys = 0;
3884 return VINF_SUCCESS;
3885}
3886
3887/**
3888 * Activates an int3 patch
3889 *
3890 * @returns VBox status code.
3891 * @param pVM Pointer to the VM.
3892 * @param pPatch Patch record
3893 */
3894static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3895{
3896 uint8_t bASMInt3 = 0xCC;
3897 int rc;
3898
3899 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3900 Assert(pPatch->uState != PATCH_ENABLED);
3901
3902 /* Replace first opcode byte with 'int 3'. */
3903 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, &bASMInt3, sizeof(bASMInt3));
3904 AssertRC(rc);
3905
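 /* Only a single byte of guest code is replaced, so record that as the patch jump size
    (cbPatchJump doubles as the size of the replacement instruction here). */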
3906 pPatch->cbPatchJump = sizeof(bASMInt3);
3907
3908 return rc;
3909}
3910
3911/**
3912 * Deactivates an int3 patch
3913 *
3914 * @returns VBox status code.
3915 * @param pVM Pointer to the VM.
3916 * @param pPatch Patch record
3917 */
3918static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3919{
3920 uint8_t ASMInt3 = 0xCC;
3921 int rc;
3922
3923 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3924 Assert(pPatch->uState == PATCH_ENABLED || pPatch->uState == PATCH_DIRTY);
3925
3926 /* Restore first opcode byte. */
3927 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, sizeof(ASMInt3));
3928 AssertRC(rc);
3929 return rc;
3930}
3931
3932/**
3933 * Replace an instruction with a breakpoint (0xCC), that is handled dynamically
3934 * in the raw-mode context.
3935 *
3936 * @returns VBox status code.
3937 * @param pVM Pointer to the VM.
3938 * @param pInstrGC Guest context pointer to privileged instruction
3939 * @param pInstrHC Host context pointer to privileged instruction
3940 * @param pCpu Disassembly CPU structure ptr
3941 * @param pPatch Patch record
3942 *
3943 * @note returns failure if patching is not allowed or not possible
3944 *
3945 */
3946int patmR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATCHINFO pPatch)
3947{
3948 uint8_t bASMInt3 = 0xCC;
3949 int rc;
3950
3951 /* Note: Do not use patch memory here! It might be called during patch installation too. */
3952 PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "patmR3PatchInstrInt3:", "");
3953
3954 /* Save the original instruction. */
3955 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3956 AssertRC(rc);
3957 pPatch->cbPatchJump = sizeof(bASMInt3); /* bit of a misnomer in this case; size of replacement instruction. */
3958
3959 pPatch->flags |= PATMFL_INT3_REPLACEMENT;
3960
3961 /* Replace first opcode byte with 'int 3'. */
3962 rc = patmActivateInt3Patch(pVM, pPatch);
3963 if (RT_FAILURE(rc))
3964 goto failure;
3965
3966 /* Lowest and highest address for write monitoring. */
3967 pPatch->pInstrGCLowest = pInstrGC;
3968 pPatch->pInstrGCHighest = pInstrGC + pCpu->cbInstr;
3969
3970 pPatch->uState = PATCH_ENABLED;
3971 return VINF_SUCCESS;
3972
3973failure:
3974 /* Turn this patch into a dummy. */
3975 return VERR_PATCHING_REFUSED;
3976}
3977
3978#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
3979/**
3980 * Patch a jump instruction at specified location
3981 *
3982 * @returns VBox status code.
3983 * @param pVM Pointer to the VM.
3984 * @param pInstrGC Guest context pointer to privileged instruction
3985 * @param pInstrHC Host context pointer to privileged instruction
3986 * @param pCpu Disassembly CPU structure ptr
3987 * @param pPatchRec Patch record
3988 *
3989 * @note returns failure if patching is not allowed or not possible
3990 *
3991 */
3992int patmPatchJump(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATMPATCHREC pPatchRec)
3993{
3994 PPATCHINFO pPatch = &pPatchRec->patch;
3995 int rc = VERR_PATCHING_REFUSED;
3996
3997 pPatch->pPatchBlockOffset = 0; /* doesn't use patch memory */
3998 pPatch->uCurPatchOffset = 0;
3999 pPatch->cbPatchBlockSize = 0;
4000 pPatch->flags |= PATMFL_SINGLE_INSTRUCTION;
4001
4002 /*
4003 * Instruction replacements such as these should never be interrupted. I've added code to EM.cpp to
4004 * make sure this never happens. (unless a trap is triggered (intentionally or not))
4005 */
4006 switch (pCpu->pCurInstr->uOpcode)
4007 {
4008 case OP_JO:
4009 case OP_JNO:
4010 case OP_JC:
4011 case OP_JNC:
4012 case OP_JE:
4013 case OP_JNE:
4014 case OP_JBE:
4015 case OP_JNBE:
4016 case OP_JS:
4017 case OP_JNS:
4018 case OP_JP:
4019 case OP_JNP:
4020 case OP_JL:
4021 case OP_JNL:
4022 case OP_JLE:
4023 case OP_JNLE:
4024 case OP_JMP:
4025 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
4026 Assert(pCpu->Param1.fUse & DISUSE_IMMEDIATE32_REL);
4027 if (!(pCpu->Param1.fUse & DISUSE_IMMEDIATE32_REL))
4028 goto failure;
4029
4030 Assert(pCpu->cbInstr == SIZEOF_NEARJUMP32 || pCpu->cbInstr == SIZEOF_NEAR_COND_JUMP32);
4031 if (pCpu->cbInstr != SIZEOF_NEARJUMP32 && pCpu->cbInstr != SIZEOF_NEAR_COND_JUMP32)
4032 goto failure;
4033
4034 if (PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + pCpu->cbInstr))
4035 {
4036 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
4037 AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
4038 rc = VERR_PATCHING_REFUSED;
4039 goto failure;
4040 }
4041
4042 break;
4043
4044 default:
4045 goto failure;
4046 }
4047
4048 // make a copy of the guest code bytes that will be overwritten
4049 Assert(pCpu->cbInstr <= sizeof(pPatch->aPrivInstr));
4050 Assert(pCpu->cbInstr >= SIZEOF_NEARJUMP32);
4051 pPatch->cbPatchJump = pCpu->cbInstr;
4052
4053 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
4054 AssertRC(rc);
4055
4056 /* Now insert a jump in the guest code. */
4057 /*
4058 * A conflict jump patch needs to be treated differently; we'll just replace the relative jump address with one that
4059 * references the target instruction in the conflict patch.
4060 */
4061 RTRCPTR pJmpDest = patmR3GuestGCPtrToPatchGCPtrSimple(pVM, pInstrGC + pCpu->cbInstr + (int32_t)pCpu->Param1.uValue);
4062
4063 AssertMsg(pJmpDest, ("patmR3GuestGCPtrToPatchGCPtrSimple failed for %RRv\n", pInstrGC + pCpu->cbInstr + (int32_t)pCpu->Param1.uValue));
4064 pPatch->pPatchJumpDestGC = pJmpDest;
4065
4066 PATMP2GLOOKUPREC cacheRec;
4067 RT_ZERO(cacheRec);
4068 cacheRec.pPatch = pPatch;
4069
4070 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
4071 /* Free leftover lock if any. */
4072 if (cacheRec.Lock.pvMap)
4073 {
4074 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4075 cacheRec.Lock.pvMap = NULL;
4076 }
4077 AssertRC(rc);
4078 if (RT_FAILURE(rc))
4079 goto failure;
4080
4081 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
4082
4083 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
4084 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
4085
4086 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledJump);
4087
4088 /* Lowest and highest address for write monitoring. */
4089 pPatch->pInstrGCLowest = pInstrGC;
4090 pPatch->pInstrGCHighest = pInstrGC + pPatch->cbPatchJump;
4091
4092 pPatch->uState = PATCH_ENABLED;
4093 return VINF_SUCCESS;
4094
4095failure:
4096 /* Turn this cli patch into a dummy. */
4097 pPatch->uState = PATCH_REFUSED;
4098
4099 return rc;
4100}
4101#endif /* PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES */
4102
4103
4104/**
4105 * Gives hint to PATM about supervisor guest instructions
4106 *
4107 * @returns VBox status code.
4108 * @param pVM Pointer to the VM.
4109 * @param pInstrGC Guest context pointer to privileged instruction
4110 * @param flags Patch flags
4111 */
4112VMMR3_INT_DECL(int) PATMR3AddHint(PVM pVM, RTRCPTR pInstrGC, uint32_t flags)
4113{
4114 Assert(pInstrGC);
4115 Assert(flags == PATMFL_CODE32);
4116
4117 Log(("PATMR3AddHint %RRv\n", pInstrGC));
4118 return PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_INSTR_HINT);
4119}
4120
4121/**
4122 * Patch privileged instruction at specified location
4123 *
4124 * @returns VBox status code.
4125 * @param pVM Pointer to the VM.
4126 * @param pInstrGC Guest context pointer to privileged instruction (0:32 flat address)
4127 * @param flags Patch flags
4128 *
4129 * @note returns failure if patching is not allowed or not possible
4130 */
4131VMMR3_INT_DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
4132{
4133 DISCPUSTATE cpu;
4134 R3PTRTYPE(uint8_t *) pInstrHC;
4135 uint32_t cbInstr;
4136 PPATMPATCHREC pPatchRec;
4137 PCPUMCTX pCtx = 0;
4138 bool disret;
4139 int rc;
4140 PVMCPU pVCpu = VMMGetCpu0(pVM);
4141 LogFlow(("PATMR3InstallPatch: %08x (%#llx)\n", pInstrGC, flags));
4142
4143 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
4144
4145 if ( !pVM
4146 || pInstrGC == 0
4147 || (flags & ~(PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_SYSENTER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_REPLACE_FUNCTION_CALL|PATMFL_GUEST_SPECIFIC|PATMFL_INT3_REPLACEMENT|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_MMIO_ACCESS|PATMFL_TRAMPOLINE|PATMFL_INSTR_HINT|PATMFL_JUMP_CONFLICT)))
4148 {
4149 AssertFailed();
4150 return VERR_INVALID_PARAMETER;
4151 }
4152
4153 if (PATMIsEnabled(pVM) == false)
4154 return VERR_PATCHING_REFUSED;
4155
4156 /* Test for patch conflict only with patches that actually change guest code. */
4157 if (!(flags & (PATMFL_GUEST_SPECIFIC|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAMPOLINE)))
4158 {
4159 PPATCHINFO pConflictPatch = patmFindActivePatchByEntrypoint(pVM, pInstrGC);
4160 AssertReleaseMsg(pConflictPatch == 0, ("Unable to patch overwritten instruction at %RRv (%RRv)\n", pInstrGC, pConflictPatch->pPrivInstrGC));
4161 if (pConflictPatch != 0)
4162 return VERR_PATCHING_REFUSED;
4163 }
4164
4165 if (!(flags & PATMFL_CODE32))
4166 {
4167 /** @todo Only 32 bits code right now */
4168 AssertMsgFailed(("PATMR3InstallPatch: We don't support 16 bits code at this moment!!\n"));
4169 return VERR_NOT_IMPLEMENTED;
4170 }
4171
4172 /* We ran out of patch memory; don't bother anymore. */
4173 if (pVM->patm.s.fOutOfMemory == true)
4174 return VERR_PATCHING_REFUSED;
4175
4176#if 1 /* DONT COMMIT ENABLED! */
4177 /* Blacklisted NT4SP1 areas - debugging why we sometimes crash early on, */
4178 if ( 0
4179 //|| (pInstrGC - 0x80010000U) < 0x10000U // NT4SP1 HAL
4180 //|| (pInstrGC - 0x80010000U) < 0x5000U // NT4SP1 HAL
4181 //|| (pInstrGC - 0x80013000U) < 0x2000U // NT4SP1 HAL
4182 //|| (pInstrGC - 0x80014000U) < 0x1000U // NT4SP1 HAL
4183 //|| (pInstrGC - 0x80014000U) < 0x800U // NT4SP1 HAL
4184 //|| (pInstrGC - 0x80014400U) < 0x400U // NT4SP1 HAL
4185 //|| (pInstrGC - 0x80014400U) < 0x200U // NT4SP1 HAL
4186 //|| (pInstrGC - 0x80014400U) < 0x100U // NT4SP1 HAL
4187 //|| (pInstrGC - 0x80014500U) < 0x100U // NT4SP1 HAL - negative
4188 //|| (pInstrGC - 0x80014400U) < 0x80U // NT4SP1 HAL
4189 //|| (pInstrGC - 0x80014400U) < 0x80U // NT4SP1 HAL
4190 //|| (pInstrGC - 0x80014440U) < 0x40U // NT4SP1 HAL
4191 //|| (pInstrGC - 0x80014440U) < 0x20U // NT4SP1 HAL
4192 || pInstrGC == 0x80014447 /* KfLowerIrql */
4193 || 0)
4194 {
4195 Log(("PATMR3InstallPatch: %08x is blacklisted\n", pInstrGC));
4196 return VERR_PATCHING_REFUSED;
4197 }
4198#endif
4199
4200 /* Make sure the code selector is wide open; otherwise refuse. */
4201 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
4202 if (CPUMGetGuestCPL(pVCpu) == 0)
4203 {
4204 RTRCPTR pInstrGCFlat = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pInstrGC);
4205 if (pInstrGCFlat != pInstrGC)
4206 {
4207 Log(("PATMR3InstallPatch: code selector not wide open: %04x:%RRv != %RRv eflags=%08x\n", pCtx->cs.Sel, pInstrGCFlat, pInstrGC, pCtx->eflags.u32));
4208 return VERR_PATCHING_REFUSED;
4209 }
4210 }
4211
4212 /* Note: the OpenBSD specific check will break if we allow additional patches to be installed (int 3) */
4213 if (!(flags & PATMFL_GUEST_SPECIFIC))
4214 {
4215 /* New code. Make sure CSAM has a go at it first. */
4216 CSAMR3CheckCode(pVM, pInstrGC);
4217 }
4218
4219 /* Note: obsolete */
4220 if ( PATMIsPatchGCAddr(pVM, pInstrGC)
4221 && (flags & PATMFL_MMIO_ACCESS))
4222 {
4223 RTRCUINTPTR offset;
4224 void *pvPatchCoreOffset;
4225
4226 /* Find the patch record. */
4227 offset = pInstrGC - pVM->patm.s.pPatchMemGC;
4228 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
4229 if (pvPatchCoreOffset == NULL)
4230 {
4231 AssertMsgFailed(("PATMR3InstallPatch: patch not found at address %RRv!!\n", pInstrGC));
4232 return VERR_PATCH_NOT_FOUND; //fatal error
4233 }
4234 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
4235
4236 return patmPatchPATMMMIOInstr(pVM, pInstrGC, &pPatchRec->patch);
4237 }
4238
4239 AssertReturn(!PATMIsPatchGCAddr(pVM, pInstrGC), VERR_PATCHING_REFUSED);
4240
4241 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4242 if (pPatchRec)
4243 {
4244 Assert(!(flags & PATMFL_TRAMPOLINE));
4245
4246 /* Hints about existing patches are ignored. */
4247 if (flags & PATMFL_INSTR_HINT)
4248 return VERR_PATCHING_REFUSED;
4249
4250 if (pPatchRec->patch.uState == PATCH_DISABLE_PENDING)
4251 {
4252 Log(("PATMR3InstallPatch: disable operation is pending for patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
4253 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
4254 Assert(pPatchRec->patch.uState == PATCH_DISABLED);
4255 }
4256
4257 if (pPatchRec->patch.uState == PATCH_DISABLED)
4258 {
4259 /* A patch, for which we previously received a hint, will be enabled and turned into a normal patch. */
4260 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4261 {
4262 Log(("Enabling HINTED patch %RRv\n", pInstrGC));
4263 pPatchRec->patch.flags &= ~PATMFL_INSTR_HINT;
4264 }
4265 else
4266 Log(("Enabling patch %RRv again\n", pInstrGC));
4267
4268 /** @todo we shouldn't disable and enable patches too often (it's relatively cheap, but pointless if it always happens) */
4269 rc = PATMR3EnablePatch(pVM, pInstrGC);
4270 if (RT_SUCCESS(rc))
4271 return VWRN_PATCH_ENABLED;
4272
4273 return rc;
4274 }
4275 if ( pPatchRec->patch.uState == PATCH_ENABLED
4276 || pPatchRec->patch.uState == PATCH_DIRTY)
4277 {
4278 /*
4279 * The patch might have been overwritten.
4280 */
4281 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
4282 if (pPatchRec->patch.uState != PATCH_REFUSED && pPatchRec->patch.uState != PATCH_UNUSABLE)
4283 {
4284 /* Patch must have been overwritten; remove it and pretend nothing happened. */
4285 Log(("Patch an existing patched instruction?!? (%RRv)\n", pInstrGC));
4286 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_MMIO_ACCESS|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
4287 {
4288 if (flags & PATMFL_IDTHANDLER)
4289 pPatchRec->patch.flags |= (flags & (PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER|PATMFL_INTHANDLER)); /* update the type */
4290
4291 return VERR_PATM_ALREADY_PATCHED; /* already done once */
4292 }
4293 }
4294 rc = PATMR3RemovePatch(pVM, pInstrGC);
4295 if (RT_FAILURE(rc))
4296 return VERR_PATCHING_REFUSED;
4297 }
4298 else
4299 {
4300 AssertMsg(pPatchRec->patch.uState == PATCH_REFUSED || pPatchRec->patch.uState == PATCH_UNUSABLE, ("Patch an existing patched instruction?!? (%RRv, state=%d)\n", pInstrGC, pPatchRec->patch.uState));
4301 /* already tried it once! */
4302 return VERR_PATCHING_REFUSED;
4303 }
4304 }
4305
4306 RTGCPHYS GCPhys;
4307 rc = PGMGstGetPage(pVCpu, pInstrGC, NULL, &GCPhys);
4308 if (rc != VINF_SUCCESS)
4309 {
4310 Log(("PGMGstGetPage failed with %Rrc\n", rc));
4311 return rc;
4312 }
4313 /* Disallow patching instructions inside ROM code; complete function duplication is allowed though. */
4314 if ( !(flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAMPOLINE))
4315 && !PGMPhysIsGCPhysNormal(pVM, GCPhys))
4316 {
4317 Log(("Code at %RGv (phys %RGp) is in a ROM, MMIO or invalid page - refused\n", pInstrGC, GCPhys));
4318 return VERR_PATCHING_REFUSED;
4319 }
4320
4321 /* Initialize cache record for guest address translations. */
4322 bool fInserted;
4323 PATMP2GLOOKUPREC cacheRec;
4324 RT_ZERO(cacheRec);
4325
4326 pInstrHC = patmR3GCVirtToHCVirt(pVM, &cacheRec, pInstrGC);
4327 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
4328
4329 /* Allocate patch record. */
4330 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
4331 if (RT_FAILURE(rc))
4332 {
4333 Log(("Out of memory!!!!\n"));
4334 return VERR_NO_MEMORY;
4335 }
4336 pPatchRec->Core.Key = pInstrGC;
4337 pPatchRec->patch.uState = PATCH_REFUSED; /* default value */
4338 /* Insert patch record into the lookup tree. */
4339 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
4340 Assert(fInserted);
4341
4342 pPatchRec->patch.pPrivInstrGC = pInstrGC;
4343 pPatchRec->patch.flags = flags;
4344 pPatchRec->patch.uOpMode = (flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT;
4345 pPatchRec->patch.pTrampolinePatchesHead = NULL;
4346
4347 pPatchRec->patch.pInstrGCLowest = pInstrGC;
4348 pPatchRec->patch.pInstrGCHighest = pInstrGC;
4349
4350 if (!(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION | PATMFL_IDTHANDLER | PATMFL_SYSENTER | PATMFL_TRAMPOLINE)))
4351 {
4352 /*
4353 * Close proximity to an unusable patch is a possible hint that this patch would turn out to be dangerous too!
4354 */
4355 PPATMPATCHREC pPatchNear = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, (pInstrGC + SIZEOF_NEARJUMP32 - 1), false);
4356 if (pPatchNear)
4357 {
4358 if (pPatchNear->patch.uState == PATCH_UNUSABLE && pInstrGC < pPatchNear->patch.pPrivInstrGC && pInstrGC + SIZEOF_NEARJUMP32 > pPatchNear->patch.pPrivInstrGC)
4359 {
4360 Log(("Dangerous patch; would overwrite the unusable patch at %RRv\n", pPatchNear->patch.pPrivInstrGC));
4361
4362 pPatchRec->patch.uState = PATCH_UNUSABLE;
4363 /*
4364 * Leave the new patch active as it's marked unusable; to prevent us from checking it over and over again
4365 */
4366 return VERR_PATCHING_REFUSED;
4367 }
4368 }
4369 }
4370
4371 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
4372 if (pPatchRec->patch.pTempInfo == 0)
4373 {
4374 Log(("Out of memory!!!!\n"));
4375 return VERR_NO_MEMORY;
4376 }
4377
4378 disret = patmR3DisInstrNoStrOpMode(pVM, &pPatchRec->patch, pInstrGC, NULL, PATMREAD_ORGCODE, &cpu, &cbInstr);
4379 if (disret == false)
4380 {
4381 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
4382 return VERR_PATCHING_REFUSED;
4383 }
4384
4385 AssertMsg(cbInstr <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", cbInstr));
4386 if (cbInstr > MAX_INSTR_SIZE)
4387 return VERR_PATCHING_REFUSED;
4388
4389 pPatchRec->patch.cbPrivInstr = cbInstr;
4390 pPatchRec->patch.opcode = cpu.pCurInstr->uOpcode;
4391
4392 /* Restricted hinting for now. */
4393 Assert(!(flags & PATMFL_INSTR_HINT) || cpu.pCurInstr->uOpcode == OP_CLI);
4394
4395 /* Initialize cache record patch pointer. */
4396 cacheRec.pPatch = &pPatchRec->patch;
4397
4398 /* Allocate statistics slot */
4399 if (pVM->patm.s.uCurrentPatchIdx < PATM_STAT_MAX_COUNTERS)
4400 {
4401 pPatchRec->patch.uPatchIdx = pVM->patm.s.uCurrentPatchIdx++;
4402 }
4403 else
4404 {
4405 Log(("WARNING: Patch index wrap around!!\n"));
4406 pPatchRec->patch.uPatchIdx = PATM_STAT_INDEX_DUMMY;
4407 }
4408
4409 if (pPatchRec->patch.flags & PATMFL_TRAPHANDLER)
4410 {
4411 rc = patmInstallTrapTrampoline(pVM, pInstrGC, pPatchRec, &cacheRec);
4412 }
4413 else
4414 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION ))
4415 {
4416 rc = patmDuplicateFunction(pVM, pInstrGC, pPatchRec, &cacheRec);
4417 }
4418 else
4419 if (pPatchRec->patch.flags & PATMFL_TRAMPOLINE)
4420 {
4421 rc = patmCreateTrampoline(pVM, pInstrGC, pPatchRec);
4422 }
4423 else
4424 if (pPatchRec->patch.flags & PATMFL_REPLACE_FUNCTION_CALL)
4425 {
4426 rc = patmReplaceFunctionCall(pVM, &cpu, pInstrGC, &cacheRec);
4427 }
4428 else
4429 if (pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT)
4430 {
4431 rc = patmR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4432 }
4433 else
4434 if (pPatchRec->patch.flags & PATMFL_MMIO_ACCESS)
4435 {
4436 rc = patmPatchMMIOInstr(pVM, pInstrGC, &cpu, &cacheRec);
4437 }
4438 else
4439 if (pPatchRec->patch.flags & (PATMFL_IDTHANDLER|PATMFL_SYSENTER))
4440 {
4441 if (pPatchRec->patch.flags & PATMFL_SYSENTER)
4442 pPatchRec->patch.flags |= PATMFL_IDTHANDLER; /* we treat a sysenter handler as an IDT handler */
4443
4444 rc = patmIdtHandler(pVM, pInstrGC, cbInstr, pPatchRec, &cacheRec);
4445#ifdef VBOX_WITH_STATISTICS
4446 if ( rc == VINF_SUCCESS
4447 && (pPatchRec->patch.flags & PATMFL_SYSENTER))
4448 {
4449 pVM->patm.s.uSysEnterPatchIdx = pPatchRec->patch.uPatchIdx;
4450 }
4451#endif
4452 }
4453 else
4454 if (pPatchRec->patch.flags & PATMFL_GUEST_SPECIFIC)
4455 {
4456 switch (cpu.pCurInstr->uOpcode)
4457 {
4458 case OP_SYSENTER:
4459 case OP_PUSH:
4460 rc = patmR3InstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4461 if (rc == VINF_SUCCESS)
4462 {
4463 if (rc == VINF_SUCCESS)
4464 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4465 return rc;
4466 }
4467 break;
4468
4469 default:
4470 rc = VERR_NOT_IMPLEMENTED;
4471 break;
4472 }
4473 }
4474 else
4475 {
4476 switch (cpu.pCurInstr->uOpcode)
4477 {
4478 case OP_SYSENTER:
4479 rc = patmR3InstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4480 if (rc == VINF_SUCCESS)
4481 {
4482 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4483 return VINF_SUCCESS;
4484 }
4485 break;
4486
4487#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
4488 case OP_JO:
4489 case OP_JNO:
4490 case OP_JC:
4491 case OP_JNC:
4492 case OP_JE:
4493 case OP_JNE:
4494 case OP_JBE:
4495 case OP_JNBE:
4496 case OP_JS:
4497 case OP_JNS:
4498 case OP_JP:
4499 case OP_JNP:
4500 case OP_JL:
4501 case OP_JNL:
4502 case OP_JLE:
4503 case OP_JNLE:
4504 case OP_JECXZ:
4505 case OP_LOOP:
4506 case OP_LOOPNE:
4507 case OP_LOOPE:
4508 case OP_JMP:
4509 if (pPatchRec->patch.flags & PATMFL_JUMP_CONFLICT)
4510 {
4511 rc = patmPatchJump(pVM, pInstrGC, pInstrHC, &cpu, pPatchRec);
4512 break;
4513 }
4514 return VERR_NOT_IMPLEMENTED;
4515#endif
4516
4517 case OP_PUSHF:
4518 case OP_CLI:
4519 Log(("PATMR3InstallPatch %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4520 rc = patmR3PatchBlock(pVM, pInstrGC, pInstrHC, cpu.pCurInstr->uOpcode, cbInstr, pPatchRec);
4521 break;
4522
4523#ifndef VBOX_WITH_SAFE_STR
4524 case OP_STR:
4525#endif
4526 case OP_SGDT:
4527 case OP_SLDT:
4528 case OP_SIDT:
4529 case OP_CPUID:
4530 case OP_LSL:
4531 case OP_LAR:
4532 case OP_SMSW:
4533 case OP_VERW:
4534 case OP_VERR:
4535 case OP_IRET:
4536#ifdef VBOX_WITH_RAW_RING1
4537 case OP_MOV:
4538#endif
4539 rc = patmR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4540 break;
4541
4542 default:
4543 return VERR_NOT_IMPLEMENTED;
4544 }
4545 }
4546
4547 if (rc != VINF_SUCCESS)
4548 {
4549 if (pPatchRec && pPatchRec->patch.nrPatch2GuestRecs)
4550 {
4551 patmEmptyTreeU32(pVM, &pPatchRec->patch.Patch2GuestAddrTree);
4552 pPatchRec->patch.nrPatch2GuestRecs = 0;
4553 }
4554 pVM->patm.s.uCurrentPatchIdx--;
4555 }
4556 else
4557 {
4558 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
4559 AssertRCReturn(rc, rc);
4560
4561 /* Keep track of the upper and lower boundaries of patched instructions */
4562 if (pPatchRec->patch.pInstrGCLowest < pVM->patm.s.pPatchedInstrGCLowest)
4563 pVM->patm.s.pPatchedInstrGCLowest = pPatchRec->patch.pInstrGCLowest;
4564 if (pPatchRec->patch.pInstrGCHighest > pVM->patm.s.pPatchedInstrGCHighest)
4565 pVM->patm.s.pPatchedInstrGCHighest = pPatchRec->patch.pInstrGCHighest;
4566
4567 Log(("Patch lowest %RRv highest %RRv\n", pPatchRec->patch.pInstrGCLowest, pPatchRec->patch.pInstrGCHighest));
4568 Log(("Global lowest %RRv highest %RRv\n", pVM->patm.s.pPatchedInstrGCLowest, pVM->patm.s.pPatchedInstrGCHighest));
4569
4570 STAM_COUNTER_ADD(&pVM->patm.s.StatInstalled, 1);
4571 STAM_COUNTER_ADD(&pVM->patm.s.StatPATMMemoryUsed, pPatchRec->patch.cbPatchBlockSize);
4572
4573 rc = VINF_SUCCESS;
4574
4575 /* Patch hints are not enabled by default. Only when they are actually encountered. */
4576 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4577 {
4578 rc = PATMR3DisablePatch(pVM, pInstrGC);
4579 AssertRCReturn(rc, rc);
4580 }
4581
4582#ifdef VBOX_WITH_STATISTICS
4583 /* Register statistics counter */
4584 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
4585 {
4586 STAMR3RegisterCallback(pVM, &pPatchRec->patch, STAMVISIBILITY_NOT_GUI, STAMUNIT_GOOD_BAD, patmResetStat, patmPrintStat, "Patch statistics",
4587 "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4588#ifndef DEBUG_sandervl
4589 /* Full breakdown for the GUI. */
4590 STAMR3RegisterF(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx], STAMTYPE_RATIO_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_GOOD_BAD, PATMPatchType(pVM, &pPatchRec->patch),
4591 "/PATM/PatchBD/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4592 STAMR3RegisterF(pVM, &pPatchRec->patch.pPatchBlockOffset,STAMTYPE_X32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/PatchBD/0x%RRv/offPatchBlock", pPatchRec->patch.pPrivInstrGC);
4593 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchBlockSize,STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/PatchBD/0x%RRv/cbPatchBlockSize", pPatchRec->patch.pPrivInstrGC);
4594 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchJump, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/PatchBD/0x%RRv/cbPatchJump", pPatchRec->patch.pPrivInstrGC);
4595 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPrivInstr, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/PatchBD/0x%RRv/cbPrivInstr", pPatchRec->patch.pPrivInstrGC);
4596 STAMR3RegisterF(pVM, &pPatchRec->patch.cCodeWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/cCodeWrites", pPatchRec->patch.pPrivInstrGC);
4597 STAMR3RegisterF(pVM, &pPatchRec->patch.cInvalidWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/cInvalidWrites", pPatchRec->patch.pPrivInstrGC);
4598 STAMR3RegisterF(pVM, &pPatchRec->patch.cTraps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/cTraps", pPatchRec->patch.pPrivInstrGC);
4599 STAMR3RegisterF(pVM, &pPatchRec->patch.flags, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/PatchBD/0x%RRv/flags", pPatchRec->patch.pPrivInstrGC);
4600 STAMR3RegisterF(pVM, &pPatchRec->patch.nrJumpRecs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/nrJumpRecs", pPatchRec->patch.pPrivInstrGC);
4601 STAMR3RegisterF(pVM, &pPatchRec->patch.nrFixups, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/nrFixups", pPatchRec->patch.pPrivInstrGC);
4602 STAMR3RegisterF(pVM, &pPatchRec->patch.opcode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/PatchBD/0x%RRv/opcode", pPatchRec->patch.pPrivInstrGC);
4603 STAMR3RegisterF(pVM, &pPatchRec->patch.uOldState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/PatchBD/0x%RRv/uOldState", pPatchRec->patch.pPrivInstrGC);
4604 STAMR3RegisterF(pVM, &pPatchRec->patch.uOpMode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/PatchBD/0x%RRv/uOpMode", pPatchRec->patch.pPrivInstrGC);
4605 /// @todo change the state to be a callback so we can get a state mnemonic instead.
4606 STAMR3RegisterF(pVM, &pPatchRec->patch.uState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/PatchBD/0x%RRv/uState", pPatchRec->patch.pPrivInstrGC);
4607#endif
4608 }
4609#endif
4610
4611 /* Add debug symbol. */
4612 patmR3DbgAddPatch(pVM, pPatchRec);
4613 }
4614 /* Free leftover lock if any. */
4615 if (cacheRec.Lock.pvMap)
4616 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4617 return rc;
4618}
4619
4620/**
4621 * Query instruction size
4622 *
4623 * @returns The instruction size in bytes, or 0 if it could not be determined.
4624 * @param pVM Pointer to the VM.
4625 * @param pPatch Patch record
4626 * @param pInstrGC Instruction address
4627 */
4628static uint32_t patmGetInstrSize(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
4629{
4630 uint8_t *pInstrHC;
4631 PGMPAGEMAPLOCK Lock;
4632
4633 int rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pInstrGC, (const void **)&pInstrHC, &Lock);
4634 if (rc == VINF_SUCCESS)
4635 {
4636 DISCPUSTATE cpu;
4637 bool disret;
4638 uint32_t cbInstr;
4639
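        /* Disassemble the original guest instruction to determine its size. */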
4640 disret = patmR3DisInstr(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE | PATMREAD_NOCHECK, &cpu, &cbInstr);
4641 PGMPhysReleasePageMappingLock(pVM, &Lock);
4642 if (disret)
4643 return cbInstr;
4644 }
4645 return 0;
4646}
4647
4648/**
4649 * Add patch to page record
4650 *
4651 * @returns VBox status code.
4652 * @param pVM Pointer to the VM.
4653 * @param pPage Page address
4654 * @param pPatch Patch record
4655 */
4656int patmAddPatchToPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4657{
4658 PPATMPATCHPAGE pPatchPage;
4659 int rc;
4660
4661 Log(("patmAddPatchToPage: insert patch %RHv to page %RRv\n", pPatch, pPage));
4662
4663 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4664 if (pPatchPage)
4665 {
4666 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4667 if (pPatchPage->cCount == pPatchPage->cMaxPatches)
4668 {
4669 uint32_t cMaxPatchesOld = pPatchPage->cMaxPatches;
4670 PPATCHINFO *papPatchOld = pPatchPage->papPatch;
4671
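            /* The array is full: allocate a larger one, copy the existing entries over and free the old array. */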
4672 pPatchPage->cMaxPatches += PATMPATCHPAGE_PREALLOC_INCREMENT;
4673 rc = MMHyperAlloc(pVM, sizeof(pPatchPage->papPatch[0]) * pPatchPage->cMaxPatches, 0, MM_TAG_PATM_PATCH,
4674 (void **)&pPatchPage->papPatch);
4675 if (RT_FAILURE(rc))
4676 {
4677 Log(("Out of memory!!!!\n"));
4678 return VERR_NO_MEMORY;
4679 }
4680 memcpy(pPatchPage->papPatch, papPatchOld, cMaxPatchesOld * sizeof(pPatchPage->papPatch[0]));
4681 MMHyperFree(pVM, papPatchOld);
4682 }
4683 pPatchPage->papPatch[pPatchPage->cCount] = pPatch;
4684 pPatchPage->cCount++;
4685 }
4686 else
4687 {
4688 bool fInserted;
4689
4690 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHPAGE), 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage);
4691 if (RT_FAILURE(rc))
4692 {
4693 Log(("Out of memory!!!!\n"));
4694 return VERR_NO_MEMORY;
4695 }
4696 pPatchPage->Core.Key = pPage;
4697 pPatchPage->cCount = 1;
4698 pPatchPage->cMaxPatches = PATMPATCHPAGE_PREALLOC_INCREMENT;
4699
4700 rc = MMHyperAlloc(pVM, sizeof(pPatchPage->papPatch[0]) * PATMPATCHPAGE_PREALLOC_INCREMENT, 0, MM_TAG_PATM_PATCH,
4701 (void **)&pPatchPage->papPatch);
4702 if (RT_FAILURE(rc))
4703 {
4704 Log(("Out of memory!!!!\n"));
4705 MMHyperFree(pVM, pPatchPage);
4706 return VERR_NO_MEMORY;
4707 }
4708 pPatchPage->papPatch[0] = pPatch;
4709
4710 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, &pPatchPage->Core);
4711 Assert(fInserted);
4712 pVM->patm.s.cPageRecords++;
4713
4714 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageInserted);
4715 }
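    /* Let CSAM monitor the page so writes to it are caught (self-modifying code detection, see PATMR3PatchWrite below). */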
4716 CSAMR3MonitorPage(pVM, pPage, CSAM_TAG_PATM);
4717
4718    /* Get the lowest patched guest instruction at or above the page start. */
4719 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4720 Assert(pGuestToPatchRec);
4721 if (pGuestToPatchRec)
4722 {
4723 LogFlow(("patmAddPatchToPage: lowest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pLowestAddrGC));
4724 if ( pPatchPage->pLowestAddrGC == 0
4725 || pPatchPage->pLowestAddrGC > (RTRCPTR)pGuestToPatchRec->Core.Key)
4726 {
4727 RTRCUINTPTR offset;
4728
4729 pPatchPage->pLowestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4730
4731 offset = pPatchPage->pLowestAddrGC & PAGE_OFFSET_MASK;
4732 /* If we're too close to the page boundary, then make sure an
4733 instruction from the previous page doesn't cross the
4734 boundary itself. */
4735 if (offset && offset < MAX_INSTR_SIZE)
4736 {
4737                /* Get the closest patched guest instruction just below the page start. */
4738 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage-1, false);
4739
4740 if (pGuestToPatchRec)
4741 {
4742 uint32_t size = patmGetInstrSize(pVM, pPatch, (RTRCPTR)pGuestToPatchRec->Core.Key);
4743 if ((RTRCUINTPTR)pGuestToPatchRec->Core.Key + size > pPage)
4744 {
4745 pPatchPage->pLowestAddrGC = pPage;
4746 LogFlow(("patmAddPatchToPage: new lowest %RRv\n", pPatchPage->pLowestAddrGC));
4747 }
4748 }
4749 }
4750 }
4751 }
4752
4753    /* Get the highest patched guest instruction at or below the page end. */
4754 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage+PAGE_SIZE-1, false);
4755 Assert(pGuestToPatchRec);
4756 if (pGuestToPatchRec)
4757 {
4758 LogFlow(("patmAddPatchToPage: highest patch page address %RRv current highest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pHighestAddrGC));
4759 if ( pPatchPage->pHighestAddrGC == 0
4760 || pPatchPage->pHighestAddrGC <= (RTRCPTR)pGuestToPatchRec->Core.Key)
4761 {
4762 pPatchPage->pHighestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4763 /* Increase by instruction size. */
4764 uint32_t size = patmGetInstrSize(pVM, pPatch, pPatchPage->pHighestAddrGC);
4765//// Assert(size);
4766 pPatchPage->pHighestAddrGC += size;
4767 LogFlow(("patmAddPatchToPage: new highest %RRv\n", pPatchPage->pHighestAddrGC));
4768 }
4769 }
4770
4771 return VINF_SUCCESS;
4772}
4773
4774/**
4775 * Remove patch from page record
4776 *
4777 * @returns VBox status code.
4778 * @param pVM Pointer to the VM.
4779 * @param pPage Page address
4780 * @param pPatch Patch record
4781 */
4782int patmRemovePatchFromPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4783{
4784 PPATMPATCHPAGE pPatchPage;
4785 int rc;
4786
4787 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4788 Assert(pPatchPage);
4789
4790 if (!pPatchPage)
4791 return VERR_INVALID_PARAMETER;
4792
4793 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4794
4795 Log(("patmRemovePatchPage: remove patch %RHv from page %RRv\n", pPatch, pPage));
4796 if (pPatchPage->cCount > 1)
4797 {
4798 uint32_t i;
4799
4800 /* Used by multiple patches */
4801 for (i = 0; i < pPatchPage->cCount; i++)
4802 {
4803 if (pPatchPage->papPatch[i] == pPatch)
4804 {
4805 /* close the gap between the remaining pointers. */
4806 uint32_t cNew = --pPatchPage->cCount;
4807 if (i < cNew)
4808 pPatchPage->papPatch[i] = pPatchPage->papPatch[cNew];
4809 pPatchPage->papPatch[cNew] = NULL;
4810 return VINF_SUCCESS;
4811 }
4812 }
4813 AssertMsgFailed(("Unable to find patch %RHv in page %RRv\n", pPatch, pPage));
4814 }
4815 else
4816 {
4817 PPATMPATCHPAGE pPatchNode;
4818
4819 Log(("patmRemovePatchFromPage %RRv\n", pPage));
4820
4821 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageRemoved);
4822 pPatchNode = (PPATMPATCHPAGE)RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4823 Assert(pPatchNode && pPatchNode == pPatchPage);
4824
4825 Assert(pPatchPage->papPatch);
4826 rc = MMHyperFree(pVM, pPatchPage->papPatch);
4827 AssertRC(rc);
4828 rc = MMHyperFree(pVM, pPatchPage);
4829 AssertRC(rc);
4830 pVM->patm.s.cPageRecords--;
4831 }
4832 return VINF_SUCCESS;
4833}
4834
4835/**
4836 * Insert page records for all guest pages that contain instructions that were recompiled for this patch
4837 *
4838 * @returns VBox status code.
4839 * @param pVM Pointer to the VM.
4840 * @param pPatch Patch record
4841 */
4842int patmInsertPatchPages(PVM pVM, PPATCHINFO pPatch)
4843{
4844 int rc;
4845 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4846
4847 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4848 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4849 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4850
4851 /** @todo optimize better (large gaps between current and next used page) */
4852 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4853 {
4854 /* Get the closest guest instruction (from above) */
4855 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4856 if ( pGuestToPatchRec
4857 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage)
4858 )
4859 {
4860 /* Code in page really patched -> add record */
4861 rc = patmAddPatchToPage(pVM, pPage, pPatch);
4862 AssertRC(rc);
4863 }
4864 }
4865 pPatch->flags |= PATMFL_CODE_MONITORED;
4866 return VINF_SUCCESS;
4867}
4868
4869/**
4870 * Remove page records for all guest pages that contain instructions that were recompiled for this patch
4871 *
4872 * @returns VBox status code.
4873 * @param pVM Pointer to the VM.
4874 * @param pPatch Patch record
4875 */
4876static int patmRemovePatchPages(PVM pVM, PPATCHINFO pPatch)
4877{
4878 int rc;
4879 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4880
4881    /* Walk the pages that contain patched instructions and remove their lookup records. */
4882 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4883 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4884
4885 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4886 {
4887 /* Get the closest guest instruction (from above) */
4888 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4889 if ( pGuestToPatchRec
4890 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage) /** @todo bird: PAGE_ADDRESS is for the current context really. check out these. */
4891 )
4892 {
4893 /* Code in page really patched -> remove record */
4894 rc = patmRemovePatchFromPage(pVM, pPage, pPatch);
4895 AssertRC(rc);
4896 }
4897 }
4898 pPatch->flags &= ~PATMFL_CODE_MONITORED;
4899 return VINF_SUCCESS;
4900}
4901
4902/**
4903 * Notifies PATM about a (potential) write to code that has been patched.
4904 *
4905 * @returns VBox status code.
4906 * @param pVM Pointer to the VM.
4907 * @param GCPtr GC pointer to write address
4908 * @param cbWrite Number of bytes to write
4909 *
4910 */
4911VMMR3_INT_DECL(int) PATMR3PatchWrite(PVM pVM, RTRCPTR GCPtr, uint32_t cbWrite)
4912{
4913 RTRCUINTPTR pWritePageStart, pWritePageEnd, pPage;
4914
4915 Log(("PATMR3PatchWrite %RRv %x\n", GCPtr, cbWrite));
4916
4917 Assert(VM_IS_EMT(pVM));
4918 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
4919
4920 /* Quick boundary check */
4921 if ( GCPtr < pVM->patm.s.pPatchedInstrGCLowest
4922 || GCPtr > pVM->patm.s.pPatchedInstrGCHighest
4923 )
4924 return VINF_SUCCESS;
4925
4926 STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWrite, a);
4927
4928 pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
4929 pWritePageEnd = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;
4930
4931 for (pPage = pWritePageStart; pPage <= pWritePageEnd; pPage += PAGE_SIZE)
4932 {
4933loop_start:
4934 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4935 if (pPatchPage)
4936 {
4937 uint32_t i;
4938 bool fValidPatchWrite = false;
4939
4940 /* Quick check to see if the write is in the patched part of the page */
4941 if ( pPatchPage->pLowestAddrGC > (RTRCPTR)((RTRCUINTPTR)GCPtr + cbWrite - 1)
4942 || pPatchPage->pHighestAddrGC < GCPtr)
4943 {
4944 break;
4945 }
4946
4947 for (i=0;i<pPatchPage->cCount;i++)
4948 {
4949 if (pPatchPage->papPatch[i])
4950 {
4951 PPATCHINFO pPatch = pPatchPage->papPatch[i];
4952 RTRCPTR pPatchInstrGC;
4953 //unused: bool fForceBreak = false;
4954
4955 Assert(pPatchPage->papPatch[i]->flags & PATMFL_CODE_MONITORED);
4956 /** @todo inefficient and includes redundant checks for multiple pages. */
4957 for (uint32_t j=0; j<cbWrite; j++)
4958 {
4959 RTRCPTR pGuestPtrGC = (RTRCPTR)((RTRCUINTPTR)GCPtr + j);
4960
4961 if ( pPatch->cbPatchJump
4962 && pGuestPtrGC >= pPatch->pPrivInstrGC
4963 && pGuestPtrGC < pPatch->pPrivInstrGC + pPatch->cbPatchJump)
4964 {
4965 /* The guest is about to overwrite the 5 byte jump to patch code. Remove the patch. */
4966 Log(("PATMR3PatchWrite: overwriting jump to patch code -> remove patch.\n"));
4967 int rc = PATMR3RemovePatch(pVM, pPatch->pPrivInstrGC);
4968 if (rc == VINF_SUCCESS)
4969 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4970 goto loop_start;
4971
4972 continue;
4973 }
4974
4975 /* Find the closest instruction from below; the above quick check ensured that we are indeed in patched code */
4976 pPatchInstrGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4977 if (!pPatchInstrGC)
4978 {
4979 RTRCPTR pClosestInstrGC;
4980 uint32_t size;
4981
4982 pPatchInstrGC = patmGuestGCPtrToClosestPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4983 if (pPatchInstrGC)
4984 {
4985 pClosestInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchInstrGC);
4986 Assert(pClosestInstrGC <= pGuestPtrGC);
4987 size = patmGetInstrSize(pVM, pPatch, pClosestInstrGC);
4988 /* Check if this is not a write into a gap between two patches */
4989 if (pClosestInstrGC + size - 1 < pGuestPtrGC)
4990 pPatchInstrGC = 0;
4991 }
4992 }
4993 if (pPatchInstrGC)
4994 {
4995 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
4996
4997 fValidPatchWrite = true;
4998
4999 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
5000 Assert(pPatchToGuestRec);
5001 if (pPatchToGuestRec && !pPatchToGuestRec->fDirty)
5002 {
5003 Log(("PATMR3PatchWrite: Found patched instruction %RRv -> %RRv\n", pGuestPtrGC, pPatchInstrGC));
5004
5005 if (++pPatch->cCodeWrites > PATM_MAX_CODE_WRITES)
5006 {
5007 LogRel(("PATM: Disable block at %RRv - write %RRv-%RRv\n", pPatch->pPrivInstrGC, pGuestPtrGC, pGuestPtrGC+cbWrite));
5008
5009 patmR3MarkDirtyPatch(pVM, pPatch);
5010
5011 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
5012 goto loop_start;
5013 }
5014 else
5015 {
5016 /* Replace the patch instruction with a breakpoint; when it's hit, then we'll attempt to recompile the instruction again. */
5017 uint8_t *pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pPatchInstrGC);
5018
5019 pPatchToGuestRec->u8DirtyOpcode = *pInstrHC;
5020 pPatchToGuestRec->fDirty = true;
5021
5022 *pInstrHC = 0xCC;
5023
5024 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirty);
5025 }
5026 }
5027 /* else already marked dirty */
5028 }
5029 }
5030 }
5031 } /* for each patch */
5032
5033 if (fValidPatchWrite == false)
5034 {
5035 /* Write to a part of the page that either:
5036 * - doesn't contain any code (shared code/data); rather unlikely
5037 * - old code page that's no longer in active use.
5038 */
5039invalid_write_loop_start:
5040 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
5041
5042 if (pPatchPage)
5043 {
5044 for (i=0;i<pPatchPage->cCount;i++)
5045 {
5046 PPATCHINFO pPatch = pPatchPage->papPatch[i];
5047
5048 if (pPatch->cInvalidWrites > PATM_MAX_INVALID_WRITES)
5049 {
5050 /* Note: possibly dangerous assumption that all future writes will be harmless. */
5051 if (pPatch->flags & PATMFL_IDTHANDLER)
5052 {
5053 LogRel(("PATM: Stop monitoring IDT handler pages at %RRv - invalid write %RRv-%RRv (this is not a fatal error)\n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
5054
5055 Assert(pPatch->flags & PATMFL_CODE_MONITORED);
5056 int rc = patmRemovePatchPages(pVM, pPatch);
5057 AssertRC(rc);
5058 }
5059 else
5060 {
5061 LogRel(("PATM: Disable block at %RRv - invalid write %RRv-%RRv \n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
5062 patmR3MarkDirtyPatch(pVM, pPatch);
5063 }
5064 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
5065 goto invalid_write_loop_start;
5066 }
5067 } /* for */
5068 }
5069 }
5070 }
5071 }
5072 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWrite, a);
5073 return VINF_SUCCESS;
5074
5075}
5076
5077/**
5078 * Disable all patches in a flushed page
5079 *
5080 * @returns VBox status code
5081 * @param pVM Pointer to the VM.
5082 * @param addr GC address of the page to flush
5083 * @note Currently only called by CSAMR3FlushPage; optimization to avoid
5084 * having to double check if the physical address has changed
5085 */
5086VMMR3_INT_DECL(int) PATMR3FlushPage(PVM pVM, RTRCPTR addr)
5087{
5088 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5089
5090 addr &= PAGE_BASE_GC_MASK;
5091
5092 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, addr);
5093 if (pPatchPage)
5094 {
5095 int i;
5096
5097        /* From top to bottom as the array is modified by patmR3MarkDirtyPatch. */
5098 for (i=(int)pPatchPage->cCount-1;i>=0;i--)
5099 {
5100 if (pPatchPage->papPatch[i])
5101 {
5102 PPATCHINFO pPatch = pPatchPage->papPatch[i];
5103
5104 Log(("PATMR3FlushPage %RRv remove patch at %RRv\n", addr, pPatch->pPrivInstrGC));
5105 patmR3MarkDirtyPatch(pVM, pPatch);
5106 }
5107 }
5108 STAM_COUNTER_INC(&pVM->patm.s.StatFlushed);
5109 }
5110 return VINF_SUCCESS;
5111}
5112
5113/**
5114 * Checks whether the instruction at the specified address has already been patched.
5115 *
5116 * @returns boolean, patched or not
5117 * @param pVM Pointer to the VM.
5118 * @param pInstrGC Guest context pointer to instruction
5119 */
5120VMMR3_INT_DECL(bool) PATMR3HasBeenPatched(PVM pVM, RTRCPTR pInstrGC)
5121{
5122 Assert(!HMIsEnabled(pVM));
5123 PPATMPATCHREC pPatchRec;
5124 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5125 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
5126 return true;
5127 return false;
5128}
5129
5130/**
5131 * Query the opcode of the original code that was overwritten by the 5-byte patch jump
5132 *
5133 * @returns VBox status code.
5134 * @param pVM Pointer to the VM.
5135 * @param pInstrGC GC address of instr
5136 * @param pByte opcode byte pointer (OUT)
5137 *
5138 */
5139VMMR3DECL(int) PATMR3QueryOpcode(PVM pVM, RTRCPTR pInstrGC, uint8_t *pByte)
5140{
5141 PPATMPATCHREC pPatchRec;
5142
5143 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
5144
5145 /* Shortcut. */
5146 if (!PATMIsEnabled(pVM))
5147 return VERR_PATCH_NOT_FOUND;
5148 Assert(!HMIsEnabled(pVM));
5149 if ( pInstrGC < pVM->patm.s.pPatchedInstrGCLowest
5150 || pInstrGC > pVM->patm.s.pPatchedInstrGCHighest)
5151 return VERR_PATCH_NOT_FOUND;
5152
5153 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5154 // if the patch is enabled and the pointer lies within 5 bytes of this priv instr ptr, then we've got a hit!
5155 if ( pPatchRec
5156 && pPatchRec->patch.uState == PATCH_ENABLED
5157 && pInstrGC >= pPatchRec->patch.pPrivInstrGC
5158 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5159 {
5160 RTRCPTR offset = pInstrGC - pPatchRec->patch.pPrivInstrGC;
5161 *pByte = pPatchRec->patch.aPrivInstr[offset];
5162
5163 if (pPatchRec->patch.cbPatchJump == 1)
5164 {
5165 Log(("PATMR3QueryOpcode: returning opcode %2X for instruction at %RRv\n", *pByte, pInstrGC));
5166 }
5167 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5168 return VINF_SUCCESS;
5169 }
5170 return VERR_PATCH_NOT_FOUND;
5171}
5172
5173/**
5174 * Read instruction bytes of the original code that was overwritten by the
5175 * 5-byte patch jump.
5176 *
5177 * @returns VINF_SUCCESS or VERR_PATCH_NOT_FOUND.
5178 * @param pVM Pointer to the VM.
5179 * @param GCPtrInstr GC address of instr
5180 * @param pbDst The output buffer.
5181 * @param pbDst The output buffer.
5182 * @param cbToRead The maximum number of bytes to read.
5183 * @param pcbRead Where to return the actual number of bytes read.
5183 */
5184VMMR3_INT_DECL(int) PATMR3ReadOrgInstr(PVM pVM, RTGCPTR32 GCPtrInstr, uint8_t *pbDst, size_t cbToRead, size_t *pcbRead)
5185{
5186 /* Shortcut. */
5187 if (!PATMIsEnabled(pVM))
5188 return VERR_PATCH_NOT_FOUND;
5189 Assert(!HMIsEnabled(pVM));
5190 if ( GCPtrInstr < pVM->patm.s.pPatchedInstrGCLowest
5191 || GCPtrInstr > pVM->patm.s.pPatchedInstrGCHighest)
5192 return VERR_PATCH_NOT_FOUND;
5193
5194 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
5195
5196 /*
5197 * If the patch is enabled and the pointer lies within 5 bytes of this
5198 * priv instr ptr, then we've got a hit!
5199 */
5200 RTGCPTR32 off;
5201 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree,
5202 GCPtrInstr, false /*fAbove*/);
5203 if ( pPatchRec
5204 && pPatchRec->patch.uState == PATCH_ENABLED
5205 && (off = GCPtrInstr - pPatchRec->patch.pPrivInstrGC) < pPatchRec->patch.cbPatchJump)
5206 {
5207 uint8_t const *pbSrc = &pPatchRec->patch.aPrivInstr[off];
5208 uint32_t const cbMax = pPatchRec->patch.cbPatchJump - off;
5209 if (cbToRead > cbMax)
5210 cbToRead = cbMax;
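        /* Unrolled copy (intentional fall-through) for the common 1..5 byte cases; everything else falls back to memcpy. */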
5211 switch (cbToRead)
5212 {
5213 case 5: pbDst[4] = pbSrc[4];
5214 case 4: pbDst[3] = pbSrc[3];
5215 case 3: pbDst[2] = pbSrc[2];
5216 case 2: pbDst[1] = pbSrc[1];
5217 case 1: pbDst[0] = pbSrc[0];
5218 break;
5219 default:
5220 memcpy(pbDst, pbSrc, cbToRead);
5221 }
5222 *pcbRead = cbToRead;
5223
5224 if (pPatchRec->patch.cbPatchJump == 1)
5225 Log(("PATMR3ReadOrgInstr: returning opcode %.*Rhxs for instruction at %RX32\n", cbToRead, pbSrc, GCPtrInstr));
5226 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5227 return VINF_SUCCESS;
5228 }
5229
5230 return VERR_PATCH_NOT_FOUND;
5231}
5232
5233/**
5234 * Disable patch for privileged instruction at specified location
5235 *
5236 * @returns VBox status code.
5237 * @param pVM Pointer to the VM.
5238 * @param pInstrGC Guest context pointer to the privileged instruction
5239 *
5240 * @note returns failure if patching is not allowed or possible
5241 *
5242 */
5243VMMR3_INT_DECL(int) PATMR3DisablePatch(PVM pVM, RTRCPTR pInstrGC)
5244{
5245 PPATMPATCHREC pPatchRec;
5246 PPATCHINFO pPatch;
5247
5248 Log(("PATMR3DisablePatch: %RRv\n", pInstrGC));
5249 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5250 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5251 if (pPatchRec)
5252 {
5253 int rc = VINF_SUCCESS;
5254
5255 pPatch = &pPatchRec->patch;
5256
5257 /* Already disabled? */
5258 if (pPatch->uState == PATCH_DISABLED)
5259 return VINF_SUCCESS;
5260
5261 /* Clear the IDT entries for the patch we're disabling. */
5262 /* Note: very important as we clear IF in the patch itself */
5263 /** @todo this needs to be changed */
5264 if (pPatch->flags & PATMFL_IDTHANDLER)
5265 {
5266 uint32_t iGate;
5267
5268 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
5269 if (iGate != (uint32_t)~0)
5270 {
5271 TRPMR3SetGuestTrapHandler(pVM, iGate, TRPM_INVALID_HANDLER);
5272 if (++cIDTHandlersDisabled < 256)
5273 LogRel(("PATM: Disabling IDT %x patch handler %RRv\n", iGate, pInstrGC));
5274 }
5275 }
5276
5277        /* Mark the entry with a breakpoint in case somebody else calls it later on (cli patch used as a function, trampoline or IDT patches) */
5278 if ( pPatch->pPatchBlockOffset
5279 && pPatch->uState == PATCH_ENABLED)
5280 {
5281 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5282 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5283 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5284 }
5285
5286 /* IDT or function patches haven't changed any guest code. */
5287 if (pPatch->flags & PATMFL_PATCHED_GUEST_CODE)
5288 {
5289 Assert(pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP);
5290 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK)));
5291
5292 if (pPatch->uState != PATCH_REFUSED)
5293 {
5294 uint8_t temp[16];
5295
5296 Assert(pPatch->cbPatchJump < sizeof(temp));
5297
5298 /* Let's first check if the guest code is still the same. */
5299 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5300 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5301 if (rc == VINF_SUCCESS)
5302 {
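                /* The guest code should still contain the 5-byte 'jmp rel32' to the patch; recompute the expected displacement and verify it before removing the jump. */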
5303 RTRCINTPTR displ = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);
5304
5305 if ( temp[0] != 0xE9 /* jmp opcode */
5306 || *(RTRCINTPTR *)(&temp[1]) != displ
5307 )
5308 {
5309                    Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5310 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5311 /* Remove it completely */
5312 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5313 rc = PATMR3RemovePatch(pVM, pInstrGC);
5314 AssertRC(rc);
5315 return VWRN_PATCH_REMOVED;
5316 }
5317 patmRemoveJumpToPatch(pVM, pPatch);
5318 }
5319 else
5320 {
5321 Log(("PATMR3DisablePatch: unable to disable patch -> mark PATCH_DISABLE_PENDING\n"));
5322 pPatch->uState = PATCH_DISABLE_PENDING;
5323 }
5324 }
5325 else
5326 {
5327 AssertMsgFailed(("Patch was refused!\n"));
5328 return VERR_PATCH_ALREADY_DISABLED;
5329 }
5330 }
5331 else
5332 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5333 {
5334 uint8_t temp[16];
5335
5336 Assert(pPatch->cbPatchJump < sizeof(temp));
5337
5338 /* Let's first check if the guest code is still the same. */
5339 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5340 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5341 if (rc == VINF_SUCCESS)
5342 {
5343 if (temp[0] != 0xCC)
5344 {
5345                    Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5346 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5347 /* Remove it completely */
5348 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5349 rc = PATMR3RemovePatch(pVM, pInstrGC);
5350 AssertRC(rc);
5351 return VWRN_PATCH_REMOVED;
5352 }
5353 patmDeactivateInt3Patch(pVM, pPatch);
5354 }
5355 }
5356
5357 if (rc == VINF_SUCCESS)
5358 {
5359 /* Save old state and mark this one as disabled (so it can be enabled later on). */
5360 if (pPatch->uState == PATCH_DISABLE_PENDING)
5361 {
5362 /* Just to be safe, let's make sure this one can never be reused; the patch might be marked dirty already (int3 at start) */
5363 pPatch->uState = PATCH_UNUSABLE;
5364 }
5365 else
5366 if (pPatch->uState != PATCH_DIRTY)
5367 {
5368 pPatch->uOldState = pPatch->uState;
5369 pPatch->uState = PATCH_DISABLED;
5370 }
5371 STAM_COUNTER_ADD(&pVM->patm.s.StatDisabled, 1);
5372 }
5373
5374 Log(("PATMR3DisablePatch: disabled patch at %RRv\n", pInstrGC));
5375 return VINF_SUCCESS;
5376 }
5377 Log(("Patch not found!\n"));
5378 return VERR_PATCH_NOT_FOUND;
5379}
5380
5381/**
5382 * Permanently disable patch for privileged instruction at specified location
5383 *
5384 * @returns VBox status code.
5385 * @param pVM Pointer to the VM.
5386 * @param pInstrGC Guest context instruction pointer
5387 * @param pConflictAddr Guest context pointer which conflicts with specified patch
5388 * @param pConflictPatch Conflicting patch
5389 *
5390 */
5391static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pConflictPatch)
5392{
5393 NOREF(pConflictAddr);
5394#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
5395 PATCHINFO patch;
5396 DISCPUSTATE cpu;
5397 R3PTRTYPE(uint8_t *) pInstrHC;
5398 uint32_t cbInstr;
5399 bool disret;
5400 int rc;
5401
5402 RT_ZERO(patch);
5403 pInstrHC = patmR3GCVirtToHCVirt(pVM, &patch, pInstrGC);
5404 disret = patmR3DisInstr(pVM, &patch, pInstrGC, pInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
5405 /*
5406 * If it's a 5 byte relative jump, then we can work around the problem by replacing the 32 bits relative offset
5407 * with one that jumps right into the conflict patch.
5408 * Otherwise we must disable the conflicting patch to avoid serious problems.
5409 */
5410 if ( disret == true
5411 && (pConflictPatch->flags & PATMFL_CODE32)
5412 && (cpu.pCurInstr->uOpcode == OP_JMP || (cpu.pCurInstr->fOpType & DISOPTYPE_COND_CONTROLFLOW))
5413 && (cpu.Param1.fUse & DISUSE_IMMEDIATE32_REL))
5414 {
5415 /* Hint patches must be enabled first. */
5416 if (pConflictPatch->flags & PATMFL_INSTR_HINT)
5417 {
5418 Log(("Enabling HINTED patch %RRv\n", pConflictPatch->pPrivInstrGC));
5419 pConflictPatch->flags &= ~PATMFL_INSTR_HINT;
5420 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5421 Assert(rc == VINF_SUCCESS || rc == VERR_PATCH_NOT_FOUND);
5422 /* Enabling might fail if the patched code has changed in the meantime. */
5423 if (rc != VINF_SUCCESS)
5424 return rc;
5425 }
5426
5427 rc = PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_JUMP_CONFLICT);
5428 if (RT_SUCCESS(rc))
5429 {
5430 Log(("PATM -> CONFLICT: Installed JMP patch for patch conflict at %RRv\n", pInstrGC));
5431 STAM_COUNTER_INC(&pVM->patm.s.StatFixedConflicts);
5432 return VINF_SUCCESS;
5433 }
5434 }
5435#endif
5436
5437 if (pConflictPatch->opcode == OP_CLI)
5438 {
5439 /* Turn it into an int3 patch; our GC trap handler will call the generated code manually. */
5440 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> turn into int 3 patch!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5441 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5442 if (rc == VWRN_PATCH_REMOVED)
5443 return VINF_SUCCESS;
5444 if (RT_SUCCESS(rc))
5445 {
5446 pConflictPatch->flags &= ~(PATMFL_MUST_INSTALL_PATCHJMP|PATMFL_INSTR_HINT);
5447 pConflictPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
5448 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5449 if (rc == VERR_PATCH_NOT_FOUND)
5450 return VINF_SUCCESS; /* removed already */
5451
5452 AssertRC(rc);
5453 if (RT_SUCCESS(rc))
5454 {
5455 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
5456 return VINF_SUCCESS;
5457 }
5458 }
5459 /* else turned into unusable patch (see below) */
5460 }
5461 else
5462 {
5463 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> DISABLING it!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5464 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5465 if (rc == VWRN_PATCH_REMOVED)
5466 return VINF_SUCCESS;
5467 }
5468
5469 /* No need to monitor the code anymore. */
5470 if (pConflictPatch->flags & PATMFL_CODE_MONITORED)
5471 {
5472 int rc = patmRemovePatchPages(pVM, pConflictPatch);
5473 AssertRC(rc);
5474 }
5475 pConflictPatch->uState = PATCH_UNUSABLE;
5476 STAM_COUNTER_INC(&pVM->patm.s.StatUnusable);
5477 return VERR_PATCH_DISABLED;
5478}
5479
5480/**
5481 * Enable patch for privileged instruction at specified location
5482 *
5483 * @returns VBox status code.
5484 * @param pVM Pointer to the VM.
5485 * @param pInstrGC Guest context pointer to the privileged instruction
5486 *
5487 * @note returns failure if patching is not allowed or possible
5488 *
5489 */
5490VMMR3_INT_DECL(int) PATMR3EnablePatch(PVM pVM, RTRCPTR pInstrGC)
5491{
5492 PPATMPATCHREC pPatchRec;
5493 PPATCHINFO pPatch;
5494
5495 Log(("PATMR3EnablePatch %RRv\n", pInstrGC));
5496 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5497 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5498 if (pPatchRec)
5499 {
5500 int rc = VINF_SUCCESS;
5501
5502 pPatch = &pPatchRec->patch;
5503
5504 if (pPatch->uState == PATCH_DISABLED)
5505 {
5506 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
5507 {
5508 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
5509 uint8_t temp[16];
5510
5511 Assert(pPatch->cbPatchJump < sizeof(temp));
5512
5513 /* Let's first check if the guest code is still the same. */
5514 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5515 AssertRC(rc2);
5516 if (rc2 == VINF_SUCCESS)
5517 {
5518 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5519 {
5520                        Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5521 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5522 /* Remove it completely */
5523 rc = PATMR3RemovePatch(pVM, pInstrGC);
5524 AssertRC(rc);
5525 return VERR_PATCH_NOT_FOUND;
5526 }
5527
5528 PATMP2GLOOKUPREC cacheRec;
5529 RT_ZERO(cacheRec);
5530 cacheRec.pPatch = pPatch;
5531
5532 rc2 = patmGenJumpToPatch(pVM, pPatch, &cacheRec, false);
5533 /* Free leftover lock if any. */
5534 if (cacheRec.Lock.pvMap)
5535 {
5536 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
5537 cacheRec.Lock.pvMap = NULL;
5538 }
5539 AssertRC(rc2);
5540 if (RT_FAILURE(rc2))
5541 return rc2;
5542
5543#ifdef DEBUG
5544 {
5545 DISCPUSTATE cpu;
5546 char szOutput[256];
5547 uint32_t cbInstr;
5548 uint32_t i = 0;
5549 bool disret;
5550 while(i < pPatch->cbPatchJump)
5551 {
5552 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
5553 &cpu, &cbInstr, szOutput, sizeof(szOutput));
5554 Log(("Renewed patch instr: %s", szOutput));
5555 i += cbInstr;
5556 }
5557 }
5558#endif
5559 }
5560 }
5561 else
5562 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5563 {
5564 uint8_t temp[16];
5565
5566 Assert(pPatch->cbPatchJump < sizeof(temp));
5567
5568 /* Let's first check if the guest code is still the same. */
5569 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5570 AssertRC(rc2);
5571
5572 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5573 {
5574                    Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5575 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5576 rc = PATMR3RemovePatch(pVM, pInstrGC);
5577 AssertRC(rc);
5578 return VERR_PATCH_NOT_FOUND;
5579 }
5580
5581 rc2 = patmActivateInt3Patch(pVM, pPatch);
5582 if (RT_FAILURE(rc2))
5583 return rc2;
5584 }
5585
5586 pPatch->uState = pPatch->uOldState; //restore state
5587
5588 /* Restore the entry breakpoint with the original opcode (see PATMR3DisablePatch). */
5589 if (pPatch->pPatchBlockOffset)
5590 *PATCHCODE_PTR_HC(pPatch) = pPatch->bDirtyOpcode;
5591
5592 STAM_COUNTER_ADD(&pVM->patm.s.StatEnabled, 1);
5593 }
5594 else
5595 Log(("PATMR3EnablePatch: Unable to enable patch %RRv with state %d\n", pInstrGC, pPatch->uState));
5596
5597 return rc;
5598 }
5599 return VERR_PATCH_NOT_FOUND;
5600}
5601
5602/**
5603 * Remove patch for privileged instruction at specified location
5604 *
5605 * @returns VBox status code.
5606 * @param pVM Pointer to the VM.
5607 * @param pPatchRec Patch record
5608 * @param fForceRemove Force removal even of referenced or duplicated-function patches
5609 */
5610int patmR3RemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove)
5611{
5612 PPATCHINFO pPatch;
5613
5614 pPatch = &pPatchRec->patch;
5615
5616 /* Strictly forbidden to remove such patches. There can be dependencies!! */
5617 if (!fForceRemove && (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED)))
5618 {
5619 Log(("PATMRemovePatch %RRv REFUSED!\n", pPatch->pPrivInstrGC));
5620 return VERR_ACCESS_DENIED;
5621 }
5622 Log(("PATMRemovePatch %RRv\n", pPatch->pPrivInstrGC));
5623
5624 /* Note: NEVER EVER REUSE PATCH MEMORY */
5625 /* Note: PATMR3DisablePatch puts a breakpoint (0xCC) at the entry of this patch */
5626
5627 if (pPatchRec->patch.pPatchBlockOffset)
5628 {
5629 PAVLOU32NODECORE pNode;
5630
5631 pNode = RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->patch.pPatchBlockOffset);
5632 Assert(pNode);
5633 }
5634
5635 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
5636 {
5637 int rc = patmRemovePatchPages(pVM, &pPatchRec->patch);
5638 AssertRC(rc);
5639 }
5640
5641#ifdef VBOX_WITH_STATISTICS
5642 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5643 {
5644 STAMR3DeregisterF(pVM->pUVM, "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
5645 STAMR3DeregisterF(pVM->pUVM, "/PATM/PatchBD/0x%RRv*", pPatchRec->patch.pPrivInstrGC);
5646 }
5647#endif
5648
5649 /* Note: no need to free Guest2PatchAddrTree as those records share memory with Patch2GuestAddrTree records. */
5650 patmEmptyTreeU32(pVM, &pPatch->Patch2GuestAddrTree);
5651 pPatch->nrPatch2GuestRecs = 0;
5652 Assert(pPatch->Patch2GuestAddrTree == 0);
5653
5654 patmEmptyTree(pVM, &pPatch->FixupTree);
5655 pPatch->nrFixups = 0;
5656 Assert(pPatch->FixupTree == 0);
5657
5658 if (pPatchRec->patch.pTempInfo)
5659 MMR3HeapFree(pPatchRec->patch.pTempInfo);
5660
5661 /* Note: might fail, because it has already been removed (e.g. during reset). */
5662 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5663
5664 /* Free the patch record */
5665 MMHyperFree(pVM, pPatchRec);
5666 return VINF_SUCCESS;
5667}
5668
5669/**
5670 * RTAvlU32DoWithAll() worker.
5671 * Checks whether the current trampoline instruction is the jump to the target patch
5672 * and updates the displacement to jump to the new target.
5673 *
5674 * @returns VBox status code.
5675 * @retval VERR_ALREADY_EXISTS if the jump was found.
5676 * @param pNode The current patch to guest record to check.
5677 * @param pvUser The refresh state.
5678 */
5679static int patmR3PatchRefreshFindTrampolinePatch(PAVLU32NODECORE pNode, void *pvUser)
5680{
5681 PRECPATCHTOGUEST pPatch2GuestRec = (PRECPATCHTOGUEST)pNode;
5682 PPATMREFRESHPATCH pRefreshPatchState = (PPATMREFRESHPATCH)pvUser;
5683 PVM pVM = pRefreshPatchState->pVM;
5684
5685 uint8_t *pPatchInstr = (uint8_t *)(pVM->patm.s.pPatchMemHC + pPatch2GuestRec->Core.Key);
5686
5687 /*
5688 * Check if the patch instruction starts with a jump.
5689 * ASSUMES that there is no other patch to guest record that starts
5690 * with a jump.
5691 */
5692 if (*pPatchInstr == 0xE9)
5693 {
5694 /* Jump found, update the displacement. */
5695 RTRCPTR pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pRefreshPatchState->pPatchRec,
5696 pRefreshPatchState->pPatchTrampoline->pPrivInstrGC);
5697 int32_t displ = pPatchTargetGC - (pVM->patm.s.pPatchMemGC + pPatch2GuestRec->Core.Key + SIZEOF_NEARJUMP32);
5698
5699        LogFlow(("Updating trampoline patch new patch target %RRv, new displacement %d (old was %d)\n",
5700 pPatchTargetGC, displ, *(uint32_t *)&pPatchInstr[1]));
5701
5702 *(uint32_t *)&pPatchInstr[1] = displ;
5703 return VERR_ALREADY_EXISTS; /** @todo better return code */
5704 }
5705
5706 return VINF_SUCCESS;
5707}
5708
5709/**
5710 * Attempt to refresh the patch by recompiling its entire code block
5711 *
5712 * @returns VBox status code.
5713 * @param pVM Pointer to the VM.
5714 * @param pPatchRec Patch record
5715 */
5716int patmR3RefreshPatch(PVM pVM, PPATMPATCHREC pPatchRec)
5717{
5718 PPATCHINFO pPatch;
5719 int rc;
5720 RTRCPTR pInstrGC = pPatchRec->patch.pPrivInstrGC;
5721 PTRAMPREC pTrampolinePatchesHead = NULL;
5722
5723 Log(("patmR3RefreshPatch: attempt to refresh patch at %RRv\n", pInstrGC));
5724
5725 pPatch = &pPatchRec->patch;
5726 AssertReturn(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER), VERR_PATCHING_REFUSED);
5727 if (pPatch->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
5728 {
5729 if (!pPatch->pTrampolinePatchesHead)
5730 {
5731 /*
5732 * It is sometimes possible that there are trampoline patches to this patch
5733 * but they are not recorded (after a saved state load for example).
5734 * Refuse to refresh those patches.
5735 * Can hurt performance in theory if the patched code is modified by the guest
5736             * and is executed often. However, most of the time a state is saved after the guest
5737             * code was modified and the code is not updated again afterwards, so this shouldn't be a
5738             * big problem.
5739 */
5740 Log(("patmR3RefreshPatch: refused because external jumps to this patch exist but the jumps are not recorded\n"));
5741 return VERR_PATCHING_REFUSED;
5742 }
5743 Log(("patmR3RefreshPatch: external jumps to this patch exist, updating\n"));
5744 pTrampolinePatchesHead = pPatch->pTrampolinePatchesHead;
5745 }
5746
5747 /* Note: quite ugly to enable/disable/remove/insert old and new patches, but there's no easy way around it. */
5748
5749 rc = PATMR3DisablePatch(pVM, pInstrGC);
5750 AssertRC(rc);
5751
5752 /* Kick it out of the lookup tree to make sure PATMR3InstallPatch doesn't fail (hack alert) */
5753 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5754#ifdef VBOX_WITH_STATISTICS
5755 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5756 {
5757 STAMR3DeregisterF(pVM->pUVM, "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
5758 STAMR3DeregisterF(pVM->pUVM, "/PATM/PatchBD/0x%RRv*", pPatchRec->patch.pPrivInstrGC);
5759 }
5760#endif
5761
5762 /** Note: We don't attempt to reuse patch memory here as it's quite common that the new code block requires more memory. */
5763
5764 /* Attempt to install a new patch. */
5765 rc = PATMR3InstallPatch(pVM, pInstrGC, pPatch->flags & (PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT));
5766 if (RT_SUCCESS(rc))
5767 {
5768 RTRCPTR pPatchTargetGC;
5769 PPATMPATCHREC pNewPatchRec;
5770
5771 /* Determine target address in new patch */
5772 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pInstrGC);
5773 Assert(pPatchTargetGC);
5774 if (!pPatchTargetGC)
5775 {
5776 rc = VERR_PATCHING_REFUSED;
5777 goto failure;
5778 }
5779
5780 /* Reset offset into patch memory to put the next code blocks right at the beginning. */
5781 pPatch->uCurPatchOffset = 0;
5782
5783 /* insert jump to new patch in old patch block */
5784 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC, false /* no lookup record */);
5785 if (RT_FAILURE(rc))
5786 goto failure;
5787
5788 pNewPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5789 Assert(pNewPatchRec); /* can't fail */
5790
5791 /* Remove old patch (only do that when everything is finished) */
5792 int rc2 = patmR3RemovePatch(pVM, pPatchRec, true /* force removal */);
5793 AssertRC(rc2);
5794
5795 /* Put the new patch back into the tree, because removing the old one kicked this one out. (hack alert) */
5796 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pNewPatchRec->Core);
5797 Assert(fInserted); NOREF(fInserted);
5798
5799 Log(("PATM: patmR3RefreshPatch: succeeded to refresh patch at %RRv \n", pInstrGC));
5800 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshSuccess);
5801
5802 /* Used by another patch, so don't remove it! */
5803 pNewPatchRec->patch.flags |= PATMFL_CODE_REFERENCED;
5804
5805 if (pTrampolinePatchesHead)
5806 {
5807 /* Update all trampoline patches to jump to the new patch. */
5808 PTRAMPREC pTrampRec = NULL;
5809 PATMREFRESHPATCH RefreshPatch;
5810
5811 RefreshPatch.pVM = pVM;
5812 RefreshPatch.pPatchRec = &pNewPatchRec->patch;
5813
5814 pTrampRec = pTrampolinePatchesHead;
5815
5816 while (pTrampRec)
5817 {
5818 PPATCHINFO pPatchTrampoline = &pTrampRec->pPatchTrampoline->patch;
5819
5820 RefreshPatch.pPatchTrampoline = pPatchTrampoline;
5821 /*
5822 * We have to find the right patch2guest record because there might be others
5823 * for statistics.
5824 */
5825 rc = RTAvlU32DoWithAll(&pPatchTrampoline->Patch2GuestAddrTree, true,
5826 patmR3PatchRefreshFindTrampolinePatch, &RefreshPatch);
5827 Assert(rc == VERR_ALREADY_EXISTS);
5828 rc = VINF_SUCCESS;
5829 pTrampRec = pTrampRec->pNext;
5830 }
5831 pNewPatchRec->patch.pTrampolinePatchesHead = pTrampolinePatchesHead;
5832 pNewPatchRec->patch.flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
5833 /* Clear the list of trampoline patches for the old patch (safety precaution). */
5834 pPatchRec->patch.pTrampolinePatchesHead = NULL;
5835 }
5836 }
5837
5838failure:
5839 if (RT_FAILURE(rc))
5840 {
5841        LogRel(("PATM: patmR3RefreshPatch: failed to refresh patch at %RRv. Reactivating the old one.\n", pInstrGC));
5842
5843 /* Remove the new inactive patch */
5844 rc = PATMR3RemovePatch(pVM, pInstrGC);
5845 AssertRC(rc);
5846
5847 /* Put the old patch back into the tree (or else it won't be saved) (hack alert) */
5848 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
5849 Assert(fInserted); NOREF(fInserted);
5850
5851 /* Enable again in case the dirty instruction is near the end and there are safe code paths. */
5852 int rc2 = PATMR3EnablePatch(pVM, pInstrGC);
5853 AssertRC(rc2);
5854
5855 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshFailed);
5856 }
5857 return rc;
5858}
5859
5860/**
5861 * Find patch for privileged instruction at specified location
5862 *
5863 * @returns Patch structure pointer if found; else NULL
5864 * @param pVM Pointer to the VM.
5865 * @param pInstrGC Guest context pointer to an instruction that might lie within 5 bytes of an existing patch jump
5866 * @param fIncludeHints Include hinted patches or not
5867 *
5868 */
5869PPATCHINFO patmFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncludeHints)
5870{
5871 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5872 /* if the patch is enabled, the pointer is not identical to the privileged patch ptr and it lies within 5 bytes of this priv instr ptr, then we've got a hit! */
5873 if (pPatchRec)
5874 {
5875 if ( pPatchRec->patch.uState == PATCH_ENABLED
5876 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE)
5877 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5878 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5879 {
5880 Log(("Found active patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5881 return &pPatchRec->patch;
5882 }
5883 else
5884 if ( fIncludeHints
5885 && pPatchRec->patch.uState == PATCH_DISABLED
5886 && (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
5887 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5888 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5889 {
5890 Log(("Found HINT patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5891 return &pPatchRec->patch;
5892 }
5893 }
5894 return NULL;
5895}
5896
5897/**
5898 * Checks whether the GC address is inside a generated patch jump
5899 *
5900 * @returns true -> yes, false -> no
5901 * @param pVM Pointer to the VM.
5902 * @param pAddr Guest context address.
5903 * @param pPatchAddr Guest context patch address (if true).
5904 */
5905VMMR3_INT_DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatchAddr)
5906{
5907 RTRCPTR addr;
5908 PPATCHINFO pPatch;
5909
5910 Assert(!HMIsEnabled(pVM));
5911 if (PATMIsEnabled(pVM) == false)
5912 return false;
5913
5914 if (pPatchAddr == NULL)
5915 pPatchAddr = &addr;
5916
5917 *pPatchAddr = 0;
5918
5919 pPatch = patmFindActivePatchByEntrypoint(pVM, pAddr);
5920 if (pPatch)
5921 *pPatchAddr = pPatch->pPrivInstrGC;
5922
5923 return *pPatchAddr == 0 ? false : true;
5924}
5925
5926/**
5927 * Remove patch for privileged instruction at specified location
5928 *
5929 * @returns VBox status code.
5930 * @param pVM Pointer to the VM.
5931 * @param pInstrGC Guest context pointer to the privileged instruction
5932 *
5933 * @note returns failure if patching is not allowed or possible
5934 *
5935 */
5936VMMR3_INT_DECL(int) PATMR3RemovePatch(PVM pVM, RTRCPTR pInstrGC)
5937{
5938 PPATMPATCHREC pPatchRec;
5939
5940 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
5941 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5942 if (pPatchRec)
5943 {
5944 int rc = PATMR3DisablePatch(pVM, pInstrGC);
5945 if (rc == VWRN_PATCH_REMOVED)
5946 return VINF_SUCCESS;
5947
5948 return patmR3RemovePatch(pVM, pPatchRec, false);
5949 }
5950 AssertFailed();
5951 return VERR_PATCH_NOT_FOUND;
5952}
5953
5954/**
5955 * Mark patch as dirty
5956 *
5957 * @returns VBox status code.
5958 * @param pVM Pointer to the VM.
5959 * @param pPatch Patch record
5960 *
5961 * @note returns failure if patching is not allowed or possible
5962 *
5963 */
5964static int patmR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch)
5965{
5966 if (pPatch->pPatchBlockOffset)
5967 {
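        /* Plant an int3 at the patch entry and remember the original byte; PATMR3EnablePatch restores it if the patch is re-enabled. */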
5968 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5969 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5970 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5971 }
5972
5973 STAM_COUNTER_INC(&pVM->patm.s.StatDirty);
5974 /* Put back the replaced instruction. */
5975 int rc = PATMR3DisablePatch(pVM, pPatch->pPrivInstrGC);
5976 if (rc == VWRN_PATCH_REMOVED)
5977 return VINF_SUCCESS;
5978
5979 /* Note: we don't restore patch pages for patches that are not enabled! */
5980 /* Note: be careful when changing this behaviour!! */
5981
5982 /* The patch pages are no longer marked for self-modifying code detection */
5983 if (pPatch->flags & PATMFL_CODE_MONITORED)
5984 {
5985 rc = patmRemovePatchPages(pVM, pPatch);
5986 AssertRCReturn(rc, rc);
5987 }
5988 pPatch->uState = PATCH_DIRTY;
5989
5990 /* Paranoia; make sure this patch is not somewhere in the callchain, so prevent ret instructions from succeeding. */
5991 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
5992
5993 return VINF_SUCCESS;
5994}
5995
5996/**
5997 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5998 *
5999 * @returns Corresponding guest context instruction pointer, or 0 if not found.
6000 * @param pVM Pointer to the VM.
6001 * @param pPatch Patch block structure pointer
6002 * @param pPatchGC GC address in patch block
6003 */
6004RTRCPTR patmPatchGCPtr2GuestGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pPatchGC)
6005{
6006 Assert(pPatch->Patch2GuestAddrTree);
6007 /* Get the closest record from below. */
6008 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->Patch2GuestAddrTree, pPatchGC - pVM->patm.s.pPatchMemGC, false);
6009 if (pPatchToGuestRec)
6010 return pPatchToGuestRec->pOrgInstrGC;
6011
6012 return 0;
6013}
6014
6015/**
6016 * Converts Guest code GC ptr to Patch code GC ptr (if found)
6017 *
6018 * @returns corresponding GC pointer in patch block
6019 * @param pVM Pointer to the VM.
6020 * @param pPatch Current patch block pointer
6021 * @param pInstrGC Guest context pointer to privileged instruction
6022 *
6023 */
6024RTRCPTR patmGuestGCPtrToPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
6025{
6026 if (pPatch->Guest2PatchAddrTree)
6027 {
6028 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
6029 if (pGuestToPatchRec)
6030 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
6031 }
6032
6033 return 0;
6034}
6035
6036/**
6037 * Converts Guest code GC ptr to Patch code GC ptr (if found)
6038 *
6039 * @returns corresponding GC pointer in patch block
6040 * @param pVM Pointer to the VM.
6041 * @param pInstrGC Guest context pointer to privileged instruction
6042 */
6043static RTRCPTR patmR3GuestGCPtrToPatchGCPtrSimple(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC)
6044{
6045 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
6046 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && pInstrGC >= pPatchRec->patch.pPrivInstrGC)
6047 return patmGuestGCPtrToPatchGCPtr(pVM, &pPatchRec->patch, pInstrGC);
6048 return NIL_RTRCPTR;
6049}
6050
6051/**
6052 * Converts Guest code GC ptr to Patch code GC ptr (or nearest from below if no
6053 * identical match)
6054 *
6055 * @returns corresponding GC pointer in patch block
6056 * @param pVM Pointer to the VM.
6057 * @param pPatch Current patch block pointer
6058 * @param pInstrGC Guest context pointer to privileged instruction
6059 *
6060 */
6061RTRCPTR patmGuestGCPtrToClosestPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
6062{
6063 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pInstrGC, false);
6064 if (pGuestToPatchRec)
6065 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
6066 return NIL_RTRCPTR;
6067}
6068
6069/**
6070 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
6071 *
6072 * @returns original GC instruction pointer or 0 if not found
6073 * @param pVM Pointer to the VM.
6074 * @param pPatchGC GC address in patch block
6075 * @param pEnmState State of the translated address (out)
6076 *
6077 */
6078VMMR3_INT_DECL(RTRCPTR) PATMR3PatchToGCPtr(PVM pVM, RTRCPTR pPatchGC, PATMTRANSSTATE *pEnmState)
6079{
6080 PPATMPATCHREC pPatchRec;
6081 void *pvPatchCoreOffset;
6082 RTRCPTR pPrivInstrGC;
6083
6084 Assert(PATMIsPatchGCAddr(pVM, pPatchGC));
6085 Assert(!HMIsEnabled(pVM));
6086 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchGC - pVM->patm.s.pPatchMemGC, false);
6087 if (pvPatchCoreOffset == 0)
6088 {
6089 Log(("PATMR3PatchToGCPtr failed for %RRv offset %x\n", pPatchGC, pPatchGC - pVM->patm.s.pPatchMemGC));
6090 return 0;
6091 }
6092 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6093 pPrivInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, &pPatchRec->patch, pPatchGC);
6094 if (pEnmState)
6095 {
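        /* Classify the translation result: failed/unusable, interrupt-inhibit special case, overwritten by a patch jump, the patch entry point itself, or a safe address. */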
6096 AssertMsg(pPrivInstrGC && ( pPatchRec->patch.uState == PATCH_ENABLED
6097 || pPatchRec->patch.uState == PATCH_DIRTY
6098 || pPatchRec->patch.uState == PATCH_DISABLE_PENDING
6099 || pPatchRec->patch.uState == PATCH_UNUSABLE),
6100 ("pPrivInstrGC=%RRv uState=%d\n", pPrivInstrGC, pPatchRec->patch.uState));
6101
6102 if ( !pPrivInstrGC
6103 || pPatchRec->patch.uState == PATCH_UNUSABLE
6104 || pPatchRec->patch.uState == PATCH_REFUSED)
6105 {
6106 pPrivInstrGC = 0;
6107 *pEnmState = PATMTRANS_FAILED;
6108 }
6109 else
6110 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pPrivInstrGC)
6111 {
6112 *pEnmState = PATMTRANS_INHIBITIRQ;
6113 }
6114 else
6115 if ( pPatchRec->patch.uState == PATCH_ENABLED
6116 && !(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE))
6117 && pPrivInstrGC > pPatchRec->patch.pPrivInstrGC
6118 && pPrivInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
6119 {
6120 *pEnmState = PATMTRANS_OVERWRITTEN;
6121 }
6122 else
6123 if (patmFindActivePatchByEntrypoint(pVM, pPrivInstrGC))
6124 {
6125 *pEnmState = PATMTRANS_OVERWRITTEN;
6126 }
6127 else
6128 if (pPrivInstrGC == pPatchRec->patch.pPrivInstrGC)
6129 {
6130 *pEnmState = PATMTRANS_PATCHSTART;
6131 }
6132 else
6133 *pEnmState = PATMTRANS_SAFE;
6134 }
6135 return pPrivInstrGC;
6136}
6137
6138/**
6139 * Returns the GC pointer of the patch for the specified GC address
6140 *
6141 * @returns GC pointer of the patch code, or NIL_RTRCPTR if not found.
6142 * @param pVM Pointer to the VM.
6143 * @param pAddrGC Guest context address
6144 */
6145VMMR3_INT_DECL(RTRCPTR) PATMR3QueryPatchGCPtr(PVM pVM, RTRCPTR pAddrGC)
6146{
6147 PPATMPATCHREC pPatchRec;
6148
6149 Assert(!HMIsEnabled(pVM));
6150
6151 /* Find the patch record. */
6152 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pAddrGC);
6153 /** @todo we should only use patches that are enabled! always did this, but it's incorrect! */
6154 if (pPatchRec && (pPatchRec->patch.uState == PATCH_ENABLED || pPatchRec->patch.uState == PATCH_DIRTY))
6155 return PATCHCODE_PTR_GC(&pPatchRec->patch);
6156 return NIL_RTRCPTR;
6157}
6158
6159/**
6160 * Attempt to recover dirty instructions
6161 *
6162 * @returns VBox status code.
6163 * @param pVM Pointer to the VM.
6164 * @param pCtx Pointer to the guest CPU context.
6165 * @param pPatch Patch record.
6166 * @param pPatchToGuestRec Patch to guest address record.
6167 * @param pEip GC pointer of trapping instruction.
6168 */
6169static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch, PRECPATCHTOGUEST pPatchToGuestRec, RTRCPTR pEip)
6170{
6171 DISCPUSTATE CpuOld, CpuNew;
6172 uint8_t *pPatchInstrHC, *pCurPatchInstrHC;
6173 int rc;
6174 RTRCPTR pCurInstrGC, pCurPatchInstrGC;
6175 uint32_t cbDirty;
6176 PRECPATCHTOGUEST pRec;
6177 RTRCPTR const pOrgInstrGC = pPatchToGuestRec->pOrgInstrGC;
6178 PVMCPU pVCpu = VMMGetCpu0(pVM);
6179 Log(("patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv)\n", pEip, pOrgInstrGC));
6180
6181 pRec = pPatchToGuestRec;
6182 pCurInstrGC = pOrgInstrGC;
6183 pCurPatchInstrGC = pEip;
6184 cbDirty = 0;
6185 pPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6186
6187 /* Find all adjacent dirty instructions */
6188 while (true)
6189 {
6190 if (pRec->fJumpTarget)
6191 {
6192 LogRel(("PATM: patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv) ignored, because instruction in function was reused as target of jump\n", pEip, pOrgInstrGC));
6193 pRec->fDirty = false;
6194 return VERR_PATCHING_REFUSED;
6195 }
6196
6197 /* Restore original instruction opcode byte so we can check if the write was indeed safe. */
6198 pCurPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6199 *pCurPatchInstrHC = pRec->u8DirtyOpcode;
6200
6201 /* Only harmless instructions are acceptable. */
6202 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurPatchInstrGC, &CpuOld, 0);
6203 if ( RT_FAILURE(rc)
6204 || !(CpuOld.pCurInstr->fOpType & DISOPTYPE_HARMLESS))
6205 {
6206 if (RT_SUCCESS(rc))
6207 cbDirty += CpuOld.cbInstr;
6208 else
6209 if (!cbDirty)
6210 cbDirty = 1;
6211 break;
6212 }
6213
6214#ifdef DEBUG
6215 char szBuf[256];
6216 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6217 szBuf, sizeof(szBuf), NULL);
6218 Log(("DIRTY: %s\n", szBuf));
6219#endif
6220 /* Mark as clean; if we fail we'll let it always fault. */
6221 pRec->fDirty = false;
6222
6223 /* Remove old lookup record. */
6224 patmr3RemoveP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrGC);
6225 pPatchToGuestRec = NULL;
6226
6227 pCurPatchInstrGC += CpuOld.cbInstr;
6228 cbDirty += CpuOld.cbInstr;
6229
6230 /* Let's see if there's another dirty instruction right after. */
6231 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6232 if (!pRec || !pRec->fDirty)
6233 break; /* no more dirty instructions */
6234
6235 /* In case of complex instructions the next guest instruction could be quite far off. */
6236 pCurPatchInstrGC = pRec->Core.Key + pVM->patm.s.pPatchMemGC;
6237 }
6238
6239 if ( RT_SUCCESS(rc)
6240 && (CpuOld.pCurInstr->fOpType & DISOPTYPE_HARMLESS)
6241 )
6242 {
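        /* The dirty block can be recovered: re-disassemble the current guest code and copy it
           back over the patch instructions one by one, keeping the patch-to-guest records in sync. */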
6243 uint32_t cbLeft;
6244
6245 pCurPatchInstrHC = pPatchInstrHC;
6246 pCurPatchInstrGC = pEip;
6247 cbLeft = cbDirty;
6248
6249 while (cbLeft && RT_SUCCESS(rc))
6250 {
6251 bool fValidInstr;
6252
6253 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurInstrGC, &CpuNew, 0);
6254
6255 fValidInstr = !!(CpuNew.pCurInstr->fOpType & DISOPTYPE_HARMLESS);
6256 if ( !fValidInstr
6257 && (CpuNew.pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW)
6258 )
6259 {
6260 RTRCPTR pTargetGC = PATMResolveBranch(&CpuNew, pCurInstrGC);
6261
6262 if ( pTargetGC >= pOrgInstrGC
6263 && pTargetGC <= pOrgInstrGC + cbDirty
6264 )
6265 {
6266 /* A relative jump to an instruction inside or to the end of the dirty block is acceptable. */
6267 fValidInstr = true;
6268 }
6269 }
6270
6271 /* If the instruction is completely harmless (which implies a 1:1 patch copy). */
6272 if ( rc == VINF_SUCCESS
6273 && CpuNew.cbInstr <= cbLeft /* must still fit */
6274 && fValidInstr
6275 )
6276 {
6277#ifdef DEBUG
6278 char szBuf[256];
6279 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6280 szBuf, sizeof(szBuf), NULL);
6281 Log(("NEW: %s\n", szBuf));
6282#endif
6283
6284 /* Copy the new instruction. */
6285 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pCurPatchInstrHC, pCurInstrGC, CpuNew.cbInstr);
6286 AssertRC(rc);
6287
6288 /* Add a new lookup record for the duplicated instruction. */
6289 patmR3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6290 }
6291 else
6292 {
6293#ifdef DEBUG
6294 char szBuf[256];
6295 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6296 szBuf, sizeof(szBuf), NULL);
6297 Log(("NEW: %s (FAILED)\n", szBuf));
6298#endif
6299 /* Restore the old lookup record for the duplicated instruction. */
6300 patmR3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6301
6302 /** @todo in theory we need to restore the lookup records for the remaining dirty instructions too! */
6303 rc = VERR_PATCHING_REFUSED;
6304 break;
6305 }
6306 pCurInstrGC += CpuNew.cbInstr;
6307 pCurPatchInstrHC += CpuNew.cbInstr;
6308 pCurPatchInstrGC += CpuNew.cbInstr;
6309 cbLeft -= CpuNew.cbInstr;
6310
6311 /* Check if we expanded a complex guest instruction into a patch stream (e.g. call) */
6312 if (!cbLeft)
6313 {
6314 /* If the next patch instruction doesn't correspond to the next guest instruction, then we have some extra room to fill. */
6315 if (RTAvlU32Get(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC) == NULL)
6316 {
6317 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6318 if (pRec)
6319 {
6320 unsigned cbFiller = pRec->Core.Key + pVM->patm.s.pPatchMemGC - pCurPatchInstrGC;
6321 uint8_t *pPatchFillHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6322
6323 Assert(!pRec->fDirty);
6324
6325 Log(("Room left in patched instruction stream (%d bytes)\n", cbFiller));
6326 if (cbFiller >= SIZEOF_NEARJUMP32)
6327 {
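                    /* Skip the filler bytes with a near jmp rel32 (0xE9); the displacement is
                       relative to the end of the 5 byte jump, so it lands on the next valid
                       patch instruction. */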
6328 pPatchFillHC[0] = 0xE9;
6329 *(uint32_t *)&pPatchFillHC[1] = cbFiller - SIZEOF_NEARJUMP32;
6330#ifdef DEBUG
6331 char szBuf[256];
6332 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC,
6333 DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6334 Log(("FILL: %s\n", szBuf));
6335#endif
6336 }
6337 else
6338 {
6339 for (unsigned i = 0; i < cbFiller; i++)
6340 {
6341 pPatchFillHC[i] = 0x90; /* NOP */
6342#ifdef DEBUG
6343 char szBuf[256];
6344 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCurPatchInstrGC + i,
6345 DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6346 Log(("FILL: %s\n", szBuf));
6347#endif
6348 }
6349 }
6350 }
6351 }
6352 }
6353 }
6354 }
6355 else
6356 rc = VERR_PATCHING_REFUSED;
6357
6358 if (RT_SUCCESS(rc))
6359 {
6360 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyGood);
6361 }
6362 else
6363 {
6364 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyBad);
6365 Assert(cbDirty);
6366
6367 /* Mark the whole instruction stream with breakpoints. */
6368 if (cbDirty)
6369 memset(pPatchInstrHC, 0xCC, cbDirty);
6370
6371 if ( pVM->patm.s.fOutOfMemory == false
6372 && (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER)))
6373 {
6374 rc = patmR3RefreshPatch(pVM, pPatch);
6375 if (RT_FAILURE(rc))
6376 {
6377 LogRel(("PATM: Failed to refresh dirty patch at %RRv. Disabling it.\n", pPatch->patch.pPrivInstrGC));
6378 }
6379 /* Even if we succeed, we must go back to the original instruction as the patched one could be invalid. */
6380 rc = VERR_PATCHING_REFUSED;
6381 }
6382 }
6383 return rc;
6384}
6385
6386/**
6387 * Handle trap inside patch code
6388 *
6389 * @returns VBox status code.
6390 * @param pVM Pointer to the VM.
6391 * @param pCtx Pointer to the guest CPU context.
6392 * @param pEip GC pointer of trapping instruction.
6393 * @param ppNewEip Where to store the GC pointer of the instruction to resume at.
6394 */
6395VMMR3_INT_DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *ppNewEip)
6396{
6397 PPATMPATCHREC pPatch = 0;
6398 void *pvPatchCoreOffset;
6399 RTRCUINTPTR offset;
6400 RTRCPTR pNewEip;
6401 int rc;
6402 PRECPATCHTOGUEST pPatchToGuestRec = 0;
6403 PVMCPU pVCpu = VMMGetCpu0(pVM);
6404
6405 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
6406 Assert(pVM->cCpus == 1);
6407
6408 pNewEip = 0;
6409 *ppNewEip = 0;
6410
6411 STAM_PROFILE_ADV_START(&pVM->patm.s.StatHandleTrap, a);
6412
6413 /* Find the patch record. */
6414 /* Note: there might not be a patch to guest translation record (global function) */
6415 offset = pEip - pVM->patm.s.pPatchMemGC;
6416 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
6417 if (pvPatchCoreOffset)
6418 {
6419 pPatch = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6420
6421 Assert(offset >= pPatch->patch.pPatchBlockOffset && offset < pPatch->patch.pPatchBlockOffset + pPatch->patch.cbPatchBlockSize);
6422
6423 if (pPatch->patch.uState == PATCH_DIRTY)
6424 {
6425 Log(("PATMR3HandleTrap: trap in dirty patch at %RRv\n", pEip));
6426 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6427 {
6428 /* Function duplication patches set fPIF to 1 on entry */
6429 pVM->patm.s.pGCStateHC->fPIF = 1;
6430 }
6431 }
6432 else
6433 if (pPatch->patch.uState == PATCH_DISABLED)
6434 {
6435 Log(("PATMR3HandleTrap: trap in disabled patch at %RRv\n", pEip));
6436 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6437 {
6438 /* Function duplication patches set fPIF to 1 on entry */
6439 pVM->patm.s.pGCStateHC->fPIF = 1;
6440 }
6441 }
6442 else
6443 if (pPatch->patch.uState == PATCH_DISABLE_PENDING)
6444 {
6445 RTRCPTR pPrivInstrGC = pPatch->patch.pPrivInstrGC;
6446
6447 Log(("PATMR3HandleTrap: disable operation is pending for patch at %RRv\n", pPatch->patch.pPrivInstrGC));
6448 rc = PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6449 AssertReleaseMsg(rc != VWRN_PATCH_REMOVED, ("PATMR3DisablePatch removed patch at %RRv\n", pPrivInstrGC));
6450 AssertMsg(pPatch->patch.uState == PATCH_DISABLED || pPatch->patch.uState == PATCH_UNUSABLE, ("Unexpected failure to disable patch state=%d rc=%Rrc\n", pPatch->patch.uState, rc));
6451 }
6452
6453 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, offset, false);
6454 AssertReleaseMsg(pPatchToGuestRec, ("PATMR3HandleTrap: Unable to find corresponding guest address for %RRv (offset %x)\n", pEip, offset));
6455
6456 pNewEip = pPatchToGuestRec->pOrgInstrGC;
6457 pPatch->patch.cTraps++;
6458 PATM_STAT_FAULT_INC(&pPatch->patch);
6459 }
6460 else
6461 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 0, ("PATMR3HandleTrap: Unable to find translation record for %RRv (PIF=0)\n", pEip));
6462
6463 /* Check if we were interrupted in PATM generated instruction code. */
6464 if (pVM->patm.s.pGCStateHC->fPIF == 0)
6465 {
6466 DISCPUSTATE Cpu;
6467 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pEip, &Cpu, "PIF Trap: ");
6468 AssertRC(rc);
6469
6470 if ( rc == VINF_SUCCESS
6471 && ( Cpu.pCurInstr->uOpcode == OP_PUSHF
6472 || Cpu.pCurInstr->uOpcode == OP_PUSH
6473 || Cpu.pCurInstr->uOpcode == OP_CALL)
6474 )
6475 {
6476 uint64_t fFlags;
6477
6478 STAM_COUNTER_INC(&pVM->patm.s.StatPushTrap);
6479
6480 if (Cpu.pCurInstr->uOpcode == OP_PUSH)
6481 {
6482 rc = PGMShwGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6483 if ( rc == VINF_SUCCESS
6484 && ((fFlags & (X86_PTE_P|X86_PTE_RW)) == (X86_PTE_P|X86_PTE_RW)) )
6485 {
6486 /* The stack address is fine, so the push argument is a pointer -> emulate this instruction */
6487
6488 /* Reset the PATM stack. */
6489 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6490
6491 pVM->patm.s.pGCStateHC->fPIF = 1;
6492
6493 Log(("Faulting push -> go back to the original instruction\n"));
6494
6495 /* continue at the original instruction */
6496 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6497 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6498 return VINF_SUCCESS;
6499 }
6500 }
6501
6502 /* Typical pushf (most patches)/push (call patch) trap because of a monitored page. */
6503 rc = PGMShwMakePageWritable(pVCpu, pCtx->esp, 0 /*fFlags*/);
6504 AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
6505 if (rc == VINF_SUCCESS)
6506 {
6507 /* The guest page *must* be present. */
6508 rc = PGMGstGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6509 if ( rc == VINF_SUCCESS
6510 && (fFlags & X86_PTE_P))
6511 {
6512 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6513 return VINF_PATCH_CONTINUE;
6514 }
6515 }
6516 }
6517 else
6518 if (pPatch && pPatch->patch.pPrivInstrGC == pNewEip) /* pPatch may be NULL if no patch record was found above. */
6519 {
6520 /* Invalidated patch or first instruction overwritten.
6521 * We can ignore the fPIF state in this case.
6522 */
6523 /* Reset the PATM stack. */
6524 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6525
6526 Log(("Call to invalidated patch -> go back to the original instruction\n"));
6527
6528 pVM->patm.s.pGCStateHC->fPIF = 1;
6529
6530 /* continue at the original instruction */
6531 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6532 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6533 return VINF_SUCCESS;
6534 }
6535
6536 char szBuf[256];
6537 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pEip, DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6538
6539 /* Very bad. We crashed in emitted code. Probably stack? */
6540 if (pPatch)
6541 {
6542 AssertLogRelMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6543 ("Crash in patch code %RRv (%RRv) esp=%RX32\nPatch state=%x flags=%RX64 fDirty=%d\n%s\n",
6544 pEip, pNewEip, CPUMGetGuestESP(pVCpu), pPatch->patch.uState, pPatch->patch.flags,
6545 pPatchToGuestRec->fDirty, szBuf));
6546 }
6547 else
6548 AssertLogRelMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6549 ("Crash in patch code %RRv (%RRv) esp=%RX32\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), szBuf));
6550 EMR3FatalError(pVCpu, VERR_PATM_IPE_TRAP_IN_PATCH_CODE);
6551 }
6552
6553 /* From here on, we must have a valid patch to guest translation. */
6554 if (pvPatchCoreOffset == 0)
6555 {
6556 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6557 AssertMsgFailed(("PATMR3HandleTrap: patch not found at address %RRv!!\n", pEip));
6558 return VERR_PATCH_NOT_FOUND;
6559 }
6560
6561 /* Take care of dirty/changed instructions. */
6562 if (pPatchToGuestRec->fDirty)
6563 {
6564 Assert(pPatchToGuestRec->Core.Key == offset);
6565 Assert(pVM->patm.s.pGCStateHC->fPIF == 1);
6566
6567 rc = patmR3HandleDirtyInstr(pVM, pCtx, pPatch, pPatchToGuestRec, pEip);
6568 if (RT_SUCCESS(rc))
6569 {
6570 /* Retry the current instruction. */
6571 pNewEip = pEip;
6572 rc = VINF_PATCH_CONTINUE; /* Continue at current patch instruction. */
6573 }
6574 else
6575 {
6576 /* Reset the PATM stack. */
6577 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6578
6579 rc = VINF_SUCCESS; /* Continue at original instruction. */
6580 }
6581
6582 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6583 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6584 return rc;
6585 }
6586
6587#ifdef VBOX_STRICT
6588 if (pPatch->patch.flags & PATMFL_DUPLICATE_FUNCTION)
6589 {
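        /* Strict builds only: if we trapped on the 'retn' of a duplicated function, log the
           return address expected on the PATM stack versus the one found on the guest stack. */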
6590 DISCPUSTATE cpu;
6591 bool disret;
6592 uint32_t cbInstr;
6593 PATMP2GLOOKUPREC cacheRec;
6594 RT_ZERO(cacheRec);
6595 cacheRec.pPatch = &pPatch->patch;
6596
6597 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
6598 &cpu, &cbInstr);
6599 if (cacheRec.Lock.pvMap)
6600 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6601
6602 if (disret && cpu.pCurInstr->uOpcode == OP_RETN)
6603 {
6604 RTRCPTR retaddr;
6605 PCPUMCTX pCtx2;
6606
6607 pCtx2 = CPUMQueryGuestCtxPtr(pVCpu);
6608
6609 rc = PGMPhysSimpleReadGCPtr(pVCpu, &retaddr, pCtx2->esp, sizeof(retaddr));
6610 AssertRC(rc);
6611
6612 Log(("Return failed at %RRv (%RRv)\n", pEip, pNewEip));
6613 Log(("Expected return address %RRv found address %RRv Psp=%x\n", pVM->patm.s.pGCStackHC[(pVM->patm.s.pGCStateHC->Psp+PATM_STACK_SIZE)/sizeof(RTRCPTR)], retaddr, pVM->patm.s.pGCStateHC->Psp));
6614 }
6615 }
6616#endif
6617
6618 /* Return original address, correct by subtracting the CS base address. */
6619 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6620
6621 /* Reset the PATM stack. */
6622 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6623
6624 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pNewEip)
6625 {
6626 /* Must be a faulting instruction after sti; currently only sysexit, hlt or iret */
6627 Log(("PATMR3HandleTrap %RRv -> inhibit irqs set!\n", pEip));
6628#ifdef VBOX_STRICT
6629 DISCPUSTATE cpu;
6630 bool disret;
6631 uint32_t cbInstr;
6632 PATMP2GLOOKUPREC cacheRec;
6633 RT_ZERO(cacheRec);
6634 cacheRec.pPatch = &pPatch->patch;
6635
6636 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_ORGCODE,
6637 &cpu, &cbInstr);
6638 if (cacheRec.Lock.pvMap)
6639 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6640
6641 if (disret && (cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_INT3))
6642 {
6643 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, patmR3GCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
6644 &cpu, &cbInstr);
6645 if (cacheRec.Lock.pvMap)
6646 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6647
6648 Assert(cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_IRET);
6649 }
6650#endif
6651 EMSetInhibitInterruptsPC(pVCpu, pNewEip);
6652 pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts = 0;
6653 }
6654
6655 Log2(("pPatchBlockGC %RRv - pEip %RRv corresponding GC address %RRv\n", PATCHCODE_PTR_GC(&pPatch->patch), pEip, pNewEip));
6656 DBGFR3_DISAS_INSTR_LOG(pVCpu, pCtx->cs.Sel, pNewEip, "PATCHRET: ");
6657 if (pNewEip >= pPatch->patch.pPrivInstrGC && pNewEip < pPatch->patch.pPrivInstrGC + pPatch->patch.cbPatchJump)
6658 {
6659 /* We can't jump back to code that we've overwritten with a 5 byte jump! */
6660 Log(("Disabling patch at location %RRv due to trap too close to the privileged instruction \n", pPatch->patch.pPrivInstrGC));
6661 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6662 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6663 return VERR_PATCH_DISABLED;
6664 }
6665
6666#ifdef PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
6667 /** @todo compare to nr of successful runs. add some aging algorithm and determine the best time to disable the patch */
6668 if (pPatch->patch.cTraps > MAX_PATCH_TRAPS)
6669 {
6670 Log(("Disabling patch at location %RRv due to too many traps inside patch code\n", pPatch->patch.pPrivInstrGC));
6671 //we are only wasting time, back out the patch
6672 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6673 pTrapRec->pNextPatchInstr = 0;
6674 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6675 return VERR_PATCH_DISABLED;
6676 }
6677#endif
6678
6679 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6680 return VINF_SUCCESS;
6681}
6682
6683
6684/**
6685 * Handle page-fault in monitored page
6686 *
6687 * @returns VBox status code.
6688 * @param pVM Pointer to the VM.
6689 */
6690VMMR3_INT_DECL(int) PATMR3HandleMonitoredPage(PVM pVM)
6691{
6692 AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);
6693 PVMCPU pVCpu = VMMGetCpu0(pVM);
6694
6695 RTRCPTR addr = pVM->patm.s.pvFaultMonitor;
6696 addr &= PAGE_BASE_GC_MASK;
6697
6698 int rc = PGMHandlerVirtualDeregister(pVM, pVCpu, addr, false /*fHypervisor*/);
6699 AssertRC(rc); NOREF(rc);
6700
6701 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, false);
6702 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) == PAGE_ADDRESS(addr))
6703 {
6704 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6705 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6706 rc = PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6707 if (rc == VWRN_PATCH_REMOVED)
6708 return VINF_SUCCESS;
6709
6710 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6711
6712 if (addr == pPatchRec->patch.pPrivInstrGC)
6713 addr++;
6714 }
6715
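    /* Renew any other enabled patches whose privileged instruction lives in the same
       (previously monitored) guest page. */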
6716 for(;;)
6717 {
6718 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, true);
6719
6720 if (!pPatchRec || PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) != PAGE_ADDRESS(addr))
6721 break;
6722
6723 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
6724 {
6725 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6726 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6727 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6728 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6729 }
6730 addr = pPatchRec->patch.pPrivInstrGC + 1;
6731 }
6732
6733 pVM->patm.s.pvFaultMonitor = 0;
6734 return VINF_SUCCESS;
6735}
6736
6737
6738#ifdef VBOX_WITH_STATISTICS
6739
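/**
 * Gets a short descriptive string for the patch type, used when formatting the
 * per-patch statistics sample.
 *
 * @returns Read-only type string (a static buffer is used for gate handlers).
 * @param   pVM     Pointer to the VM.
 * @param   pPatch  Patch record.
 */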
6740static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch)
6741{
6742 if (pPatch->flags & PATMFL_SYSENTER)
6743 {
6744 return "SYSENT";
6745 }
6746 else
6747 if (pPatch->flags & (PATMFL_TRAPHANDLER|PATMFL_INTHANDLER))
6748 {
6749 static char szTrap[16];
6750 uint32_t iGate;
6751
6752 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
6753 if (iGate < 256)
6754 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-%2X" : "TRAP-%2X", iGate);
6755 else
6756 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-??" : "TRAP-??");
6757 return szTrap;
6758 }
6759 else
6760 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
6761 return "DUPFUNC";
6762 else
6763 if (pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL)
6764 return "FUNCCALL";
6765 else
6766 if (pPatch->flags & PATMFL_TRAMPOLINE)
6767 return "TRAMP";
6768 else
6769 return patmGetInstructionString(pPatch->opcode, pPatch->flags);
6770}
6771
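/**
 * Gets a three letter abbreviation of the current patch state.
 *
 * @returns Read-only state string.
 * @param   pVM     Pointer to the VM (unused).
 * @param   pPatch  Patch record.
 */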
6772static const char *PATMPatchState(PVM pVM, PPATCHINFO pPatch)
6773{
6774 NOREF(pVM);
6775 switch(pPatch->uState)
6776 {
6777 case PATCH_ENABLED:
6778 return "ENA";
6779 case PATCH_DISABLED:
6780 return "DIS";
6781 case PATCH_DIRTY:
6782 return "DIR";
6783 case PATCH_UNUSABLE:
6784 return "UNU";
6785 case PATCH_REFUSED:
6786 return "REF";
6787 case PATCH_DISABLE_PENDING:
6788 return "DIP";
6789 default:
6790 AssertFailed();
6791 return " ";
6792 }
6793}
6794
6795/**
6796 * Resets the sample.
6797 * @param pVM Pointer to the VM.
6798 * @param pvSample The sample registered using STAMR3RegisterCallback.
6799 */
6800static void patmResetStat(PVM pVM, void *pvSample)
6801{
6802 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6803 Assert(pPatch);
6804
6805 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A = 0;
6806 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B = 0;
6807}
6808
6809/**
6810 * Prints the sample into the buffer.
6811 *
6812 * @param pVM Pointer to the VM.
6813 * @param pvSample The sample registered using STAMR3RegisterCallback.
6814 * @param pszBuf The buffer to print into.
6815 * @param cchBuf The size of the buffer.
6816 */
6817static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf)
6818{
6819 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6820 Assert(pPatch);
6821
6822 Assert(pPatch->uState != PATCH_REFUSED);
6823 Assert(!(pPatch->flags & (PATMFL_REPLACE_FUNCTION_CALL|PATMFL_MMIO_ACCESS)));
6824
6825 RTStrPrintf(pszBuf, cchBuf, "size %04x ->%3s %8s - %08d - %08d",
6826 pPatch->cbPatchBlockSize, PATMPatchState(pVM, pPatch), PATMPatchType(pVM, pPatch),
6827 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A, pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B);
6828}
6829
6830/**
6831 * Returns the GC address of the corresponding patch statistics counter
6832 *
6833 * @returns Stat address
6834 * @param pVM Pointer to the VM.
6835 * @param pPatch Patch structure
6836 */
6837RTRCPTR patmPatchQueryStatAddress(PVM pVM, PPATCHINFO pPatch)
6838{
6839 Assert(pPatch->uPatchIdx != PATM_STAT_INDEX_NONE);
6840 return pVM->patm.s.pStatsGC + sizeof(STAMRATIOU32) * pPatch->uPatchIdx + RT_OFFSETOF(STAMRATIOU32, u32A);
6841}
6842
6843#endif /* VBOX_WITH_STATISTICS */
6844#ifdef VBOX_WITH_DEBUGGER
6845
6846/**
6847 * The '.patmoff' command.
6848 *
6849 * @returns VBox status code.
6850 * @param pCmd Pointer to the command descriptor (as registered).
6851 * @param pCmdHlp Pointer to command helper functions.
6852 * @param pUVM The user mode VM handle.
6853 * @param paArgs Pointer to (readonly) array of arguments.
6854 * @param cArgs Number of arguments in the array.
6855 */
6856static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
6857{
6858 /*
6859 * Validate input.
6860 */
6861 NOREF(cArgs); NOREF(paArgs);
6862 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
6863 PVM pVM = pUVM->pVM;
6864 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
6865
6866 if (HMIsEnabled(pVM))
6867 return DBGCCmdHlpPrintf(pCmdHlp, "PATM is permanently disabled by HM.\n");
6868
6869 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, DisableAllPatches, pVM);
6870 PATMR3AllowPatching(pVM->pUVM, false);
6871 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching disabled\n");
6872}
6873
6874/**
6875 * The '.patmon' command.
6876 *
6877 * @returns VBox status code.
6878 * @param pCmd Pointer to the command descriptor (as registered).
6879 * @param pCmdHlp Pointer to command helper functions.
6880 * @param pUVM The user mode VM handle.
6881 * @param paArgs Pointer to (readonly) array of arguments.
6882 * @param cArgs Number of arguments in the array.
6883 */
6884static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
6885{
6886 /*
6887 * Validate input.
6888 */
6889 NOREF(cArgs); NOREF(paArgs);
6890 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
6891 PVM pVM = pUVM->pVM;
6892 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
6893
6894 if (HMIsEnabled(pVM))
6895 return DBGCCmdHlpPrintf(pCmdHlp, "PATM is permanently disabled by HM.\n");
6896
6897 PATMR3AllowPatching(pVM->pUVM, true);
6898 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, EnableAllPatches, pVM);
6899 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching enabled\n");
6900}
6901
6902#endif /* VBOX_WITH_DEBUGGER */