VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PATM.cpp@ 41738

Last change on this file since 41738 was 41738, checked in by vboxsync, 13 years ago

DISOPCODE: s/optype/fOpType/ s/param/fParam/ docs

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 253.9 KB
Line 
1/* $Id: PATM.cpp 41738 2012-06-15 01:25:47Z vboxsync $ */
2/** @file
3 * PATM - Dynamic Guest OS Patching Manager
4 *
5 * NOTE: Never ever reuse patch memory!!
6 */
7
8/*
9 * Copyright (C) 2006-2012 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_PATM
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/stam.h>
26#include <VBox/vmm/pgm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/cpumdis.h>
29#include <VBox/vmm/iom.h>
30#include <VBox/vmm/mm.h>
31#include <VBox/vmm/em.h>
32#include <VBox/vmm/ssm.h>
33#include <VBox/vmm/trpm.h>
34#include <VBox/vmm/cfgm.h>
35#include <VBox/param.h>
36#include <VBox/vmm/selm.h>
37#include <iprt/avl.h>
38#include "PATMInternal.h"
39#include "PATMPatch.h"
40#include <VBox/vmm/vm.h>
41#include <VBox/vmm/csam.h>
42#include <VBox/dbg.h>
43#include <VBox/err.h>
44#include <VBox/log.h>
45#include <iprt/assert.h>
46#include <iprt/asm.h>
47#include <VBox/dis.h>
48#include <VBox/disopcode.h>
49#include "internal/pgm.h"
50
51#include <iprt/string.h>
52#include "PATMA.h"
53
54//#define PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
55//#define PATM_DISABLE_ALL
56
57/**
58 * Refresh trampoline patch state.
59 */
60typedef struct PATMREFRESHPATCH
61{
62 /** Pointer to the VM structure. */
63 PVM pVM;
64 /** The trampoline patch record. */
65 PPATCHINFO pPatchTrampoline;
66 /** The new patch we want to jump to. */
67 PPATCHINFO pPatchRec;
68} PATMREFRESHPATCH, *PPATMREFRESHPATCH;
69
70
71#define PATMREAD_RAWCODE 1 /* read code as-is */
72#define PATMREAD_ORGCODE 2 /* read original guest opcode bytes; not the patched bytes */
73#define PATMREAD_NOCHECK 4 /* don't check for patch conflicts */
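/* Editor's note (not part of the original source): a minimal illustrative sketch of how these
 * read flags are meant to be combined when passed as the fReadFlags argument of the disassembly
 * helpers defined further down (patmR3DisInstr and friends); the variable names are hypothetical. */
#if 0
    /* Disassemble using the original guest bytes and skip the patch-conflict assertions. */
    bool fValid = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC,
                                 PATMREAD_ORGCODE | PATMREAD_NOCHECK, &CpuState, &cbInstr);
#endif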
74
75/*
76 * Private structure used during disassembly
77 */
78typedef struct
79{
80 PVM pVM;
81 PPATCHINFO pPatchInfo;
82 R3PTRTYPE(uint8_t *) pInstrHC;
83 RTRCPTR pInstrGC;
84 uint32_t fReadFlags;
85} PATMDISASM, *PPATMDISASM;
86
87
88/*******************************************************************************
89* Internal Functions *
90*******************************************************************************/
91
92static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pPatch);
93static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
94static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
95
96#ifdef LOG_ENABLED // keep gcc quiet
97static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC);
98#endif
99#ifdef VBOX_WITH_STATISTICS
100static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch);
101static void patmResetStat(PVM pVM, void *pvSample);
102static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf);
103#endif
104
105#define patmPatchHCPtr2PatchGCPtr(pVM, pHC) (pVM->patm.s.pPatchMemGC + (pHC - pVM->patm.s.pPatchMemHC))
106#define patmPatchGCPtr2PatchHCPtr(pVM, pGC) (pVM->patm.s.pPatchMemHC + (pGC - pVM->patm.s.pPatchMemGC))
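/* Editor's note (not part of the original source): an illustrative sketch of the two macros above,
 * which translate between the ring-3 (HC) and guest-context (GC) mappings of the patch memory
 * block by applying the fixed offset between the two; pPatchInstrHC is a hypothetical name. */
#if 0
    RTRCPTR  pPatchInstrGC = patmPatchHCPtr2PatchGCPtr(pVM, pPatchInstrHC);   /* HC -> GC */
    uint8_t *pbSamePlace   = patmPatchGCPtr2PatchHCPtr(pVM, pPatchInstrGC);   /* GC -> HC, back to the start */
#endif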
107
108static int patmReinit(PVM pVM);
109static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam);
110
111#ifdef VBOX_WITH_DEBUGGER
112static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM);
113static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
114static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
115
116/** Command descriptors. */
117static const DBGCCMD g_aCmds[] =
118{
119 /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, fFlags, pfnHandler, pszSyntax, pszDescription */
120 { "patmon", 0, 0, NULL, 0, 0, patmr3CmdOn, "", "Enable patching." },
121 { "patmoff", 0, 0, NULL, 0, 0, patmr3CmdOff, "", "Disable patching." },
122};
123#endif
124
125/* Don't want to break saved states, so put it here as a global variable. */
126static unsigned int cIDTHandlersDisabled = 0;
127
128/**
129 * Initializes the PATM.
130 *
131 * @returns VBox status code.
132 * @param pVM The VM to operate on.
133 */
134VMMR3DECL(int) PATMR3Init(PVM pVM)
135{
136 int rc;
137
138 Log(("PATMR3Init: Patch record size %d\n", sizeof(PATCHINFO)));
139
140 /* These values can't change as they are hardcoded in patch code (old saved states!) */
141 AssertCompile(VMCPU_FF_TIMER == RT_BIT_32(2));
142 AssertCompile(VM_FF_REQUEST == VMCPU_FF_REQUEST);
143 AssertCompile(VMCPU_FF_INTERRUPT_APIC == RT_BIT_32(0));
144 AssertCompile(VMCPU_FF_INTERRUPT_PIC == RT_BIT_32(1));
145
146 AssertReleaseMsg(PATMInterruptFlag == (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST),
147 ("Interrupt flags out of sync!! PATMInterruptFlag=%#x expected %#x. broken assembler?\n", PATMInterruptFlag, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST));
148
149 /* Allocate patch memory and GC patch state memory. */
150 pVM->patm.s.cbPatchMem = PATCH_MEMORY_SIZE;
151 /* Add another page in case the generated code is much larger than expected. */
152 /** @todo bad safety precaution */
153 rc = MMR3HyperAllocOnceNoRel(pVM, PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE + PATM_STAT_MEMSIZE, PAGE_SIZE, MM_TAG_PATM, (void **)&pVM->patm.s.pPatchMemHC);
154 if (RT_FAILURE(rc))
155 {
156 Log(("MMHyperAlloc failed with %Rrc\n", rc));
157 return rc;
158 }
159 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
160
161 /* PATM stack page for call instruction execution. (2 parts: one for our private stack and one to store the original return address.) */
162 pVM->patm.s.pGCStackHC = (RTRCPTR *)(pVM->patm.s.pPatchMemHC + PATCH_MEMORY_SIZE + PAGE_SIZE);
163 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
164
165 /*
166 * Hypervisor memory for GC status data (read/write)
167 *
168 * Note1: This is non-critical data; if the guest trashes it, it will only cause problems for the guest itself.
169 * Note2: This doesn't really belong here, but we need access to it for relocation purposes.
170 *
171 */
172 Assert(sizeof(PATMGCSTATE) < PAGE_SIZE); /* Note: hardcoded dependencies on this exist. */
173 pVM->patm.s.pGCStateHC = (PPATMGCSTATE)((uint8_t *)pVM->patm.s.pGCStackHC + PATM_STACK_TOTAL_SIZE);
174 pVM->patm.s.pGCStateGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
175
176 /* Hypervisor memory for patch statistics */
177 pVM->patm.s.pStatsHC = (PSTAMRATIOU32)((uint8_t *)pVM->patm.s.pGCStateHC + PAGE_SIZE);
178 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
179
180 /* Memory for patch lookup trees. */
181 rc = MMHyperAlloc(pVM, sizeof(*pVM->patm.s.PatchLookupTreeHC), 0, MM_TAG_PATM, (void **)&pVM->patm.s.PatchLookupTreeHC);
182 AssertRCReturn(rc, rc);
183 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
184
185#ifdef RT_ARCH_AMD64 /* see patmReinit(). */
186 /* Check CFGM option. */
187 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "PATMEnabled", &pVM->fPATMEnabled);
188 if (RT_FAILURE(rc))
189# ifdef PATM_DISABLE_ALL
190 pVM->fPATMEnabled = false;
191# else
192 pVM->fPATMEnabled = true;
193# endif
194#endif
195
196 rc = patmReinit(pVM);
197 AssertRC(rc);
198 if (RT_FAILURE(rc))
199 return rc;
200
201 /*
202 * Register save and load state notifiers.
203 */
204 rc = SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SSM_VERSION, sizeof(pVM->patm.s) + PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE,
205 NULL, NULL, NULL,
206 NULL, patmR3Save, NULL,
207 NULL, patmR3Load, NULL);
208 AssertRCReturn(rc, rc);
209
210#ifdef VBOX_WITH_DEBUGGER
211 /*
212 * Debugger commands.
213 */
214 static bool s_fRegisteredCmds = false;
215 if (!s_fRegisteredCmds)
216 {
217 int rc2 = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
218 if (RT_SUCCESS(rc2))
219 s_fRegisteredCmds = true;
220 }
221#endif
222
223#ifdef VBOX_WITH_STATISTICS
224 STAM_REG(pVM, &pVM->patm.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/PATM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
225 STAM_REG(pVM, &pVM->patm.s.StatPATMMemoryUsed,STAMTYPE_COUNTER, "/PATM/MemoryUsed", STAMUNIT_OCCURENCES, "The amount of hypervisor heap used for patches.");
226 STAM_REG(pVM, &pVM->patm.s.StatDisabled, STAMTYPE_COUNTER, "/PATM/Patch/Disabled", STAMUNIT_OCCURENCES, "Number of times patches were disabled.");
227 STAM_REG(pVM, &pVM->patm.s.StatEnabled, STAMTYPE_COUNTER, "/PATM/Patch/Enabled", STAMUNIT_OCCURENCES, "Number of times patches were enabled.");
228 STAM_REG(pVM, &pVM->patm.s.StatDirty, STAMTYPE_COUNTER, "/PATM/Patch/Dirty", STAMUNIT_OCCURENCES, "Number of times patches were marked dirty.");
229 STAM_REG(pVM, &pVM->patm.s.StatUnusable, STAMTYPE_COUNTER, "/PATM/Patch/Unusable", STAMUNIT_OCCURENCES, "Number of unusable patches (conflicts).");
230 STAM_REG(pVM, &pVM->patm.s.StatInstalled, STAMTYPE_COUNTER, "/PATM/Patch/Installed", STAMUNIT_OCCURENCES, "Number of installed patches.");
231 STAM_REG(pVM, &pVM->patm.s.StatInt3Callable, STAMTYPE_COUNTER, "/PATM/Patch/Int3Callable", STAMUNIT_OCCURENCES, "Number of cli patches turned into int3 patches.");
232
233 STAM_REG(pVM, &pVM->patm.s.StatInt3BlockRun, STAMTYPE_COUNTER, "/PATM/Patch/Run/Int3", STAMUNIT_OCCURENCES, "Number of times an int3 block patch was executed.");
234 STAMR3RegisterF(pVM, &pVM->patm.s.pGCStateHC->uPatchCalls, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Patch/Run/Normal");
235
236 STAM_REG(pVM, &pVM->patm.s.StatInstalledFunctionPatches, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Function", STAMUNIT_OCCURENCES, "Number of installed function duplication patches.");
237 STAM_REG(pVM, &pVM->patm.s.StatInstalledTrampoline, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Trampoline", STAMUNIT_OCCURENCES, "Number of installed trampoline patches.");
238 STAM_REG(pVM, &pVM->patm.s.StatInstalledJump, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Jump", STAMUNIT_OCCURENCES, "Number of installed jump patches.");
239
240 STAM_REG(pVM, &pVM->patm.s.StatOverwritten, STAMTYPE_COUNTER, "/PATM/Patch/Overwritten", STAMUNIT_OCCURENCES, "Number of overwritten patches.");
241 STAM_REG(pVM, &pVM->patm.s.StatFixedConflicts,STAMTYPE_COUNTER, "/PATM/Patch/ConflictFixed", STAMUNIT_OCCURENCES, "Number of fixed conflicts.");
242 STAM_REG(pVM, &pVM->patm.s.StatFlushed, STAMTYPE_COUNTER, "/PATM/Patch/Flushed", STAMUNIT_OCCURENCES, "Number of flushes of pages with patch jumps.");
243 STAM_REG(pVM, &pVM->patm.s.StatMonitored, STAMTYPE_COUNTER, "/PATM/Patch/Monitored", STAMUNIT_OCCURENCES, "Number of patches in monitored patch pages.");
244 STAM_REG(pVM, &pVM->patm.s.StatPageBoundaryCrossed, STAMTYPE_COUNTER, "/PATM/Patch/BoundaryCross", STAMUNIT_OCCURENCES, "Number of refused patches due to patch jump crossing page boundary.");
245
246 STAM_REG(pVM, &pVM->patm.s.StatHandleTrap, STAMTYPE_PROFILE, "/PATM/HandleTrap", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3HandleTrap");
247 STAM_REG(pVM, &pVM->patm.s.StatPushTrap, STAMTYPE_COUNTER, "/PATM/HandleTrap/PushWP", STAMUNIT_OCCURENCES, "Number of traps due to monitored stack pages.");
248
249 STAM_REG(pVM, &pVM->patm.s.StatSwitchBack, STAMTYPE_COUNTER, "/PATM/SwitchBack", STAMUNIT_OCCURENCES, "Switch back to original guest code when IF=1 & executing PATM instructions");
250 STAM_REG(pVM, &pVM->patm.s.StatSwitchBackFail,STAMTYPE_COUNTER, "/PATM/SwitchBackFail", STAMUNIT_OCCURENCES, "Failed switch back to original guest code when IF=1 & executing PATM instructions");
251
252 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQFailed, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Failed", STAMUNIT_OCCURENCES, "Nr of failed PATMR3DuplicateFunctionRequest calls");
253 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQSuccess, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Success", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls");
254 STAM_REG(pVM, &pVM->patm.s.StatDuplicateUseExisting,STAMTYPE_COUNTER, "/PATM/Function/DupREQ/UseExist", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls when using an existing patch");
255
256 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupInsert, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Insert", STAMUNIT_OCCURENCES, "Nr of successful function address insertions");
257 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupReplace, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Replace", STAMUNIT_OCCURENCES, "Nr of successful function address replacements");
258 STAM_REG(pVM, &pVM->patm.s.StatU32FunctionMaxSlotsUsed, STAMTYPE_U32_RESET,"/PATM/Function/Lookup/MaxSlots", STAMUNIT_OCCURENCES, "Maximum nr of lookup slots used in all call patches");
259
260 STAM_REG(pVM, &pVM->patm.s.StatFunctionFound, STAMTYPE_COUNTER, "/PATM/Function/Found", STAMUNIT_OCCURENCES, "Nr of successful function patch lookups in GC");
261 STAM_REG(pVM, &pVM->patm.s.StatFunctionNotFound, STAMTYPE_COUNTER, "/PATM/Function/NotFound", STAMUNIT_OCCURENCES, "Nr of failed function patch lookups in GC");
262
263 STAM_REG(pVM, &pVM->patm.s.StatPatchWrite, STAMTYPE_PROFILE, "/PATM/Write/Handle", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3PatchWrite");
264 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteDetect, STAMTYPE_PROFILE, "/PATM/Write/Detect", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMIsWriteToPatchPage");
265 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpreted, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Success", STAMUNIT_OCCURENCES, "Nr of interpreted patch writes.");
266 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpretedFailed, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Failed", STAMUNIT_OCCURENCES, "Nr of failed interpreted patch writes.");
267
268 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshSuccess, STAMTYPE_COUNTER, "/PATM/Refresh/Success", STAMUNIT_OCCURENCES, "Successful patch refreshes");
269 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshFailed, STAMTYPE_COUNTER, "/PATM/Refresh/Failure", STAMUNIT_OCCURENCES, "Failed patch refreshes");
270
271 STAM_REG(pVM, &pVM->patm.s.StatPatchPageInserted, STAMTYPE_COUNTER, "/PATM/Page/Inserted", STAMUNIT_OCCURENCES, "Nr of inserted guest pages that were patched");
272 STAM_REG(pVM, &pVM->patm.s.StatPatchPageRemoved, STAMTYPE_COUNTER, "/PATM/Page/Removed", STAMUNIT_OCCURENCES, "Nr of removed guest pages that were patched");
273
274 STAM_REG(pVM, &pVM->patm.s.StatInstrDirty, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Detected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty.");
275 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyGood, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Corrected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and corrected later on.");
276 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyBad, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Failed", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and we were not able to correct them.");
277
278 STAM_REG(pVM, &pVM->patm.s.StatSysEnter, STAMTYPE_COUNTER, "/PATM/Emul/SysEnter", STAMUNIT_OCCURENCES, "Number of times sysenter was emulated.");
279 STAM_REG(pVM, &pVM->patm.s.StatSysExit, STAMTYPE_COUNTER, "/PATM/Emul/SysExit" , STAMUNIT_OCCURENCES, "Number of times sysexit was emulated.");
280 STAM_REG(pVM, &pVM->patm.s.StatEmulIret, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Success", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
281 STAM_REG(pVM, &pVM->patm.s.StatEmulIretFailed, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Failed", STAMUNIT_OCCURENCES, "Number of times iret emulation failed.");
282
283 STAM_REG(pVM, &pVM->patm.s.StatGenRet, STAMTYPE_COUNTER, "/PATM/Gen/Ret" , STAMUNIT_OCCURENCES, "Number of generated ret instructions.");
284 STAM_REG(pVM, &pVM->patm.s.StatGenRetReused, STAMTYPE_COUNTER, "/PATM/Gen/RetReused" , STAMUNIT_OCCURENCES, "Number of reused ret instructions.");
285 STAM_REG(pVM, &pVM->patm.s.StatGenCall, STAMTYPE_COUNTER, "/PATM/Gen/Call", STAMUNIT_OCCURENCES, "Number of generated call instructions.");
286 STAM_REG(pVM, &pVM->patm.s.StatGenJump, STAMTYPE_COUNTER, "/PATM/Gen/Jmp" , STAMUNIT_OCCURENCES, "Number of generated indirect jump instructions.");
287 STAM_REG(pVM, &pVM->patm.s.StatGenPopf, STAMTYPE_COUNTER, "/PATM/Gen/Popf" , STAMUNIT_OCCURENCES, "Number of generated popf instructions.");
288
289 STAM_REG(pVM, &pVM->patm.s.StatCheckPendingIRQ, STAMTYPE_COUNTER, "/PATM/GC/CheckIRQ" , STAMUNIT_OCCURENCES, "Number of traps that ask to check for pending irqs.");
290#endif /* VBOX_WITH_STATISTICS */
291
292 Log(("PATMCallRecord.size %d\n", PATMCallRecord.size));
293 Log(("PATMCallIndirectRecord.size %d\n", PATMCallIndirectRecord.size));
294 Log(("PATMRetRecord.size %d\n", PATMRetRecord.size));
295 Log(("PATMJumpIndirectRecord.size %d\n", PATMJumpIndirectRecord.size));
296 Log(("PATMPopf32Record.size %d\n", PATMPopf32Record.size));
297 Log(("PATMIretRecord.size %d\n", PATMIretRecord.size));
298 Log(("PATMStiRecord.size %d\n", PATMStiRecord.size));
299 Log(("PATMCheckIFRecord.size %d\n", PATMCheckIFRecord.size));
300
301 return rc;
302}
303
304/**
305 * Finalizes HMA page attributes.
306 *
307 * @returns VBox status code.
308 * @param pVM The VM handle.
309 */
310VMMR3DECL(int) PATMR3InitFinalize(PVM pVM)
311{
312 /* The GC state, stack and statistics must be read/write for the guest (supervisor only of course). */
313 int rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStateGC, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
314 if (RT_FAILURE(rc))
315 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
316
317 rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStackGC, PATM_STACK_TOTAL_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
318 if (RT_FAILURE(rc))
319 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
320
321 rc = PGMMapSetPage(pVM, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
322 if (RT_FAILURE(rc))
323 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
324
325 return rc;
326}
327
328/**
329 * (Re)initializes PATM
330 *
331 * @param pVM The VM.
332 */
333static int patmReinit(PVM pVM)
334{
335 int rc;
336
337 /*
338 * Assert alignment and sizes.
339 */
340 AssertRelease(!(RT_OFFSETOF(VM, patm.s) & 31));
341 AssertRelease(sizeof(pVM->patm.s) <= sizeof(pVM->patm.padding));
342
343 /*
344 * Setup any fixed pointers and offsets.
345 */
346 pVM->patm.s.offVM = RT_OFFSETOF(VM, patm);
347
348#ifndef RT_ARCH_AMD64 /* would be nice if this was changed everywhere. was driving me crazy on AMD64. */
349#ifndef PATM_DISABLE_ALL
350 pVM->fPATMEnabled = true;
351#endif
352#endif
353
354 Assert(pVM->patm.s.pGCStateHC);
355 memset(pVM->patm.s.pGCStateHC, 0, PAGE_SIZE);
356 AssertReleaseMsg(pVM->patm.s.pGCStateGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStateGC));
357
358 Log(("Patch memory allocated at %p - %RRv\n", pVM->patm.s.pPatchMemHC, pVM->patm.s.pPatchMemGC));
359 pVM->patm.s.pGCStateHC->uVMFlags = X86_EFL_IF;
360
361 Assert(pVM->patm.s.pGCStackHC);
362 memset(pVM->patm.s.pGCStackHC, 0, PAGE_SIZE);
363 AssertReleaseMsg(pVM->patm.s.pGCStackGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStackGC));
364 pVM->patm.s.pGCStateHC->Psp = PATM_STACK_SIZE;
365 pVM->patm.s.pGCStateHC->fPIF = 1; /* PATM Interrupt Flag */
366
367 Assert(pVM->patm.s.pStatsHC);
368 memset(pVM->patm.s.pStatsHC, 0, PATM_STAT_MEMSIZE);
369 AssertReleaseMsg(pVM->patm.s.pStatsGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pStatsGC));
370
371 Assert(pVM->patm.s.pPatchMemHC);
372 Assert(pVM->patm.s.pPatchMemGC == MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC));
373 memset(pVM->patm.s.pPatchMemHC, 0, PATCH_MEMORY_SIZE);
374 AssertReleaseMsg(pVM->patm.s.pPatchMemGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pPatchMemHC));
375
376 /* Needed for future patching of sldt/sgdt/sidt/str etc. */
377 pVM->patm.s.pCPUMCtxGC = VM_RC_ADDR(pVM, CPUMQueryGuestCtxPtr(VMMGetCpu(pVM)));
378
379 Assert(pVM->patm.s.PatchLookupTreeHC);
380 Assert(pVM->patm.s.PatchLookupTreeGC == MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC));
381
382 /*
383 * (Re)Initialize PATM structure
384 */
385 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
386 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr);
387 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
388 pVM->patm.s.offPatchMem = 16; /* don't start with zero here */
389 pVM->patm.s.uCurrentPatchIdx = 1; /* Index zero is a dummy */
390 pVM->patm.s.pvFaultMonitor = 0;
391 pVM->patm.s.deltaReloc = 0;
392
393 /* Lowest and highest patched instruction */
394 pVM->patm.s.pPatchedInstrGCLowest = ~0;
395 pVM->patm.s.pPatchedInstrGCHighest = 0;
396
397 pVM->patm.s.PatchLookupTreeHC->PatchTree = 0;
398 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
399 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
400
401 pVM->patm.s.pfnSysEnterPatchGC = 0;
402 pVM->patm.s.pfnSysEnterGC = 0;
403
404 pVM->patm.s.fOutOfMemory = false;
405
406 pVM->patm.s.pfnHelperCallGC = 0;
407
408 /* Generate all global functions to be used by future patches. */
409 /* We generate a fake patch in order to use the existing code for relocation. */
410 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pVM->patm.s.pGlobalPatchRec);
411 if (RT_FAILURE(rc))
412 {
413 Log(("Out of memory!!!!\n"));
414 return VERR_NO_MEMORY;
415 }
416 pVM->patm.s.pGlobalPatchRec->patch.flags = PATMFL_GLOBAL_FUNCTIONS;
417 pVM->patm.s.pGlobalPatchRec->patch.uState = PATCH_ENABLED;
418 pVM->patm.s.pGlobalPatchRec->patch.pPatchBlockOffset = pVM->patm.s.offPatchMem;
419
420 rc = patmPatchGenGlobalFunctions(pVM, &pVM->patm.s.pGlobalPatchRec->patch);
421 AssertRC(rc);
422
423 /* Update free pointer in patch memory. */
424 pVM->patm.s.offPatchMem += pVM->patm.s.pGlobalPatchRec->patch.uCurPatchOffset;
425 /* Round to next 8 byte boundary. */
426 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
427 return rc;
428}
429
430
431/**
432 * Applies relocations to data and code managed by this
433 * component. This function will be called at init and
434 * whenever the VMM needs to relocate itself inside the GC.
435 *
436 * The PATM will update the addresses used by the switcher.
437 *
438 * @param pVM The VM.
439 */
440VMMR3DECL(void) PATMR3Relocate(PVM pVM)
441{
442 RTRCPTR GCPtrNew = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
443 RTRCINTPTR delta = GCPtrNew - pVM->patm.s.pGCStateGC;
444
445 Log(("PATMR3Relocate from %RRv to %RRv - delta %08X\n", pVM->patm.s.pGCStateGC, GCPtrNew, delta));
446 if (delta)
447 {
448 PCPUMCTX pCtx;
449
450 /* Update CPUMCTX guest context pointer. */
451 pVM->patm.s.pCPUMCtxGC += delta;
452
453 pVM->patm.s.deltaReloc = delta;
454
455 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, RelocatePatches, (void *)pVM);
456
457 pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));
458
459 /* If we are running patch code right now, then also adjust EIP. */
460 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
461 pCtx->eip += delta;
462
463 pVM->patm.s.pGCStateGC = GCPtrNew;
464 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
465
466 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
467
468 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
469
470 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
471
472 if (pVM->patm.s.pfnSysEnterPatchGC)
473 pVM->patm.s.pfnSysEnterPatchGC += delta;
474
475 /* Deal with the global patch functions. */
476 pVM->patm.s.pfnHelperCallGC += delta;
477 pVM->patm.s.pfnHelperRetGC += delta;
478 pVM->patm.s.pfnHelperIretGC += delta;
479 pVM->patm.s.pfnHelperJumpGC += delta;
480
481 RelocatePatches(&pVM->patm.s.pGlobalPatchRec->Core, (void *)pVM);
482 }
483}
484
485
486/**
487 * Terminates the PATM.
488 *
489 * Termination means cleaning up and freeing all resources;
490 * the VM itself is at this point powered off or suspended.
491 *
492 * @returns VBox status code.
493 * @param pVM The VM to operate on.
494 */
495VMMR3DECL(int) PATMR3Term(PVM pVM)
496{
497 /* Memory was all allocated from the two MM heaps and requires no freeing. */
498 NOREF(pVM);
499 return VINF_SUCCESS;
500}
501
502
503/**
504 * PATM reset callback.
505 *
506 * @returns VBox status code.
507 * @param pVM The VM which is reset.
508 */
509VMMR3DECL(int) PATMR3Reset(PVM pVM)
510{
511 Log(("PATMR3Reset\n"));
512
513 /* Free all patches. */
514 while (true)
515 {
516 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32RemoveBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, 0, true);
517 if (pPatchRec)
518 {
519 PATMRemovePatch(pVM, pPatchRec, true);
520 }
521 else
522 break;
523 }
524 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
525 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
526 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
527 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
528
529 int rc = patmReinit(pVM);
530 if (RT_SUCCESS(rc))
531 rc = PATMR3InitFinalize(pVM); /* paranoia */
532
533 return rc;
534}
535
536DECLCALLBACK(int) patmReadBytes(PDISCPUSTATE pDisState, uint8_t *pbDst, RTUINTPTR uSrcAddr, uint32_t cbToRead)
537{
538 PATMDISASM *pDisInfo = (PATMDISASM *)pDisState->pvUser;
539 int orgsize = cbToRead;
540
541 Assert(cbToRead);
542 if (cbToRead == 0)
543 return VERR_INVALID_PARAMETER;
544
545 /*
546 * Trap/interrupt handlers typically call common code on entry, which might already have patches inserted.
547 * As we currently don't support calling patch code from patch code, we let the disassembler read the original opcode bytes instead.
548 */
549 /** @todo could change in the future! */
550 if (pDisInfo->fReadFlags & PATMREAD_ORGCODE)
551 {
552 for (int i = 0; i < orgsize; i++)
553 {
554 int rc = PATMR3QueryOpcode(pDisInfo->pVM, (RTRCPTR)uSrcAddr, pbDst);
555 if (RT_FAILURE(rc))
556 break;
557 uSrcAddr++;
558 pbDst++;
559 cbToRead--;
560 }
561 if (cbToRead == 0)
562 return VINF_SUCCESS;
563#ifdef VBOX_STRICT
564 if ( !(pDisInfo->pPatchInfo->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER))
565 && !(pDisInfo->fReadFlags & PATMREAD_NOCHECK))
566 {
567 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, uSrcAddr, NULL) == false);
568 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, uSrcAddr+cbToRead-1, NULL) == false);
569 }
570#endif
571 }
572
573 if ( !pDisInfo->pInstrHC
574 || ( PAGE_ADDRESS(pDisInfo->pInstrGC) != PAGE_ADDRESS(uSrcAddr + cbToRead - 1)
575 && !PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr)))
576 {
577 Assert(!PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr));
578 return PGMPhysSimpleReadGCPtr(&pDisInfo->pVM->aCpus[0], pbDst, uSrcAddr, cbToRead);
579 }
580
581 Assert(pDisInfo->pInstrHC);
582
583 uint8_t *pInstrHC = pDisInfo->pInstrHC;
584
585 Assert(pInstrHC);
586
587 /* pInstrHC is the base address; adjust according to the GC pointer. */
588 pInstrHC = pInstrHC + (uSrcAddr - pDisInfo->pInstrGC);
589
590 memcpy(pbDst, (void *)pInstrHC, cbToRead);
591
592 return VINF_SUCCESS;
593}
594
595
596DECLINLINE(bool) patmR3DisInstrToStr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
597 PDISCPUSTATE pCpu, uint32_t *pcbInstr, char *pszOutput, size_t cbOutput)
598{
599 PATMDISASM disinfo;
600 disinfo.pVM = pVM;
601 disinfo.pPatchInfo = pPatch;
602 disinfo.pInstrHC = pbInstrHC;
603 disinfo.pInstrGC = InstrGCPtr32;
604 disinfo.fReadFlags = fReadFlags;
605 return RT_SUCCESS(DISInstrToStrWithReader(InstrGCPtr32,
606 (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
607 patmReadBytes, &disinfo,
608 pCpu, pcbInstr, pszOutput, cbOutput));
609}
610
611
612DECLINLINE(bool) patmR3DisInstr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
613 PDISCPUSTATE pCpu, uint32_t *pcbInstr)
614{
615 PATMDISASM disinfo;
616 disinfo.pVM = pVM;
617 disinfo.pPatchInfo = pPatch;
618 disinfo.pInstrHC = pbInstrHC;
619 disinfo.pInstrGC = InstrGCPtr32;
620 disinfo.fReadFlags = fReadFlags;
621 return RT_SUCCESS(DISInstrWithReader(InstrGCPtr32,
622 (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
623 patmReadBytes, &disinfo,
624 pCpu, pcbInstr));
625}
626
627
628DECLINLINE(bool) patmR3DisInstrNoStrOpMode(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC,
629 uint32_t fReadFlags,
630 PDISCPUSTATE pCpu, uint32_t *pcbInstr)
631{
632 PATMDISASM disinfo;
633 disinfo.pVM = pVM;
634 disinfo.pPatchInfo = pPatch;
635 disinfo.pInstrHC = pbInstrHC;
636 disinfo.pInstrGC = InstrGCPtr32;
637 disinfo.fReadFlags = fReadFlags;
638 return RT_SUCCESS(DISInstrWithReader(InstrGCPtr32, pPatch->uOpMode, patmReadBytes, &disinfo,
639 pCpu, pcbInstr));
640}
641
642#ifdef LOG_ENABLED
643# define PATM_LOG_ORG_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) \
644 PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, PATMREAD_ORGCODE, a_szComment, " patch:")
645# define PATM_LOG_RAW_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) \
646 PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, PATMREAD_RAWCODE, a_szComment, " patch:")
647
648# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) \
649 do { \
650 if (LogIsEnabled()) \
651 patmLogRawPatchInstr(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2); \
652 } while (0)
653
654static void patmLogRawPatchInstr(PVM pVM, PPATCHINFO pPatch, uint32_t fFlags,
655 const char *pszComment1, const char *pszComment2)
656{
657 DISCPUSTATE DisState;
658 char szOutput[128];
659 szOutput[0] = '\0';
660 patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC, NULL, fFlags,
661 &DisState, NULL, szOutput, sizeof(szOutput));
662 Log(("%s%s %s", pszComment1, pszComment2, szOutput));
663}
664
665#else
666# define PATM_LOG_ORG_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) do { } while (0)
667# define PATM_LOG_RAW_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) do { } while (0)
668# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) do { } while (0)
669#endif
670
671
672/**
673 * Callback function for RTAvloU32DoWithAll
674 *
675 * Updates all fixups in the patches
676 *
677 * @returns VBox status code.
678 * @param pNode Current node
679 * @param pParam The VM to operate on.
680 */
681static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam)
682{
683 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
684 PVM pVM = (PVM)pParam;
685 RTRCINTPTR delta;
686 int rc;
687
688 /* Nothing to do if the patch is not active. */
689 if (pPatch->patch.uState == PATCH_REFUSED)
690 return 0;
691
692 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
693 PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Org patch jump:", "");
694
695 Log(("Nr of fixups %d\n", pPatch->patch.nrFixups));
696 delta = (RTRCINTPTR)pVM->patm.s.deltaReloc;
697
698 /*
699 * Apply fixups
700 */
701 PRELOCREC pRec = 0;
702 AVLPVKEY key = 0;
703
704 while (true)
705 {
706 /* Get the record that's closest from above */
707 pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true);
708 if (pRec == 0)
709 break;
710
711 key = (AVLPVKEY)(pRec->pRelocPos + 1); /* search for the next record during the next round. */
712
713 switch (pRec->uType)
714 {
715 case FIXUP_ABSOLUTE:
716 Log(("Absolute fixup at %RRv %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
717 if ( !pRec->pSource
718 || PATMIsPatchGCAddr(pVM, pRec->pSource))
719 {
720 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
721 }
722 else
723 {
724 uint8_t curInstr[15];
725 uint8_t oldInstr[15];
726 Assert(pRec->pSource && pPatch->patch.cbPrivInstr <= 15);
727
728 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
729
730 memcpy(oldInstr, pPatch->patch.aPrivInstr, pPatch->patch.cbPrivInstr);
731 *(RTRCPTR *)&oldInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
732
733 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), curInstr, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPrivInstr);
734 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
735
736 pRec->pDest = (RTRCPTR)((RTRCUINTPTR)pRec->pDest + delta);
737
738 if ( rc == VERR_PAGE_NOT_PRESENT
739 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
740 {
741 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
742
743 Log(("PATM: Patch page not present -> check later!\n"));
744 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
745 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
746 }
747 else
748 if (memcmp(curInstr, oldInstr, pPatch->patch.cbPrivInstr))
749 {
750 Log(("PATM: Patch was overwritten -> disabling patch!!\n"));
751 /*
752 * Disable patch; this is not a good solution
753 */
754 /** @todo hopefully it was completely overwritten (if the read was successful)!!!! */
755 pPatch->patch.uState = PATCH_DISABLED;
756 }
757 else
758 if (RT_SUCCESS(rc))
759 {
760 *(RTRCPTR *)&curInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
761 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pRec->pSource, curInstr, pPatch->patch.cbPrivInstr);
762 AssertRC(rc);
763 }
764 }
765 break;
766
767 case FIXUP_REL_JMPTOPATCH:
768 {
769 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
770
771 if ( pPatch->patch.uState == PATCH_ENABLED
772 && (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE))
773 {
774 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
775 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
776 RTRCPTR pJumpOffGC;
777 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
778 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
779
780#if 0 /** @todo '*(int32_t*)pRec->pRelocPos' crashes on restore of an XP VM here. pRelocPos=0x8000dbe2180a (bird) */
781 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
782#else
783 Log(("Relative fixup (g2p) ???????? -> %08X at %08X (source=%08x, target=%08x)\n", displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
784#endif
785
786 Assert(pRec->pSource - pPatch->patch.cbPatchJump == pPatch->patch.pPrivInstrGC);
787#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
788 if (pPatch->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
789 {
790 Assert(pPatch->patch.flags & PATMFL_JUMP_CONFLICT);
791
792 pJumpOffGC = pPatch->patch.pPrivInstrGC + 2; //two byte opcode
793 oldJump[0] = pPatch->patch.aPrivInstr[0];
794 oldJump[1] = pPatch->patch.aPrivInstr[1];
795 *(RTRCUINTPTR *)&oldJump[2] = displOld;
796 }
797 else
798#endif
799 if (pPatch->patch.cbPatchJump == SIZEOF_NEARJUMP32)
800 {
801 pJumpOffGC = pPatch->patch.pPrivInstrGC + 1; //one byte opcode
802 oldJump[0] = 0xE9;
803 *(RTRCUINTPTR *)&oldJump[1] = displOld;
804 }
805 else
806 {
807 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->patch.cbPatchJump));
808 continue; //this should never happen!!
809 }
810 Assert(pPatch->patch.cbPatchJump <= sizeof(temp));
811
812 /*
813 * Read old patch jump and compare it to the one we previously installed
814 */
815 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPatchJump);
816 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
817
818 if ( rc == VERR_PAGE_NOT_PRESENT
819 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
820 {
821 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
822
823 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
824 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
825 }
826 else
827 if (memcmp(temp, oldJump, pPatch->patch.cbPatchJump))
828 {
829 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
830 /*
831 * Disable patch; this is not a good solution
832 */
833 /** @todo hopefully it was completely overwritten (if the read was successful)!!!! */
834 pPatch->patch.uState = PATCH_DISABLED;
835 }
836 else
837 if (RT_SUCCESS(rc))
838 {
839 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
840 AssertRC(rc);
841 }
842 else
843 AssertMsgFailed(("Unexpected error %d from PGMPhysSimpleReadGCPtr\n", rc));
844 }
845 else
846 Log(("Skip the guest jump to patch code for this disabled patch %RGv - %08X\n", pPatch->patch.pPrivInstrGC, pRec->pRelocPos));
847
848 pRec->pDest = pTarget;
849 break;
850 }
851
852 case FIXUP_REL_JMPTOGUEST:
853 {
854 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
855 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
856
857 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
858 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
859 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
860 pRec->pSource = pSource;
861 break;
862 }
863
864 default:
865 AssertMsg(0, ("Invalid fixup type!!\n"));
866 return VERR_INVALID_PARAMETER;
867 }
868 }
869
870 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
871 PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Rel patch jump:", "");
872 return 0;
873}
874
875/**
876 * \#PF Handler callback for virtual access handler ranges.
877 *
878 * It is important to realize that a physical page in a range can have aliases, and
879 * for ALL and WRITE handlers these aliases will also trigger.
880 *
881 * @returns VINF_SUCCESS if the handler has carried out the operation.
882 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
883 * @param pVM VM Handle.
884 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
885 * @param pvPtr The HC mapping of that address.
886 * @param pvBuf What the guest is reading/writing.
887 * @param cbBuf How much it's reading/writing.
888 * @param enmAccessType The access type.
889 * @param pvUser User argument.
890 */
891DECLCALLBACK(int) patmVirtPageHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
892 PGMACCESSTYPE enmAccessType, void *pvUser)
893{
894 Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
895 NOREF(pvPtr); NOREF(pvBuf); NOREF(cbBuf); NOREF(pvUser);
896
897 /** @todo could be the wrong virtual address (alias) */
898 pVM->patm.s.pvFaultMonitor = GCPtr;
899 PATMR3HandleMonitoredPage(pVM);
900 return VINF_PGM_HANDLER_DO_DEFAULT;
901}
902
903
904#ifdef VBOX_WITH_DEBUGGER
905/**
906 * Callback function for RTAvloU32DoWithAll
907 *
908 * Enables the patch that's being enumerated
909 *
910 * @returns 0 (continue enumeration).
911 * @param pNode Current node
912 * @param pVM The VM to operate on.
913 */
914static DECLCALLBACK(int) EnableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
915{
916 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
917
918 PATMR3EnablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
919 return 0;
920}
921#endif /* VBOX_WITH_DEBUGGER */
922
923
924#ifdef VBOX_WITH_DEBUGGER
925/**
926 * Callback function for RTAvloU32DoWithAll
927 *
928 * Disables the patch that's being enumerated
929 *
930 * @returns 0 (continue enumeration).
931 * @param pNode Current node
932 * @param pVM The VM to operate on.
933 */
934static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
935{
936 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
937
938 PATMR3DisablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
939 return 0;
940}
941#endif
942
943/**
944 * Returns the host context pointer and size of the patch memory block
945 *
946 * @returns Host context pointer to the patch memory block.
947 * @param pVM The VM to operate on.
948 * @param pcb Where to store the size of the patch memory block. (optional)
949 */
950VMMR3DECL(void *) PATMR3QueryPatchMemHC(PVM pVM, uint32_t *pcb)
951{
952 if (pcb)
953 *pcb = pVM->patm.s.cbPatchMem;
954
955 return pVM->patm.s.pPatchMemHC;
956}
957
958
959/**
960 * Returns the guest context pointer and size of the patch memory block
961 *
962 * @returns Guest context (RC) pointer to the patch memory block.
963 * @param pVM The VM to operate on.
964 * @param pcb Where to store the size of the patch memory block. (optional)
965 */
966VMMR3DECL(RTRCPTR) PATMR3QueryPatchMemGC(PVM pVM, uint32_t *pcb)
967{
968 if (pcb)
969 *pcb = pVM->patm.s.cbPatchMem;
970
971 return pVM->patm.s.pPatchMemGC;
972}
973
974
975/**
976 * Returns the host context pointer of the GC context structure
977 *
978 * @returns Host context pointer to the GC state structure.
979 * @param pVM The VM to operate on.
980 */
981VMMR3DECL(PPATMGCSTATE) PATMR3QueryGCStateHC(PVM pVM)
982{
983 return pVM->patm.s.pGCStateHC;
984}
985
986
987/**
988 * Checks whether the HC address is part of our patch region
989 *
990 * @returns true if the address lies within the patch memory region, false otherwise.
991 * @param pVM The VM to operate on.
992 * @param pAddrHC Host context address to check.
993 */
994VMMR3DECL(bool) PATMR3IsPatchHCAddr(PVM pVM, R3PTRTYPE(uint8_t *) pAddrHC)
995{
996 return (pAddrHC >= pVM->patm.s.pPatchMemHC && pAddrHC < pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem) ? true : false;
997}
998
999
1000/**
1001 * Allows or disallow patching of privileged instructions executed by the guest OS
1002 *
1003 * @returns VBox status code.
1004 * @param pVM The VM to operate on.
1005 * @param fAllowPatching Allow/disallow patching
1006 */
1007VMMR3DECL(int) PATMR3AllowPatching(PVM pVM, uint32_t fAllowPatching)
1008{
1009 pVM->fPATMEnabled = (fAllowPatching) ? true : false;
1010 return VINF_SUCCESS;
1011}
1012
1013/**
1014 * Convert a GC patch block pointer to a HC patch pointer
1015 *
1016 * @returns HC pointer or NULL if it's not a GC patch pointer
1017 * @param pVM The VM to operate on.
1018 * @param pAddrGC GC pointer
1019 */
1020VMMR3DECL(R3PTRTYPE(void *)) PATMR3GCPtrToHCPtr(PVM pVM, RTRCPTR pAddrGC)
1021{
1022 if (pVM->patm.s.pPatchMemGC <= pAddrGC && pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem > pAddrGC)
1023 return pVM->patm.s.pPatchMemHC + (pAddrGC - pVM->patm.s.pPatchMemGC);
1024 else
1025 return NULL;
1026}
1027
1028/**
1029 * Query PATM state (enabled/disabled)
1030 *
1031 * @returns 0 - disabled, 1 - enabled
1032 * @param pVM The VM to operate on.
1033 */
1034VMMR3DECL(int) PATMR3IsEnabled(PVM pVM)
1035{
1036 return pVM->fPATMEnabled;
1037}
1038
1039
1040/**
1041 * Convert guest context address to host context pointer
1042 *
1043 * @returns Host context pointer or NULL in case of an error.
1044 * @param pVM The VM to operate on.
1045 * @param pCacheRec Address conversion cache record
1046 * @param pGCPtr Guest context pointer
1047 *
1048 *
1049 *
1050 */
1051R3PTRTYPE(uint8_t *) PATMGCVirtToHCVirt(PVM pVM, PPATMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr)
1052{
1053 int rc;
1054 R3PTRTYPE(uint8_t *) pHCPtr;
1055 uint32_t offset;
1056
1057 if (PATMIsPatchGCAddr(pVM, pGCPtr))
1058 {
1059 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1060 Assert(pPatch);
1061 return PATCHCODE_PTR_HC(pPatch) + (pGCPtr - PATCHCODE_PTR_GC(pPatch));
1062 }
1063
1064 offset = pGCPtr & PAGE_OFFSET_MASK;
1065 if (pCacheRec->pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
1066 return pCacheRec->pPageLocStartHC + offset;
1067
1068 /* Release previous lock if any. */
1069 if (pCacheRec->Lock.pvMap)
1070 {
1071 PGMPhysReleasePageMappingLock(pVM, &pCacheRec->Lock);
1072 pCacheRec->Lock.pvMap = NULL;
1073 }
1074
1075 rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pGCPtr, (const void **)&pHCPtr, &pCacheRec->Lock);
1076 if (rc != VINF_SUCCESS)
1077 {
1078 AssertMsg(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("PGMPhysGCPtr2CCPtrReadOnly failed for %08X\n", pGCPtr));
1079 return NULL;
1080 }
1081 pCacheRec->pPageLocStartHC = (R3PTRTYPE(uint8_t*))((RTHCUINTPTR)pHCPtr & PAGE_BASE_HC_MASK);
1082 pCacheRec->pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
1083 return pHCPtr;
1084}
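/* Editor's note (not part of the original source): a sketch of the caller pattern assumed by
 * PATMGCVirtToHCVirt above -- zero-initialize the cache record, translate one or more guest
 * addresses, and release the last page mapping lock when done. Variable names are hypothetical. */
#if 0
    PATMP2GLOOKUPREC CacheRec;
    RT_ZERO(CacheRec);
    CacheRec.pPatch = pPatch;
    uint8_t *pbInstrHC = PATMGCVirtToHCVirt(pVM, &CacheRec, pCurInstrGC);
    /* ... use pbInstrHC while the mapping lock in CacheRec.Lock is held ... */
    if (CacheRec.Lock.pvMap)
        PGMPhysReleasePageMappingLock(pVM, &CacheRec.Lock);
#endif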
1085
1086
1087/** Calculates and fills in all branch targets
1088 *
1089 * @returns VBox status code.
1090 * @param pVM The VM to operate on.
1091 * @param pPatch Current patch block pointer
1092 *
1093 */
1094static int patmr3SetBranchTargets(PVM pVM, PPATCHINFO pPatch)
1095{
1096 int32_t displ;
1097
1098 PJUMPREC pRec = 0;
1099 unsigned nrJumpRecs = 0;
1100
1101 /*
1102 * Set all branch targets inside the patch block.
1103 * We remove all jump records as they are no longer needed afterwards.
1104 */
1105 while (true)
1106 {
1107 RCPTRTYPE(uint8_t *) pInstrGC;
1108 RCPTRTYPE(uint8_t *) pBranchTargetGC = 0;
1109
1110 pRec = (PJUMPREC)RTAvlPVRemoveBestFit(&pPatch->JumpTree, 0, true);
1111 if (pRec == 0)
1112 break;
1113
1114 nrJumpRecs++;
1115
1116 /* HC in patch block to GC in patch block. */
1117 pInstrGC = patmPatchHCPtr2PatchGCPtr(pVM, pRec->pJumpHC);
1118
1119 if (pRec->opcode == OP_CALL)
1120 {
1121 /* Special case: call function replacement patch from this patch block.
1122 */
1123 PPATMPATCHREC pFunctionRec = PATMQueryFunctionPatch(pVM, pRec->pTargetGC);
1124 if (!pFunctionRec)
1125 {
1126 int rc;
1127
1128 if (PATMR3HasBeenPatched(pVM, pRec->pTargetGC) == false)
1129 rc = PATMR3InstallPatch(pVM, pRec->pTargetGC, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
1130 else
1131 rc = VERR_PATCHING_REFUSED; /* exists as a normal patch; can't use it */
1132
1133 if (RT_FAILURE(rc))
1134 {
1135 uint8_t *pPatchHC;
1136 RTRCPTR pPatchGC;
1137 RTRCPTR pOrgInstrGC;
1138
1139 pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pInstrGC, 0);
1140 Assert(pOrgInstrGC);
1141
1142 /* Failure for some reason -> mark exit point with int 3. */
1143 Log(("Failed to install function replacement patch (at %x) for reason %Rrc\n", pOrgInstrGC, rc));
1144
1145 pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pOrgInstrGC);
1146 Assert(pPatchGC);
1147
1148 pPatchHC = pVM->patm.s.pPatchMemHC + (pPatchGC - pVM->patm.s.pPatchMemGC);
1149
1150 /* Set a breakpoint at the very beginning of the recompiled instruction */
1151 *pPatchHC = 0xCC;
1152
1153 continue;
1154 }
1155 }
1156 else
1157 {
1158 Log(("Patch block %RRv called as function\n", pFunctionRec->patch.pPrivInstrGC));
1159 pFunctionRec->patch.flags |= PATMFL_CODE_REFERENCED;
1160 }
1161
1162 pBranchTargetGC = PATMR3QueryPatchGCPtr(pVM, pRec->pTargetGC);
1163 }
1164 else
1165 pBranchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pRec->pTargetGC);
1166
1167 if (pBranchTargetGC == 0)
1168 {
1169 AssertMsgFailed(("patmr3SetBranchTargets: patmGuestGCPtrToPatchGCPtr failed for %08X\n", pRec->pTargetGC));
1170 return VERR_PATCHING_REFUSED;
1171 }
1172 /* Our jumps *always* have a dword displacement (to make things easier). */
1173 Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
1174 displ = pBranchTargetGC - (pInstrGC + pRec->offDispl + sizeof(RTRCPTR));
1175 *(RTRCPTR *)(pRec->pJumpHC + pRec->offDispl) = displ;
1176 Log(("Set branch target %d to %08X : %08x - (%08x + %d + %d)\n", nrJumpRecs, displ, pBranchTargetGC, pInstrGC, pRec->offDispl, sizeof(RTRCPTR)));
1177 }
1178 Assert(nrJumpRecs == pPatch->nrJumpRecs);
1179 Assert(pPatch->JumpTree == 0);
1180 return VINF_SUCCESS;
1181}
1182
1183/** Add an illegal instruction record
1184 *
1185 * @param pVM The VM to operate on.
1186 * @param pPatch Patch structure ptr
1187 * @param pInstrGC Guest context pointer to privileged instruction
1188 *
1189 */
1190static void patmAddIllegalInstrRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1191{
1192 PAVLPVNODECORE pRec;
1193
1194 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1195 Assert(pRec);
1196 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
1197
1198 bool ret = RTAvlPVInsert(&pPatch->pTempInfo->IllegalInstrTree, pRec);
1199 Assert(ret); NOREF(ret);
1200 pPatch->pTempInfo->nrIllegalInstr++;
1201}
1202
1203static bool patmIsIllegalInstr(PPATCHINFO pPatch, RTRCPTR pInstrGC)
1204{
1205 PAVLPVNODECORE pRec;
1206
1207 pRec = RTAvlPVGet(&pPatch->pTempInfo->IllegalInstrTree, (AVLPVKEY)(uintptr_t)pInstrGC);
1208 if (pRec)
1209 return true;
1210 else
1211 return false;
1212}
1213
1214/**
1215 * Add a patch to guest lookup record
1216 *
1217 * @param pVM The VM to operate on.
1218 * @param pPatch Patch structure ptr
1219 * @param pPatchInstrHC Guest context pointer to patch block
1220 * @param pInstrGC Guest context pointer to privileged instruction
1221 * @param enmType Lookup type
1222 * @param fDirty Dirty flag
1223 *
1224 */
1225 /** @note Be extremely careful with this function. Make absolutely sure the guest address is correct! (to avoid executing instructions twice!) */
1226void patmr3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC, PATM_LOOKUP_TYPE enmType, bool fDirty)
1227{
1228 bool ret;
1229 PRECPATCHTOGUEST pPatchToGuestRec;
1230 PRECGUESTTOPATCH pGuestToPatchRec;
1231 uint32_t PatchOffset = pPatchInstrHC - pVM->patm.s.pPatchMemHC; /* Offset in memory reserved for PATM. */
1232
1233 LogFlowFunc(("pVM=%#p pPatch=%#p pPatchInstrHC=%#p pInstrGC=%#x enmType=%d fDirty=%RTbool\n",
1234 pVM, pPatch, pPatchInstrHC, pInstrGC, enmType, fDirty));
1235
1236 if (enmType == PATM_LOOKUP_PATCH2GUEST)
1237 {
1238 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1239 if (pPatchToGuestRec && pPatchToGuestRec->Core.Key == PatchOffset)
1240 return; /* already there */
1241
1242 Assert(!pPatchToGuestRec);
1243 }
1244#ifdef VBOX_STRICT
1245 else
1246 {
1247 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1248 Assert(!pPatchToGuestRec);
1249 }
1250#endif
1251
1252 pPatchToGuestRec = (PRECPATCHTOGUEST)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(RECPATCHTOGUEST) + sizeof(RECGUESTTOPATCH));
1253 Assert(pPatchToGuestRec);
1254 pPatchToGuestRec->Core.Key = PatchOffset;
1255 pPatchToGuestRec->pOrgInstrGC = pInstrGC;
1256 pPatchToGuestRec->enmType = enmType;
1257 pPatchToGuestRec->fDirty = fDirty;
1258
1259 ret = RTAvlU32Insert(&pPatch->Patch2GuestAddrTree, &pPatchToGuestRec->Core);
1260 Assert(ret);
1261
1262 /* GC to patch address */
1263 if (enmType == PATM_LOOKUP_BOTHDIR)
1264 {
1265 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
1266 if (!pGuestToPatchRec)
1267 {
1268 pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1269 pGuestToPatchRec->Core.Key = pInstrGC;
1270 pGuestToPatchRec->PatchOffset = PatchOffset;
1271
1272 ret = RTAvlU32Insert(&pPatch->Guest2PatchAddrTree, &pGuestToPatchRec->Core);
1273 Assert(ret);
1274 }
1275 }
1276
1277 pPatch->nrPatch2GuestRecs++;
1278}
1279
1280
1281/**
1282 * Removes a patch-to-guest lookup record
1283 *
1284 * @param pVM The VM to operate on.
1285 * @param pPatch Patch structure ptr
1286 * @param pPatchInstrGC Guest context pointer to patch block
1287 */
1288void patmr3RemoveP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pPatchInstrGC)
1289{
1290 PAVLU32NODECORE pNode;
1291 PAVLU32NODECORE pNode2;
1292 PRECPATCHTOGUEST pPatchToGuestRec;
1293 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
1294
1295 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1296 Assert(pPatchToGuestRec);
1297 if (pPatchToGuestRec)
1298 {
1299 if (pPatchToGuestRec->enmType == PATM_LOOKUP_BOTHDIR)
1300 {
1301 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1302
1303 Assert(pGuestToPatchRec->Core.Key);
1304 pNode2 = RTAvlU32Remove(&pPatch->Guest2PatchAddrTree, pGuestToPatchRec->Core.Key);
1305 Assert(pNode2);
1306 }
1307 pNode = RTAvlU32Remove(&pPatch->Patch2GuestAddrTree, pPatchToGuestRec->Core.Key);
1308 Assert(pNode);
1309
1310 MMR3HeapFree(pPatchToGuestRec);
1311 pPatch->nrPatch2GuestRecs--;
1312 }
1313}
1314
1315
1316/**
1317 * RTAvlPVDestroy callback.
1318 */
1319static DECLCALLBACK(int) patmEmptyTreePVCallback(PAVLPVNODECORE pNode, void *)
1320{
1321 MMR3HeapFree(pNode);
1322 return 0;
1323}
1324
1325/**
1326 * Empty the specified tree (PV tree, MMR3 heap)
1327 *
1328 * @param pVM The VM to operate on.
1329 * @param ppTree Tree to empty
1330 */
1331void patmEmptyTree(PVM pVM, PAVLPVNODECORE *ppTree)
1332{
1333 NOREF(pVM);
1334 RTAvlPVDestroy(ppTree, patmEmptyTreePVCallback, NULL);
1335}
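/* Editor's note (not part of the original source): an illustrative use of the helper above,
 * e.g. dropping the temporary illegal-instruction records collected during analysis; the
 * exact call site shown here is hypothetical. */
#if 0
    patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
    pPatch->pTempInfo->nrIllegalInstr = 0;
#endif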
1336
1337
1338/**
1339 * RTAvlU32Destroy callback.
1340 */
1341static DECLCALLBACK(int) patmEmptyTreeU32Callback(PAVLU32NODECORE pNode, void *)
1342{
1343 MMR3HeapFree(pNode);
1344 return 0;
1345}
1346
1347/**
1348 * Empty the specified tree (U32 tree, MMR3 heap)
1349 *
1350 * @param pVM The VM to operate on.
1351 * @param ppTree Tree to empty
1352 */
1353void patmEmptyTreeU32(PVM pVM, PPAVLU32NODECORE ppTree)
1354{
1355 NOREF(pVM);
1356 RTAvlU32Destroy(ppTree, patmEmptyTreeU32Callback, NULL);
1357}
1358
1359
1360/**
1361 * Analyses the instructions following the cli for compliance with our heuristics for cli & pushf
1362 *
1363 * @returns VBox status code.
1364 * @param pVM The VM to operate on.
1365 * @param pCpu CPU disassembly state
1366 * @param pInstrGC Guest context pointer to privileged instruction
1367 * @param pCurInstrGC Guest context pointer to the current instruction
1368 * @param pCacheRec Cache record ptr
1369 *
1370 */
1371static int patmAnalyseBlockCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1372{
1373 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1374 bool fIllegalInstr = false;
1375
1376 /*
1377 * Preliminary heuristics:
1378 * - no call instructions without a fixed displacement between cli and sti/popf
1379 * - no jumps in the instructions following cli (4+ bytes; enough for the replacement jump (5 bytes))
1380 * - no nested pushf/cli
1381 * - sti/popf should be the (eventual) target of all branches
1382 * - no near or far returns; no int xx, no into
1383 *
1384 * Note: Later on we can impose less strict guidelines if the need arises.
1385 */
1386
1387 /* Bail out if the patch gets too big. */
1388 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1389 {
1390 Log(("Code block too big (%x) for patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1391 fIllegalInstr = true;
1392 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1393 }
1394 else
1395 {
1396 /* No unconditional jumps or calls without fixed displacements. */
1397 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1398 && (pCpu->pCurInstr->uOpcode == OP_JMP || pCpu->pCurInstr->uOpcode == OP_CALL)
1399 )
1400 {
1401 Assert(pCpu->param1.cb <= 4 || pCpu->param1.cb == 6);
1402 if ( pCpu->param1.cb == 6 /* far call/jmp */
1403 || (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1404 || (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1405 )
1406 {
1407 fIllegalInstr = true;
1408 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1409 }
1410 }
1411
1412 /* An unconditional (short) jump right after a cli is a potential problem; we will overwrite whichever function comes afterwards */
1413 if (pPatch->opcode == OP_CLI && pCpu->pCurInstr->uOpcode == OP_JMP)
1414 {
1415 if ( pCurInstrGC > pPatch->pPrivInstrGC
1416 && pCurInstrGC + pCpu->cbInstr < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1417 {
1418 Log(("Dangerous unconditional jump ends in our generated patch jump!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1419 /* We turn this one into an int 3 callable patch. */
1420 pPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
1421 }
1422 }
1423 else
1424 /* no nested pushfs just yet; nested cli is allowed for cli patches though. */
1425 if (pPatch->opcode == OP_PUSHF)
1426 {
1427 if (pCurInstrGC != pInstrGC && pCpu->pCurInstr->uOpcode == OP_PUSHF)
1428 {
1429 fIllegalInstr = true;
1430 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1431 }
1432 }
1433
1434 /* no far returns */
1435 if (pCpu->pCurInstr->uOpcode == OP_RETF)
1436 {
1437 pPatch->pTempInfo->nrRetInstr++;
1438 fIllegalInstr = true;
1439 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1440 }
1441 else if ( pCpu->pCurInstr->uOpcode == OP_INT3
1442 || pCpu->pCurInstr->uOpcode == OP_INT
1443 || pCpu->pCurInstr->uOpcode == OP_INTO)
1444 {
1445 /* No int xx or into either. */
1446 fIllegalInstr = true;
1447 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1448 }
1449 }
1450
1451 pPatch->cbPatchBlockSize += pCpu->cbInstr;
1452
1453 /* Illegal instruction -> end of analysis phase for this code block */
1454 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1455 return VINF_SUCCESS;
1456
1457 /* Check for exit points. */
1458 switch (pCpu->pCurInstr->uOpcode)
1459 {
1460 case OP_SYSEXIT:
 1461 return VINF_SUCCESS; /* duplicate it; will fault or be emulated in GC. */
1462
1463 case OP_SYSENTER:
1464 case OP_ILLUD2:
 1465 /* This appears to be some kind of kernel panic in Linux 2.4; no point in analysing any further. */
1466 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1467 return VINF_SUCCESS;
1468
1469 case OP_STI:
1470 case OP_POPF:
1471 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION)));
 1472 /* If our exit point lies within the generated patch jump, then we have to refuse!! */
1473 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1474 {
1475 Log(("Exit point within patch jump itself!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1476 return VERR_PATCHING_REFUSED;
1477 }
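 /*
  * Illustration with made-up addresses: if the cli sits at 0x1000, the 5-byte patch
  * jump will later overwrite 0x1000-0x1004; an sti/popf starting at e.g. 0x1002 would
  * be clobbered by that jump, hence the refusal above.
  */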
1478 if (pPatch->opcode == OP_PUSHF)
1479 {
1480 if (pCpu->pCurInstr->uOpcode == OP_POPF)
1481 {
1482 if (pPatch->cbPatchBlockSize >= SIZEOF_NEARJUMP32)
1483 return VINF_SUCCESS;
1484
1485 /* Or else we need to duplicate more instructions, because we can't jump back yet! */
1486 Log(("WARNING: End of block reached, but we need to duplicate some extra instruction to avoid a conflict with the patch jump\n"));
1487 pPatch->flags |= PATMFL_CHECK_SIZE;
1488 }
1489 break; /* sti doesn't mark the end of a pushf block; only popf does. */
1490 }
1491 /* else: fall through. */
1492 case OP_RETN: /* exit point for function replacement */
1493 return VINF_SUCCESS;
1494
1495 case OP_IRET:
1496 return VINF_SUCCESS; /* exitpoint */
1497
1498 case OP_CPUID:
1499 case OP_CALL:
1500 case OP_JMP:
1501 break;
1502
1503 default:
1504 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
1505 {
1506 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1507 return VINF_SUCCESS; /* exit point */
1508 }
1509 break;
1510 }
1511
1512 /* If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump. */
1513 if ((pPatch->flags & PATMFL_CHECK_SIZE) && pPatch->cbPatchBlockSize > SIZEOF_NEARJUMP32 && !(pCpu->pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW))
1514 {
1515 /* The end marker for this kind of patch is any instruction at a location outside our patch jump. */
1516 Log(("End of block at %RRv size %d\n", pCurInstrGC, pCpu->cbInstr));
1517 return VINF_SUCCESS;
1518 }
1519
1520 return VWRN_CONTINUE_ANALYSIS;
1521}
1522
1523/**
1524 * Analyses the instructions inside a function for compliance
1525 *
1526 * @returns VBox status code.
1527 * @param pVM The VM to operate on.
1528 * @param pCpu CPU disassembly state
1529 * @param pInstrGC Guest context pointer to privileged instruction
1530 * @param pCurInstrGC Guest context pointer to the current instruction
1531 * @param pCacheRec Cache record ptr
1532 *
1533 */
1534static int patmAnalyseFunctionCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1535{
1536 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1537 bool fIllegalInstr = false;
1538 NOREF(pInstrGC);
1539
 1540 // Preliminary heuristics:
 1541 // - no call instructions
 1542 // - ret ends a block
1543
1544 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
1545
1546 // bail out if the patch gets too big
1547 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1548 {
1549 Log(("Code block too big (%x) for function patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1550 fIllegalInstr = true;
1551 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1552 }
1553 else
1554 {
1555 // no unconditional jumps or calls without fixed displacements
1556 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1557 && (pCpu->pCurInstr->uOpcode == OP_JMP || pCpu->pCurInstr->uOpcode == OP_CALL)
1558 )
1559 {
1560 Assert(pCpu->param1.cb <= 4 || pCpu->param1.cb == 6);
1561 if ( pCpu->param1.cb == 6 /* far call/jmp */
1562 || (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1563 || (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1564 )
1565 {
1566 fIllegalInstr = true;
1567 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1568 }
1569 }
1570 else /* no far returns */
1571 if (pCpu->pCurInstr->uOpcode == OP_RETF)
1572 {
1573 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1574 fIllegalInstr = true;
1575 }
1576 else /* no int xx or into either */
1577 if (pCpu->pCurInstr->uOpcode == OP_INT3 || pCpu->pCurInstr->uOpcode == OP_INT || pCpu->pCurInstr->uOpcode == OP_INTO)
1578 {
1579 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1580 fIllegalInstr = true;
1581 }
1582
1583 #if 0
 1584 /// @todo we can handle certain in/out and privileged instructions in the guest context
1585 if (pCpu->pCurInstr->fOpType & DISOPTYPE_PRIVILEGED && pCpu->pCurInstr->uOpcode != OP_STI)
1586 {
1587 Log(("Illegal instructions for function patch!!\n"));
1588 return VERR_PATCHING_REFUSED;
1589 }
1590 #endif
1591 }
1592
1593 pPatch->cbPatchBlockSize += pCpu->cbInstr;
1594
1595 /* Illegal instruction -> end of analysis phase for this code block */
1596 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1597 {
1598 return VINF_SUCCESS;
1599 }
1600
1601 // Check for exit points
1602 switch (pCpu->pCurInstr->uOpcode)
1603 {
1604 case OP_ILLUD2:
 1605 // This appears to be some kind of kernel panic in Linux 2.4; no point in analysing any further
1606 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1607 return VINF_SUCCESS;
1608
1609 case OP_IRET:
1610 case OP_SYSEXIT: /* will fault or emulated in GC */
1611 case OP_RETN:
1612 return VINF_SUCCESS;
1613
1614 case OP_POPF:
1615 case OP_STI:
1616 return VWRN_CONTINUE_ANALYSIS;
1617 default:
1618 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
1619 {
1620 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1621 return VINF_SUCCESS; /* exit point */
1622 }
1623 return VWRN_CONTINUE_ANALYSIS;
1624 }
1625
1626 return VWRN_CONTINUE_ANALYSIS;
1627}
1628
1629/**
1630 * Recompiles the instructions in a code block
1631 *
1632 * @returns VBox status code.
1633 * @param pVM The VM to operate on.
1634 * @param pCpu CPU disassembly state
1635 * @param pInstrGC Guest context pointer to privileged instruction
1636 * @param pCurInstrGC Guest context pointer to the current instruction
1637 * @param pCacheRec Cache record ptr
1638 *
1639 */
1640static int patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1641{
1642 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1643 int rc = VINF_SUCCESS;
1644 bool fInhibitIRQInstr = false; /* did the instruction cause PATMFL_INHIBITIRQS to be set? */
1645
1646 LogFlow(("patmRecompileCallback %RRv %RRv\n", pInstrGC, pCurInstrGC));
1647
1648 if ( patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pCurInstrGC) != 0
1649 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT)) /* do not do this when the next instruction *must* be executed! */
1650 {
1651 /*
1652 * Been there, done that; so insert a jump (we don't want to duplicate code)
1653 * no need to record this instruction as it's glue code that never crashes (it had better not!)
1654 */
1655 Log(("patmRecompileCallback: jump to code we've recompiled before %RRv!\n", pCurInstrGC));
1656 return patmPatchGenRelJump(pVM, pPatch, pCurInstrGC, OP_JMP, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1657 }
1658
1659 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1660 {
1661 rc = patmAnalyseFunctionCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1662 }
1663 else
1664 rc = patmAnalyseBlockCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1665
1666 if (RT_FAILURE(rc))
1667 return rc;
1668
1669 /* Note: Never do a direct return unless a failure is encountered! */
1670
1671 /* Clear recompilation of next instruction flag; we are doing that right here. */
1672 if (pPatch->flags & PATMFL_RECOMPILE_NEXT)
1673 pPatch->flags &= ~PATMFL_RECOMPILE_NEXT;
1674
1675 /* Add lookup record for patch to guest address translation */
1676 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
1677
1678 /* Update lowest and highest instruction address for this patch */
1679 if (pCurInstrGC < pPatch->pInstrGCLowest)
1680 pPatch->pInstrGCLowest = pCurInstrGC;
1681 else
1682 if (pCurInstrGC > pPatch->pInstrGCHighest)
1683 pPatch->pInstrGCHighest = pCurInstrGC + pCpu->cbInstr;
1684
1685 /* Illegal instruction -> end of recompile phase for this code block. */
1686 if (patmIsIllegalInstr(pPatch, pCurInstrGC))
1687 {
1688 Log(("Illegal instruction at %RRv -> mark with int 3\n", pCurInstrGC));
1689 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1690 goto end;
1691 }
1692
1693 /* For our first attempt, we'll handle only simple relative jumps (immediate offset coded in instruction).
1694 * Indirect calls are handled below.
1695 */
1696 if ( (pCpu->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
1697 && (pCpu->pCurInstr->uOpcode != OP_CALL || (pPatch->flags & PATMFL_SUPPORT_CALLS))
1698 && (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) == OP_PARM_J))
1699 {
1700 RCPTRTYPE(uint8_t *) pTargetGC = PATMResolveBranch(pCpu, pCurInstrGC);
1701 if (pTargetGC == 0)
1702 {
1703 Log(("We don't support far jumps here!! (%08X)\n", pCpu->param1.fUse));
1704 return VERR_PATCHING_REFUSED;
1705 }
1706
1707 if (pCpu->pCurInstr->uOpcode == OP_CALL)
1708 {
1709 Assert(!PATMIsPatchGCAddr(pVM, pTargetGC));
1710 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, pTargetGC, false);
1711 if (RT_FAILURE(rc))
1712 goto end;
1713 }
1714 else
1715 rc = patmPatchGenRelJump(pVM, pPatch, pTargetGC, pCpu->pCurInstr->uOpcode, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1716
1717 if (RT_SUCCESS(rc))
1718 rc = VWRN_CONTINUE_RECOMPILE;
1719
1720 goto end;
1721 }
1722
1723 switch (pCpu->pCurInstr->uOpcode)
1724 {
1725 case OP_CLI:
1726 {
1727 /* If a cli is found while duplicating instructions for another patch, then it's of vital importance to continue
1728 * until we've found the proper exit point(s).
1729 */
1730 if ( pCurInstrGC != pInstrGC
1731 && !(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1732 )
1733 {
1734 Log(("cli instruction found in other instruction patch block; force it to continue & find an exit point\n"));
1735 pPatch->flags &= ~(PATMFL_CHECK_SIZE | PATMFL_SINGLE_INSTRUCTION);
1736 }
1737 /* Set by irq inhibition; no longer valid now. */
1738 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1739
1740 rc = patmPatchGenCli(pVM, pPatch);
1741 if (RT_SUCCESS(rc))
1742 rc = VWRN_CONTINUE_RECOMPILE;
1743 break;
1744 }
1745
1746 case OP_MOV:
1747 if (pCpu->pCurInstr->fOpType & DISOPTYPE_POTENTIALLY_DANGEROUS)
1748 {
1749 /* mov ss, src? */
1750 if ( (pCpu->param1.fUse & DISUSE_REG_SEG)
1751 && (pCpu->param1.base.reg_seg == DISSELREG_SS))
1752 {
1753 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1754 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1755 /** @todo this could cause a fault (ring 0 selector being loaded in ring 1) */
1756 }
1757#if 0 /* necessary for Haiku */
1758 else
1759 if ( (pCpu->param2.fUse & DISUSE_REG_SEG)
 1760 && (pCpu->param2.base.reg_seg == DISSELREG_SS)
1761 && (pCpu->param1.fUse & (DISUSE_REG_GEN32|DISUSE_REG_GEN16))) /** @todo memory operand must in theory be handled too */
1762 {
1763 /* mov GPR, ss */
1764 rc = patmPatchGenMovFromSS(pVM, pPatch, pCpu, pCurInstrGC);
1765 if (RT_SUCCESS(rc))
1766 rc = VWRN_CONTINUE_RECOMPILE;
1767 break;
1768 }
1769#endif
1770 }
1771 goto duplicate_instr;
1772
1773 case OP_POP:
1774 if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_SS)
1775 {
1776 Assert(pCpu->pCurInstr->fOpType & DISOPTYPE_INHIBIT_IRQS);
1777
1778 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1779 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1780 }
1781 goto duplicate_instr;
1782
1783 case OP_STI:
1784 {
1785 RTRCPTR pNextInstrGC = 0; /* by default no inhibit irq */
1786
1787 /* In a sequence of instructions that inhibit irqs, only the first one actually inhibits irqs. */
1788 if (!(pPatch->flags & PATMFL_INHIBIT_IRQS))
1789 {
1790 pPatch->flags |= PATMFL_INHIBIT_IRQS | PATMFL_GENERATE_JUMPTOGUEST;
1791 fInhibitIRQInstr = true;
1792 pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
1793 Log(("Inhibit irqs for instruction OP_STI at %RRv\n", pCurInstrGC));
1794 }
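 /*
  * sti only inhibits interrupts for the single instruction that follows it (e.g. the
  * ret in a "sti; ret" epilogue), so the code below disassembles that next instruction
  * and checks that the resulting exit point lies outside the 5 bytes that the patch
  * jump overwrites in the guest.
  */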
1795 rc = patmPatchGenSti(pVM, pPatch, pCurInstrGC, pNextInstrGC);
1796
1797 if (RT_SUCCESS(rc))
1798 {
1799 DISCPUSTATE cpu = *pCpu;
1800 unsigned cbInstr;
1801 int disret;
1802 RCPTRTYPE(uint8_t *) pReturnInstrGC;
1803
1804 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1805
1806 pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
1807 { /* Force pNextInstrHC out of scope after using it */
1808 uint8_t *pNextInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
1809 if (pNextInstrHC == NULL)
1810 {
1811 AssertFailed();
1812 return VERR_PATCHING_REFUSED;
1813 }
1814
1815 // Disassemble the next instruction
1816 disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
1817 }
1818 if (disret == false)
1819 {
1820 AssertMsgFailed(("STI: Disassembly failed (probably page not present) -> return to caller\n"));
1821 return VERR_PATCHING_REFUSED;
1822 }
1823 pReturnInstrGC = pNextInstrGC + cbInstr;
1824
1825 if ( (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1826 || pReturnInstrGC <= pInstrGC
1827 || pReturnInstrGC - pInstrGC >= SIZEOF_NEARJUMP32
1828 )
1829 {
1830 /* Not an exit point for function duplication patches */
1831 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
1832 && RT_SUCCESS(rc))
1833 {
1834 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST; /* Don't generate a jump back */
1835 rc = VWRN_CONTINUE_RECOMPILE;
1836 }
1837 else
1838 rc = VINF_SUCCESS; //exit point
1839 }
1840 else {
1841 Log(("PATM: sti occurred too soon; refusing patch!\n"));
1842 rc = VERR_PATCHING_REFUSED; //not allowed!!
1843 }
1844 }
1845 break;
1846 }
1847
1848 case OP_POPF:
1849 {
1850 bool fGenerateJmpBack = (pCurInstrGC + pCpu->cbInstr - pInstrGC >= SIZEOF_NEARJUMP32);
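 /* A jump back to the guest is only safe once the recompiled code covers at least the
  * 5 bytes (SIZEOF_NEARJUMP32) that the patch jump overwrites; otherwise the return
  * point would land inside the overwritten area. */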
1851
1852 /* Not an exit point for IDT handler or function replacement patches */
1853 /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
1854 if (pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_DUPLICATE_FUNCTION))
1855 fGenerateJmpBack = false;
1856
1857 rc = patmPatchGenPopf(pVM, pPatch, pCurInstrGC + pCpu->cbInstr, !!(pCpu->fPrefix & DISPREFIX_OPSIZE), fGenerateJmpBack);
1858 if (RT_SUCCESS(rc))
1859 {
1860 if (fGenerateJmpBack == false)
1861 {
1862 /* Not an exit point for IDT handler or function replacement patches */
1863 rc = VWRN_CONTINUE_RECOMPILE;
1864 }
1865 else
1866 {
1867 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1868 rc = VINF_SUCCESS; /* exit point! */
1869 }
1870 }
1871 break;
1872 }
1873
1874 case OP_PUSHF:
1875 rc = patmPatchGenPushf(pVM, pPatch, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1876 if (RT_SUCCESS(rc))
1877 rc = VWRN_CONTINUE_RECOMPILE;
1878 break;
1879
1880 case OP_PUSH:
1881 if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_CS)
1882 {
1883 rc = patmPatchGenPushCS(pVM, pPatch);
1884 if (RT_SUCCESS(rc))
1885 rc = VWRN_CONTINUE_RECOMPILE;
1886 break;
1887 }
1888 goto duplicate_instr;
1889
1890 case OP_IRET:
1891 Log(("IRET at %RRv\n", pCurInstrGC));
1892 rc = patmPatchGenIret(pVM, pPatch, pCurInstrGC, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
1893 if (RT_SUCCESS(rc))
1894 {
1895 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1896 rc = VINF_SUCCESS; /* exit point by definition */
1897 }
1898 break;
1899
1900 case OP_ILLUD2:
 1901 /* This appears to be some kind of kernel panic in Linux 2.4; no point in continuing */
1902 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1903 if (RT_SUCCESS(rc))
1904 rc = VINF_SUCCESS; /* exit point by definition */
1905 Log(("Illegal opcode (0xf 0xb)\n"));
1906 break;
1907
1908 case OP_CPUID:
1909 rc = patmPatchGenCpuid(pVM, pPatch, pCurInstrGC);
1910 if (RT_SUCCESS(rc))
1911 rc = VWRN_CONTINUE_RECOMPILE;
1912 break;
1913
1914 case OP_STR:
1915 case OP_SLDT:
1916 rc = patmPatchGenSldtStr(pVM, pPatch, pCpu, pCurInstrGC);
1917 if (RT_SUCCESS(rc))
1918 rc = VWRN_CONTINUE_RECOMPILE;
1919 break;
1920
1921 case OP_SGDT:
1922 case OP_SIDT:
1923 rc = patmPatchGenSxDT(pVM, pPatch, pCpu, pCurInstrGC);
1924 if (RT_SUCCESS(rc))
1925 rc = VWRN_CONTINUE_RECOMPILE;
1926 break;
1927
1928 case OP_RETN:
1929 /* retn is an exit point for function patches */
1930 rc = patmPatchGenRet(pVM, pPatch, pCpu, pCurInstrGC);
1931 if (RT_SUCCESS(rc))
1932 rc = VINF_SUCCESS; /* exit point by definition */
1933 break;
1934
1935 case OP_SYSEXIT:
1936 /* Duplicate it, so it can be emulated in GC (or fault). */
1937 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
1938 if (RT_SUCCESS(rc))
1939 rc = VINF_SUCCESS; /* exit point by definition */
1940 break;
1941
1942 case OP_CALL:
1943 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
1944 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
1945 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
1946 */
1947 Assert(pCpu->param1.cb == 4 || pCpu->param1.cb == 6);
1948 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->param1.cb == 4 /* no far calls! */)
1949 {
1950 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, (RTRCPTR)0xDEADBEEF, true);
1951 if (RT_SUCCESS(rc))
1952 {
1953 rc = VWRN_CONTINUE_RECOMPILE;
1954 }
1955 break;
1956 }
1957 goto gen_illegal_instr;
1958
1959 case OP_JMP:
1960 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
1961 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
1962 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
1963 */
1964 Assert(pCpu->param1.cb == 4 || pCpu->param1.cb == 6);
1965 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->param1.cb == 4 /* no far jumps! */)
1966 {
1967 rc = patmPatchGenJump(pVM, pPatch, pCpu, pCurInstrGC);
1968 if (RT_SUCCESS(rc))
1969 rc = VINF_SUCCESS; /* end of branch */
1970 break;
1971 }
1972 goto gen_illegal_instr;
1973
1974 case OP_INT3:
1975 case OP_INT:
1976 case OP_INTO:
1977 goto gen_illegal_instr;
1978
1979 case OP_MOV_DR:
 1980 /* Note: currently we let DRx writes cause a trap 0d (#GP); our trap handler will decide whether to interpret it or not. */
1981 if (pCpu->pCurInstr->fParam2 == OP_PARM_Dd)
1982 {
1983 rc = patmPatchGenMovDebug(pVM, pPatch, pCpu);
1984 if (RT_SUCCESS(rc))
1985 rc = VWRN_CONTINUE_RECOMPILE;
1986 break;
1987 }
1988 goto duplicate_instr;
1989
1990 case OP_MOV_CR:
 1991 /* Note: currently we let CRx writes cause a trap 0d (#GP); our trap handler will decide whether to interpret it or not. */
1992 if (pCpu->pCurInstr->fParam2 == OP_PARM_Cd)
1993 {
1994 rc = patmPatchGenMovControl(pVM, pPatch, pCpu);
1995 if (RT_SUCCESS(rc))
1996 rc = VWRN_CONTINUE_RECOMPILE;
1997 break;
1998 }
1999 goto duplicate_instr;
2000
2001 default:
2002 if (pCpu->pCurInstr->fOpType & (DISOPTYPE_CONTROLFLOW | DISOPTYPE_PRIVILEGED_NOTRAP))
2003 {
2004gen_illegal_instr:
2005 rc = patmPatchGenIllegalInstr(pVM, pPatch);
2006 if (RT_SUCCESS(rc))
2007 rc = VINF_SUCCESS; /* exit point by definition */
2008 }
2009 else
2010 {
2011duplicate_instr:
2012 Log(("patmPatchGenDuplicate\n"));
2013 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
2014 if (RT_SUCCESS(rc))
2015 rc = VWRN_CONTINUE_RECOMPILE;
2016 }
2017 break;
2018 }
2019
2020end:
2021
2022 if ( !fInhibitIRQInstr
2023 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2024 {
2025 int rc2;
2026 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
2027
2028 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2029 Log(("Clear inhibit IRQ flag at %RRv\n", pCurInstrGC));
2030 if (pPatch->flags & PATMFL_GENERATE_JUMPTOGUEST)
2031 {
2032 Log(("patmRecompileCallback: generate jump back to guest (%RRv) after fused instruction\n", pNextInstrGC));
2033
2034 rc2 = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2035 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
2036 rc = VINF_SUCCESS; /* end of the line */
2037 }
2038 else
2039 {
2040 rc2 = patmPatchGenClearInhibitIRQ(pVM, pPatch, pNextInstrGC);
2041 }
2042 if (RT_FAILURE(rc2))
2043 rc = rc2;
2044 }
2045
2046 if (RT_SUCCESS(rc))
2047 {
2048 // If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
2049 if ( (pPatch->flags & PATMFL_CHECK_SIZE)
2050 && pCurInstrGC + pCpu->cbInstr - pInstrGC >= SIZEOF_NEARJUMP32
2051 && !(pCpu->pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW)
2052 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT) /* do not do this when the next instruction *must* be executed! */
2053 )
2054 {
2055 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
2056
2057 // The end marker for this kind of patch is any instruction at a location outside our patch jump
2058 Log(("patmRecompileCallback: end found for single instruction patch at %RRv cbInstr %d\n", pNextInstrGC, pCpu->cbInstr));
2059
2060 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC);
2061 AssertRC(rc);
2062 }
2063 }
2064 return rc;
2065}
2066
2067
2068#ifdef LOG_ENABLED
2069
2070/** Add a disasm jump record (temporary, to prevent duplicate analysis of the same jump target).
2071 *
2072 * @param pVM The VM to operate on.
2073 * @param pPatch Patch structure ptr
2074 * @param pInstrGC Guest context pointer to the jump target
2075 *
2076 */
2077static void patmPatchAddDisasmJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
2078{
2079 PAVLPVNODECORE pRec;
2080
2081 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
2082 Assert(pRec);
2083 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
2084
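 /* Keyed on the jump target address; patmIsKnownDisasmJump() consults this tree before
  * patmr3DisasmCode() recurses into a target, so a backward jump (a loop) is analysed
  * only once. */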
2085 int ret = RTAvlPVInsert(&pPatch->pTempInfo->DisasmJumpTree, pRec);
2086 Assert(ret);
2087}
2088
2089/**
2090 * Checks if jump target has been analysed before.
2091 *
 2092 * @returns true if the jump target has been analysed before, false otherwise.
2093 * @param pPatch Patch struct
2094 * @param pInstrGC Jump target
2095 *
2096 */
2097static bool patmIsKnownDisasmJump(PPATCHINFO pPatch, RTRCPTR pInstrGC)
2098{
2099 PAVLPVNODECORE pRec;
2100
2101 pRec = RTAvlPVGet(&pPatch->pTempInfo->DisasmJumpTree, (AVLPVKEY)(uintptr_t)pInstrGC);
2102 if (pRec)
2103 return true;
2104 return false;
2105}
2106
2107/**
2108 * For proper disassembly of the final patch block
2109 *
2110 * @returns VBox status code.
2111 * @param pVM The VM to operate on.
2112 * @param pCpu CPU disassembly state
2113 * @param pInstrGC Guest context pointer to privileged instruction
2114 * @param pCurInstrGC Guest context pointer to the current instruction
2115 * @param pCacheRec Cache record ptr
2116 *
2117 */
2118int patmr3DisasmCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
2119{
2120 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2121 NOREF(pInstrGC);
2122
2123 if (pCpu->pCurInstr->uOpcode == OP_INT3)
2124 {
2125 /* Could be an int3 inserted in a call patch. Check to be sure */
2126 DISCPUSTATE cpu;
2127 RTRCPTR pOrgJumpGC;
2128
2129 pOrgJumpGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2130
2131 { /* Force pOrgJumpHC out of scope after using it */
2132 uint8_t *pOrgJumpHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pOrgJumpGC);
2133
2134 bool disret = patmR3DisInstr(pVM, pPatch, pOrgJumpGC, pOrgJumpHC, PATMREAD_ORGCODE, &cpu, NULL);
2135 if (!disret || cpu.pCurInstr->uOpcode != OP_CALL || cpu.param1.cb != 4 /* only near calls */)
2136 return VINF_SUCCESS;
2137 }
2138 return VWRN_CONTINUE_ANALYSIS;
2139 }
2140
2141 if ( pCpu->pCurInstr->uOpcode == OP_ILLUD2
2142 && PATMIsPatchGCAddr(pVM, pCurInstrGC))
2143 {
2144 /* the indirect call patch contains an 0xF/0xB illegal instr to call for assistance; check for this and continue */
2145 return VWRN_CONTINUE_ANALYSIS;
2146 }
2147
2148 if ( (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
2149 || pCpu->pCurInstr->uOpcode == OP_INT
2150 || pCpu->pCurInstr->uOpcode == OP_IRET
2151 || pCpu->pCurInstr->uOpcode == OP_RETN
2152 || pCpu->pCurInstr->uOpcode == OP_RETF
2153 )
2154 {
2155 return VINF_SUCCESS;
2156 }
2157
2158 if (pCpu->pCurInstr->uOpcode == OP_ILLUD2)
2159 return VINF_SUCCESS;
2160
2161 return VWRN_CONTINUE_ANALYSIS;
2162}
2163
2164
2165/**
2166 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2167 *
2168 * @returns VBox status code.
2169 * @param pVM The VM to operate on.
2170 * @param pInstrGC Guest context pointer to the initial privileged instruction
2171 * @param pCurInstrGC Guest context pointer to the current instruction
2172 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2173 * @param pCacheRec Cache record ptr
2174 *
2175 */
2176int patmr3DisasmCode(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2177{
2178 DISCPUSTATE cpu;
2179 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2180 int rc = VWRN_CONTINUE_ANALYSIS;
2181 uint32_t cbInstr, delta;
2182 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2183 bool disret;
2184 char szOutput[256];
2185
2186 Assert(pCurInstrHC != PATCHCODE_PTR_HC(pPatch) || pPatch->pTempInfo->DisasmJumpTree == 0);
2187
2188 /* We need this to determine branch targets (and for disassembling). */
2189 delta = pVM->patm.s.pPatchMemGC - (uintptr_t)pVM->patm.s.pPatchMemHC;
2190
2191 while (rc == VWRN_CONTINUE_ANALYSIS)
2192 {
2193 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2194 if (pCurInstrHC == NULL)
2195 {
2196 rc = VERR_PATCHING_REFUSED;
2197 goto end;
2198 }
2199
2200 disret = patmR3DisInstrToStr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_RAWCODE,
2201 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2202 if (PATMIsPatchGCAddr(pVM, pCurInstrGC))
2203 {
2204 RTRCPTR pOrgInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2205
2206 if (pOrgInstrGC != pPatch->pTempInfo->pLastDisasmInstrGC)
2207 Log(("DIS %RRv<-%s", pOrgInstrGC, szOutput));
2208 else
2209 Log(("DIS %s", szOutput));
2210
2211 pPatch->pTempInfo->pLastDisasmInstrGC = pOrgInstrGC;
2212 if (patmIsIllegalInstr(pPatch, pOrgInstrGC))
2213 {
2214 rc = VINF_SUCCESS;
2215 goto end;
2216 }
2217 }
2218 else
2219 Log(("DIS: %s", szOutput));
2220
2221 if (disret == false)
2222 {
2223 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2224 rc = VINF_SUCCESS;
2225 goto end;
2226 }
2227
2228 rc = pfnPATMR3Disasm(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2229 if (rc != VWRN_CONTINUE_ANALYSIS) {
2230 break; //done!
2231 }
2232
2233 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction) */
2234 if ( (cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2235 && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J)
2236 && cpu.pCurInstr->uOpcode != OP_CALL /* complete functions are replaced; don't bother here. */
2237 )
2238 {
2239 RTRCPTR pTargetGC = PATMResolveBranch(&cpu, pCurInstrGC);
2240 RTRCPTR pOrgTargetGC;
2241
2242 if (pTargetGC == 0)
2243 {
2244 Log(("We don't support far jumps here!! (%08X)\n", cpu.param1.fUse));
2245 rc = VERR_PATCHING_REFUSED;
2246 break;
2247 }
2248
2249 if (!PATMIsPatchGCAddr(pVM, pTargetGC))
2250 {
2251 //jump back to guest code
2252 rc = VINF_SUCCESS;
2253 goto end;
2254 }
2255 pOrgTargetGC = PATMR3PatchToGCPtr(pVM, pTargetGC, 0);
2256
2257 if (patmIsCommonIDTHandlerPatch(pVM, pOrgTargetGC))
2258 {
2259 rc = VINF_SUCCESS;
2260 goto end;
2261 }
2262
2263 if (patmIsKnownDisasmJump(pPatch, pTargetGC) == false)
2264 {
2265 /* New jump, let's check it. */
2266 patmPatchAddDisasmJump(pVM, pPatch, pTargetGC);
2267
2268 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2269 rc = patmr3DisasmCode(pVM, pInstrGC, pTargetGC, pfnPATMR3Disasm, pCacheRec);
2270 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2271
2272 if (rc != VINF_SUCCESS) {
2273 break; //done!
2274 }
2275 }
2276 if (cpu.pCurInstr->uOpcode == OP_JMP)
2277 {
2278 /* Unconditional jump; return to caller. */
2279 rc = VINF_SUCCESS;
2280 goto end;
2281 }
2282
2283 rc = VWRN_CONTINUE_ANALYSIS;
2284 }
2285 pCurInstrGC += cbInstr;
2286 }
2287end:
2288 return rc;
2289}
2290
2291/**
2292 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2293 *
2294 * @returns VBox status code.
2295 * @param pVM The VM to operate on.
2296 * @param pInstrGC Guest context pointer to the initial privileged instruction
2297 * @param pCurInstrGC Guest context pointer to the current instruction
2298 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2299 * @param pCacheRec Cache record ptr
2300 *
2301 */
2302int patmr3DisasmCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2303{
2304 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2305
2306 int rc = patmr3DisasmCode(pVM, pInstrGC, pCurInstrGC, pfnPATMR3Disasm, pCacheRec);
2307 /* Free all disasm jump records. */
2308 patmEmptyTree(pVM, &pPatch->pTempInfo->DisasmJumpTree);
2309 return rc;
2310}
2311
2312#endif /* LOG_ENABLED */
2313
2314/**
 2315 * Detects whether the specified address falls within a 5 byte jump generated for an active patch.
2316 * If so, this patch is permanently disabled.
2317 *
2318 * @param pVM The VM to operate on.
2319 * @param pInstrGC Guest context pointer to instruction
2320 * @param pConflictGC Guest context pointer to check
2321 *
2322 * @note also checks for patch hints to make sure they can never be enabled if a conflict is present.
2323 *
2324 */
2325VMMR3DECL(int) PATMR3DetectConflict(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictGC)
2326{
2327 PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, pConflictGC, true /* include patch hints */);
2328 if (pTargetPatch)
2329 {
2330 return patmDisableUnusablePatch(pVM, pInstrGC, pConflictGC, pTargetPatch);
2331 }
2332 return VERR_PATCH_NO_CONFLICT;
2333}
2334
2335/**
2336 * Recompile the code stream until the callback function detects a failure or decides everything is acceptable
2337 *
2338 * @returns VBox status code.
2339 * @param pVM The VM to operate on.
2340 * @param pInstrGC Guest context pointer to privileged instruction
2341 * @param pCurInstrGC Guest context pointer to the current instruction
2342 * @param pfnPATMR3Recompile Callback for testing the disassembled instruction
2343 * @param pCacheRec Cache record ptr
2344 *
2345 */
2346static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Recompile, PPATMP2GLOOKUPREC pCacheRec)
2347{
2348 DISCPUSTATE cpu;
2349 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2350 int rc = VWRN_CONTINUE_ANALYSIS;
2351 uint32_t cbInstr;
2352 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2353 bool disret;
2354#ifdef LOG_ENABLED
2355 char szOutput[256];
2356#endif
2357
2358 while (rc == VWRN_CONTINUE_RECOMPILE)
2359 {
2360 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2361 if (pCurInstrHC == NULL)
2362 {
2363 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2364 goto end;
2365 }
2366#ifdef LOG_ENABLED
2367 disret = patmR3DisInstrToStr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE,
2368 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2369 Log(("Recompile: %s", szOutput));
2370#else
2371 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
2372#endif
2373 if (disret == false)
2374 {
2375 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2376
2377 /* Add lookup record for patch to guest address translation */
2378 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
2379 patmPatchGenIllegalInstr(pVM, pPatch);
2380 rc = VINF_SUCCESS; /* Note: don't fail here; we might refuse an important patch!! */
2381 goto end;
2382 }
2383
2384 rc = pfnPATMR3Recompile(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2385 if (rc != VWRN_CONTINUE_RECOMPILE)
2386 {
2387 /* If irqs are inhibited because of the current instruction, then we must make sure the next one is executed! */
2388 if ( rc == VINF_SUCCESS
2389 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2390 {
2391 DISCPUSTATE cpunext;
2392 uint32_t opsizenext;
2393 uint8_t *pNextInstrHC;
2394 RTRCPTR pNextInstrGC = pCurInstrGC + cbInstr;
2395
2396 Log(("patmRecompileCodeStream: irqs inhibited by instruction %RRv\n", pNextInstrGC));
2397
2398 /* Certain instructions (e.g. sti) force the next instruction to be executed before any interrupts can occur.
2399 * Recompile the next instruction as well
2400 */
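 /*
  * Illustration (hypothetical guest code): in
  *     sti
  *     sysexit
  * the sysexit executes in the one-instruction interrupt shadow of the sti, so it is
  * recompiled into the same patch here instead of being left to the guest.
  */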
2401 pNextInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
2402 if (pNextInstrHC == NULL)
2403 {
2404 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2405 goto end;
2406 }
2407 disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpunext, &opsizenext);
2408 if (disret == false)
2409 {
2410 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2411 goto end;
2412 }
2413 switch(cpunext.pCurInstr->uOpcode)
2414 {
2415 case OP_IRET: /* inhibit cleared in generated code */
2416 case OP_SYSEXIT: /* faults; inhibit should be cleared in HC handling */
2417 case OP_HLT:
2418 break; /* recompile these */
2419
2420 default:
2421 if (cpunext.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2422 {
2423 Log(("Unexpected control flow instruction after inhibit irq instruction\n"));
2424
2425 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2426 AssertRC(rc);
2427 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2428 goto end; /** @todo should be ok to ignore instruction fusing in this case */
2429 }
2430 break;
2431 }
2432
2433 /* Note: after a cli we must continue to a proper exit point */
2434 if (cpunext.pCurInstr->uOpcode != OP_CLI)
2435 {
2436 rc = pfnPATMR3Recompile(pVM, &cpunext, pInstrGC, pNextInstrGC, pCacheRec);
2437 if (RT_SUCCESS(rc))
2438 {
2439 rc = VINF_SUCCESS;
2440 goto end;
2441 }
2442 break;
2443 }
2444 else
2445 rc = VWRN_CONTINUE_RECOMPILE;
2446 }
2447 else
2448 break; /* done! */
2449 }
2450
2451 /** @todo continue with the instructions following the jump and then recompile the jump target code */
2452
2453
2454 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction). */
2455 if ( (cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
2456 && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J)
2457 && cpu.pCurInstr->uOpcode != OP_CALL /* complete functions are replaced; don't bother here. */
2458 )
2459 {
2460 RCPTRTYPE(uint8_t *) addr = PATMResolveBranch(&cpu, pCurInstrGC);
2461 if (addr == 0)
2462 {
2463 Log(("We don't support far jumps here!! (%08X)\n", cpu.param1.fUse));
2464 rc = VERR_PATCHING_REFUSED;
2465 break;
2466 }
2467
2468 Log(("Jump encountered target %RRv\n", addr));
2469
2470 /* We don't check if the branch target lies in a valid page as we've already done that in the analysis phase. */
2471 if (!(cpu.pCurInstr->fOpType & DISOPTYPE_UNCOND_CONTROLFLOW))
2472 {
2473 Log(("patmRecompileCodeStream continue passed conditional jump\n"));
2474 /* First we need to finish this linear code stream until the next exit point. */
2475 rc = patmRecompileCodeStream(pVM, pInstrGC, pCurInstrGC+cbInstr, pfnPATMR3Recompile, pCacheRec);
2476 if (RT_FAILURE(rc))
2477 {
2478 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2479 break; //fatal error
2480 }
2481 }
2482
2483 if (patmGuestGCPtrToPatchGCPtr(pVM, pPatch, addr) == 0)
2484 {
2485 /* New code; let's recompile it. */
2486 Log(("patmRecompileCodeStream continue with jump\n"));
2487
2488 /*
2489 * If we are jumping to an existing patch (or within 5 bytes of the entrypoint), then we must temporarily disable
2490 * this patch so we can continue our analysis
2491 *
2492 * We rely on CSAM to detect and resolve conflicts
2493 */
2494 PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, addr);
2495 if(pTargetPatch)
2496 {
2497 Log(("Found active patch at target %RRv (%RRv) -> temporarily disabling it!!\n", addr, pTargetPatch->pPrivInstrGC));
2498 PATMR3DisablePatch(pVM, pTargetPatch->pPrivInstrGC);
2499 }
2500
2501 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2502 rc = patmRecompileCodeStream(pVM, pInstrGC, addr, pfnPATMR3Recompile, pCacheRec);
2503 if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2504
2505 if(pTargetPatch)
2506 PATMR3EnablePatch(pVM, pTargetPatch->pPrivInstrGC);
2507
2508 if (RT_FAILURE(rc))
2509 {
2510 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2511 break; //done!
2512 }
2513 }
2514 /* Always return to caller here; we're done! */
2515 rc = VINF_SUCCESS;
2516 goto end;
2517 }
2518 else
2519 if (cpu.pCurInstr->fOpType & DISOPTYPE_UNCOND_CONTROLFLOW)
2520 {
2521 rc = VINF_SUCCESS;
2522 goto end;
2523 }
2524 pCurInstrGC += cbInstr;
2525 }
2526end:
2527 Assert(!(pPatch->flags & PATMFL_RECOMPILE_NEXT));
2528 return rc;
2529}
2530
2531
2532/**
2533 * Generate the jump from guest to patch code
2534 *
2535 * @returns VBox status code.
2536 * @param pVM The VM to operate on.
2537 * @param pPatch Patch record
2538 * @param pCacheRec Guest translation lookup cache record
2539 */
2540static int patmGenJumpToPatch(PVM pVM, PPATCHINFO pPatch, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2541{
2542 uint8_t temp[8];
2543 uint8_t *pPB;
2544 int rc;
2545
2546 Assert(pPatch->cbPatchJump <= sizeof(temp));
2547 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
2548
2549 pPB = PATMGCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2550 Assert(pPB);
2551
2552#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
2553 if (pPatch->flags & PATMFL_JUMP_CONFLICT)
2554 {
2555 Assert(pPatch->pPatchJumpDestGC);
2556
2557 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
2558 {
2559 // jmp [PatchCode]
2560 if (fAddFixup)
2561 {
2562 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2563 {
2564 Log(("Relocation failed for the jump in the guest code!!\n"));
2565 return VERR_PATCHING_REFUSED;
2566 }
2567 }
2568
2569 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2570 *(uint32_t *)&temp[1] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
2571 }
2572 else
2573 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
2574 {
2575 // jmp [PatchCode]
2576 if (fAddFixup)
2577 {
2578 if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2579 {
2580 Log(("Relocation failed for the jump in the guest code!!\n"));
2581 return VERR_PATCHING_REFUSED;
2582 }
2583 }
2584
2585 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2586 temp[1] = pPatch->aPrivInstr[1]; //jump opcode copied from original instruction
2587 *(uint32_t *)&temp[2] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
2588 }
2589 else
2590 {
2591 Assert(0);
2592 return VERR_PATCHING_REFUSED;
2593 }
2594 }
2595 else
2596#endif
2597 {
2598 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
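 /*
  * Byte layout of the near jump written over the guest instruction (illustrative):
  *     temp[0]    = 0xE9                                   ; jmp rel32
  *     temp[1..4] = PATCHCODE_PTR_GC - (pPrivInstrGC + 5)  ; displacement relative to
  *                                                         ; the byte after the jump
  */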
2599
2600 // jmp [PatchCode]
2601 if (fAddFixup)
2602 {
2603 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
2604 {
2605 Log(("Relocation failed for the jump in the guest code!!\n"));
2606 return VERR_PATCHING_REFUSED;
2607 }
2608 }
2609 temp[0] = 0xE9; //jmp
2610 *(uint32_t *)&temp[1] = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
2611 }
2612 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2613 AssertRC(rc);
2614
2615 if (rc == VINF_SUCCESS)
2616 pPatch->flags |= PATMFL_PATCHED_GUEST_CODE;
2617
2618 return rc;
2619}
2620
2621/**
2622 * Remove the jump from guest to patch code
2623 *
2624 * @returns VBox status code.
2625 * @param pVM The VM to operate on.
2626 * @param pPatch Patch record
2627 */
2628static int patmRemoveJumpToPatch(PVM pVM, PPATCHINFO pPatch)
2629{
2630#ifdef DEBUG
2631 DISCPUSTATE cpu;
2632 char szOutput[256];
2633 uint32_t cbInstr, i = 0;
2634 bool disret;
2635
2636 while (i < pPatch->cbPrivInstr)
2637 {
2638 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
2639 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2640 if (disret == false)
2641 break;
2642
2643 Log(("Org patch jump: %s", szOutput));
2644 Assert(cbInstr);
2645 i += cbInstr;
2646 }
2647#endif
2648
2649 /* Restore original code (privileged instruction + following instructions that were overwritten because of the 5/6 byte jmp). */
2650 int rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, pPatch->cbPatchJump);
2651#ifdef DEBUG
2652 if (rc == VINF_SUCCESS)
2653 {
2654 i = 0;
2655 while (i < pPatch->cbPrivInstr)
2656 {
2657 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
2658 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2659 if (disret == false)
2660 break;
2661
2662 Log(("Org instr: %s", szOutput));
2663 Assert(cbInstr);
2664 i += cbInstr;
2665 }
2666 }
2667#endif
2668 pPatch->flags &= ~PATMFL_PATCHED_GUEST_CODE;
2669 return rc;
2670}
2671
2672/**
2673 * Generate the call from guest to patch code
2674 *
2675 * @returns VBox status code.
2676 * @param pVM The VM to operate on.
2677 * @param pPatch Patch record
 2678 * @param pTargetGC Guest context address of the target (patch code) to call/jump to
2679 * @param pCacheRec Guest translation cache record
2680 */
2681static int patmGenCallToPatch(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTargetGC, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2682{
2683 uint8_t temp[8];
2684 uint8_t *pPB;
2685 int rc;
2686
2687 Assert(pPatch->cbPatchJump <= sizeof(temp));
2688
2689 pPB = PATMGCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2690 Assert(pPB);
2691
2692 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2693
2694 // jmp [PatchCode]
2695 if (fAddFixup)
2696 {
2697 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
2698 {
2699 Log(("Relocation failed for the jump in the guest code!!\n"));
2700 return VERR_PATCHING_REFUSED;
2701 }
2702 }
2703
2704 Assert(pPatch->aPrivInstr[0] == 0xE8 || pPatch->aPrivInstr[0] == 0xE9); /* call or jmp */
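 /* The guest instruction being redirected is already a near call (0xE8) or near jmp
  * (0xE9); only its 32-bit displacement is rewritten below so that it lands in patch
  * code instead of the original target. */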
2705 temp[0] = pPatch->aPrivInstr[0];
2706 *(uint32_t *)&temp[1] = (uint32_t)pTargetGC - ((uint32_t)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
2707
2708 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2709 AssertRC(rc);
2710
2711 return rc;
2712}
2713
2714
2715/**
2716 * Patch cli/sti pushf/popf instruction block at specified location
2717 *
2718 * @returns VBox status code.
2719 * @param pVM The VM to operate on.
 2720 * @param pInstrGC Guest context pointer to privileged instruction
 2721 * @param pInstrHC Host context pointer to privileged instruction
2722 * @param uOpcode Instruction opcode
2723 * @param uOpSize Size of starting instruction
2724 * @param pPatchRec Patch record
2725 *
 2726 * @note returns failure if patching is not allowed or not possible
2727 *
2728 */
2729VMMR3DECL(int) PATMR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2730 uint32_t uOpcode, uint32_t uOpSize, PPATMPATCHREC pPatchRec)
2731{
2732 PPATCHINFO pPatch = &pPatchRec->patch;
2733 int rc = VERR_PATCHING_REFUSED;
2734 uint32_t orgOffsetPatchMem = ~0;
2735 RTRCPTR pInstrStart;
2736 bool fInserted;
2737 NOREF(pInstrHC); NOREF(uOpSize);
2738
2739 /* Save original offset (in case of failures later on) */
2740 /** @todo use the hypervisor heap (that has quite a few consequences for save/restore though) */
2741 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2742
2743 Assert(!(pPatch->flags & (PATMFL_GUEST_SPECIFIC|PATMFL_USER_MODE|PATMFL_TRAPHANDLER)));
2744 switch (uOpcode)
2745 {
2746 case OP_MOV:
2747 break;
2748
2749 case OP_CLI:
2750 case OP_PUSHF:
2751 /* We can 'call' a cli or pushf patch. It will either return to the original guest code when IF is set again, or fault. */
2752 /* Note: special precautions are taken when disabling and enabling such patches. */
2753 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
2754 break;
2755
2756 default:
2757 if (!(pPatch->flags & PATMFL_IDTHANDLER))
2758 {
2759 AssertMsg(0, ("PATMR3PatchBlock: Invalid opcode %x\n", uOpcode));
2760 return VERR_INVALID_PARAMETER;
2761 }
2762 }
2763
2764 if (!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)))
2765 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
2766
2767 /* If we're going to insert a patch jump, then the jump itself is not allowed to cross a page boundary. */
2768 if ( (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2769 && PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + SIZEOF_NEARJUMP32)
2770 )
2771 {
2772 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
2773 Log(("Patch jump would cross page boundary -> refuse!!\n"));
2774 rc = VERR_PATCHING_REFUSED;
2775 goto failure;
2776 }
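 /* Example of the rejected case (made-up address): an instruction at 0xc0103ffd would
  * need its 5-byte patch jump to spill into the next page, presumably because that
  * second page may not be present or may change independently when the jump is written. */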
2777
2778 pPatch->nrPatch2GuestRecs = 0;
2779 pInstrStart = pInstrGC;
2780
2781#ifdef PATM_ENABLE_CALL
2782 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
2783#endif
2784
2785 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2786 pPatch->uCurPatchOffset = 0;
2787
2788 if ((pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER)) == PATMFL_IDTHANDLER)
2789 {
2790 Assert(pPatch->flags & PATMFL_INTHANDLER);
2791
2792 /* Install fake cli patch (to clear the virtual IF and check int xx parameters) */
2793 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2794 if (RT_FAILURE(rc))
2795 goto failure;
2796 }
2797
2798 /***************************************************************************************************************************/
2799 /* Note: We can't insert *any* code before a sysenter handler; some linux guests have an invalid stack at this point!!!!! */
2800 /***************************************************************************************************************************/
2801#ifdef VBOX_WITH_STATISTICS
2802 if (!(pPatch->flags & PATMFL_SYSENTER))
2803 {
2804 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2805 if (RT_FAILURE(rc))
2806 goto failure;
2807 }
2808#endif
2809
2810 PATMP2GLOOKUPREC cacheRec;
2811 RT_ZERO(cacheRec);
2812 cacheRec.pPatch = pPatch;
2813
2814 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, &cacheRec);
2815 /* Free leftover lock if any. */
2816 if (cacheRec.Lock.pvMap)
2817 {
2818 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2819 cacheRec.Lock.pvMap = NULL;
2820 }
2821 if (rc != VINF_SUCCESS)
2822 {
2823 Log(("PATMR3PatchCli: patmRecompileCodeStream failed with %d\n", rc));
2824 goto failure;
2825 }
2826
2827 /* Calculated during analysis. */
2828 if (pPatch->cbPatchBlockSize < SIZEOF_NEARJUMP32)
2829 {
2830 /* Most likely cause: we encountered an illegal instruction very early on. */
2831 /** @todo could turn it into an int3 callable patch. */
2832 Log(("PATMR3PatchBlock: patch block too small -> refuse\n"));
2833 rc = VERR_PATCHING_REFUSED;
2834 goto failure;
2835 }
2836
2837 /* size of patch block */
2838 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2839
2840
2841 /* Update free pointer in patch memory. */
2842 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2843 /* Round to next 8 byte boundary. */
2844 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2845
2846 /*
2847 * Insert into patch to guest lookup tree
2848 */
2849 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2850 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2851 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
 2852 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
2853 if (!fInserted)
2854 {
2855 rc = VERR_PATCHING_REFUSED;
2856 goto failure;
2857 }
2858
2859 /* Note that patmr3SetBranchTargets can install additional patches!! */
2860 rc = patmr3SetBranchTargets(pVM, pPatch);
2861 if (rc != VINF_SUCCESS)
2862 {
2863 Log(("PATMR3PatchCli: patmr3SetBranchTargets failed with %d\n", rc));
2864 goto failure;
2865 }
2866
2867#ifdef LOG_ENABLED
2868 Log(("Patch code ----------------------------------------------------------\n"));
2869 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, &cacheRec);
2870 /* Free leftover lock if any. */
2871 if (cacheRec.Lock.pvMap)
2872 {
2873 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2874 cacheRec.Lock.pvMap = NULL;
2875 }
2876 Log(("Patch code ends -----------------------------------------------------\n"));
2877#endif
2878
2879 /* make a copy of the guest code bytes that will be overwritten */
2880 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
2881
2882 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
2883 AssertRC(rc);
2884
2885 if (pPatch->flags & PATMFL_INT3_REPLACEMENT_BLOCK)
2886 {
2887 /*uint8_t bASMInt3 = 0xCC; - unused */
2888
2889 Log(("PATMR3PatchBlock %RRv -> int 3 callable patch.\n", pPatch->pPrivInstrGC));
2890 /* Replace first opcode byte with 'int 3'. */
2891 rc = patmActivateInt3Patch(pVM, pPatch);
2892 if (RT_FAILURE(rc))
2893 goto failure;
2894
2895 /* normal patch can be turned into an int3 patch -> clear patch jump installation flag. */
2896 pPatch->flags &= ~PATMFL_MUST_INSTALL_PATCHJMP;
2897
2898 pPatch->flags &= ~PATMFL_INSTR_HINT;
2899 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
2900 }
2901 else
2902 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2903 {
2904 Assert(!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)));
2905 /* now insert a jump in the guest code */
2906 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
2907 AssertRC(rc);
2908 if (RT_FAILURE(rc))
2909 goto failure;
2910
2911 }
2912
2913 PATM_LOG_RAW_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
2914
2915 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2916 pPatch->pTempInfo->nrIllegalInstr = 0;
2917
2918 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
2919
2920 pPatch->uState = PATCH_ENABLED;
2921 return VINF_SUCCESS;
2922
2923failure:
2924 if (pPatchRec->CoreOffset.Key)
2925 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
2926
2927 patmEmptyTree(pVM, &pPatch->FixupTree);
2928 pPatch->nrFixups = 0;
2929
2930 patmEmptyTree(pVM, &pPatch->JumpTree);
2931 pPatch->nrJumpRecs = 0;
2932
2933 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2934 pPatch->pTempInfo->nrIllegalInstr = 0;
2935
2936 /* Turn this cli patch into a dummy. */
2937 pPatch->uState = PATCH_REFUSED;
2938 pPatch->pPatchBlockOffset = 0;
2939
2940 // Give back the patch memory we no longer need
2941 Assert(orgOffsetPatchMem != (uint32_t)~0);
2942 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
2943
2944 return rc;
2945}
2946
2947/**
2948 * Patch IDT handler
2949 *
2950 * @returns VBox status code.
2951 * @param pVM The VM to operate on.
 2952 * @param pInstrGC Guest context pointer to privileged instruction
2953 * @param uOpSize Size of starting instruction
2954 * @param pPatchRec Patch record
2955 * @param pCacheRec Cache record ptr
2956 *
 2957 * @note returns failure if patching is not allowed or not possible
2958 *
2959 */
2960static int patmIdtHandler(PVM pVM, RTRCPTR pInstrGC, uint32_t uOpSize, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
2961{
2962 PPATCHINFO pPatch = &pPatchRec->patch;
2963 bool disret;
2964 DISCPUSTATE cpuPush, cpuJmp;
2965 uint32_t cbInstr;
2966 RTRCPTR pCurInstrGC = pInstrGC;
2967 uint8_t *pCurInstrHC, *pInstrHC;
2968 uint32_t orgOffsetPatchMem = ~0;
2969
2970 pInstrHC = pCurInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2971 AssertReturn(pCurInstrHC, VERR_PAGE_NOT_PRESENT);
2972
2973 /*
2974 * In Linux it's often the case that many interrupt handlers push a predefined value onto the stack
2975 * and then jump to a common entrypoint. In order not to waste a lot of memory, we will check for this
 2976 * condition here and only patch the common entrypoint once.
2977 */
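 /*
  * Typical stub shape this code looks for (illustrative, Linux-style):
  *     push 0xNN               ; vector-specific value
  *     jmp  common_interrupt   ; shared handler body
  * Only the shared body receives a full patch; each stub just gets its push duplicated
  * followed by a jump into that common patch.
  */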
2978 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuPush, &cbInstr);
2979 Assert(disret);
2980 if (disret && cpuPush.pCurInstr->uOpcode == OP_PUSH)
2981 {
2982 RTRCPTR pJmpInstrGC;
2983 int rc;
2984 pCurInstrGC += cbInstr;
2985
2986 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuJmp, &cbInstr);
2987 if ( disret
2988 && cpuJmp.pCurInstr->uOpcode == OP_JMP
2989 && (pJmpInstrGC = PATMResolveBranch(&cpuJmp, pCurInstrGC))
2990 )
2991 {
2992 bool fInserted;
2993 PPATMPATCHREC pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
2994 if (pJmpPatch == 0)
2995 {
2996 /* Patch it first! */
2997 rc = PATMR3InstallPatch(pVM, pJmpInstrGC, pPatch->flags | PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT);
2998 if (rc != VINF_SUCCESS)
2999 goto failure;
3000 pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
3001 Assert(pJmpPatch);
3002 }
3003 if (pJmpPatch->patch.uState != PATCH_ENABLED)
3004 goto failure;
3005
3006 /* save original offset (in case of failures later on) */
3007 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3008
3009 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3010 pPatch->uCurPatchOffset = 0;
3011 pPatch->nrPatch2GuestRecs = 0;
3012
3013#ifdef VBOX_WITH_STATISTICS
3014 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3015 if (RT_FAILURE(rc))
3016 goto failure;
3017#endif
3018
3019 /* Install fake cli patch (to clear the virtual IF) */
3020 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
3021 if (RT_FAILURE(rc))
3022 goto failure;
3023
3024 /* Add lookup record for patch to guest address translation (for the push) */
3025 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pInstrGC, PATM_LOOKUP_BOTHDIR);
3026
3027 /* Duplicate push. */
3028 rc = patmPatchGenDuplicate(pVM, pPatch, &cpuPush, pInstrGC);
3029 if (RT_FAILURE(rc))
3030 goto failure;
3031
3032 /* Generate jump to common entrypoint. */
3033 rc = patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, PATCHCODE_PTR_GC(&pJmpPatch->patch));
3034 if (RT_FAILURE(rc))
3035 goto failure;
3036
3037 /* size of patch block */
3038 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3039
3040 /* Update free pointer in patch memory. */
3041 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3042 /* Round to next 8 byte boundary */
3043 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3044
3045 /* There's no jump from guest to patch code. */
3046 pPatch->cbPatchJump = 0;
3047
3048
3049#ifdef LOG_ENABLED
3050 Log(("Patch code ----------------------------------------------------------\n"));
3051 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3052 Log(("Patch code ends -----------------------------------------------------\n"));
3053#endif
3054 Log(("Successfully installed IDT handler patch at %RRv\n", pInstrGC));
3055
3056 /*
3057 * Insert into patch to guest lookup tree
3058 */
3059 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3060 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3061 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
 3062 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3063
3064 pPatch->uState = PATCH_ENABLED;
3065
3066 return VINF_SUCCESS;
3067 }
3068 }
3069failure:
3070 /* Give back the patch memory we no longer need */
3071 if (orgOffsetPatchMem != (uint32_t)~0)
3072 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3073
3074 return PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, OP_CLI, uOpSize, pPatchRec);
3075}
3076
3077/**
3078 * Install a trampoline to call a guest trap handler directly
3079 *
3080 * @returns VBox status code.
3081 * @param pVM The VM to operate on.
3082 * @param pInstrGC Guest context pointer to privileged instruction
3083 * @param pPatchRec Patch record
3084 * @param pCacheRec Cache record ptr
3085 *
3086 */
3087static int patmInstallTrapTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3088{
3089 PPATCHINFO pPatch = &pPatchRec->patch;
3090 int rc = VERR_PATCHING_REFUSED;
3091 uint32_t orgOffsetPatchMem = ~0;
3092 bool fInserted;
3093
3094 // save original offset (in case of failures later on)
3095 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3096
3097 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3098 pPatch->uCurPatchOffset = 0;
3099 pPatch->nrPatch2GuestRecs = 0;
3100
3101#ifdef VBOX_WITH_STATISTICS
3102 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3103 if (RT_FAILURE(rc))
3104 goto failure;
3105#endif
3106
3107 rc = patmPatchGenTrapEntry(pVM, pPatch, pInstrGC);
3108 if (RT_FAILURE(rc))
3109 goto failure;
3110
3111 /* size of patch block */
3112 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3113
3114 /* Update free pointer in patch memory. */
3115 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3116 /* Round to next 8 byte boundary */
3117 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3118
3119 /* There's no jump from guest to patch code. */
3120 pPatch->cbPatchJump = 0;
3121
3122#ifdef LOG_ENABLED
3123 Log(("Patch code ----------------------------------------------------------\n"));
3124 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3125 Log(("Patch code ends -----------------------------------------------------\n"));
3126#endif
3127 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "TRAP handler");
3128 Log(("Successfully installed Trap Trampoline patch at %RRv\n", pInstrGC));
3129
3130 /*
3131 * Insert into patch to guest lookup tree
3132 */
3133 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3134 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3135 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3136 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3137
3138 pPatch->uState = PATCH_ENABLED;
3139 return VINF_SUCCESS;
3140
3141failure:
3142 AssertMsgFailed(("Failed to install trap handler trampoline!!\n"));
3143
3144 /* Turn this cli patch into a dummy. */
3145 pPatch->uState = PATCH_REFUSED;
3146 pPatch->pPatchBlockOffset = 0;
3147
3148 /* Give back the patch memory we no longer need */
3149 Assert(orgOffsetPatchMem != (uint32_t)~0);
3150 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3151
3152 return rc;
3153}
3154
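/*
 * Minimal caller sketch: a trap handler trampoline is requested through
 * PATMR3InstallPatch() with PATMFL_TRAPHANDLER set (see the dispatch code in
 * PATMR3InstallPatch() further down); patmInstallTrapTrampoline() itself is
 * static and never called directly. The guest address below is hypothetical.
 *
 *     RTRCPTR pTrapHandlerGC = 0x80012345;   // hypothetical guest trap handler
 *     int rc = PATMR3InstallPatch(pVM, pTrapHandlerGC,
 *                                 PATMFL_CODE32 | PATMFL_TRAPHANDLER);
 *     if (RT_FAILURE(rc))
 *         Log(("Trap trampoline installation refused: %Rrc\n", rc));
 */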
3155
3156#ifdef LOG_ENABLED
3157/**
3158 * Check if the instruction is patched as a common idt handler
3159 *
3160 * @returns true or false
3161 * @param pVM The VM to operate on.
3162 * @param pInstrGC Guest context pointer to the instruction
3163 *
3164 */
3165static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC)
3166{
3167 PPATMPATCHREC pRec;
3168
3169 pRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
3170 if (pRec && pRec->patch.flags & PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT)
3171 return true;
3172 return false;
3173}
3174 #endif //LOG_ENABLED
3175
3176
3177/**
3178 * Duplicates a complete function
3179 *
3180 * @returns VBox status code.
3181 * @param pVM The VM to operate on.
3182 * @param pInstrGC Guest context pointer to privileged instruction
3183 * @param pPatchRec Patch record
3184 * @param pCacheRec Cache record ptr
3185 *
3186 */
3187static int patmDuplicateFunction(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3188{
3189 PPATCHINFO pPatch = &pPatchRec->patch;
3190 int rc = VERR_PATCHING_REFUSED;
3191 uint32_t orgOffsetPatchMem = ~0;
3192 bool fInserted;
3193
3194 Log(("patmDuplicateFunction %RRv\n", pInstrGC));
3195 /* Save original offset (in case of failures later on). */
3196 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3197
3198 /* We will not go on indefinitely with call instruction handling. */
3199 if (pVM->patm.s.ulCallDepth > PATM_MAX_CALL_DEPTH)
3200 {
3201 Log(("patmDuplicateFunction: maximum callback depth reached!!\n"));
3202 return VERR_PATCHING_REFUSED;
3203 }
3204
3205 pVM->patm.s.ulCallDepth++;
3206
3207#ifdef PATM_ENABLE_CALL
3208 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
3209#endif
3210
3211 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
3212
3213 pPatch->nrPatch2GuestRecs = 0;
3214 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3215 pPatch->uCurPatchOffset = 0;
3216
3217 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3218 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3219 if (RT_FAILURE(rc))
3220 goto failure;
3221
3222#ifdef VBOX_WITH_STATISTICS
3223 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3224 if (RT_FAILURE(rc))
3225 goto failure;
3226#endif
3227
3228 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pCacheRec);
3229 if (rc != VINF_SUCCESS)
3230 {
3231 Log(("PATMR3PatchCli: patmRecompileCodeStream failed with %d\n", rc));
3232 goto failure;
3233 }
3234
3235 //size of patch block
3236 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3237
3238 //update free pointer in patch memory
3239 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3240 /* Round to next 8 byte boundary. */
3241 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3242
3243 pPatch->uState = PATCH_ENABLED;
3244
3245 /*
3246 * Insert into patch to guest lookup tree
3247 */
3248 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3249 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3250 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3251 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3252 if (!fInserted)
3253 {
3254 rc = VERR_PATCHING_REFUSED;
3255 goto failure;
3256 }
3257
3258 /* Note that patmr3SetBranchTargets can install additional patches!! */
3259 rc = patmr3SetBranchTargets(pVM, pPatch);
3260 if (rc != VINF_SUCCESS)
3261 {
3262 Log(("PATMR3PatchCli: patmr3SetBranchTargets failed with %d\n", rc));
3263 goto failure;
3264 }
3265
3266#ifdef LOG_ENABLED
3267 Log(("Patch code ----------------------------------------------------------\n"));
3268 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3269 Log(("Patch code ends -----------------------------------------------------\n"));
3270#endif
3271
3272 Log(("Successfully installed function duplication patch at %RRv\n", pInstrGC));
3273
3274 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3275 pPatch->pTempInfo->nrIllegalInstr = 0;
3276
3277 pVM->patm.s.ulCallDepth--;
3278 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledFunctionPatches);
3279 return VINF_SUCCESS;
3280
3281failure:
3282 if (pPatchRec->CoreOffset.Key)
3283 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3284
3285 patmEmptyTree(pVM, &pPatch->FixupTree);
3286 pPatch->nrFixups = 0;
3287
3288 patmEmptyTree(pVM, &pPatch->JumpTree);
3289 pPatch->nrJumpRecs = 0;
3290
3291 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3292 pPatch->pTempInfo->nrIllegalInstr = 0;
3293
3294 /* Turn this cli patch into a dummy. */
3295 pPatch->uState = PATCH_REFUSED;
3296 pPatch->pPatchBlockOffset = 0;
3297
3298 // Give back the patch memory we no longer need
3299 Assert(orgOffsetPatchMem != (uint32_t)~0);
3300 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3301
3302 pVM->patm.s.ulCallDepth--;
3303 Log(("patmDupicateFunction %RRv failed!!\n", pInstrGC));
3304 return rc;
3305}
3306
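/*
 * The patch memory bookkeeping above follows the same transactional pattern
 * as every other patch installer in this file; a condensed sketch (offSaved
 * mirrors the orgOffsetPatchMem variable used above, the amount of emitted
 * code is whatever the generators advance uCurPatchOffset by):
 *
 *     uint32_t offSaved = pVM->patm.s.offPatchMem;          // snapshot the free pointer
 *     pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;  // the new block starts here
 *     pPatch->uCurPatchOffset   = 0;
 *     // ... emit code; each generator advances pPatch->uCurPatchOffset ...
 *     pPatch->cbPatchBlockSize  = pPatch->uCurPatchOffset;
 *     pVM->patm.s.offPatchMem  += pPatch->cbPatchBlockSize; // commit
 *     pVM->patm.s.offPatchMem   = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
 *     // on failure: pVM->patm.s.offPatchMem = offSaved;    // roll the allocation back
 */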
3307/**
3308 * Creates trampoline code to jump inside an existing patch
3309 *
3310 * @returns VBox status code.
3311 * @param pVM The VM to operate on.
3312 * @param pInstrGC Guest context pointer to privileged instruction
3313 * @param pPatchRec Patch record
3314 *
3315 */
3316static int patmCreateTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3317{
3318 PPATCHINFO pPatch = &pPatchRec->patch;
3319 RTRCPTR pPage, pPatchTargetGC = 0;
3320 uint32_t orgOffsetPatchMem = ~0;
3321 int rc = VERR_PATCHING_REFUSED;
3322 PPATCHINFO pPatchToJmp = NULL; /**< Patch the trampoline jumps to. */
3323 PTRAMPREC pTrampRec = NULL; /**< Trampoline record used to find the patch. */
3324 bool fInserted = false;
3325
3326 Log(("patmCreateTrampoline %RRv\n", pInstrGC));
3327 /* Save original offset (in case of failures later on). */
3328 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3329
3330 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3331 /** @todo we already checked this before */
3332 pPage = pInstrGC & PAGE_BASE_GC_MASK;
3333
3334 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3335 if (pPatchPage)
3336 {
3337 uint32_t i;
3338
3339 for (i=0;i<pPatchPage->cCount;i++)
3340 {
3341 if (pPatchPage->aPatch[i])
3342 {
3343 pPatchToJmp = pPatchPage->aPatch[i];
3344
3345 if ( (pPatchToJmp->flags & PATMFL_DUPLICATE_FUNCTION)
3346 && pPatchToJmp->uState == PATCH_ENABLED)
3347 {
3348 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatchToJmp, pInstrGC);
3349 if (pPatchTargetGC)
3350 {
3351 uint32_t offsetPatch = pPatchTargetGC - pVM->patm.s.pPatchMemGC;
3352 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatchToJmp->Patch2GuestAddrTree, offsetPatch, false);
3353 Assert(pPatchToGuestRec);
3354
3355 pPatchToGuestRec->fJumpTarget = true;
3356 Assert(pPatchTargetGC != pPatchToJmp->pPrivInstrGC);
3357 Log(("patmCreateTrampoline: generating jump to code inside patch at %RRv (patch target %RRv)\n", pPatchToJmp->pPrivInstrGC, pPatchTargetGC));
3358 break;
3359 }
3360 }
3361 }
3362 }
3363 }
3364 AssertReturn(pPatchPage && pPatchTargetGC && pPatchToJmp, VERR_PATCHING_REFUSED);
3365
3366 /*
3367 * Only record the trampoline patch if this is the first patch to the target
3368 * or we recorded other patches already.
3369 * The goal is to refuse refreshing function duplicates if the guest
3370 * modifies code after a saved state was loaded because it is not possible
3371 * to save the relation between trampoline and target without changing the
3372 * saved state version.
3373 */
3374 if ( !(pPatchToJmp->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
3375 || pPatchToJmp->pTrampolinePatchesHead)
3376 {
3377 pPatchToJmp->flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
3378 pTrampRec = (PTRAMPREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pTrampRec));
3379 if (!pTrampRec)
3380 return VERR_NO_MEMORY; /* or better return VERR_PATCHING_REFUSED to let the VM continue? */
3381
3382 pTrampRec->pPatchTrampoline = pPatchRec;
3383 }
3384
3385 pPatch->nrPatch2GuestRecs = 0;
3386 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3387 pPatch->uCurPatchOffset = 0;
3388
3389 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3390 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3391 if (RT_FAILURE(rc))
3392 goto failure;
3393
3394#ifdef VBOX_WITH_STATISTICS
3395 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3396 if (RT_FAILURE(rc))
3397 goto failure;
3398#endif
3399
3400 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC);
3401 if (RT_FAILURE(rc))
3402 goto failure;
3403
3404 /*
3405 * Insert into patch to guest lookup tree
3406 */
3407 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3408 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3409 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3410 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3411 if (!fInserted)
3412 {
3413 rc = VERR_PATCHING_REFUSED;
3414 goto failure;
3415 }
3416
3417 /* size of patch block */
3418 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3419
3420 /* Update free pointer in patch memory. */
3421 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3422 /* Round to next 8 byte boundary */
3423 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3424
3425 /* There's no jump from guest to patch code. */
3426 pPatch->cbPatchJump = 0;
3427
3428 /* Enable the patch. */
3429 pPatch->uState = PATCH_ENABLED;
3430 /* We allow this patch to be called as a function. */
3431 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
3432
3433 if (pTrampRec)
3434 {
3435 pTrampRec->pNext = pPatchToJmp->pTrampolinePatchesHead;
3436 pPatchToJmp->pTrampolinePatchesHead = pTrampRec;
3437 }
3438 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledTrampoline);
3439 return VINF_SUCCESS;
3440
3441failure:
3442 if (pPatchRec->CoreOffset.Key)
3443 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3444
3445 patmEmptyTree(pVM, &pPatch->FixupTree);
3446 pPatch->nrFixups = 0;
3447
3448 patmEmptyTree(pVM, &pPatch->JumpTree);
3449 pPatch->nrJumpRecs = 0;
3450
3451 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3452 pPatch->pTempInfo->nrIllegalInstr = 0;
3453
3454 /* Turn this cli patch into a dummy. */
3455 pPatch->uState = PATCH_REFUSED;
3456 pPatch->pPatchBlockOffset = 0;
3457
3458 // Give back the patch memory we no longer need
3459 Assert(orgOffsetPatchMem != (uint32_t)~0);
3460 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3461
3462 if (pTrampRec)
3463 MMR3HeapFree(pTrampRec);
3464
3465 return rc;
3466}
3467
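/*
 * The trampoline records hang off the target patch as a singly linked list;
 * the insertion above is a push onto the list head. A sketch of how such a
 * list could be walked (e.g. when the target patch is refreshed):
 *
 *     for (PTRAMPREC pRec = pPatchToJmp->pTrampolinePatchesHead; pRec; pRec = pRec->pNext)
 *         Log(("trampoline patch record %RHv\n", pRec->pPatchTrampoline));
 */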
3468
3469/**
3470 * Patch branch target function for call/jump at specified location.
3471 * (in response to a VINF_PATM_DUPLICATE_FUNCTION GC exit reason)
3472 *
3473 * @returns VBox status code.
3474 * @param pVM The VM to operate on.
3475 * @param pCtx Guest context
3476 *
3477 */
3478VMMR3DECL(int) PATMR3DuplicateFunctionRequest(PVM pVM, PCPUMCTX pCtx)
3479{
3480 RTRCPTR pBranchTarget, pPage;
3481 int rc;
3482 RTRCPTR pPatchTargetGC = 0;
3483
3484 pBranchTarget = pCtx->edx;
3485 pBranchTarget = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pBranchTarget);
3486
3487 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3488 pPage = pBranchTarget & PAGE_BASE_GC_MASK;
3489
3490 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3491 if (pPatchPage)
3492 {
3493 uint32_t i;
3494
3495 for (i=0;i<pPatchPage->cCount;i++)
3496 {
3497 if (pPatchPage->aPatch[i])
3498 {
3499 PPATCHINFO pPatch = pPatchPage->aPatch[i];
3500
3501 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
3502 && pPatch->uState == PATCH_ENABLED)
3503 {
3504 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pBranchTarget);
3505 if (pPatchTargetGC)
3506 {
3507 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateUseExisting);
3508 break;
3509 }
3510 }
3511 }
3512 }
3513 }
3514
3515 if (pPatchTargetGC)
3516 {
3517 /* Create a trampoline that also sets PATM_INTERRUPTFLAG. */
3518 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_TRAMPOLINE);
3519 }
3520 else
3521 {
3522 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
3523 }
3524
3525 if (rc == VINF_SUCCESS)
3526 {
3527 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pBranchTarget);
3528 Assert(pPatchTargetGC);
3529 }
3530
3531 if (pPatchTargetGC)
3532 {
3533 pCtx->eax = pPatchTargetGC;
3534 pCtx->eax = pCtx->eax - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC; /* make it relative */
3535 }
3536 else
3537 {
3538 /* We add a dummy entry into the lookup cache so we won't get bombarded with the same requests over and over again. */
3539 pCtx->eax = 0;
3540 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQFailed);
3541 }
3542 Assert(PATMIsPatchGCAddr(pVM, pCtx->edi));
3543 rc = PATMAddBranchToLookupCache(pVM, pCtx->edi, pBranchTarget, pCtx->eax);
3544 AssertRC(rc);
3545
3546 pCtx->eip += PATM_ILLEGAL_INSTR_SIZE;
3547 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQSuccess);
3548 return VINF_SUCCESS;
3549}
3550
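/*
 * The value handed back in eax above is an offset relative to the start of
 * patch memory rather than an absolute address; the two directions, using the
 * fields referenced in this function:
 *
 *     uint32_t offRel = pPatchTargetGC - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC; // what ends up in eax
 *     RTRCPTR  pAbs   = pVM->patm.s.pPatchMemGC + offRel;                      // back to an absolute patch address
 */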
3551/**
3552 * Replaces a function call by a call to an existing function duplicate (or jmp -> jmp)
3553 *
3554 * @returns VBox status code.
3555 * @param pVM The VM to operate on.
3556 * @param pCpu Disassembly CPU structure ptr
3557 * @param pInstrGC Guest context pointer to privileged instruction
3558 * @param pCacheRec Cache record ptr
3559 *
3560 */
3561static int patmReplaceFunctionCall(PVM pVM, DISCPUSTATE *pCpu, RTRCPTR pInstrGC, PPATMP2GLOOKUPREC pCacheRec)
3562{
3563 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3564 int rc = VERR_PATCHING_REFUSED;
3565 DISCPUSTATE cpu;
3566 RTRCPTR pTargetGC;
3567 PPATMPATCHREC pPatchFunction;
3568 uint32_t cbInstr;
3569 bool disret;
3570
3571 Assert(pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL);
3572 Assert((pCpu->pCurInstr->uOpcode == OP_CALL || pCpu->pCurInstr->uOpcode == OP_JMP) && pCpu->cbInstr == SIZEOF_NEARJUMP32);
3573
3574 if ((pCpu->pCurInstr->uOpcode != OP_CALL && pCpu->pCurInstr->uOpcode != OP_JMP) || pCpu->cbInstr != SIZEOF_NEARJUMP32)
3575 {
3576 rc = VERR_PATCHING_REFUSED;
3577 goto failure;
3578 }
3579
3580 pTargetGC = PATMResolveBranch(pCpu, pInstrGC);
3581 if (pTargetGC == 0)
3582 {
3583 Log(("We don't support far jumps here!! (%08X)\n", pCpu->param1.fUse));
3584 rc = VERR_PATCHING_REFUSED;
3585 goto failure;
3586 }
3587
3588 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3589 if (pPatchFunction == NULL)
3590 {
3591 for(;;)
3592 {
3593 /* It could be an indirect call (call -> jmp dest).
3594 * Note that it's dangerous to assume the jump will never change...
3595 */
3596 uint8_t *pTmpInstrHC;
3597
3598 pTmpInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pTargetGC);
3599 Assert(pTmpInstrHC);
3600 if (pTmpInstrHC == 0)
3601 break;
3602
3603 disret = patmR3DisInstr(pVM, pPatch, pTargetGC, pTmpInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
3604 if (disret == false || cpu.pCurInstr->uOpcode != OP_JMP)
3605 break;
3606
3607 pTargetGC = PATMResolveBranch(&cpu, pTargetGC);
3608 if (pTargetGC == 0)
3609 {
3610 break;
3611 }
3612
3613 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3614 break;
3615 }
3616 if (pPatchFunction == 0)
3617 {
3618 AssertMsgFailed(("Unable to find duplicate function %RRv\n", pTargetGC));
3619 rc = VERR_PATCHING_REFUSED;
3620 goto failure;
3621 }
3622 }
3623
3624 // make a copy of the guest code bytes that will be overwritten
3625 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
3626
3627 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3628 AssertRC(rc);
3629
3630 /* Now replace the original call in the guest code */
3631 rc = patmGenCallToPatch(pVM, pPatch, PATCHCODE_PTR_GC(&pPatchFunction->patch), pCacheRec, true);
3632 AssertRC(rc);
3633 if (RT_FAILURE(rc))
3634 goto failure;
3635
3636 /* Lowest and highest address for write monitoring. */
3637 pPatch->pInstrGCLowest = pInstrGC;
3638 pPatch->pInstrGCHighest = pInstrGC + pCpu->cbInstr;
3639 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "Call");
3640
3641 Log(("Successfully installed function replacement patch at %RRv\n", pInstrGC));
3642
3643 pPatch->uState = PATCH_ENABLED;
3644 return VINF_SUCCESS;
3645
3646failure:
3647 /* Turn this patch into a dummy. */
3648 pPatch->uState = PATCH_REFUSED;
3649
3650 return rc;
3651}
3652
3653/**
3654 * Replace the address in an MMIO instruction with the cached version.
3655 *
3656 * @returns VBox status code.
3657 * @param pVM The VM to operate on.
3658 * @param pInstrGC Guest context pointer to privileged instruction
3659 * @param pCpu Disassembly CPU structure ptr
3660 * @param pCacheRec Cache record ptr
3661 *
3662 * @note returns failure if patching is not allowed or possible
3663 *
3664 */
3665static int patmPatchMMIOInstr(PVM pVM, RTRCPTR pInstrGC, DISCPUSTATE *pCpu, PPATMP2GLOOKUPREC pCacheRec)
3666{
3667 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3668 uint8_t *pPB;
3669 int rc = VERR_PATCHING_REFUSED;
3670
3671 Assert(pVM->patm.s.mmio.pCachedData);
3672 if (!pVM->patm.s.mmio.pCachedData)
3673 goto failure;
3674
3675 if (pCpu->param2.fUse != DISUSE_DISPLACEMENT32)
3676 goto failure;
3677
3678 pPB = PATMGCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
3679 if (pPB == 0)
3680 goto failure;
3681
3682 /* Add relocation record for cached data access. */
3683 if (patmPatchAddReloc32(pVM, pPatch, &pPB[pCpu->cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE, pPatch->pPrivInstrGC, pVM->patm.s.mmio.pCachedData) != VINF_SUCCESS)
3684 {
3685 Log(("Relocation failed for cached mmio address!!\n"));
3686 return VERR_PATCHING_REFUSED;
3687 }
3688 PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "MMIO patch old instruction:", "");
3689
3690 /* Save original instruction. */
3691 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3692 AssertRC(rc);
3693
3694 pPatch->cbPatchJump = pPatch->cbPrivInstr; /* bit of a misnomer in this case; size of replacement instruction. */
3695
3696 /* Replace address with that of the cached item. */
3697 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pInstrGC + pCpu->cbInstr - sizeof(RTRCPTR), &pVM->patm.s.mmio.pCachedData, sizeof(RTRCPTR));
3698 AssertRC(rc);
3699 if (RT_FAILURE(rc))
3700 {
3701 goto failure;
3702 }
3703
3704 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "MMIO");
3705 pVM->patm.s.mmio.pCachedData = 0;
3706 pVM->patm.s.mmio.GCPhys = 0;
3707 pPatch->uState = PATCH_ENABLED;
3708 return VINF_SUCCESS;
3709
3710failure:
3711 /* Turn this patch into a dummy. */
3712 pPatch->uState = PATCH_REFUSED;
3713
3714 return rc;
3715}
3716
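/*
 * The MMIO patching above assumes the 32-bit displacement is always the last
 * four bytes of the instruction, so the cached address can simply be written
 * over it in place; condensed, with pPB being the host mapping of the guest
 * instruction obtained above:
 *
 *     *(RTRCPTR *)&pPB[pCpu->cbInstr - sizeof(RTRCPTR)] = pVM->patm.s.mmio.pCachedData;
 *
 * (patmPatchMMIOInstr() performs the actual write through
 * PGMPhysSimpleDirtyWriteGCPtr() because it targets guest memory, whereas
 * patmPatchPATMMMIOInstr() below writes directly since the instruction lives
 * in patch memory.)
 */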
3717
3718/**
3719 * Replace the address in an MMIO instruction with the cached version. (instruction is part of an existing patch)
3720 *
3721 * @returns VBox status code.
3722 * @param pVM The VM to operate on.
3723 * @param pInstrGC Guest context pointer to privileged instruction
3724 * @param pPatch Patch record
3725 *
3726 * @note returns failure if patching is not allowed or possible
3727 *
3728 */
3729static int patmPatchPATMMMIOInstr(PVM pVM, RTRCPTR pInstrGC, PPATCHINFO pPatch)
3730{
3731 DISCPUSTATE cpu;
3732 uint32_t cbInstr;
3733 bool disret;
3734 uint8_t *pInstrHC;
3735
3736 AssertReturn(pVM->patm.s.mmio.pCachedData, VERR_INVALID_PARAMETER);
3737
3738 /* Convert GC to HC address. */
3739 pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pInstrGC);
3740 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
3741
3742 /* Disassemble mmio instruction. */
3743 disret = patmR3DisInstrNoStrOpMode(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE,
3744 &cpu, &cbInstr);
3745 if (disret == false)
3746 {
3747 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
3748 return VERR_PATCHING_REFUSED;
3749 }
3750
3751 AssertMsg(cbInstr <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", cbInstr));
3752 if (cbInstr > MAX_INSTR_SIZE)
3753 return VERR_PATCHING_REFUSED;
3754 if (cpu.param2.fUse != DISUSE_DISPLACEMENT32)
3755 return VERR_PATCHING_REFUSED;
3756
3757 /* Add relocation record for cached data access. */
3758 if (patmPatchAddReloc32(pVM, pPatch, &pInstrHC[cpu.cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE) != VINF_SUCCESS)
3759 {
3760 Log(("Relocation failed for cached mmio address!!\n"));
3761 return VERR_PATCHING_REFUSED;
3762 }
3763 /* Replace address with that of the cached item. */
3764 *(RTRCPTR *)&pInstrHC[cpu.cbInstr - sizeof(RTRCPTR)] = pVM->patm.s.mmio.pCachedData;
3765
3766 /* Lowest and highest address for write monitoring. */
3767 pPatch->pInstrGCLowest = pInstrGC;
3768 pPatch->pInstrGCHighest = pInstrGC + cpu.cbInstr;
3769
3770 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "MMIO");
3771 pVM->patm.s.mmio.pCachedData = 0;
3772 pVM->patm.s.mmio.GCPhys = 0;
3773 return VINF_SUCCESS;
3774}
3775
3776/**
3777 * Activates an int3 patch
3778 *
3779 * @returns VBox status code.
3780 * @param pVM The VM to operate on.
3781 * @param pPatch Patch record
3782 */
3783static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3784{
3785 uint8_t bASMInt3 = 0xCC;
3786 int rc;
3787
3788 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3789 Assert(pPatch->uState != PATCH_ENABLED);
3790
3791 /* Replace first opcode byte with 'int 3'. */
3792 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, &bASMInt3, sizeof(bASMInt3));
3793 AssertRC(rc);
3794
3795 pPatch->cbPatchJump = sizeof(bASMInt3);
3796
3797 return rc;
3798}
3799
3800/**
3801 * Deactivates an int3 patch
3802 *
3803 * @returns VBox status code.
3804 * @param pVM The VM to operate on.
3805 * @param pPatch Patch record
3806 */
3807static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3808{
3809 uint8_t ASMInt3 = 0xCC;
3810 int rc;
3811
3812 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3813 Assert(pPatch->uState == PATCH_ENABLED || pPatch->uState == PATCH_DIRTY);
3814
3815 /* Restore first opcode byte. */
3816 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, sizeof(ASMInt3));
3817 AssertRC(rc);
3818 return rc;
3819}
3820
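/*
 * Activation and deactivation are symmetric single byte writes; a condensed
 * round trip, assuming pPatch->aPrivInstr already holds the original opcode
 * bytes (PATMR3PatchInstrInt3() below saves them before activating):
 *
 *     rc = patmActivateInt3Patch(pVM, pPatch);    // first opcode byte -> 0xCC
 *     // ... the #BP is handled dynamically in the raw-mode context ...
 *     rc = patmDeactivateInt3Patch(pVM, pPatch);  // original first byte restored
 */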
3821/**
3822 * Replace an instruction with a breakpoint (0xCC), that is handled dynamically
3823 * in the raw-mode context.
3824 *
3825 * @returns VBox status code.
3826 * @param pVM The VM to operate on.
3827 * @param pInstrGC Guest context pointer to privileged instruction
3828 * @param pInstrHC Host context pointer to privileged instruction
3829 * @param pCpu Disassembly CPU structure ptr
3830 * @param pPatch Patch record
3831 *
3832 * @note returns failure if patching is not allowed or possible
3833 *
3834 */
3835VMMR3DECL(int) PATMR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu,
3836 PPATCHINFO pPatch)
3837{
3838 uint8_t bASMInt3 = 0xCC;
3839 int rc;
3840
3841 /* Note: Do not use patch memory here! It might be called during patch installation too. */
3842 PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "PATMR3PatchInstrInt3:", "");
3843
3844 /* Save the original instruction. */
3845 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3846 AssertRC(rc);
3847 pPatch->cbPatchJump = sizeof(bASMInt3); /* bit of a misnomer in this case; size of replacement instruction. */
3848
3849 pPatch->flags |= PATMFL_INT3_REPLACEMENT;
3850
3851 /* Replace first opcode byte with 'int 3'. */
3852 rc = patmActivateInt3Patch(pVM, pPatch);
3853 if (RT_FAILURE(rc))
3854 goto failure;
3855
3856 /* Lowest and highest address for write monitoring. */
3857 pPatch->pInstrGCLowest = pInstrGC;
3858 pPatch->pInstrGCHighest = pInstrGC + pCpu->cbInstr;
3859
3860 pPatch->uState = PATCH_ENABLED;
3861 return VINF_SUCCESS;
3862
3863failure:
3864 /* Turn this patch into a dummy. */
3865 return VERR_PATCHING_REFUSED;
3866}
3867
3868#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
3869/**
3870 * Patch a jump instruction at specified location
3871 *
3872 * @returns VBox status code.
3873 * @param pVM The VM to operate on.
3874 * @param pInstrGC Guest context pointer to privileged instruction
3875 * @param pInstrHC Host context pointer to privileged instruction
3876 * @param pCpu Disassembly CPU structure ptr
3877 * @param pPatchRec Patch record
3878 *
3879 * @note returns failure if patching is not allowed or possible
3880 *
3881 */
3882int patmPatchJump(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATMPATCHREC pPatchRec)
3883{
3884 PPATCHINFO pPatch = &pPatchRec->patch;
3885 int rc = VERR_PATCHING_REFUSED;
3886
3887 pPatch->pPatchBlockOffset = 0; /* doesn't use patch memory */
3888 pPatch->uCurPatchOffset = 0;
3889 pPatch->cbPatchBlockSize = 0;
3890 pPatch->flags |= PATMFL_SINGLE_INSTRUCTION;
3891
3892 /*
3893 * Instruction replacements such as these should never be interrupted. I've added code to EM.cpp to
3894 * make sure this never happens. (unless a trap is triggered (intentionally or not))
3895 */
3896 switch (pCpu->pCurInstr->uOpcode)
3897 {
3898 case OP_JO:
3899 case OP_JNO:
3900 case OP_JC:
3901 case OP_JNC:
3902 case OP_JE:
3903 case OP_JNE:
3904 case OP_JBE:
3905 case OP_JNBE:
3906 case OP_JS:
3907 case OP_JNS:
3908 case OP_JP:
3909 case OP_JNP:
3910 case OP_JL:
3911 case OP_JNL:
3912 case OP_JLE:
3913 case OP_JNLE:
3914 case OP_JMP:
3915 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
3916 Assert(pCpu->param1.fUse & DISUSE_IMMEDIATE32_REL);
3917 if (!(pCpu->param1.fUse & DISUSE_IMMEDIATE32_REL))
3918 goto failure;
3919
3920 Assert(pCpu->cbInstr == SIZEOF_NEARJUMP32 || pCpu->cbInstr == SIZEOF_NEAR_COND_JUMP32);
3921 if (pCpu->cbInstr != SIZEOF_NEARJUMP32 && pCpu->cbInstr != SIZEOF_NEAR_COND_JUMP32)
3922 goto failure;
3923
3924 if (PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + pCpu->cbInstr))
3925 {
3926 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
3927 AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
3928 rc = VERR_PATCHING_REFUSED;
3929 goto failure;
3930 }
3931
3932 break;
3933
3934 default:
3935 goto failure;
3936 }
3937
3938 // make a copy of the guest code bytes that will be overwritten
3939 Assert(pCpu->cbInstr <= sizeof(pPatch->aPrivInstr));
3940 Assert(pCpu->cbInstr >= SIZEOF_NEARJUMP32);
3941 pPatch->cbPatchJump = pCpu->cbInstr;
3942
3943 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3944 AssertRC(rc);
3945
3946 /* Now insert a jump in the guest code. */
3947 /*
3948 * A conflict jump patch needs to be treated differently; we'll just replace the relative jump address with one that
3949 * references the target instruction in the conflict patch.
3950 */
3951 RTRCPTR pJmpDest = PATMR3GuestGCPtrToPatchGCPtr(pVM, pInstrGC + pCpu->cbInstr + (int32_t)pCpu->param1.parval);
3952
3953 AssertMsg(pJmpDest, ("PATMR3GuestGCPtrToPatchGCPtr failed for %RRv\n", pInstrGC + pCpu->cbInstr + (int32_t)pCpu->param1.parval));
3954 pPatch->pPatchJumpDestGC = pJmpDest;
3955
3956 PATMP2GLOOKUPREC cacheRec;
3957 RT_ZERO(cacheRec);
3958 cacheRec.pPatch = pPatch;
3959
3960 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
3961 /* Free leftover lock if any. */
3962 if (cacheRec.Lock.pvMap)
3963 {
3964 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
3965 cacheRec.Lock.pvMap = NULL;
3966 }
3967 AssertRC(rc);
3968 if (RT_FAILURE(rc))
3969 goto failure;
3970
3971 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
3972
3973 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
3974 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
3975
3976 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledJump);
3977
3978 /* Lowest and highest address for write monitoring. */
3979 pPatch->pInstrGCLowest = pInstrGC;
3980 pPatch->pInstrGCHighest = pInstrGC + pPatch->cbPatchJump;
3981
3982 pPatch->uState = PATCH_ENABLED;
3983 return VINF_SUCCESS;
3984
3985failure:
3986 /* Turn this cli patch into a dummy. */
3987 pPatch->uState = PATCH_REFUSED;
3988
3989 return rc;
3990}
3991#endif /* PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES */
3992
3993
3994/**
3995 * Gives hint to PATM about supervisor guest instructions
3996 *
3997 * @returns VBox status code.
3998 * @param pVM The VM to operate on.
3999 * @param pInstrGC Guest context pointer to privileged instruction
4000 * @param flags Patch flags
4001 */
4002VMMR3DECL(int) PATMR3AddHint(PVM pVM, RTRCPTR pInstrGC, uint32_t flags)
4003{
4004 Assert(pInstrGC);
4005 Assert(flags == PATMFL_CODE32);
4006
4007 Log(("PATMR3AddHint %RRv\n", pInstrGC));
4008 return PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_INSTR_HINT);
4009}
4010
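/*
 * A hint installs the patch but leaves it disabled; PATMR3InstallPatch()
 * only enables it once the instruction is actually encountered again (see
 * the PATCH_DISABLED handling further down). Minimal caller sketch with a
 * hypothetical cli instruction address pCliInstrGC:
 *
 *     rc = PATMR3AddHint(pVM, pCliInstrGC, PATMFL_CODE32);  // only PATMFL_CODE32 is accepted
 */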
4011/**
4012 * Patch privileged instruction at specified location
4013 *
4014 * @returns VBox status code.
4015 * @param pVM The VM to operate on.
4016 * @param pInstrGC Guest context pointer to privileged instruction (0:32 flat address)
4017 * @param flags Patch flags
4018 *
4019 * @note returns failure if patching is not allowed or possible
4020 */
4021VMMR3DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
4022{
4023 DISCPUSTATE cpu;
4024 R3PTRTYPE(uint8_t *) pInstrHC;
4025 uint32_t cbInstr;
4026 PPATMPATCHREC pPatchRec;
4027 PCPUMCTX pCtx = 0;
4028 bool disret;
4029 int rc;
4030 PVMCPU pVCpu = VMMGetCpu0(pVM);
4031
4032 if ( !pVM
4033 || pInstrGC == 0
4034 || (flags & ~(PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_SYSENTER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_REPLACE_FUNCTION_CALL|PATMFL_GUEST_SPECIFIC|PATMFL_INT3_REPLACEMENT|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_MMIO_ACCESS|PATMFL_TRAMPOLINE|PATMFL_INSTR_HINT|PATMFL_JUMP_CONFLICT)))
4035 {
4036 AssertFailed();
4037 return VERR_INVALID_PARAMETER;
4038 }
4039
4040 if (PATMIsEnabled(pVM) == false)
4041 return VERR_PATCHING_REFUSED;
4042
4043 /* Test for patch conflict only with patches that actually change guest code. */
4044 if (!(flags & (PATMFL_GUEST_SPECIFIC|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAMPOLINE)))
4045 {
4046 PPATCHINFO pConflictPatch = PATMFindActivePatchByEntrypoint(pVM, pInstrGC);
4047 AssertReleaseMsg(pConflictPatch == 0, ("Unable to patch overwritten instruction at %RRv (%RRv)\n", pInstrGC, pConflictPatch->pPrivInstrGC));
4048 if (pConflictPatch != 0)
4049 return VERR_PATCHING_REFUSED;
4050 }
4051
4052 if (!(flags & PATMFL_CODE32))
4053 {
4054 /** @todo Only 32-bit code right now */
4055 AssertMsgFailed(("PATMR3InstallPatch: We don't support 16-bit code at this moment!!\n"));
4056 return VERR_NOT_IMPLEMENTED;
4057 }
4058
4059 /* We ran out of patch memory; don't bother anymore. */
4060 if (pVM->patm.s.fOutOfMemory == true)
4061 return VERR_PATCHING_REFUSED;
4062
4063 /* Make sure the code selector is wide open; otherwise refuse. */
4064 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
4065 if (CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)) == 0)
4066 {
4067 RTRCPTR pInstrGCFlat = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pInstrGC);
4068 if (pInstrGCFlat != pInstrGC)
4069 {
4070 Log(("PATMR3InstallPatch: code selector not wide open: %04x:%RRv != %RRv eflags=%08x\n", pCtx->cs, pInstrGCFlat, pInstrGC, pCtx->eflags.u32));
4071 return VERR_PATCHING_REFUSED;
4072 }
4073 }
4074
4075 /* Note: the OpenBSD specific check will break if we allow additional patches to be installed (int 3). */
4076 if (!(flags & PATMFL_GUEST_SPECIFIC))
4077 {
4078 /* New code. Make sure CSAM has a go at it first. */
4079 CSAMR3CheckCode(pVM, pInstrGC);
4080 }
4081
4082 /* Note: obsolete */
4083 if ( PATMIsPatchGCAddr(pVM, pInstrGC)
4084 && (flags & PATMFL_MMIO_ACCESS))
4085 {
4086 RTRCUINTPTR offset;
4087 void *pvPatchCoreOffset;
4088
4089 /* Find the patch record. */
4090 offset = pInstrGC - pVM->patm.s.pPatchMemGC;
4091 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
4092 if (pvPatchCoreOffset == NULL)
4093 {
4094 AssertMsgFailed(("PATMR3InstallPatch: patch not found at address %RRv!!\n", pInstrGC));
4095 return VERR_PATCH_NOT_FOUND; //fatal error
4096 }
4097 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
4098
4099 return patmPatchPATMMMIOInstr(pVM, pInstrGC, &pPatchRec->patch);
4100 }
4101
4102 AssertReturn(!PATMIsPatchGCAddr(pVM, pInstrGC), VERR_PATCHING_REFUSED);
4103
4104 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4105 if (pPatchRec)
4106 {
4107 Assert(!(flags & PATMFL_TRAMPOLINE));
4108
4109 /* Hints about existing patches are ignored. */
4110 if (flags & PATMFL_INSTR_HINT)
4111 return VERR_PATCHING_REFUSED;
4112
4113 if (pPatchRec->patch.uState == PATCH_DISABLE_PENDING)
4114 {
4115 Log(("PATMR3InstallPatch: disable operation is pending for patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
4116 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
4117 Assert(pPatchRec->patch.uState == PATCH_DISABLED);
4118 }
4119
4120 if (pPatchRec->patch.uState == PATCH_DISABLED)
4121 {
4122 /* A patch for which we previously received a hint will be enabled and turned into a normal patch. */
4123 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4124 {
4125 Log(("Enabling HINTED patch %RRv\n", pInstrGC));
4126 pPatchRec->patch.flags &= ~PATMFL_INSTR_HINT;
4127 }
4128 else
4129 Log(("Enabling patch %RRv again\n", pInstrGC));
4130
4131 /** @todo we shouldn't disable and enable patches too often (it's relatively cheap, but pointless if it always happens) */
4132 rc = PATMR3EnablePatch(pVM, pInstrGC);
4133 if (RT_SUCCESS(rc))
4134 return VWRN_PATCH_ENABLED;
4135
4136 return rc;
4137 }
4138 if ( pPatchRec->patch.uState == PATCH_ENABLED
4139 || pPatchRec->patch.uState == PATCH_DIRTY)
4140 {
4141 /*
4142 * The patch might have been overwritten.
4143 */
4144 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
4145 if (pPatchRec->patch.uState != PATCH_REFUSED && pPatchRec->patch.uState != PATCH_UNUSABLE)
4146 {
4147 /* Patch must have been overwritten; remove it and pretend nothing happened. */
4148 Log(("Patch an existing patched instruction?!? (%RRv)\n", pInstrGC));
4149 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_MMIO_ACCESS|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
4150 {
4151 if (flags & PATMFL_IDTHANDLER)
4152 pPatchRec->patch.flags |= (flags & (PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER|PATMFL_INTHANDLER)); /* update the type */
4153
4154 return VERR_PATM_ALREADY_PATCHED; /* already done once */
4155 }
4156 }
4157 rc = PATMR3RemovePatch(pVM, pInstrGC);
4158 if (RT_FAILURE(rc))
4159 return VERR_PATCHING_REFUSED;
4160 }
4161 else
4162 {
4163 AssertMsg(pPatchRec->patch.uState == PATCH_REFUSED || pPatchRec->patch.uState == PATCH_UNUSABLE, ("Patch an existing patched instruction?!? (%RRv, state=%d)\n", pInstrGC, pPatchRec->patch.uState));
4164 /* already tried it once! */
4165 return VERR_PATCHING_REFUSED;
4166 }
4167 }
4168
4169 RTGCPHYS GCPhys;
4170 rc = PGMGstGetPage(pVCpu, pInstrGC, NULL, &GCPhys);
4171 if (rc != VINF_SUCCESS)
4172 {
4173 Log(("PGMGstGetPage failed with %Rrc\n", rc));
4174 return rc;
4175 }
4176 /* Disallow patching instructions inside ROM code; complete function duplication is allowed though. */
4177 if ( !(flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAMPOLINE))
4178 && !PGMPhysIsGCPhysNormal(pVM, GCPhys))
4179 {
4180 Log(("Code at %RGv (phys %RGp) is in a ROM, MMIO or invalid page - refused\n", pInstrGC, GCPhys));
4181 return VERR_PATCHING_REFUSED;
4182 }
4183
4184 /* Initialize cache record for guest address translations. */
4185 bool fInserted;
4186 PATMP2GLOOKUPREC cacheRec;
4187 RT_ZERO(cacheRec);
4188
4189 pInstrHC = PATMGCVirtToHCVirt(pVM, &cacheRec, pInstrGC);
4190 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
4191
4192 /* Allocate patch record. */
4193 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
4194 if (RT_FAILURE(rc))
4195 {
4196 Log(("Out of memory!!!!\n"));
4197 return VERR_NO_MEMORY;
4198 }
4199 pPatchRec->Core.Key = pInstrGC;
4200 pPatchRec->patch.uState = PATCH_REFUSED; /* default value */
4201 /* Insert patch record into the lookup tree. */
4202 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
4203 Assert(fInserted);
4204
4205 pPatchRec->patch.pPrivInstrGC = pInstrGC;
4206 pPatchRec->patch.flags = flags;
4207 pPatchRec->patch.uOpMode = (flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT;
4208 pPatchRec->patch.pTrampolinePatchesHead = NULL;
4209
4210 pPatchRec->patch.pInstrGCLowest = pInstrGC;
4211 pPatchRec->patch.pInstrGCHighest = pInstrGC;
4212
4213 if (!(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION | PATMFL_IDTHANDLER | PATMFL_SYSENTER | PATMFL_TRAMPOLINE)))
4214 {
4215 /*
4216 * Close proximity to an unusable patch is a possible hint that this patch would turn out to be dangerous too!
4217 */
4218 PPATMPATCHREC pPatchNear = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, (pInstrGC + SIZEOF_NEARJUMP32 - 1), false);
4219 if (pPatchNear)
4220 {
4221 if (pPatchNear->patch.uState == PATCH_UNUSABLE && pInstrGC < pPatchNear->patch.pPrivInstrGC && pInstrGC + SIZEOF_NEARJUMP32 > pPatchNear->patch.pPrivInstrGC)
4222 {
4223 Log(("Dangerous patch; would overwrite the unusable patch at %RRv\n", pPatchNear->patch.pPrivInstrGC));
4224
4225 pPatchRec->patch.uState = PATCH_UNUSABLE;
4226 /*
4227 * Leave the new patch record in place, marked unusable, to prevent us from checking it over and over again.
4228 */
4229 return VERR_PATCHING_REFUSED;
4230 }
4231 }
4232 }
4233
4234 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
4235 if (pPatchRec->patch.pTempInfo == 0)
4236 {
4237 Log(("Out of memory!!!!\n"));
4238 return VERR_NO_MEMORY;
4239 }
4240
4241 disret = patmR3DisInstrNoStrOpMode(pVM, &pPatchRec->patch, pInstrGC, NULL, PATMREAD_ORGCODE, &cpu, &cbInstr);
4242 if (disret == false)
4243 {
4244 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
4245 return VERR_PATCHING_REFUSED;
4246 }
4247
4248 AssertMsg(cbInstr <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", cbInstr));
4249 if (cbInstr > MAX_INSTR_SIZE)
4250 return VERR_PATCHING_REFUSED;
4251
4252 pPatchRec->patch.cbPrivInstr = cbInstr;
4253 pPatchRec->patch.opcode = cpu.pCurInstr->uOpcode;
4254
4255 /* Restricted hinting for now. */
4256 Assert(!(flags & PATMFL_INSTR_HINT) || cpu.pCurInstr->uOpcode == OP_CLI);
4257
4258 /* Initialize cache record patch pointer. */
4259 cacheRec.pPatch = &pPatchRec->patch;
4260
4261 /* Allocate statistics slot */
4262 if (pVM->patm.s.uCurrentPatchIdx < PATM_STAT_MAX_COUNTERS)
4263 {
4264 pPatchRec->patch.uPatchIdx = pVM->patm.s.uCurrentPatchIdx++;
4265 }
4266 else
4267 {
4268 Log(("WARNING: Patch index wrap around!!\n"));
4269 pPatchRec->patch.uPatchIdx = PATM_STAT_INDEX_DUMMY;
4270 }
4271
4272 if (pPatchRec->patch.flags & PATMFL_TRAPHANDLER)
4273 {
4274 rc = patmInstallTrapTrampoline(pVM, pInstrGC, pPatchRec, &cacheRec);
4275 }
4276 else
4277 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION ))
4278 {
4279 rc = patmDuplicateFunction(pVM, pInstrGC, pPatchRec, &cacheRec);
4280 }
4281 else
4282 if (pPatchRec->patch.flags & PATMFL_TRAMPOLINE)
4283 {
4284 rc = patmCreateTrampoline(pVM, pInstrGC, pPatchRec);
4285 }
4286 else
4287 if (pPatchRec->patch.flags & PATMFL_REPLACE_FUNCTION_CALL)
4288 {
4289 rc = patmReplaceFunctionCall(pVM, &cpu, pInstrGC, &cacheRec);
4290 }
4291 else
4292 if (pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT)
4293 {
4294 rc = PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4295 }
4296 else
4297 if (pPatchRec->patch.flags & PATMFL_MMIO_ACCESS)
4298 {
4299 rc = patmPatchMMIOInstr(pVM, pInstrGC, &cpu, &cacheRec);
4300 }
4301 else
4302 if (pPatchRec->patch.flags & (PATMFL_IDTHANDLER|PATMFL_SYSENTER))
4303 {
4304 if (pPatchRec->patch.flags & PATMFL_SYSENTER)
4305 pPatchRec->patch.flags |= PATMFL_IDTHANDLER; /* we treat a sysenter handler as an IDT handler */
4306
4307 rc = patmIdtHandler(pVM, pInstrGC, cbInstr, pPatchRec, &cacheRec);
4308#ifdef VBOX_WITH_STATISTICS
4309 if ( rc == VINF_SUCCESS
4310 && (pPatchRec->patch.flags & PATMFL_SYSENTER))
4311 {
4312 pVM->patm.s.uSysEnterPatchIdx = pPatchRec->patch.uPatchIdx;
4313 }
4314#endif
4315 }
4316 else
4317 if (pPatchRec->patch.flags & PATMFL_GUEST_SPECIFIC)
4318 {
4319 switch (cpu.pCurInstr->uOpcode)
4320 {
4321 case OP_SYSENTER:
4322 case OP_PUSH:
4323 rc = PATMInstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4324 if (rc == VINF_SUCCESS)
4325 {
4326 if (rc == VINF_SUCCESS)
4327 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4328 return rc;
4329 }
4330 break;
4331
4332 default:
4333 rc = VERR_NOT_IMPLEMENTED;
4334 break;
4335 }
4336 }
4337 else
4338 {
4339 switch (cpu.pCurInstr->uOpcode)
4340 {
4341 case OP_SYSENTER:
4342 rc = PATMInstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4343 if (rc == VINF_SUCCESS)
4344 {
4345 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4346 return VINF_SUCCESS;
4347 }
4348 break;
4349
4350#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
4351 case OP_JO:
4352 case OP_JNO:
4353 case OP_JC:
4354 case OP_JNC:
4355 case OP_JE:
4356 case OP_JNE:
4357 case OP_JBE:
4358 case OP_JNBE:
4359 case OP_JS:
4360 case OP_JNS:
4361 case OP_JP:
4362 case OP_JNP:
4363 case OP_JL:
4364 case OP_JNL:
4365 case OP_JLE:
4366 case OP_JNLE:
4367 case OP_JECXZ:
4368 case OP_LOOP:
4369 case OP_LOOPNE:
4370 case OP_LOOPE:
4371 case OP_JMP:
4372 if (pPatchRec->patch.flags & PATMFL_JUMP_CONFLICT)
4373 {
4374 rc = patmPatchJump(pVM, pInstrGC, pInstrHC, &cpu, pPatchRec);
4375 break;
4376 }
4377 return VERR_NOT_IMPLEMENTED;
4378#endif
4379
4380 case OP_PUSHF:
4381 case OP_CLI:
4382 Log(("PATMR3InstallPatch %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4383 rc = PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, cpu.pCurInstr->uOpcode, cbInstr, pPatchRec);
4384 break;
4385
4386 case OP_STR:
4387 case OP_SGDT:
4388 case OP_SLDT:
4389 case OP_SIDT:
4390 case OP_CPUID:
4391 case OP_LSL:
4392 case OP_LAR:
4393 case OP_SMSW:
4394 case OP_VERW:
4395 case OP_VERR:
4396 case OP_IRET:
4397 rc = PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4398 break;
4399
4400 default:
4401 return VERR_NOT_IMPLEMENTED;
4402 }
4403 }
4404
4405 if (rc != VINF_SUCCESS)
4406 {
4407 if (pPatchRec && pPatchRec->patch.nrPatch2GuestRecs)
4408 {
4409 patmEmptyTreeU32(pVM, &pPatchRec->patch.Patch2GuestAddrTree);
4410 pPatchRec->patch.nrPatch2GuestRecs = 0;
4411 }
4412 pVM->patm.s.uCurrentPatchIdx--;
4413 }
4414 else
4415 {
4416 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
4417 AssertRCReturn(rc, rc);
4418
4419 /* Keep track of the upper and lower boundaries of patched instructions. */
4420 if (pPatchRec->patch.pInstrGCLowest < pVM->patm.s.pPatchedInstrGCLowest)
4421 pVM->patm.s.pPatchedInstrGCLowest = pPatchRec->patch.pInstrGCLowest;
4422 if (pPatchRec->patch.pInstrGCHighest > pVM->patm.s.pPatchedInstrGCHighest)
4423 pVM->patm.s.pPatchedInstrGCHighest = pPatchRec->patch.pInstrGCHighest;
4424
4425 Log(("Patch lowest %RRv highest %RRv\n", pPatchRec->patch.pInstrGCLowest, pPatchRec->patch.pInstrGCHighest));
4426 Log(("Global lowest %RRv highest %RRv\n", pVM->patm.s.pPatchedInstrGCLowest, pVM->patm.s.pPatchedInstrGCHighest));
4427
4428 STAM_COUNTER_ADD(&pVM->patm.s.StatInstalled, 1);
4429 STAM_COUNTER_ADD(&pVM->patm.s.StatPATMMemoryUsed, pPatchRec->patch.cbPatchBlockSize);
4430
4431 rc = VINF_SUCCESS;
4432
4433 /* Patch hints are not enabled by default; only when they are actually encountered. */
4434 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4435 {
4436 rc = PATMR3DisablePatch(pVM, pInstrGC);
4437 AssertRCReturn(rc, rc);
4438 }
4439
4440#ifdef VBOX_WITH_STATISTICS
4441 /* Register statistics counter */
4442 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
4443 {
4444 STAMR3RegisterCallback(pVM, &pPatchRec->patch, STAMVISIBILITY_NOT_GUI, STAMUNIT_GOOD_BAD, patmResetStat, patmPrintStat, "Patch statistics",
4445 "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4446#ifndef DEBUG_sandervl
4447 /* Full breakdown for the GUI. */
4448 STAMR3RegisterF(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx], STAMTYPE_RATIO_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_GOOD_BAD, PATMPatchType(pVM, &pPatchRec->patch),
4449 "/PATM/Stats/PatchBD/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4450 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchBlockSize,STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchBlockSize", pPatchRec->patch.pPrivInstrGC);
4451 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchJump, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchJump", pPatchRec->patch.pPrivInstrGC);
4452 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPrivInstr, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPrivInstr", pPatchRec->patch.pPrivInstrGC);
4453 STAMR3RegisterF(pVM, &pPatchRec->patch.cCodeWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cCodeWrites", pPatchRec->patch.pPrivInstrGC);
4454 STAMR3RegisterF(pVM, &pPatchRec->patch.cInvalidWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cInvalidWrites", pPatchRec->patch.pPrivInstrGC);
4455 STAMR3RegisterF(pVM, &pPatchRec->patch.cTraps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cTraps", pPatchRec->patch.pPrivInstrGC);
4456 STAMR3RegisterF(pVM, &pPatchRec->patch.flags, STAMTYPE_X32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/flags", pPatchRec->patch.pPrivInstrGC);
4457 STAMR3RegisterF(pVM, &pPatchRec->patch.nrJumpRecs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrJumpRecs", pPatchRec->patch.pPrivInstrGC);
4458 STAMR3RegisterF(pVM, &pPatchRec->patch.nrFixups, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrFixups", pPatchRec->patch.pPrivInstrGC);
4459 STAMR3RegisterF(pVM, &pPatchRec->patch.opcode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/opcode", pPatchRec->patch.pPrivInstrGC);
4460 STAMR3RegisterF(pVM, &pPatchRec->patch.uOldState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOldState", pPatchRec->patch.pPrivInstrGC);
4461 STAMR3RegisterF(pVM, &pPatchRec->patch.uOpMode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOpMode", pPatchRec->patch.pPrivInstrGC);
4462 /// @todo change the state to be a callback so we can get a state mnemonic instead.
4463 STAMR3RegisterF(pVM, &pPatchRec->patch.uState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uState", pPatchRec->patch.pPrivInstrGC);
4464#endif
4465 }
4466#endif
4467 }
4468 /* Free leftover lock if any. */
4469 if (cacheRec.Lock.pvMap)
4470 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4471 return rc;
4472}
4473
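/*
 * Minimal caller sketch for the common cli/pushf case handled by the switch
 * above; pPrivInstrGC is a hypothetical flat guest address of the privileged
 * instruction:
 *
 *     int rc = PATMR3InstallPatch(pVM, pPrivInstrGC, PATMFL_CODE32);
 *     if (rc == VWRN_PATCH_ENABLED)
 *         Log(("Existing (disabled or hinted) patch re-enabled\n"));
 *     else if (RT_FAILURE(rc))
 *         Log(("PATM refused to patch %RRv: %Rrc\n", pPrivInstrGC, rc));
 */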
4474/**
4475 * Query instruction size
4476 *
4477 * @returns VBox status code.
4478 * @param pVM The VM to operate on.
4479 * @param pPatch Patch record
4480 * @param pInstrGC Instruction address
4481 */
4482static uint32_t patmGetInstrSize(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
4483{
4484 uint8_t *pInstrHC;
4485 PGMPAGEMAPLOCK Lock;
4486
4487 int rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pInstrGC, (const void **)&pInstrHC, &Lock);
4488 if (rc == VINF_SUCCESS)
4489 {
4490 DISCPUSTATE cpu;
4491 bool disret;
4492 uint32_t cbInstr;
4493
4494 disret = patmR3DisInstr(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE | PATMREAD_NOCHECK, &cpu, &cbInstr);
4495 PGMPhysReleasePageMappingLock(pVM, &Lock);
4496 if (disret)
4497 return cbInstr;
4498 }
4499 return 0;
4500}
4501
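/*
 * patmGetInstrSize() returns 0 when the guest page cannot be mapped or the
 * bytes do not disassemble, so callers must treat 0 as "size unknown"; a
 * sketch (pInstrEndGC is a hypothetical range end):
 *
 *     uint32_t cb = patmGetInstrSize(pVM, pPatch, pInstrGC);
 *     if (cb)
 *         pInstrEndGC = pInstrGC + cb;   // safe to extend the monitored range
 */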
4502/**
4503 * Add patch to page record
4504 *
4505 * @returns VBox status code.
4506 * @param pVM The VM to operate on.
4507 * @param pPage Page address
4508 * @param pPatch Patch record
4509 */
4510int patmAddPatchToPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4511{
4512 PPATMPATCHPAGE pPatchPage;
4513 int rc;
4514
4515 Log(("patmAddPatchToPage: insert patch %RHv to page %RRv\n", pPatch, pPage));
4516
4517 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4518 if (pPatchPage)
4519 {
4520 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4521 if (pPatchPage->cCount == pPatchPage->cMaxPatches)
4522 {
4523 uint32_t cMaxPatchesOld = pPatchPage->cMaxPatches;
4524 PPATCHINFO *paPatchOld = pPatchPage->aPatch;
4525
4526 pPatchPage->cMaxPatches += PATMPATCHPAGE_PREALLOC_INCREMENT;
4527 rc = MMHyperAlloc(pVM, sizeof(PPATCHINFO)*pPatchPage->cMaxPatches, 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage->aPatch);
4528 if (RT_FAILURE(rc))
4529 {
4530 Log(("Out of memory!!!!\n"));
4531 return VERR_NO_MEMORY;
4532 }
4533 memcpy(pPatchPage->aPatch, paPatchOld, cMaxPatchesOld*sizeof(PPATCHINFO));
4534 MMHyperFree(pVM, paPatchOld);
4535 }
4536 pPatchPage->aPatch[pPatchPage->cCount] = pPatch;
4537 pPatchPage->cCount++;
4538 }
4539 else
4540 {
4541 bool fInserted;
4542
4543 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHPAGE), 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage);
4544 if (RT_FAILURE(rc))
4545 {
4546 Log(("Out of memory!!!!\n"));
4547 return VERR_NO_MEMORY;
4548 }
4549 pPatchPage->Core.Key = pPage;
4550 pPatchPage->cCount = 1;
4551 pPatchPage->cMaxPatches = PATMPATCHPAGE_PREALLOC_INCREMENT;
4552
4553 rc = MMHyperAlloc(pVM, sizeof(PPATCHINFO)*PATMPATCHPAGE_PREALLOC_INCREMENT, 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage->aPatch);
4554 if (RT_FAILURE(rc))
4555 {
4556 Log(("Out of memory!!!!\n"));
4557 MMHyperFree(pVM, pPatchPage);
4558 return VERR_NO_MEMORY;
4559 }
4560 pPatchPage->aPatch[0] = pPatch;
4561
4562 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, &pPatchPage->Core);
4563 Assert(fInserted);
4564 pVM->patm.s.cPageRecords++;
4565
4566 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageInserted);
4567 }
4568 CSAMR3MonitorPage(pVM, pPage, CSAM_TAG_PATM);
4569
4570 /* Get the closest guest instruction (from below) */
4571 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4572 Assert(pGuestToPatchRec);
4573 if (pGuestToPatchRec)
4574 {
4575 LogFlow(("patmAddPatchToPage: lowest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pLowestAddrGC));
4576 if ( pPatchPage->pLowestAddrGC == 0
4577 || pPatchPage->pLowestAddrGC > (RTRCPTR)pGuestToPatchRec->Core.Key)
4578 {
4579 RTRCUINTPTR offset;
4580
4581 pPatchPage->pLowestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4582
4583 offset = pPatchPage->pLowestAddrGC & PAGE_OFFSET_MASK;
4584 /* If we're too close to the page boundary, then make sure an instruction from the previous page doesn't cross the boundary itself. */
4585 if (offset && offset < MAX_INSTR_SIZE)
4586 {
4587 /* Get the closest guest instruction (from above) */
4588 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage-1, false);
4589
4590 if (pGuestToPatchRec)
4591 {
4592 uint32_t size = patmGetInstrSize(pVM, pPatch, (RTRCPTR)pGuestToPatchRec->Core.Key);
4593 if ((RTRCUINTPTR)pGuestToPatchRec->Core.Key + size > pPage)
4594 {
4595 pPatchPage->pLowestAddrGC = pPage;
4596 LogFlow(("patmAddPatchToPage: new lowest %RRv\n", pPatchPage->pLowestAddrGC));
4597 }
4598 }
4599 }
4600 }
4601 }
4602
4603 /* Get the closest guest instruction (from above) */
4604 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage+PAGE_SIZE-1, false);
4605 Assert(pGuestToPatchRec);
4606 if (pGuestToPatchRec)
4607 {
4608 LogFlow(("patmAddPatchToPage: highest patch page address %RRv current highest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pHighestAddrGC));
4609 if ( pPatchPage->pHighestAddrGC == 0
4610 || pPatchPage->pHighestAddrGC <= (RTRCPTR)pGuestToPatchRec->Core.Key)
4611 {
4612 pPatchPage->pHighestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4613 /* Increase by instruction size. */
4614 uint32_t size = patmGetInstrSize(pVM, pPatch, pPatchPage->pHighestAddrGC);
4615//// Assert(size);
4616 pPatchPage->pHighestAddrGC += size;
4617 LogFlow(("patmAddPatchToPage: new highest %RRv\n", pPatchPage->pHighestAddrGC));
4618 }
4619 }
4620
4621 return VINF_SUCCESS;
4622}
4623
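/*
 * Worked example of the boundary handling above (hypothetical addresses,
 * 4K pages): if the lowest patched instruction in a page starts at offset 2
 * (e.g. 0x80012002) and the preceding page contains a 6 byte instruction at
 * 0x80011ffd, that instruction crosses into this page (0x80011ffd + 6 =
 * 0x80012003 > 0x80012000), so pLowestAddrGC is clamped down to the page
 * start 0x80012000, presumably so that writes hitting the bytes of that
 * straddling instruction which fall in this page are still inside the
 * tracked range.
 */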
4624/**
4625 * Remove patch from page record
4626 *
4627 * @returns VBox status code.
4628 * @param pVM The VM to operate on.
4629 * @param pPage Page address
4630 * @param pPatch Patch record
4631 */
4632int patmRemovePatchFromPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4633{
4634 PPATMPATCHPAGE pPatchPage;
4635 int rc;
4636
4637 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4638 Assert(pPatchPage);
4639
4640 if (!pPatchPage)
4641 return VERR_INVALID_PARAMETER;
4642
4643 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4644
4645    Log(("patmRemovePatchFromPage: remove patch %RHv from page %RRv\n", pPatch, pPage));
4646 if (pPatchPage->cCount > 1)
4647 {
4648 uint32_t i;
4649
4650 /* Used by multiple patches */
4651 for (i=0;i<pPatchPage->cCount;i++)
4652 {
4653 if (pPatchPage->aPatch[i] == pPatch)
4654 {
4655 pPatchPage->aPatch[i] = 0;
4656 break;
4657 }
4658 }
4659 /* close the gap between the remaining pointers. */
4660 if (i < pPatchPage->cCount - 1)
4661 {
4662            memmove(&pPatchPage->aPatch[i], &pPatchPage->aPatch[i+1], sizeof(PPATCHINFO)*(pPatchPage->cCount - (i+1))); /* ranges may overlap */
4663 }
4664 AssertMsg(i < pPatchPage->cCount, ("Unable to find patch %RHv in page %RRv\n", pPatch, pPage));
4665
4666 pPatchPage->cCount--;
4667 }
4668 else
4669 {
4670 PPATMPATCHPAGE pPatchNode;
4671
4672 Log(("patmRemovePatchFromPage %RRv\n", pPage));
4673
4674 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageRemoved);
4675 pPatchNode = (PPATMPATCHPAGE)RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4676 Assert(pPatchNode && pPatchNode == pPatchPage);
4677
4678 Assert(pPatchPage->aPatch);
4679 rc = MMHyperFree(pVM, pPatchPage->aPatch);
4680 AssertRC(rc);
4681 rc = MMHyperFree(pVM, pPatchPage);
4682 AssertRC(rc);
4683 pVM->patm.s.cPageRecords--;
4684 }
4685 return VINF_SUCCESS;
4686}
4687
4688/**
4689 * Insert page records for all guest pages that contain instructions that were recompiled for this patch
4690 *
4691 * @returns VBox status code.
4692 * @param pVM The VM to operate on.
4693 * @param pPatch Patch record
4694 */
4695int patmInsertPatchPages(PVM pVM, PPATCHINFO pPatch)
4696{
4697 int rc;
4698 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4699
4700 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4701 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4702 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4703
4704 /** @todo optimize better (large gaps between current and next used page) */
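    /* A single patch can cover guest instructions on several pages, so walk every page
     * between the lowest and highest recompiled instruction. */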
4705 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4706 {
4707 /* Get the closest guest instruction (from above) */
4708 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4709 if ( pGuestToPatchRec
4710 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage)
4711 )
4712 {
4713 /* Code in page really patched -> add record */
4714 rc = patmAddPatchToPage(pVM, pPage, pPatch);
4715 AssertRC(rc);
4716 }
4717 }
4718 pPatch->flags |= PATMFL_CODE_MONITORED;
4719 return VINF_SUCCESS;
4720}
4721
4722/**
4723 * Remove page records for all guest pages that contain instructions that were recompiled for this patch
4724 *
4725 * @returns VBox status code.
4726 * @param pVM The VM to operate on.
4727 * @param pPatch Patch record
4728 */
4729int patmRemovePatchPages(PVM pVM, PPATCHINFO pPatch)
4730{
4731 int rc;
4732 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4733
4734 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4735 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4736 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4737
4738 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4739 {
4740 /* Get the closest guest instruction (from above) */
4741 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4742 if ( pGuestToPatchRec
4743 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage) /** @todo bird: PAGE_ADDRESS is for the current context really. check out these. */
4744 )
4745 {
4746 /* Code in page really patched -> remove record */
4747 rc = patmRemovePatchFromPage(pVM, pPage, pPatch);
4748 AssertRC(rc);
4749 }
4750 }
4751 pPatch->flags &= ~PATMFL_CODE_MONITORED;
4752 return VINF_SUCCESS;
4753}
4754
4755/**
4756 * Notifies PATM about a (potential) write to code that has been patched.
4757 *
4758 * @returns VBox status code.
4759 * @param pVM The VM to operate on.
4760 * @param GCPtr GC pointer to write address
4761 * @param cbWrite     Number of bytes to write
4762 *
4763 */
4764VMMR3DECL(int) PATMR3PatchWrite(PVM pVM, RTRCPTR GCPtr, uint32_t cbWrite)
4765{
4766 RTRCUINTPTR pWritePageStart, pWritePageEnd, pPage;
4767
4768 Log(("PATMR3PatchWrite %RRv %x\n", GCPtr, cbWrite));
4769
4770 Assert(VM_IS_EMT(pVM));
4771
4772 /* Quick boundary check */
4773 if ( GCPtr < pVM->patm.s.pPatchedInstrGCLowest
4774 || GCPtr > pVM->patm.s.pPatchedInstrGCHighest
4775 )
4776 return VINF_SUCCESS;
4777
4778 STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWrite, a);
4779
4780 pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
4781 pWritePageEnd = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;
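    /* The write may straddle a page boundary, so check every page it touches. */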
4782
4783 for (pPage = pWritePageStart; pPage <= pWritePageEnd; pPage += PAGE_SIZE)
4784 {
4785loop_start:
4786 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4787 if (pPatchPage)
4788 {
4789 uint32_t i;
4790 bool fValidPatchWrite = false;
4791
4792 /* Quick check to see if the write is in the patched part of the page */
4793 if ( pPatchPage->pLowestAddrGC > (RTRCPTR)((RTRCUINTPTR)GCPtr + cbWrite - 1)
4794 || pPatchPage->pHighestAddrGC < GCPtr)
4795 {
4796 break;
4797 }
4798
4799 for (i=0;i<pPatchPage->cCount;i++)
4800 {
4801 if (pPatchPage->aPatch[i])
4802 {
4803 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4804 RTRCPTR pPatchInstrGC;
4805 //unused: bool fForceBreak = false;
4806
4807 Assert(pPatchPage->aPatch[i]->flags & PATMFL_CODE_MONITORED);
4808 /** @todo inefficient and includes redundant checks for multiple pages. */
4809 for (uint32_t j=0; j<cbWrite; j++)
4810 {
4811 RTRCPTR pGuestPtrGC = (RTRCPTR)((RTRCUINTPTR)GCPtr + j);
4812
4813 if ( pPatch->cbPatchJump
4814 && pGuestPtrGC >= pPatch->pPrivInstrGC
4815 && pGuestPtrGC < pPatch->pPrivInstrGC + pPatch->cbPatchJump)
4816 {
4817 /* The guest is about to overwrite the 5 byte jump to patch code. Remove the patch. */
4818 Log(("PATMR3PatchWrite: overwriting jump to patch code -> remove patch.\n"));
4819 int rc = PATMR3RemovePatch(pVM, pPatch->pPrivInstrGC);
4820 if (rc == VINF_SUCCESS)
4821 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4822 goto loop_start;
4823
4824 continue;
4825 }
4826
4827 /* Find the closest instruction from below; the above quick check ensured that we are indeed in patched code */
4828 pPatchInstrGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4829 if (!pPatchInstrGC)
4830 {
4831 RTRCPTR pClosestInstrGC;
4832 uint32_t size;
4833
4834 pPatchInstrGC = patmGuestGCPtrToClosestPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4835 if (pPatchInstrGC)
4836 {
4837 pClosestInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchInstrGC);
4838 Assert(pClosestInstrGC <= pGuestPtrGC);
4839 size = patmGetInstrSize(pVM, pPatch, pClosestInstrGC);
4840 /* Check if this is not a write into a gap between two patches */
4841 if (pClosestInstrGC + size - 1 < pGuestPtrGC)
4842 pPatchInstrGC = 0;
4843 }
4844 }
4845 if (pPatchInstrGC)
4846 {
4847 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
4848
4849 fValidPatchWrite = true;
4850
4851 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
4852 Assert(pPatchToGuestRec);
4853 if (pPatchToGuestRec && !pPatchToGuestRec->fDirty)
4854 {
4855 Log(("PATMR3PatchWrite: Found patched instruction %RRv -> %RRv\n", pGuestPtrGC, pPatchInstrGC));
4856
4857 if (++pPatch->cCodeWrites > PATM_MAX_CODE_WRITES)
4858 {
4859 LogRel(("PATM: Disable block at %RRv - write %RRv-%RRv\n", pPatch->pPrivInstrGC, pGuestPtrGC, pGuestPtrGC+cbWrite));
4860
4861 PATMR3MarkDirtyPatch(pVM, pPatch);
4862
4863 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4864 goto loop_start;
4865 }
4866 else
4867 {
4868 /* Replace the patch instruction with a breakpoint; when it's hit, then we'll attempt to recompile the instruction again. */
4869 uint8_t *pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pPatchInstrGC);
4870
4871 pPatchToGuestRec->u8DirtyOpcode = *pInstrHC;
4872 pPatchToGuestRec->fDirty = true;
4873
4874 *pInstrHC = 0xCC;
4875
4876 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirty);
4877 }
4878 }
4879 /* else already marked dirty */
4880 }
4881 }
4882 }
4883 } /* for each patch */
4884
4885 if (fValidPatchWrite == false)
4886 {
4887                /* Write to a part of the page that either:
4888                 * - doesn't contain any code (shared code/data); rather unlikely
4889                 * - belongs to an old code page that's no longer in active use.
4890                 */
4891invalid_write_loop_start:
4892 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4893
4894 if (pPatchPage)
4895 {
4896 for (i=0;i<pPatchPage->cCount;i++)
4897 {
4898 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4899
4900 if (pPatch->cInvalidWrites > PATM_MAX_INVALID_WRITES)
4901 {
4902 /* Note: possibly dangerous assumption that all future writes will be harmless. */
4903 if (pPatch->flags & PATMFL_IDTHANDLER)
4904 {
4905 LogRel(("PATM: Stop monitoring IDT handler pages at %RRv - invalid write %RRv-%RRv (this is not a fatal error)\n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
4906
4907 Assert(pPatch->flags & PATMFL_CODE_MONITORED);
4908 int rc = patmRemovePatchPages(pVM, pPatch);
4909 AssertRC(rc);
4910 }
4911 else
4912 {
4913 LogRel(("PATM: Disable block at %RRv - invalid write %RRv-%RRv \n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
4914 PATMR3MarkDirtyPatch(pVM, pPatch);
4915 }
4916 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4917 goto invalid_write_loop_start;
4918 }
4919 } /* for */
4920 }
4921 }
4922 }
4923 }
4924 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWrite, a);
4925 return VINF_SUCCESS;
4926
4927}
4928
4929/**
4930 * Disable all patches in a flushed page
4931 *
4932 * @returns VBox status code
4933 * @param pVM The VM to operate on.
4934 * @param addr GC address of the page to flush
4935 */
4936/** @note Currently only called by CSAMR3FlushPage; optimization to avoid having to double check if the physical address has changed
4937 */
4938VMMR3DECL(int) PATMR3FlushPage(PVM pVM, RTRCPTR addr)
4939{
4940 addr &= PAGE_BASE_GC_MASK;
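    /* The patch page lookup tree is keyed by page-aligned guest addresses, hence the masking above. */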
4941
4942 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, addr);
4943 if (pPatchPage)
4944 {
4945 int i;
4946
4947 /* From top to bottom as the array is modified by PATMR3MarkDirtyPatch. */
4948 for (i=(int)pPatchPage->cCount-1;i>=0;i--)
4949 {
4950 if (pPatchPage->aPatch[i])
4951 {
4952 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4953
4954 Log(("PATMR3FlushPage %RRv remove patch at %RRv\n", addr, pPatch->pPrivInstrGC));
4955 PATMR3MarkDirtyPatch(pVM, pPatch);
4956 }
4957 }
4958 STAM_COUNTER_INC(&pVM->patm.s.StatFlushed);
4959 }
4960 return VINF_SUCCESS;
4961}
4962
4963/**
4964 * Checks if the instruction at the specified address has been patched already.
4965 *
4966 * @returns boolean, patched or not
4967 * @param pVM The VM to operate on.
4968 * @param pInstrGC Guest context pointer to instruction
4969 */
4970VMMR3DECL(bool) PATMR3HasBeenPatched(PVM pVM, RTRCPTR pInstrGC)
4971{
4972 PPATMPATCHREC pPatchRec;
4973 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4974 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
4975 return true;
4976 return false;
4977}
4978
4979/**
4980 * Query the opcode of the original code that was overwritten by the 5-byte patch jump
4981 *
4982 * @returns VBox status code.
4983 * @param pVM The VM to operate on.
4984 * @param pInstrGC GC address of instr
4985 * @param pByte opcode byte pointer (OUT)
4986 *
4987 */
4988VMMR3DECL(int) PATMR3QueryOpcode(PVM pVM, RTRCPTR pInstrGC, uint8_t *pByte)
4989{
4990 PPATMPATCHREC pPatchRec;
4991
4992 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
4993
4994 /* Shortcut. */
4995 if ( !PATMIsEnabled(pVM)
4996 || pInstrGC < pVM->patm.s.pPatchedInstrGCLowest
4997 || pInstrGC > pVM->patm.s.pPatchedInstrGCHighest)
4998 {
4999 return VERR_PATCH_NOT_FOUND;
5000 }
5001
5002 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5003 // if the patch is enabled and the pointer lies within 5 bytes of this priv instr ptr, then we've got a hit!
5004 if ( pPatchRec
5005 && pPatchRec->patch.uState == PATCH_ENABLED
5006 && pInstrGC >= pPatchRec->patch.pPrivInstrGC
5007 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5008 {
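        /* aPrivInstr holds the original guest bytes that were saved away before the
         * 5-byte jump was written over them, so we can hand back the pre-patch opcode
         * byte at the requested offset. */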
5009 RTRCPTR offset = pInstrGC - pPatchRec->patch.pPrivInstrGC;
5010 *pByte = pPatchRec->patch.aPrivInstr[offset];
5011
5012 if (pPatchRec->patch.cbPatchJump == 1)
5013 {
5014 Log(("PATMR3QueryOpcode: returning opcode %2X for instruction at %RRv\n", *pByte, pInstrGC));
5015 }
5016 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5017 return VINF_SUCCESS;
5018 }
5019 return VERR_PATCH_NOT_FOUND;
5020}
5021
5022/**
5023 * Disable patch for privileged instruction at specified location
5024 *
5025 * @returns VBox status code.
5026 * @param pVM The VM to operate on.
5027 * @param pInstrGC    Guest context pointer to the privileged instruction
5028 *
5029 * @note returns failure if patching is not allowed or possible
5030 *
5031 */
5032VMMR3DECL(int) PATMR3DisablePatch(PVM pVM, RTRCPTR pInstrGC)
5033{
5034 PPATMPATCHREC pPatchRec;
5035 PPATCHINFO pPatch;
5036
5037 Log(("PATMR3DisablePatch: %RRv\n", pInstrGC));
5038 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5039 if (pPatchRec)
5040 {
5041 int rc = VINF_SUCCESS;
5042
5043 pPatch = &pPatchRec->patch;
5044
5045 /* Already disabled? */
5046 if (pPatch->uState == PATCH_DISABLED)
5047 return VINF_SUCCESS;
5048
5049 /* Clear the IDT entries for the patch we're disabling. */
5050 /* Note: very important as we clear IF in the patch itself */
5051 /** @todo this needs to be changed */
5052 if (pPatch->flags & PATMFL_IDTHANDLER)
5053 {
5054 uint32_t iGate;
5055
5056 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
5057 if (iGate != (uint32_t)~0)
5058 {
5059 TRPMR3SetGuestTrapHandler(pVM, iGate, TRPM_INVALID_HANDLER);
5060 if (++cIDTHandlersDisabled < 256)
5061 LogRel(("PATM: Disabling IDT %x patch handler %RRv\n", iGate, pInstrGC));
5062 }
5063 }
5064
5065        /* Mark the entry with a breakpoint in case somebody else calls it later on (cli patch used as a function, trampoline or IDT patches) */
5066 if ( pPatch->pPatchBlockOffset
5067 && pPatch->uState == PATCH_ENABLED)
5068 {
5069 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5070 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5071 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5072 }
5073
5074 /* IDT or function patches haven't changed any guest code. */
5075 if (pPatch->flags & PATMFL_PATCHED_GUEST_CODE)
5076 {
5077 Assert(pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP);
5078 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK)));
5079
5080 if (pPatch->uState != PATCH_REFUSED)
5081 {
5082 uint8_t temp[16];
5083
5084 Assert(pPatch->cbPatchJump < sizeof(temp));
5085
5086 /* Let's first check if the guest code is still the same. */
5087 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5088 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5089 if (rc == VINF_SUCCESS)
5090 {
5091 RTRCINTPTR displ = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);
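                    /* Hypothetical example: with the patch entry at 0xA0001000 and the private
                     * instruction at 0x80004000, the jump written into guest code was
                     * E9 FB CF FF 1F (rel32 = 0xA0001000 - (0x80004000 + 5) = 0x1FFFCFFB);
                     * displ recomputes that operand so the guest bytes can be verified below. */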
5092
5093 if ( temp[0] != 0xE9 /* jmp opcode */
5094 || *(RTRCINTPTR *)(&temp[1]) != displ
5095 )
5096 {
5097                        Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5098 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5099 /* Remove it completely */
5100 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5101 rc = PATMR3RemovePatch(pVM, pInstrGC);
5102 AssertRC(rc);
5103 return VWRN_PATCH_REMOVED;
5104 }
5105 patmRemoveJumpToPatch(pVM, pPatch);
5106 }
5107 else
5108 {
5109 Log(("PATMR3DisablePatch: unable to disable patch -> mark PATCH_DISABLE_PENDING\n"));
5110 pPatch->uState = PATCH_DISABLE_PENDING;
5111 }
5112 }
5113 else
5114 {
5115 AssertMsgFailed(("Patch was refused!\n"));
5116 return VERR_PATCH_ALREADY_DISABLED;
5117 }
5118 }
5119 else
5120 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5121 {
5122 uint8_t temp[16];
5123
5124 Assert(pPatch->cbPatchJump < sizeof(temp));
5125
5126 /* Let's first check if the guest code is still the same. */
5127 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5128 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5129 if (rc == VINF_SUCCESS)
5130 {
5131 if (temp[0] != 0xCC)
5132 {
5133                    Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5134 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5135 /* Remove it completely */
5136 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5137 rc = PATMR3RemovePatch(pVM, pInstrGC);
5138 AssertRC(rc);
5139 return VWRN_PATCH_REMOVED;
5140 }
5141 patmDeactivateInt3Patch(pVM, pPatch);
5142 }
5143 }
5144
5145 if (rc == VINF_SUCCESS)
5146 {
5147 /* Save old state and mark this one as disabled (so it can be enabled later on). */
5148 if (pPatch->uState == PATCH_DISABLE_PENDING)
5149 {
5150 /* Just to be safe, let's make sure this one can never be reused; the patch might be marked dirty already (int3 at start) */
5151 pPatch->uState = PATCH_UNUSABLE;
5152 }
5153 else
5154 if (pPatch->uState != PATCH_DIRTY)
5155 {
5156 pPatch->uOldState = pPatch->uState;
5157 pPatch->uState = PATCH_DISABLED;
5158 }
5159 STAM_COUNTER_ADD(&pVM->patm.s.StatDisabled, 1);
5160 }
5161
5162 Log(("PATMR3DisablePatch: disabled patch at %RRv\n", pInstrGC));
5163 return VINF_SUCCESS;
5164 }
5165 Log(("Patch not found!\n"));
5166 return VERR_PATCH_NOT_FOUND;
5167}
5168
5169/**
5170 * Permanently disable patch for privileged instruction at specified location
5171 *
5172 * @returns VBox status code.
5173 * @param pVM The VM to operate on.
5174 * @param pInstrGC        Guest context instruction pointer
5175 * @param pConflictAddr Guest context pointer which conflicts with specified patch
5176 * @param pConflictPatch Conflicting patch
5177 *
5178 */
5179static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pConflictPatch)
5180{
5181 NOREF(pConflictAddr);
5182#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
5183 PATCHINFO patch;
5184 DISCPUSTATE cpu;
5185 R3PTRTYPE(uint8_t *) pInstrHC;
5186 uint32_t cbInstr;
5187 bool disret;
5188 int rc;
5189
5190 RT_ZERO(patch);
5191 pInstrHC = PATMGCVirtToHCVirt(pVM, &patch, pInstrGC);
5192 disret = patmR3DisInstr(pVM, &patch, pInstrGC, pInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
5193 /*
5194     * If it's a 5-byte relative jump, then we can work around the problem by replacing the 32-bit relative offset
5195 * with one that jumps right into the conflict patch.
5196 * Otherwise we must disable the conflicting patch to avoid serious problems.
5197 */
5198 if ( disret == true
5199 && (pConflictPatch->flags & PATMFL_CODE32)
5200 && (cpu.pCurInstr->uOpcode == OP_JMP || (cpu.pCurInstr->fOpType & DISOPTYPE_COND_CONTROLFLOW))
5201 && (cpu.param1.fUse & DISUSE_IMMEDIATE32_REL))
5202 {
5203 /* Hint patches must be enabled first. */
5204 if (pConflictPatch->flags & PATMFL_INSTR_HINT)
5205 {
5206 Log(("Enabling HINTED patch %RRv\n", pConflictPatch->pPrivInstrGC));
5207 pConflictPatch->flags &= ~PATMFL_INSTR_HINT;
5208 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5209 Assert(rc == VINF_SUCCESS || rc == VERR_PATCH_NOT_FOUND);
5210 /* Enabling might fail if the patched code has changed in the meantime. */
5211 if (rc != VINF_SUCCESS)
5212 return rc;
5213 }
5214
5215 rc = PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_JUMP_CONFLICT);
5216 if (RT_SUCCESS(rc))
5217 {
5218 Log(("PATM -> CONFLICT: Installed JMP patch for patch conflict at %RRv\n", pInstrGC));
5219 STAM_COUNTER_INC(&pVM->patm.s.StatFixedConflicts);
5220 return VINF_SUCCESS;
5221 }
5222 }
5223#endif
5224
5225 if (pConflictPatch->opcode == OP_CLI)
5226 {
5227 /* Turn it into an int3 patch; our GC trap handler will call the generated code manually. */
5228 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> turn into int 3 patch!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5229 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5230 if (rc == VWRN_PATCH_REMOVED)
5231 return VINF_SUCCESS;
5232 if (RT_SUCCESS(rc))
5233 {
5234 pConflictPatch->flags &= ~(PATMFL_MUST_INSTALL_PATCHJMP|PATMFL_INSTR_HINT);
5235 pConflictPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
5236 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5237 if (rc == VERR_PATCH_NOT_FOUND)
5238 return VINF_SUCCESS; /* removed already */
5239
5240 AssertRC(rc);
5241 if (RT_SUCCESS(rc))
5242 {
5243 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
5244 return VINF_SUCCESS;
5245 }
5246 }
5247 /* else turned into unusable patch (see below) */
5248 }
5249 else
5250 {
5251 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> DISABLING it!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5252 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5253 if (rc == VWRN_PATCH_REMOVED)
5254 return VINF_SUCCESS;
5255 }
5256
5257 /* No need to monitor the code anymore. */
5258 if (pConflictPatch->flags & PATMFL_CODE_MONITORED)
5259 {
5260 int rc = patmRemovePatchPages(pVM, pConflictPatch);
5261 AssertRC(rc);
5262 }
5263 pConflictPatch->uState = PATCH_UNUSABLE;
5264 STAM_COUNTER_INC(&pVM->patm.s.StatUnusable);
5265 return VERR_PATCH_DISABLED;
5266}
5267
5268/**
5269 * Enable patch for privileged instruction at specified location
5270 *
5271 * @returns VBox status code.
5272 * @param pVM The VM to operate on.
5273 * @param pInstrGC    Guest context pointer to the privileged instruction
5274 *
5275 * @note returns failure if patching is not allowed or possible
5276 *
5277 */
5278VMMR3DECL(int) PATMR3EnablePatch(PVM pVM, RTRCPTR pInstrGC)
5279{
5280 PPATMPATCHREC pPatchRec;
5281 PPATCHINFO pPatch;
5282
5283 Log(("PATMR3EnablePatch %RRv\n", pInstrGC));
5284 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5285 if (pPatchRec)
5286 {
5287 int rc = VINF_SUCCESS;
5288
5289 pPatch = &pPatchRec->patch;
5290
5291 if (pPatch->uState == PATCH_DISABLED)
5292 {
5293 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
5294 {
5295 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
5296 uint8_t temp[16];
5297
5298 Assert(pPatch->cbPatchJump < sizeof(temp));
5299
5300 /* Let's first check if the guest code is still the same. */
5301 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5302 AssertRC(rc2);
5303 if (rc2 == VINF_SUCCESS)
5304 {
5305 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5306 {
5307                        Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5308 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5309 /* Remove it completely */
5310 rc = PATMR3RemovePatch(pVM, pInstrGC);
5311 AssertRC(rc);
5312 return VERR_PATCH_NOT_FOUND;
5313 }
5314
5315 PATMP2GLOOKUPREC cacheRec;
5316 RT_ZERO(cacheRec);
5317 cacheRec.pPatch = pPatch;
5318
5319 rc2 = patmGenJumpToPatch(pVM, pPatch, &cacheRec, false);
5320 /* Free leftover lock if any. */
5321 if (cacheRec.Lock.pvMap)
5322 {
5323 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
5324 cacheRec.Lock.pvMap = NULL;
5325 }
5326 AssertRC(rc2);
5327 if (RT_FAILURE(rc2))
5328 return rc2;
5329
5330#ifdef DEBUG
5331 {
5332 DISCPUSTATE cpu;
5333 char szOutput[256];
5334 uint32_t cbInstr;
5335 uint32_t i = 0;
5336 bool disret;
5337 while(i < pPatch->cbPatchJump)
5338 {
5339 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
5340 &cpu, &cbInstr, szOutput, sizeof(szOutput));
5341 Log(("Renewed patch instr: %s", szOutput));
5342 i += cbInstr;
5343 }
5344 }
5345#endif
5346 }
5347 }
5348 else
5349 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5350 {
5351 uint8_t temp[16];
5352
5353 Assert(pPatch->cbPatchJump < sizeof(temp));
5354
5355 /* Let's first check if the guest code is still the same. */
5356 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5357 AssertRC(rc2);
5358
5359 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5360 {
5361                    Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5362 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5363 rc = PATMR3RemovePatch(pVM, pInstrGC);
5364 AssertRC(rc);
5365 return VERR_PATCH_NOT_FOUND;
5366 }
5367
5368 rc2 = patmActivateInt3Patch(pVM, pPatch);
5369 if (RT_FAILURE(rc2))
5370 return rc2;
5371 }
5372
5373 pPatch->uState = pPatch->uOldState; //restore state
5374
5375 /* Restore the entry breakpoint with the original opcode (see PATMR3DisablePatch). */
5376 if (pPatch->pPatchBlockOffset)
5377 *PATCHCODE_PTR_HC(pPatch) = pPatch->bDirtyOpcode;
5378
5379 STAM_COUNTER_ADD(&pVM->patm.s.StatEnabled, 1);
5380 }
5381 else
5382 Log(("PATMR3EnablePatch: Unable to enable patch %RRv with state %d\n", pInstrGC, pPatch->uState));
5383
5384 return rc;
5385 }
5386 return VERR_PATCH_NOT_FOUND;
5387}
5388
5389/**
5390 * Remove patch for privileged instruction at specified location
5391 *
5392 * @returns VBox status code.
5393 * @param pVM The VM to operate on.
5394 * @param pPatchRec Patch record
5395 * @param fForceRemove Force removal even of protected patches (duplicated function / referenced code patches)
5396 */
5397int PATMRemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove)
5398{
5399 PPATCHINFO pPatch;
5400
5401 pPatch = &pPatchRec->patch;
5402
5403 /* Strictly forbidden to remove such patches. There can be dependencies!! */
5404 if (!fForceRemove && (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED)))
5405 {
5406 Log(("PATMRemovePatch %RRv REFUSED!\n", pPatch->pPrivInstrGC));
5407 return VERR_ACCESS_DENIED;
5408 }
5409 Log(("PATMRemovePatch %RRv\n", pPatch->pPrivInstrGC));
5410
5411 /* Note: NEVER EVER REUSE PATCH MEMORY */
5412 /* Note: PATMR3DisablePatch puts a breakpoint (0xCC) at the entry of this patch */
5413
5414 if (pPatchRec->patch.pPatchBlockOffset)
5415 {
5416 PAVLOU32NODECORE pNode;
5417
5418 pNode = RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->patch.pPatchBlockOffset);
5419 Assert(pNode);
5420 }
5421
5422 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
5423 {
5424 int rc = patmRemovePatchPages(pVM, &pPatchRec->patch);
5425 AssertRC(rc);
5426 }
5427
5428#ifdef VBOX_WITH_STATISTICS
5429 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5430 {
5431 STAMR3Deregister(pVM, &pPatchRec->patch);
5432#ifndef DEBUG_sandervl
5433 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5434 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5435 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5436 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5437 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5438 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5439 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5440 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5441 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5442 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5443 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5444 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5445 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5446 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5447#endif
5448 }
5449#endif
5450
5451 /* Note: no need to free Guest2PatchAddrTree as those records share memory with Patch2GuestAddrTree records. */
5452 patmEmptyTreeU32(pVM, &pPatch->Patch2GuestAddrTree);
5453 pPatch->nrPatch2GuestRecs = 0;
5454 Assert(pPatch->Patch2GuestAddrTree == 0);
5455
5456 patmEmptyTree(pVM, &pPatch->FixupTree);
5457 pPatch->nrFixups = 0;
5458 Assert(pPatch->FixupTree == 0);
5459
5460 if (pPatchRec->patch.pTempInfo)
5461 MMR3HeapFree(pPatchRec->patch.pTempInfo);
5462
5463 /* Note: might fail, because it has already been removed (e.g. during reset). */
5464 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5465
5466 /* Free the patch record */
5467 MMHyperFree(pVM, pPatchRec);
5468 return VINF_SUCCESS;
5469}
5470
5471/**
5472 * RTAvlU32DoWithAll() worker.
5473 * Checks whether the current trampoline instruction is the jump to the target patch
5474 * and updates the displacement to jump to the new target.
5475 *
5476 * @returns VBox status code.
5477 * @retval VERR_ALREADY_EXISTS if the jump was found.
5478 * @param pNode The current patch to guest record to check.
5479 * @param pvUser The refresh state.
5480 */
5481static int patmR3PatchRefreshFindTrampolinePatch(PAVLU32NODECORE pNode, void *pvUser)
5482{
5483 PRECPATCHTOGUEST pPatch2GuestRec = (PRECPATCHTOGUEST)pNode;
5484 PPATMREFRESHPATCH pRefreshPatchState = (PPATMREFRESHPATCH)pvUser;
5485 PVM pVM = pRefreshPatchState->pVM;
5486
5487 uint8_t *pPatchInstr = (uint8_t *)(pVM->patm.s.pPatchMemHC + pPatch2GuestRec->Core.Key);
5488
5489 /*
5490 * Check if the patch instruction starts with a jump.
5491 * ASSUMES that there is no other patch to guest record that starts
5492 * with a jump.
5493 */
5494 if (*pPatchInstr == 0xE9)
5495 {
5496 /* Jump found, update the displacement. */
5497 RTRCPTR pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pRefreshPatchState->pPatchRec,
5498 pRefreshPatchState->pPatchTrampoline->pPrivInstrGC);
5499 int32_t displ = pPatchTargetGC - (pVM->patm.s.pPatchMemGC + pPatch2GuestRec->Core.Key + SIZEOF_NEARJUMP32);
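        /* rel32 operand of a near jmp = target - (address of the byte following the 5-byte jump). */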
5500
5501        LogFlow(("Updating trampoline patch new patch target %RRv, new displacement %d (old was %d)\n",
5502 pPatchTargetGC, displ, *(uint32_t *)&pPatchInstr[1]));
5503
5504 *(uint32_t *)&pPatchInstr[1] = displ;
5505 return VERR_ALREADY_EXISTS; /** @todo better return code */
5506 }
5507
5508 return VINF_SUCCESS;
5509}
5510
5511/**
5512 * Attempt to refresh the patch by recompiling its entire code block
5513 *
5514 * @returns VBox status code.
5515 * @param pVM The VM to operate on.
5516 * @param pPatchRec Patch record
5517 */
5518int patmR3RefreshPatch(PVM pVM, PPATMPATCHREC pPatchRec)
5519{
5520 PPATCHINFO pPatch;
5521 int rc;
5522 RTRCPTR pInstrGC = pPatchRec->patch.pPrivInstrGC;
5523 PTRAMPREC pTrampolinePatchesHead = NULL;
5524
5525 Log(("patmR3RefreshPatch: attempt to refresh patch at %RRv\n", pInstrGC));
5526
5527 pPatch = &pPatchRec->patch;
5528 AssertReturn(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER), VERR_PATCHING_REFUSED);
5529 if (pPatch->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
5530 {
5531 if (!pPatch->pTrampolinePatchesHead)
5532 {
5533 /*
5534 * It is sometimes possible that there are trampoline patches to this patch
5535 * but they are not recorded (after a saved state load for example).
5536 * Refuse to refresh those patches.
5537 * Can hurt performance in theory if the patched code is modified by the guest
5538 * and is executed often. However most of the time states are saved after the guest
5539 * code was modified and is not updated anymore afterwards so this shouldn't be a
5540 * big problem.
5541 */
5542 Log(("patmR3RefreshPatch: refused because external jumps to this patch exist but the jumps are not recorded\n"));
5543 return VERR_PATCHING_REFUSED;
5544 }
5545 Log(("patmR3RefreshPatch: external jumps to this patch exist, updating\n"));
5546 pTrampolinePatchesHead = pPatch->pTrampolinePatchesHead;
5547 }
5548
5549 /** Note: quite ugly to enable/disable/remove/insert old and new patches, but there's no easy way around it. */
5550
5551 rc = PATMR3DisablePatch(pVM, pInstrGC);
5552 AssertRC(rc);
5553
5554 /** Kick it out of the lookup tree to make sure PATMR3InstallPatch doesn't fail (hack alert) */
5555 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5556#ifdef VBOX_WITH_STATISTICS
5557 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5558 {
5559 STAMR3Deregister(pVM, &pPatchRec->patch);
5560#ifndef DEBUG_sandervl
5561 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5562 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5563 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5564 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5565 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5566 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5567 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5568 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5569 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5570 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5571 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5572 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5573 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5574 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5575#endif
5576 }
5577#endif
5578
5579 /** Note: We don't attempt to reuse patch memory here as it's quite common that the new code block requires more memory. */
5580
5581 /* Attempt to install a new patch. */
5582 rc = PATMR3InstallPatch(pVM, pInstrGC, pPatch->flags & (PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT));
5583 if (RT_SUCCESS(rc))
5584 {
5585 RTRCPTR pPatchTargetGC;
5586 PPATMPATCHREC pNewPatchRec;
5587
5588 /* Determine target address in new patch */
5589 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pInstrGC);
5590 Assert(pPatchTargetGC);
5591 if (!pPatchTargetGC)
5592 {
5593 rc = VERR_PATCHING_REFUSED;
5594 goto failure;
5595 }
5596
5597 /* Reset offset into patch memory to put the next code blocks right at the beginning. */
5598 pPatch->uCurPatchOffset = 0;
5599
5600 /* insert jump to new patch in old patch block */
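        /* The old patch block is never freed or reused (see the notes above), so anything that
         * still jumps or returns into it is funneled into the fresh patch code by this jump. */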
5601 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC, false /* no lookup record */);
5602 if (RT_FAILURE(rc))
5603 goto failure;
5604
5605 pNewPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5606 Assert(pNewPatchRec); /* can't fail */
5607
5608 /* Remove old patch (only do that when everything is finished) */
5609 int rc2 = PATMRemovePatch(pVM, pPatchRec, true /* force removal */);
5610 AssertRC(rc2);
5611
5612 /* Put the new patch back into the tree, because removing the old one kicked this one out. (hack alert) */
5613 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pNewPatchRec->Core);
5614 Assert(fInserted); NOREF(fInserted);
5615
5616        Log(("PATM: patmR3RefreshPatch: successfully refreshed patch at %RRv\n", pInstrGC));
5617 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshSuccess);
5618
5619 /* Used by another patch, so don't remove it! */
5620 pNewPatchRec->patch.flags |= PATMFL_CODE_REFERENCED;
5621
5622 if (pTrampolinePatchesHead)
5623 {
5624 /* Update all trampoline patches to jump to the new patch. */
5625 PTRAMPREC pTrampRec = NULL;
5626 PATMREFRESHPATCH RefreshPatch;
5627
5628 RefreshPatch.pVM = pVM;
5629 RefreshPatch.pPatchRec = &pNewPatchRec->patch;
5630
5631 pTrampRec = pTrampolinePatchesHead;
5632
5633 while (pTrampRec)
5634 {
5635 PPATCHINFO pPatchTrampoline = &pTrampRec->pPatchTrampoline->patch;
5636
5637 RefreshPatch.pPatchTrampoline = pPatchTrampoline;
5638 /*
5639 * We have to find the right patch2guest record because there might be others
5640 * for statistics.
5641 */
5642 rc = RTAvlU32DoWithAll(&pPatchTrampoline->Patch2GuestAddrTree, true,
5643 patmR3PatchRefreshFindTrampolinePatch, &RefreshPatch);
5644 Assert(rc == VERR_ALREADY_EXISTS);
5645 rc = VINF_SUCCESS;
5646 pTrampRec = pTrampRec->pNext;
5647 }
5648 pNewPatchRec->patch.pTrampolinePatchesHead = pTrampolinePatchesHead;
5649 pNewPatchRec->patch.flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
5650 /* Clear the list of trampoline patches for the old patch (safety precaution). */
5651 pPatchRec->patch.pTrampolinePatchesHead = NULL;
5652 }
5653 }
5654
5655failure:
5656 if (RT_FAILURE(rc))
5657 {
5658        LogRel(("PATM: patmR3RefreshPatch: failed to refresh patch at %RRv. Reactivating the old one.\n", pInstrGC));
5659
5660 /* Remove the new inactive patch */
5661 rc = PATMR3RemovePatch(pVM, pInstrGC);
5662 AssertRC(rc);
5663
5664 /* Put the old patch back into the tree (or else it won't be saved) (hack alert) */
5665 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
5666 Assert(fInserted); NOREF(fInserted);
5667
5668 /* Enable again in case the dirty instruction is near the end and there are safe code paths. */
5669 int rc2 = PATMR3EnablePatch(pVM, pInstrGC);
5670 AssertRC(rc2);
5671
5672 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshFailed);
5673 }
5674 return rc;
5675}
5676
5677/**
5678 * Find patch for privileged instruction at specified location
5679 *
5680 * @returns Patch structure pointer if found; else NULL
5681 * @param pVM The VM to operate on.
5682 * @param pInstrGC       Guest context pointer to an instruction that might lie within 5 bytes of an existing patch jump
5683 * @param fIncludeHints Include hinted patches or not
5684 *
5685 */
5686PPATCHINFO PATMFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncludeHints)
5687{
5688 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5689 /* if the patch is enabled, the pointer is not identical to the privileged patch ptr and it lies within 5 bytes of this priv instr ptr, then we've got a hit! */
5690 if (pPatchRec)
5691 {
5692 if ( pPatchRec->patch.uState == PATCH_ENABLED
5693 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE)
5694 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5695 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5696 {
5697 Log(("Found active patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5698 return &pPatchRec->patch;
5699 }
5700 else
5701 if ( fIncludeHints
5702 && pPatchRec->patch.uState == PATCH_DISABLED
5703 && (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
5704 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5705 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5706 {
5707 Log(("Found HINT patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5708 return &pPatchRec->patch;
5709 }
5710 }
5711 return NULL;
5712}
5713
5714/**
5715 * Checks whether the GC address is inside a generated patch jump
5716 *
5717 * @returns true -> yes, false -> no
5718 * @param pVM The VM to operate on.
5719 * @param pAddr Guest context address
5720 * @param pPatchAddr Guest context patch address (if true)
5721 */
5722VMMR3DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatchAddr)
5723{
5724 RTRCPTR addr;
5725 PPATCHINFO pPatch;
5726
5727 if (PATMIsEnabled(pVM) == false)
5728 return false;
5729
5730 if (pPatchAddr == NULL)
5731 pPatchAddr = &addr;
5732
5733 *pPatchAddr = 0;
5734
5735 pPatch = PATMFindActivePatchByEntrypoint(pVM, pAddr);
5736 if (pPatch)
5737 *pPatchAddr = pPatch->pPrivInstrGC;
5738
5739 return *pPatchAddr == 0 ? false : true;
5740}
5741
5742/**
5743 * Remove patch for privileged instruction at specified location
5744 *
5745 * @returns VBox status code.
5746 * @param pVM The VM to operate on.
5747 * @param pInstrGC    Guest context pointer to the privileged instruction
5748 *
5749 * @note returns failure if patching is not allowed or possible
5750 *
5751 */
5752VMMR3DECL(int) PATMR3RemovePatch(PVM pVM, RTRCPTR pInstrGC)
5753{
5754 PPATMPATCHREC pPatchRec;
5755
5756 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5757 if (pPatchRec)
5758 {
5759 int rc = PATMR3DisablePatch(pVM, pInstrGC);
5760 if (rc == VWRN_PATCH_REMOVED)
5761 return VINF_SUCCESS;
5762
5763 return PATMRemovePatch(pVM, pPatchRec, false);
5764 }
5765 AssertFailed();
5766 return VERR_PATCH_NOT_FOUND;
5767}
5768
5769/**
5770 * Mark patch as dirty
5771 *
5772 * @returns VBox status code.
5773 * @param pVM The VM to operate on.
5774 * @param pPatch Patch record
5775 *
5776 * @note returns failure if patching is not allowed or possible
5777 *
5778 */
5779VMMR3DECL(int) PATMR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch)
5780{
5781 if (pPatch->pPatchBlockOffset)
5782 {
5783 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5784 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5785 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5786 }
5787
5788 STAM_COUNTER_INC(&pVM->patm.s.StatDirty);
5789 /* Put back the replaced instruction. */
5790 int rc = PATMR3DisablePatch(pVM, pPatch->pPrivInstrGC);
5791 if (rc == VWRN_PATCH_REMOVED)
5792 return VINF_SUCCESS;
5793
5794 /* Note: we don't restore patch pages for patches that are not enabled! */
5795 /* Note: be careful when changing this behaviour!! */
5796
5797 /* The patch pages are no longer marked for self-modifying code detection */
5798 if (pPatch->flags & PATMFL_CODE_MONITORED)
5799 {
5800 rc = patmRemovePatchPages(pVM, pPatch);
5801 AssertRCReturn(rc, rc);
5802 }
5803 pPatch->uState = PATCH_DIRTY;
5804
5805 /* Paranoia; make sure this patch is not somewhere in the callchain, so prevent ret instructions from succeeding. */
5806 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
5807
5808 return VINF_SUCCESS;
5809}
5810
5811/**
5812 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5813 *
5814 * @returns Corresponding guest context instruction pointer, or 0 if not found.
5815 * @param pVM The VM to operate on.
5816 * @param pPatch Patch block structure pointer
5817 * @param pPatchGC GC address in patch block
5818 */
5819RTRCPTR patmPatchGCPtr2GuestGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pPatchGC)
5820{
5821 Assert(pPatch->Patch2GuestAddrTree);
5822 /* Get the closest record from below. */
5823 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->Patch2GuestAddrTree, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5824 if (pPatchToGuestRec)
5825 return pPatchToGuestRec->pOrgInstrGC;
5826
5827 return 0;
5828}
5829
5830/**
 * Converts Guest code GC ptr to Patch code GC ptr (if found)
5831 *
5832 * @returns corresponding GC pointer in patch block
5833 * @param pVM The VM to operate on.
5834 * @param pPatch Current patch block pointer
5835 * @param pInstrGC Guest context pointer to privileged instruction
5836 *
5837 */
5838RTRCPTR patmGuestGCPtrToPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5839{
5840 if (pPatch->Guest2PatchAddrTree)
5841 {
5842 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
5843 if (pGuestToPatchRec)
5844 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
5845 }
5846
5847 return 0;
5848}
5849
5850/**
 * Converts Guest code GC ptr to Patch code GC ptr (or nearest from below if no identical match)
5851 *
5852 * @returns corresponding GC pointer in patch block
5853 * @param pVM The VM to operate on.
5854 * @param pPatch Current patch block pointer
5855 * @param pInstrGC Guest context pointer to privileged instruction
5856 *
5857 */
5858RTRCPTR patmGuestGCPtrToClosestPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5859{
5860 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pInstrGC, false);
5861 if (pGuestToPatchRec)
5862 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
5863
5864 return 0;
5865}
5866
5867/**
 * Converts Guest code GC ptr to Patch code GC ptr (if found)
5868 *
5869 * @returns corresponding GC pointer in patch block
5870 * @param pVM The VM to operate on.
5871 * @param pInstrGC Guest context pointer to privileged instruction
5872 *
5873 */
5874VMMR3DECL(RTRCPTR) PATMR3GuestGCPtrToPatchGCPtr(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC)
5875{
5876 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5877 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && pInstrGC >= pPatchRec->patch.pPrivInstrGC)
5878 return patmGuestGCPtrToPatchGCPtr(pVM, &pPatchRec->patch, pInstrGC);
5879 else
5880 return 0;
5881}
5882
5883/**
5884 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5885 *
5886 * @returns original GC instruction pointer or 0 if not found
5887 * @param pVM The VM to operate on.
5888 * @param pPatchGC GC address in patch block
5889 * @param pEnmState State of the translated address (out)
5890 *
5891 */
5892VMMR3DECL(RTRCPTR) PATMR3PatchToGCPtr(PVM pVM, RTRCPTR pPatchGC, PATMTRANSSTATE *pEnmState)
5893{
5894 PPATMPATCHREC pPatchRec;
5895 void *pvPatchCoreOffset;
5896 RTRCPTR pPrivInstrGC;
5897
5898 Assert(PATMIsPatchGCAddr(pVM, pPatchGC));
5899 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5900 if (pvPatchCoreOffset == 0)
5901 {
5902 Log(("PATMR3PatchToGCPtr failed for %RRv offset %x\n", pPatchGC, pPatchGC - pVM->patm.s.pPatchMemGC));
5903 return 0;
5904 }
5905 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
5906 pPrivInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, &pPatchRec->patch, pPatchGC);
5907 if (pEnmState)
5908 {
5909 AssertMsg(pPrivInstrGC && ( pPatchRec->patch.uState == PATCH_ENABLED
5910 || pPatchRec->patch.uState == PATCH_DIRTY
5911 || pPatchRec->patch.uState == PATCH_DISABLE_PENDING
5912 || pPatchRec->patch.uState == PATCH_UNUSABLE),
5913 ("pPrivInstrGC=%RRv uState=%d\n", pPrivInstrGC, pPatchRec->patch.uState));
5914
5915 if ( !pPrivInstrGC
5916 || pPatchRec->patch.uState == PATCH_UNUSABLE
5917 || pPatchRec->patch.uState == PATCH_REFUSED)
5918 {
5919 pPrivInstrGC = 0;
5920 *pEnmState = PATMTRANS_FAILED;
5921 }
5922 else
5923 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pPrivInstrGC)
5924 {
5925 *pEnmState = PATMTRANS_INHIBITIRQ;
5926 }
5927 else
5928 if ( pPatchRec->patch.uState == PATCH_ENABLED
5929 && !(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE))
5930 && pPrivInstrGC > pPatchRec->patch.pPrivInstrGC
5931 && pPrivInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5932 {
5933 *pEnmState = PATMTRANS_OVERWRITTEN;
5934 }
5935 else
5936 if (PATMFindActivePatchByEntrypoint(pVM, pPrivInstrGC))
5937 {
5938 *pEnmState = PATMTRANS_OVERWRITTEN;
5939 }
5940 else
5941 if (pPrivInstrGC == pPatchRec->patch.pPrivInstrGC)
5942 {
5943 *pEnmState = PATMTRANS_PATCHSTART;
5944 }
5945 else
5946 *pEnmState = PATMTRANS_SAFE;
5947 }
5948 return pPrivInstrGC;
5949}
5950
5951/**
5952 * Returns the GC pointer of the patch for the specified GC address
5953 *
5954 * @returns Guest context pointer to the patch code, or 0 if no enabled or dirty patch exists for the address.
5955 * @param pVM The VM to operate on.
5956 * @param pAddrGC Guest context address
5957 */
5958VMMR3DECL(RTRCPTR) PATMR3QueryPatchGCPtr(PVM pVM, RTRCPTR pAddrGC)
5959{
5960 PPATMPATCHREC pPatchRec;
5961
5962 /* Find the patch record. */
5963 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pAddrGC);
5964 /** @todo we should only use patches that are enabled! always did this, but it's incorrect! */
5965 if (pPatchRec && (pPatchRec->patch.uState == PATCH_ENABLED || pPatchRec->patch.uState == PATCH_DIRTY))
5966 return PATCHCODE_PTR_GC(&pPatchRec->patch);
5967 else
5968 return 0;
5969}
5970
5971/**
5972 * Attempt to recover dirty instructions
5973 *
5974 * @returns VBox status code.
5975 * @param pVM The VM to operate on.
5976 * @param pCtx CPU context
5977 * @param pPatch Patch record
5978 * @param pPatchToGuestRec Patch to guest address record
5979 * @param pEip GC pointer of trapping instruction
5980 */
5981static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch, PRECPATCHTOGUEST pPatchToGuestRec, RTRCPTR pEip)
5982{
5983 DISCPUSTATE CpuOld, CpuNew;
5984 uint8_t *pPatchInstrHC, *pCurPatchInstrHC;
5985 int rc;
5986 RTRCPTR pCurInstrGC, pCurPatchInstrGC;
5987 uint32_t cbDirty;
5988 PRECPATCHTOGUEST pRec;
5989 RTRCPTR const pOrgInstrGC = pPatchToGuestRec->pOrgInstrGC;
5990 PVMCPU pVCpu = VMMGetCpu0(pVM);
5991 Log(("patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv)\n", pEip, pOrgInstrGC));
5992
5993 pRec = pPatchToGuestRec;
5994 pCurInstrGC = pOrgInstrGC;
5995 pCurPatchInstrGC = pEip;
5996 cbDirty = 0;
5997 pPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
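    /* Two passes: first walk all adjacent dirty records, restoring their saved opcode bytes
     * and summing their size (cbDirty); then, if everything looked harmless, re-read the
     * current guest instructions and copy them over the stale patch code, padding any
     * leftover room with a jump or NOPs. */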
5998
5999 /* Find all adjacent dirty instructions */
6000 while (true)
6001 {
6002 if (pRec->fJumpTarget)
6003 {
6004 LogRel(("PATM: patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv) ignored, because instruction in function was reused as target of jump\n", pEip, pOrgInstrGC));
6005 pRec->fDirty = false;
6006 return VERR_PATCHING_REFUSED;
6007 }
6008
6009 /* Restore original instruction opcode byte so we can check if the write was indeed safe. */
6010 pCurPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6011 *pCurPatchInstrHC = pRec->u8DirtyOpcode;
6012
6013 /* Only harmless instructions are acceptable. */
6014 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurPatchInstrGC, &CpuOld, 0);
6015 if ( RT_FAILURE(rc)
6016 || !(CpuOld.pCurInstr->fOpType & DISOPTYPE_HARMLESS))
6017 {
6018 if (RT_SUCCESS(rc))
6019 cbDirty += CpuOld.cbInstr;
6020 else
6021 if (!cbDirty)
6022 cbDirty = 1;
6023 break;
6024 }
6025
6026#ifdef DEBUG
6027 char szBuf[256];
6028 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6029 szBuf, sizeof(szBuf), NULL);
6030 Log(("DIRTY: %s\n", szBuf));
6031#endif
6032 /* Mark as clean; if we fail we'll let it always fault. */
6033 pRec->fDirty = false;
6034
6035 /* Remove old lookup record. */
6036 patmr3RemoveP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrGC);
6037 pPatchToGuestRec = NULL;
6038
6039 pCurPatchInstrGC += CpuOld.cbInstr;
6040 cbDirty += CpuOld.cbInstr;
6041
6042 /* Let's see if there's another dirty instruction right after. */
6043 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6044 if (!pRec || !pRec->fDirty)
6045 break; /* no more dirty instructions */
6046
6047 /* In case of complex instructions the next guest instruction could be quite far off. */
6048 pCurPatchInstrGC = pRec->Core.Key + pVM->patm.s.pPatchMemGC;
6049 }
6050
6051 if ( RT_SUCCESS(rc)
6052 && (CpuOld.pCurInstr->fOpType & DISOPTYPE_HARMLESS)
6053 )
6054 {
6055 uint32_t cbLeft;
6056
6057 pCurPatchInstrHC = pPatchInstrHC;
6058 pCurPatchInstrGC = pEip;
6059 cbLeft = cbDirty;
6060
6061 while (cbLeft && RT_SUCCESS(rc))
6062 {
6063 bool fValidInstr;
6064
6065 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurInstrGC, &CpuNew, 0);
6066
6067 fValidInstr = !!(CpuNew.pCurInstr->fOpType & DISOPTYPE_HARMLESS);
6068 if ( !fValidInstr
6069 && (CpuNew.pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW)
6070 )
6071 {
6072 RTRCPTR pTargetGC = PATMResolveBranch(&CpuNew, pCurInstrGC);
6073
6074 if ( pTargetGC >= pOrgInstrGC
6075 && pTargetGC <= pOrgInstrGC + cbDirty
6076 )
6077 {
6078 /* A relative jump to an instruction inside or to the end of the dirty block is acceptable. */
6079 fValidInstr = true;
6080 }
6081 }
6082
6083 /* If the instruction is completely harmless (which implies a 1:1 patch copy). */
6084 if ( rc == VINF_SUCCESS
6085 && CpuNew.cbInstr <= cbLeft /* must still fit */
6086 && fValidInstr
6087 )
6088 {
6089#ifdef DEBUG
6090 char szBuf[256];
6091 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6092 szBuf, sizeof(szBuf), NULL);
6093 Log(("NEW: %s\n", szBuf));
6094#endif
6095
6096 /* Copy the new instruction. */
6097 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pCurPatchInstrHC, pCurInstrGC, CpuNew.cbInstr);
6098 AssertRC(rc);
6099
6100 /* Add a new lookup record for the duplicated instruction. */
6101 patmr3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6102 }
6103 else
6104 {
6105#ifdef DEBUG
6106 char szBuf[256];
6107 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6108 szBuf, sizeof(szBuf), NULL);
6109 Log(("NEW: %s (FAILED)\n", szBuf));
6110#endif
6111 /* Restore the old lookup record for the duplicated instruction. */
6112 patmr3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6113
6114 /** @todo in theory we need to restore the lookup records for the remaining dirty instructions too! */
6115 rc = VERR_PATCHING_REFUSED;
6116 break;
6117 }
6118 pCurInstrGC += CpuNew.cbInstr;
6119 pCurPatchInstrHC += CpuNew.cbInstr;
6120 pCurPatchInstrGC += CpuNew.cbInstr;
6121 cbLeft -= CpuNew.cbInstr;
6122
6123 /* Check if we expanded a complex guest instruction into a patch stream (e.g. call) */
6124 if (!cbLeft)
6125 {
6126 /* If the next patch instruction doesn't correspond to the next guest instruction, then we have some extra room to fill. */
6127 if (RTAvlU32Get(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC) == NULL)
6128 {
6129 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6130 if (pRec)
6131 {
6132 unsigned cbFiller = pRec->Core.Key + pVM->patm.s.pPatchMemGC - pCurPatchInstrGC;
6133 uint8_t *pPatchFillHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6134
6135 Assert(!pRec->fDirty);
6136
6137 Log(("Room left in patched instruction stream (%d bytes)\n", cbFiller));
6138 if (cbFiller >= SIZEOF_NEARJUMP32)
6139 {
6140 pPatchFillHC[0] = 0xE9;
6141 *(uint32_t *)&pPatchFillHC[1] = cbFiller - SIZEOF_NEARJUMP32;
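                            /* jmp rel32 with an operand of (cbFiller - 5): next EIP = jump address + 5
                             * + (cbFiller - 5), i.e. exactly the next mapped patch instruction. */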
6142#ifdef DEBUG
6143 char szBuf[256];
6144 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6145 szBuf, sizeof(szBuf), NULL);
6146 Log(("FILL: %s\n", szBuf));
6147#endif
6148 }
6149 else
6150 {
6151 for (unsigned i = 0; i < cbFiller; i++)
6152 {
6153 pPatchFillHC[i] = 0x90; /* NOP */
6154#ifdef DEBUG
6155 char szBuf[256];
6156 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC + i,
6157 DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6158 Log(("FILL: %s\n", szBuf));
6159#endif
6160 }
6161 }
6162 }
6163 }
6164 }
6165 }
6166 }
6167 else
6168 rc = VERR_PATCHING_REFUSED;
6169
6170 if (RT_SUCCESS(rc))
6171 {
6172 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyGood);
6173 }
6174 else
6175 {
6176 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyBad);
6177 Assert(cbDirty);
6178
6179 /* Mark the whole instruction stream with breakpoints. */
6180 if (cbDirty)
6181 memset(pPatchInstrHC, 0xCC, cbDirty);
6182
6183 if ( pVM->patm.s.fOutOfMemory == false
6184 && (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER)))
6185 {
6186 rc = patmR3RefreshPatch(pVM, pPatch);
6187 if (RT_FAILURE(rc))
6188 {
6189 LogRel(("PATM: Failed to refresh dirty patch at %RRv. Disabling it.\n", pPatch->patch.pPrivInstrGC));
6190 }
6191 /* Even if we succeed, we must go back to the original instruction as the patched one could be invalid. */
6192 rc = VERR_PATCHING_REFUSED;
6193 }
6194 }
6195 return rc;
6196}
6197
6198/**
6199 * Handle trap inside patch code
6200 *
6201 * @returns VBox status code.
6202 * @param pVM The VM to operate on.
6203 * @param pCtx CPU context
6204 * @param pEip GC pointer of trapping instruction
6205 * @param ppNewEip GC pointer to new instruction
6206 */
6207VMMR3DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *ppNewEip)
6208{
6209 PPATMPATCHREC pPatch = 0;
6210 void *pvPatchCoreOffset;
6211 RTRCUINTPTR offset;
6212 RTRCPTR pNewEip;
6213    int rc;
6214 PRECPATCHTOGUEST pPatchToGuestRec = 0;
6215 PVMCPU pVCpu = VMMGetCpu0(pVM);
6216
6217 Assert(pVM->cCpus == 1);
6218
6219 pNewEip = 0;
6220 *ppNewEip = 0;
6221
6222 STAM_PROFILE_ADV_START(&pVM->patm.s.StatHandleTrap, a);
6223
6224 /* Find the patch record. */
6225 /* Note: there might not be a patch to guest translation record (global function) */
6226 offset = pEip - pVM->patm.s.pPatchMemGC;
6227 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
6228 if (pvPatchCoreOffset)
6229 {
6230 pPatch = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6231
6232 Assert(offset >= pPatch->patch.pPatchBlockOffset && offset < pPatch->patch.pPatchBlockOffset + pPatch->patch.cbPatchBlockSize);
6233
6234 if (pPatch->patch.uState == PATCH_DIRTY)
6235 {
6236 Log(("PATMR3HandleTrap: trap in dirty patch at %RRv\n", pEip));
6237 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6238 {
6239 /* Function duplication patches set fPIF to 1 on entry */
6240 pVM->patm.s.pGCStateHC->fPIF = 1;
6241 }
6242 }
6243 else
6244 if (pPatch->patch.uState == PATCH_DISABLED)
6245 {
6246 Log(("PATMR3HandleTrap: trap in disabled patch at %RRv\n", pEip));
6247 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6248 {
6249 /* Function duplication patches set fPIF to 1 on entry */
6250 pVM->patm.s.pGCStateHC->fPIF = 1;
6251 }
6252 }
6253 else
6254 if (pPatch->patch.uState == PATCH_DISABLE_PENDING)
6255 {
6256 RTRCPTR pPrivInstrGC = pPatch->patch.pPrivInstrGC;
6257
6258 Log(("PATMR3HandleTrap: disable operation is pending for patch at %RRv\n", pPatch->patch.pPrivInstrGC));
6259 rc = PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6260 AssertReleaseMsg(rc != VWRN_PATCH_REMOVED, ("PATMR3DisablePatch removed patch at %RRv\n", pPrivInstrGC));
6261 AssertMsg(pPatch->patch.uState == PATCH_DISABLED || pPatch->patch.uState == PATCH_UNUSABLE, ("Unexpected failure to disable patch state=%d rc=%Rrc\n", pPatch->patch.uState, rc));
6262 }
6263
6264 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, offset, false);
6265 AssertReleaseMsg(pPatchToGuestRec, ("PATMR3HandleTrap: Unable to find corresponding guest address for %RRv (offset %x)\n", pEip, offset));
6266
6267 pNewEip = pPatchToGuestRec->pOrgInstrGC;
6268 pPatch->patch.cTraps++;
6269 PATM_STAT_FAULT_INC(&pPatch->patch);
6270 }
6271 else
6272 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 0, ("PATMR3HandleTrap: Unable to find translation record for %RRv (PIF=0)\n", pEip));
6273
6274 /* Check if we were interrupted in PATM generated instruction code. */
6275 if (pVM->patm.s.pGCStateHC->fPIF == 0)
6276 {
6277 DISCPUSTATE Cpu;
6278 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pEip, &Cpu, "PIF Trap: ");
6279 AssertRC(rc);
6280
6281 if ( rc == VINF_SUCCESS
6282 && ( Cpu.pCurInstr->uOpcode == OP_PUSHF
6283 || Cpu.pCurInstr->uOpcode == OP_PUSH
6284 || Cpu.pCurInstr->uOpcode == OP_CALL)
6285 )
6286 {
6287 uint64_t fFlags;
6288
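            /* PATM-emitted pushf/push/call instructions write to the guest stack, so a fault
               with fPIF clear here is normally a not-present or write-protected stack page. */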
6289 STAM_COUNTER_INC(&pVM->patm.s.StatPushTrap);
6290
6291 if (Cpu.pCurInstr->uOpcode == OP_PUSH)
6292 {
6293 rc = PGMShwGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6294 if ( rc == VINF_SUCCESS
6295 && ((fFlags & (X86_PTE_P|X86_PTE_RW)) == (X86_PTE_P|X86_PTE_RW)) )
6296 {
6297 /* The stack address is fine, so the push argument is a pointer -> emulate this instruction */
6298
6299 /* Reset the PATM stack. */
6300 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6301
6302 pVM->patm.s.pGCStateHC->fPIF = 1;
6303
6304 Log(("Faulting push -> go back to the original instruction\n"));
6305
6306 /* continue at the original instruction */
6307 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6308 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6309 return VINF_SUCCESS;
6310 }
6311 }
6312
6313 /* Typical pushf (most patches) or push (call patch) trap caused by a monitored (write-protected) stack page. */
6314 rc = PGMShwMakePageWritable(pVCpu, pCtx->esp, 0 /*fFlags*/);
6315 AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
6316 if (rc == VINF_SUCCESS)
6317 {
6318 /* The guest page *must* be present. */
6319 rc = PGMGstGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6320 if ( rc == VINF_SUCCESS
6321 && (fFlags & X86_PTE_P))
6322 {
6323 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6324 return VINF_PATCH_CONTINUE;
6325 }
6326 }
6327 }
6328 else
6329 if (pPatch && pPatch->patch.pPrivInstrGC == pNewEip) /* pPatch may be NULL when no patch record was found above. */
6330 {
6331 /* Invalidated patch or first instruction overwritten.
6332 * We can ignore the fPIF state in this case.
6333 */
6334 /* Reset the PATM stack. */
6335 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6336
6337 Log(("Call to invalidated patch -> go back to the original instruction\n"));
6338
6339 pVM->patm.s.pGCStateHC->fPIF = 1;
6340
6341 /* continue at the original instruction */
6342 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6343 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6344 return VINF_SUCCESS;
6345 }
6346
6347 char szBuf[256];
6348 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pEip, DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6349
6350 /* Very bad: we crashed in PATM-emitted code, most likely on a guest stack access. */
6351 if (pPatch)
6352 {
6353 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6354 ("Crash in patch code %RRv (%RRv) esp=%RX32\nPatch state=%x flags=%RX64 fDirty=%d\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), pPatch->patch.uState, pPatch->patch.flags, pPatchToGuestRec->fDirty, szBuf));
6355 }
6356 else
6357 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6358 ("Crash in patch code %RRv (%RRv) esp=%RX32\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), szBuf));
6359 EMR3FatalError(pVCpu, VERR_INTERNAL_ERROR);
6360 }
6361
6362 /* From here on, we must have a valid patch to guest translation. */
6363 if (pvPatchCoreOffset == 0)
6364 {
6365 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6366 AssertMsgFailed(("PATMR3HandleTrap: patch not found at address %RRv!!\n", pEip));
6367 return VERR_PATCH_NOT_FOUND;
6368 }
6369
6370 /* Take care of dirty/changed instructions. */
6371 if (pPatchToGuestRec->fDirty)
6372 {
6373 Assert(pPatchToGuestRec->Core.Key == offset);
6374 Assert(pVM->patm.s.pGCStateHC->fPIF == 1);
6375
6376 rc = patmR3HandleDirtyInstr(pVM, pCtx, pPatch, pPatchToGuestRec, pEip);
6377 if (RT_SUCCESS(rc))
6378 {
6379 /* Retry the current instruction. */
6380 pNewEip = pEip;
6381 rc = VINF_PATCH_CONTINUE; /* Continue at current patch instruction. */
6382 }
6383 else
6384 {
6385 /* Reset the PATM stack. */
6386 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6387
6388 rc = VINF_SUCCESS; /* Continue at original instruction. */
6389 }
6390
6391 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6392 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6393 return rc;
6394 }
6395
6396#ifdef VBOX_STRICT
6397 if (pPatch->patch.flags & PATMFL_DUPLICATE_FUNCTION)
6398 {
6399 DISCPUSTATE cpu;
6400 bool disret;
6401 uint32_t cbInstr;
6402 PATMP2GLOOKUPREC cacheRec;
6403 RT_ZERO(cacheRec);
6404 cacheRec.pPatch = &pPatch->patch;
6405
6406 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
6407 &cpu, &cbInstr);
6408 if (cacheRec.Lock.pvMap)
6409 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6410
6411 if (disret && cpu.pCurInstr->uOpcode == OP_RETN)
6412 {
6413 RTRCPTR retaddr;
6414 PCPUMCTX pCtx2;
6415
6416 pCtx2 = CPUMQueryGuestCtxPtr(pVCpu);
6417
6418 rc = PGMPhysSimpleReadGCPtr(pVCpu, &retaddr, pCtx2->esp, sizeof(retaddr));
6419 AssertRC(rc);
6420
6421 Log(("Return failed at %RRv (%RRv)\n", pEip, pNewEip));
6422 Log(("Expected return address %RRv found address %RRv Psp=%x\n", pVM->patm.s.pGCStackHC[(pVM->patm.s.pGCStateHC->Psp+PATM_STACK_SIZE)/sizeof(RTRCPTR)], retaddr, pVM->patm.s.pGCStateHC->Psp));
6423 }
6424 }
6425#endif
6426
6427 /* Return the original address, corrected by subtracting the CS base address. */
6428 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6429
6430 /* Reset the PATM stack. */
6431 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6432
6433 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pNewEip)
6434 {
6435 /* Must be a faulting instruction after sti; currently only sysexit, hlt or iret */
6436 Log(("PATMR3HandleTrap %RRv -> inhibit irqs set!\n", pEip));
6437#ifdef VBOX_STRICT
6438 DISCPUSTATE cpu;
6439 bool disret;
6440 uint32_t cbInstr;
6441 PATMP2GLOOKUPREC cacheRec;
6442 RT_ZERO(cacheRec);
6443 cacheRec.pPatch = &pPatch->patch;
6444
6445 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_ORGCODE,
6446 &cpu, &cbInstr);
6447 if (cacheRec.Lock.pvMap)
6448 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6449
6450 if (disret && (cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_INT3))
6451 {
6452 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
6453 &cpu, &cbInstr);
6454 if (cacheRec.Lock.pvMap)
6455 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6456
6457 Assert(cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_IRET);
6458 }
6459#endif
6460 EMSetInhibitInterruptsPC(pVCpu, pNewEip);
6461 pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts = 0;
6462 }
6463
6464 Log2(("pPatchBlockGC %RRv - pEip %RRv corresponding GC address %RRv\n", PATCHCODE_PTR_GC(&pPatch->patch), pEip, pNewEip));
6465#ifdef LOG_ENABLED
6466 CPUMR3DisasmInstr(pVM, pVCpu, pCtx, pNewEip, "PATCHRET: ");
6467#endif
6468 if (pNewEip >= pPatch->patch.pPrivInstrGC && pNewEip < pPatch->patch.pPrivInstrGC + pPatch->patch.cbPatchJump)
6469 {
6470 /* We can't jump back to code that we've overwritten with a 5-byte jump! */
6471 Log(("Disabling patch at location %RRv due to trap too close to the privileged instruction\n", pPatch->patch.pPrivInstrGC));
6472 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6473 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6474 return VERR_PATCH_DISABLED;
6475 }
6476
6477#ifdef PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
6478 /** @todo Compare to the number of successful runs; add an aging algorithm and determine the best time to disable the patch. */
6479 if (pPatch->patch.cTraps > MAX_PATCH_TRAPS)
6480 {
6481 Log(("Disabling patch at location %RRv due to too many traps inside patch code\n", pPatch->patch.pPrivInstrGC));
6482 // We are only wasting time; back out the patch.
6483 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6485 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6486 return VERR_PATCH_DISABLED;
6487 }
6488#endif
6489
6490 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6491 return VINF_SUCCESS;
6492}
6493
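/*
 * Illustrative sketch only (not part of the original source): roughly how a ring-3
 * trap path could hand a fault inside patch memory back to PATM.  The surrounding
 * control flow is an assumption made for illustration; the real call site lives in
 * the TRPM trap forwarding code.
 *
 * @code
 *     if (PATMIsPatchGCAddr(pVM, pCtx->eip))
 *     {
 *         RTGCPTR GCPtrNewEip = 0;
 *         int rc2 = PATMR3HandleTrap(pVM, pCtx, pCtx->eip, &GCPtrNewEip);
 *         if (rc2 == VINF_SUCCESS)
 *             pCtx->eip = (uint32_t)GCPtrNewEip;   // resume at the original guest instruction
 *         else if (rc2 == VINF_PATCH_CONTINUE)
 *             ;                                    // re-execute the faulting patch instruction
 *     }
 * @endcode
 */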
6494
6495/**
6496 * Handle a page fault in a monitored page.
6497 *
6498 * @returns VBox status code.
6499 * @param pVM The VM to operate on.
6500 */
6501VMMR3DECL(int) PATMR3HandleMonitoredPage(PVM pVM)
6502{
6503 RTRCPTR addr = pVM->patm.s.pvFaultMonitor;
6504
6505 addr &= PAGE_BASE_GC_MASK;
6506
6507 int rc = PGMHandlerVirtualDeregister(pVM, addr);
6508 AssertRC(rc); NOREF(rc);
6509
6510 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, false);
6511 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) == PAGE_ADDRESS(addr))
6512 {
6513 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6514 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6515 rc = PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6516 if (rc == VWRN_PATCH_REMOVED)
6517 return VINF_SUCCESS;
6518
6519 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6520
6521 if (addr == pPatchRec->patch.pPrivInstrGC)
6522 addr++;
6523 }
6524
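    /* Walk all other patches whose privileged instruction lives in the same guest page
       and refresh each enabled one with a disable/enable cycle. */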
6525 for (;;)
6526 {
6527 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, true);
6528
6529 if (!pPatchRec || PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) != PAGE_ADDRESS(addr))
6530 break;
6531
6532 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
6533 {
6534 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6535 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6536 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6537 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6538 }
6539 addr = pPatchRec->patch.pPrivInstrGC + 1;
6540 }
6541
6542 pVM->patm.s.pvFaultMonitor = 0;
6543 return VINF_SUCCESS;
6544}
6545
6546
6547#ifdef VBOX_WITH_STATISTICS
6548
6549static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch)
6550{
6551 if (pPatch->flags & PATMFL_SYSENTER)
6552 {
6553 return "SYSENT";
6554 }
6555 else
6556 if (pPatch->flags & (PATMFL_TRAPHANDLER|PATMFL_INTHANDLER))
6557 {
6558 static char szTrap[16];
6559 uint32_t iGate;
6560
6561 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
6562 if (iGate < 256)
6563 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-%2X" : "TRAP-%2X", iGate);
6564 else
6565 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-??" : "TRAP-??");
6566 return szTrap;
6567 }
6568 else
6569 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
6570 return "DUPFUNC";
6571 else
6572 if (pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL)
6573 return "FUNCCALL";
6574 else
6575 if (pPatch->flags & PATMFL_TRAMPOLINE)
6576 return "TRAMP";
6577 else
6578 return patmGetInstructionString(pPatch->opcode, pPatch->flags);
6579}
6580
6581static const char *PATMPatchState(PVM pVM, PPATCHINFO pPatch)
6582{
6583 NOREF(pVM);
6584 switch (pPatch->uState)
6585 {
6586 case PATCH_ENABLED:
6587 return "ENA";
6588 case PATCH_DISABLED:
6589 return "DIS";
6590 case PATCH_DIRTY:
6591 return "DIR";
6592 case PATCH_UNUSABLE:
6593 return "UNU";
6594 case PATCH_REFUSED:
6595 return "REF";
6596 case PATCH_DISABLE_PENDING:
6597 return "DIP";
6598 default:
6599 AssertFailed();
6600 return " ";
6601 }
6602}
6603
6604/**
6605 * Resets the sample.
6606 * @param pVM The VM handle.
6607 * @param pvSample The sample registered using STAMR3RegisterCallback.
6608 */
6609static void patmResetStat(PVM pVM, void *pvSample)
6610{
6611 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6612 Assert(pPatch);
6613
6614 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A = 0;
6615 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B = 0;
6616}
6617
6618/**
6619 * Prints the sample into the buffer.
6620 *
6621 * @param pVM The VM handle.
6622 * @param pvSample The sample registered using STAMR3RegisterCallback.
6623 * @param pszBuf The buffer to print into.
6624 * @param cchBuf The size of the buffer.
6625 */
6626static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf)
6627{
6628 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6629 Assert(pPatch);
6630
6631 Assert(pPatch->uState != PATCH_REFUSED);
6632 Assert(!(pPatch->flags & (PATMFL_REPLACE_FUNCTION_CALL|PATMFL_MMIO_ACCESS)));
6633
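    /* Produces a line like (illustrative values): "size 01a0 ->ENA  DUPFUNC - 00004711 - 00000002" */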
6634 RTStrPrintf(pszBuf, cchBuf, "size %04x ->%3s %8s - %08d - %08d",
6635 pPatch->cbPatchBlockSize, PATMPatchState(pVM, pPatch), PATMPatchType(pVM, pPatch),
6636 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A, pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B);
6637}
6638
6639/**
6640 * Returns the GC address of the corresponding patch statistics counter
6641 *
6642 * @returns Stat address
6643 * @param pVM The VM to operate on.
6644 * @param pPatch Patch structure
6645 */
6646RTRCPTR patmPatchQueryStatAddress(PVM pVM, PPATCHINFO pPatch)
6647{
6648 Assert(pPatch->uPatchIdx != PATM_STAT_INDEX_NONE);
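    /* pStatsGC and pStatsHC map the same STAMRATIOU32 array, so this is the GC view of
       &pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A used by the callbacks above. */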
6649 return pVM->patm.s.pStatsGC + sizeof(STAMRATIOU32) * pPatch->uPatchIdx + RT_OFFSETOF(STAMRATIOU32, u32A);
6650}
6651
6652#endif /* VBOX_WITH_STATISTICS */
6653
6654#ifdef VBOX_WITH_DEBUGGER
6655/**
6656 * The '.patmoff' command.
6657 *
6658 * @returns VBox status code.
6659 * @param pCmd Pointer to the command descriptor (as registered).
6660 * @param pCmdHlp Pointer to command helper functions.
6661 * @param pVM Pointer to the current VM (if any).
6662 * @param paArgs Pointer to (readonly) array of arguments.
6663 * @param cArgs Number of arguments in the array.
6664 */
6665static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
6666{
6667 /*
6668 * Validate input.
6669 */
6670 NOREF(pCmd); NOREF(cArgs); NOREF(paArgs);
6671 if (!pVM)
6672 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
6673
6674 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, DisableAllPatches, pVM);
6675 PATMR3AllowPatching(pVM, false);
6676 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching disabled\n");
6677}
6678
6679/**
6680 * The '.patmon' command.
6681 *
6682 * @returns VBox status code.
6683 * @param pCmd Pointer to the command descriptor (as registered).
6684 * @param pCmdHlp Pointer to command helper functions.
6685 * @param pVM Pointer to the current VM (if any).
6686 * @param paArgs Pointer to (readonly) array of arguments.
6687 * @param cArgs Number of arguments in the array.
6688 */
6689static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
6690{
6691 /*
6692 * Validate input.
6693 */
6694 NOREF(pCmd); NOREF(cArgs); NOREF(paArgs);
6695 if (!pVM)
6696 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
6697
6698 PATMR3AllowPatching(pVM, true);
6699 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, EnableAllPatches, pVM);
6700 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching enabled\n");
6701}
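/*
 * Illustrative use from the VirtualBox debugger console (the prompt shown is an
 * assumption; the command names and the output text come from the handlers above):
 *
 *      VBoxDbg> .patmoff
 *      Patching disabled
 *      VBoxDbg> .patmon
 *      Patching enabled
 */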
6702#endif