VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PATM.cpp @ 41732

Last change on this file since 41732 was 41732, checked in by vboxsync, 13 years ago

DISCPUSTATE::opsize -> cbInstr.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 254.0 KB
1/* $Id: PATM.cpp 41732 2012-06-14 23:57:45Z vboxsync $ */
2/** @file
3 * PATM - Dynamic Guest OS Patching Manager
4 *
5 * NOTE: Never ever reuse patch memory!!
6 */
7
8/*
9 * Copyright (C) 2006-2012 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_PATM
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/stam.h>
26#include <VBox/vmm/pgm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/cpumdis.h>
29#include <VBox/vmm/iom.h>
30#include <VBox/vmm/mm.h>
31#include <VBox/vmm/em.h>
32#include <VBox/vmm/ssm.h>
33#include <VBox/vmm/trpm.h>
34#include <VBox/vmm/cfgm.h>
35#include <VBox/param.h>
36#include <VBox/vmm/selm.h>
37#include <iprt/avl.h>
38#include "PATMInternal.h"
39#include "PATMPatch.h"
40#include <VBox/vmm/vm.h>
41#include <VBox/vmm/csam.h>
42#include <VBox/dbg.h>
43#include <VBox/err.h>
44#include <VBox/log.h>
45#include <iprt/assert.h>
46#include <iprt/asm.h>
47#include <VBox/dis.h>
48#include <VBox/disopcode.h>
49#include "internal/pgm.h"
50
51#include <iprt/string.h>
52#include "PATMA.h"
53
54//#define PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
55//#define PATM_DISABLE_ALL
56
57/**
58 * Refresh trampoline patch state.
59 */
60typedef struct PATMREFRESHPATCH
61{
62 /** Pointer to the VM structure. */
63 PVM pVM;
64 /** The trampoline patch record. */
65 PPATCHINFO pPatchTrampoline;
66 /** The new patch we want to jump to. */
67 PPATCHINFO pPatchRec;
68} PATMREFRESHPATCH, *PPATMREFRESHPATCH;
69
70
71#define PATMREAD_RAWCODE 1 /* read code as-is */
72#define PATMREAD_ORGCODE 2 /* read original guest opcode bytes; not the patched bytes */
73#define PATMREAD_NOCHECK 4 /* don't check for patch conflicts */
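/* These flags are handed to the disassembler read callback via PATMDISASM::fReadFlags
   (see patmReadBytes below): PATMREAD_ORGCODE first tries to fetch each byte through
   PATMR3QueryOpcode() so that guest code which already carries a patch jump is
   disassembled in its original form, and PATMREAD_NOCHECK suppresses the strict-build
   assertion that the bytes being read do not lie inside an installed patch jump. */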
74
75/*
76 * Private structure used during disassembly
77 */
78typedef struct
79{
80 PVM pVM;
81 PPATCHINFO pPatchInfo;
82 R3PTRTYPE(uint8_t *) pInstrHC;
83 RTRCPTR pInstrGC;
84 uint32_t fReadFlags;
85} PATMDISASM, *PPATMDISASM;
86
87
88/*******************************************************************************
89* Internal Functions *
90*******************************************************************************/
91
92static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pPatch);
93static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
94static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
95
96#ifdef LOG_ENABLED // keep gcc quiet
97static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC);
98#endif
99#ifdef VBOX_WITH_STATISTICS
100static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch);
101static void patmResetStat(PVM pVM, void *pvSample);
102static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf);
103#endif
104
105#define patmPatchHCPtr2PatchGCPtr(pVM, pHC) (pVM->patm.s.pPatchMemGC + (pHC - pVM->patm.s.pPatchMemHC))
106#define patmPatchGCPtr2PatchHCPtr(pVM, pGC) (pVM->patm.s.pPatchMemHC + (pGC - pVM->patm.s.pPatchMemGC))
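/* Illustrative use of the macros above: translating a host (HC) patch address into its
   raw-mode (GC) counterpart just carries the offset over to the other mapping, e.g.
       uintptr_t off = pHC - pVM->patm.s.pPatchMemHC;
       RTRCPTR   pGC = pVM->patm.s.pPatchMemGC + off;   // == patmPatchHCPtr2PatchGCPtr(pVM, pHC)
 */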
107
108static int patmReinit(PVM pVM);
109static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam);
110
111#ifdef VBOX_WITH_DEBUGGER
112static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM);
113static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
114static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
115
116/** Command descriptors. */
117static const DBGCCMD g_aCmds[] =
118{
119 /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, fFlags, pfnHandler, pszSyntax, ..., pszDescription */
120 { "patmon", 0, 0, NULL, 0, 0, patmr3CmdOn, "", "Enable patching." },
121 { "patmoff", 0, 0, NULL, 0, 0, patmr3CmdOff, "", "Disable patching." },
122};
123#endif
124
125/* Don't want to break saved states, so put it here as a global variable. */
126static unsigned int cIDTHandlersDisabled = 0;
127
128/**
129 * Initializes the PATM.
130 *
131 * @returns VBox status code.
132 * @param pVM The VM to operate on.
133 */
134VMMR3DECL(int) PATMR3Init(PVM pVM)
135{
136 int rc;
137
138 Log(("PATMR3Init: Patch record size %d\n", sizeof(PATCHINFO)));
139
140 /* These values can't change as they are hardcoded in patch code (old saved states!) */
141 AssertCompile(VMCPU_FF_TIMER == RT_BIT_32(2));
142 AssertCompile(VM_FF_REQUEST == VMCPU_FF_REQUEST);
143 AssertCompile(VMCPU_FF_INTERRUPT_APIC == RT_BIT_32(0));
144 AssertCompile(VMCPU_FF_INTERRUPT_PIC == RT_BIT_32(1));
145
146 AssertReleaseMsg(PATMInterruptFlag == (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST),
147 ("Interrupt flags out of sync!! PATMInterruptFlag=%#x expected %#x. broken assembler?\n", PATMInterruptFlag, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST));
148
149 /* Allocate patch memory and GC patch state memory. */
150 pVM->patm.s.cbPatchMem = PATCH_MEMORY_SIZE;
151 /* Add another page in case the generated code is much larger than expected. */
152 /** @todo bad safety precaution */
153 rc = MMR3HyperAllocOnceNoRel(pVM, PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE + PATM_STAT_MEMSIZE, PAGE_SIZE, MM_TAG_PATM, (void **)&pVM->patm.s.pPatchMemHC);
154 if (RT_FAILURE(rc))
155 {
156 Log(("MMHyperAlloc failed with %Rrc\n", rc));
157 return rc;
158 }
159 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
160
161 /* PATM stack page for call instruction execution. (2 parts: one for our private stack and one to store the original return address) */
162 pVM->patm.s.pGCStackHC = (RTRCPTR *)(pVM->patm.s.pPatchMemHC + PATCH_MEMORY_SIZE + PAGE_SIZE);
163 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
164
165 /*
166 * Hypervisor memory for GC status data (read/write)
167 *
168 * Note1: This is non-critical data; if trashed by the guest, then it will only cause problems for itself
169 * Note2: This doesn't really belong here, but we need access to it for relocation purposes
170 *
171 */
172 Assert(sizeof(PATMGCSTATE) < PAGE_SIZE); /* Note: hardcoded dependencies on this exist. */
173 pVM->patm.s.pGCStateHC = (PPATMGCSTATE)((uint8_t *)pVM->patm.s.pGCStackHC + PATM_STACK_TOTAL_SIZE);
174 pVM->patm.s.pGCStateGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
175
176 /* Hypervisor memory for patch statistics */
177 pVM->patm.s.pStatsHC = (PSTAMRATIOU32)((uint8_t *)pVM->patm.s.pGCStateHC + PAGE_SIZE);
178 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
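 /* Resulting layout of the single block allocated above (offsets relative to
    pPatchMemHC / pPatchMemGC):
        +0                                    patch code      (PATCH_MEMORY_SIZE)
        +PATCH_MEMORY_SIZE                    spare page      (PAGE_SIZE, overrun slack)
        +PATCH_MEMORY_SIZE+PAGE_SIZE          pGCStack*       (PATM_STACK_TOTAL_SIZE)
        + ... +PATM_STACK_TOTAL_SIZE          pGCState*       (PATMGCSTATE, < PAGE_SIZE)
        + ... +PAGE_SIZE                      pStats*         (PATM_STAT_MEMSIZE) */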
179
180 /* Memory for patch lookup trees. */
181 rc = MMHyperAlloc(pVM, sizeof(*pVM->patm.s.PatchLookupTreeHC), 0, MM_TAG_PATM, (void **)&pVM->patm.s.PatchLookupTreeHC);
182 AssertRCReturn(rc, rc);
183 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
184
185#ifdef RT_ARCH_AMD64 /* see patmReinit(). */
186 /* Check CFGM option. */
187 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "PATMEnabled", &pVM->fPATMEnabled);
188 if (RT_FAILURE(rc))
189# ifdef PATM_DISABLE_ALL
190 pVM->fPATMEnabled = false;
191# else
192 pVM->fPATMEnabled = true;
193# endif
194#endif
195
196 rc = patmReinit(pVM);
197 AssertRC(rc);
198 if (RT_FAILURE(rc))
199 return rc;
200
201 /*
202 * Register save and load state notifiers.
203 */
204 rc = SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SSM_VERSION, sizeof(pVM->patm.s) + PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE,
205 NULL, NULL, NULL,
206 NULL, patmR3Save, NULL,
207 NULL, patmR3Load, NULL);
208 AssertRCReturn(rc, rc);
209
210#ifdef VBOX_WITH_DEBUGGER
211 /*
212 * Debugger commands.
213 */
214 static bool s_fRegisteredCmds = false;
215 if (!s_fRegisteredCmds)
216 {
217 int rc2 = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
218 if (RT_SUCCESS(rc2))
219 s_fRegisteredCmds = true;
220 }
221#endif
222
223#ifdef VBOX_WITH_STATISTICS
224 STAM_REG(pVM, &pVM->patm.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/PATM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
225 STAM_REG(pVM, &pVM->patm.s.StatPATMMemoryUsed,STAMTYPE_COUNTER, "/PATM/MemoryUsed", STAMUNIT_OCCURENCES, "The amount of hypervisor heap used for patches.");
226 STAM_REG(pVM, &pVM->patm.s.StatDisabled, STAMTYPE_COUNTER, "/PATM/Patch/Disabled", STAMUNIT_OCCURENCES, "Number of times patches were disabled.");
227 STAM_REG(pVM, &pVM->patm.s.StatEnabled, STAMTYPE_COUNTER, "/PATM/Patch/Enabled", STAMUNIT_OCCURENCES, "Number of times patches were enabled.");
228 STAM_REG(pVM, &pVM->patm.s.StatDirty, STAMTYPE_COUNTER, "/PATM/Patch/Dirty", STAMUNIT_OCCURENCES, "Number of times patches were marked dirty.");
229 STAM_REG(pVM, &pVM->patm.s.StatUnusable, STAMTYPE_COUNTER, "/PATM/Patch/Unusable", STAMUNIT_OCCURENCES, "Number of unusable patches (conflicts).");
230 STAM_REG(pVM, &pVM->patm.s.StatInstalled, STAMTYPE_COUNTER, "/PATM/Patch/Installed", STAMUNIT_OCCURENCES, "Number of installed patches.");
231 STAM_REG(pVM, &pVM->patm.s.StatInt3Callable, STAMTYPE_COUNTER, "/PATM/Patch/Int3Callable", STAMUNIT_OCCURENCES, "Number of cli patches turned into int3 patches.");
232
233 STAM_REG(pVM, &pVM->patm.s.StatInt3BlockRun, STAMTYPE_COUNTER, "/PATM/Patch/Run/Int3", STAMUNIT_OCCURENCES, "Number of times an int3 block patch was executed.");
234 STAMR3RegisterF(pVM, &pVM->patm.s.pGCStateHC->uPatchCalls, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Patch/Run/Normal");
235
236 STAM_REG(pVM, &pVM->patm.s.StatInstalledFunctionPatches, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Function", STAMUNIT_OCCURENCES, "Number of installed function duplication patches.");
237 STAM_REG(pVM, &pVM->patm.s.StatInstalledTrampoline, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Trampoline", STAMUNIT_OCCURENCES, "Number of installed trampoline patches.");
238 STAM_REG(pVM, &pVM->patm.s.StatInstalledJump, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Jump", STAMUNIT_OCCURENCES, "Number of installed jump patches.");
239
240 STAM_REG(pVM, &pVM->patm.s.StatOverwritten, STAMTYPE_COUNTER, "/PATM/Patch/Overwritten", STAMUNIT_OCCURENCES, "Number of overwritten patches.");
241 STAM_REG(pVM, &pVM->patm.s.StatFixedConflicts,STAMTYPE_COUNTER, "/PATM/Patch/ConflictFixed", STAMUNIT_OCCURENCES, "Number of fixed conflicts.");
242 STAM_REG(pVM, &pVM->patm.s.StatFlushed, STAMTYPE_COUNTER, "/PATM/Patch/Flushed", STAMUNIT_OCCURENCES, "Number of flushes of pages with patch jumps.");
243 STAM_REG(pVM, &pVM->patm.s.StatMonitored, STAMTYPE_COUNTER, "/PATM/Patch/Monitored", STAMUNIT_OCCURENCES, "Number of patches in monitored patch pages.");
244 STAM_REG(pVM, &pVM->patm.s.StatPageBoundaryCrossed, STAMTYPE_COUNTER, "/PATM/Patch/BoundaryCross", STAMUNIT_OCCURENCES, "Number of refused patches due to patch jump crossing page boundary.");
245
246 STAM_REG(pVM, &pVM->patm.s.StatHandleTrap, STAMTYPE_PROFILE, "/PATM/HandleTrap", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3HandleTrap");
247 STAM_REG(pVM, &pVM->patm.s.StatPushTrap, STAMTYPE_COUNTER, "/PATM/HandleTrap/PushWP", STAMUNIT_OCCURENCES, "Number of traps due to monitored stack pages.");
248
249 STAM_REG(pVM, &pVM->patm.s.StatSwitchBack, STAMTYPE_COUNTER, "/PATM/SwitchBack", STAMUNIT_OCCURENCES, "Switch back to original guest code when IF=1 & executing PATM instructions");
250 STAM_REG(pVM, &pVM->patm.s.StatSwitchBackFail,STAMTYPE_COUNTER, "/PATM/SwitchBackFail", STAMUNIT_OCCURENCES, "Failed switch back to original guest code when IF=1 & executing PATM instructions");
251
252 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQFailed, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Failed", STAMUNIT_OCCURENCES, "Nr of failed PATMR3DuplicateFunctionRequest calls");
253 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQSuccess, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Success", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls");
254 STAM_REG(pVM, &pVM->patm.s.StatDuplicateUseExisting,STAMTYPE_COUNTER, "/PATM/Function/DupREQ/UseExist", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls when using an existing patch");
255
256 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupInsert, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Insert", STAMUNIT_OCCURENCES, "Nr of successful function address insertions");
257 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupReplace, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Replace", STAMUNIT_OCCURENCES, "Nr of successful function address replacements");
258 STAM_REG(pVM, &pVM->patm.s.StatU32FunctionMaxSlotsUsed, STAMTYPE_U32_RESET,"/PATM/Function/Lookup/MaxSlots", STAMUNIT_OCCURENCES, "Maximum nr of lookup slots used in all call patches");
259
260 STAM_REG(pVM, &pVM->patm.s.StatFunctionFound, STAMTYPE_COUNTER, "/PATM/Function/Found", STAMUNIT_OCCURENCES, "Nr of successful function patch lookups in GC");
261 STAM_REG(pVM, &pVM->patm.s.StatFunctionNotFound, STAMTYPE_COUNTER, "/PATM/Function/NotFound", STAMUNIT_OCCURENCES, "Nr of failed function patch lookups in GC");
262
263 STAM_REG(pVM, &pVM->patm.s.StatPatchWrite, STAMTYPE_PROFILE, "/PATM/Write/Handle", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3PatchWrite");
264 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteDetect, STAMTYPE_PROFILE, "/PATM/Write/Detect", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMIsWriteToPatchPage");
265 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpreted, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Success", STAMUNIT_OCCURENCES, "Nr of interpreted patch writes.");
266 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpretedFailed, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Failed", STAMUNIT_OCCURENCES, "Nr of failed interpreted patch writes.");
267
268 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshSuccess, STAMTYPE_COUNTER, "/PATM/Refresh/Success", STAMUNIT_OCCURENCES, "Successful patch refreshes");
269 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshFailed, STAMTYPE_COUNTER, "/PATM/Refresh/Failure", STAMUNIT_OCCURENCES, "Failed patch refreshes");
270
271 STAM_REG(pVM, &pVM->patm.s.StatPatchPageInserted, STAMTYPE_COUNTER, "/PATM/Page/Inserted", STAMUNIT_OCCURENCES, "Nr of inserted guest pages that were patched");
272 STAM_REG(pVM, &pVM->patm.s.StatPatchPageRemoved, STAMTYPE_COUNTER, "/PATM/Page/Removed", STAMUNIT_OCCURENCES, "Nr of removed guest pages that were patched");
273
274 STAM_REG(pVM, &pVM->patm.s.StatInstrDirty, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Detected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty.");
275 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyGood, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Corrected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and corrected later on.");
276 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyBad, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Failed", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and we were not able to correct them.");
277
278 STAM_REG(pVM, &pVM->patm.s.StatSysEnter, STAMTYPE_COUNTER, "/PATM/Emul/SysEnter", STAMUNIT_OCCURENCES, "Number of times sysenter was emulated.");
279 STAM_REG(pVM, &pVM->patm.s.StatSysExit, STAMTYPE_COUNTER, "/PATM/Emul/SysExit" , STAMUNIT_OCCURENCES, "Number of times sysexit was emulated.");
280 STAM_REG(pVM, &pVM->patm.s.StatEmulIret, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Success", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
281 STAM_REG(pVM, &pVM->patm.s.StatEmulIretFailed, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Failed", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
282
283 STAM_REG(pVM, &pVM->patm.s.StatGenRet, STAMTYPE_COUNTER, "/PATM/Gen/Ret" , STAMUNIT_OCCURENCES, "Number of generated ret instructions.");
284 STAM_REG(pVM, &pVM->patm.s.StatGenRetReused, STAMTYPE_COUNTER, "/PATM/Gen/RetReused" , STAMUNIT_OCCURENCES, "Number of reused ret instructions.");
285 STAM_REG(pVM, &pVM->patm.s.StatGenCall, STAMTYPE_COUNTER, "/PATM/Gen/Call", STAMUNIT_OCCURENCES, "Number of generated call instructions.");
286 STAM_REG(pVM, &pVM->patm.s.StatGenJump, STAMTYPE_COUNTER, "/PATM/Gen/Jmp" , STAMUNIT_OCCURENCES, "Number of generated indirect jump instructions.");
287 STAM_REG(pVM, &pVM->patm.s.StatGenPopf, STAMTYPE_COUNTER, "/PATM/Gen/Popf" , STAMUNIT_OCCURENCES, "Number of generated popf instructions.");
288
289 STAM_REG(pVM, &pVM->patm.s.StatCheckPendingIRQ, STAMTYPE_COUNTER, "/PATM/GC/CheckIRQ" , STAMUNIT_OCCURENCES, "Number of traps that ask to check for pending irqs.");
290#endif /* VBOX_WITH_STATISTICS */
291
292 Log(("PATMCallRecord.size %d\n", PATMCallRecord.size));
293 Log(("PATMCallIndirectRecord.size %d\n", PATMCallIndirectRecord.size));
294 Log(("PATMRetRecord.size %d\n", PATMRetRecord.size));
295 Log(("PATMJumpIndirectRecord.size %d\n", PATMJumpIndirectRecord.size));
296 Log(("PATMPopf32Record.size %d\n", PATMPopf32Record.size));
297 Log(("PATMIretRecord.size %d\n", PATMIretRecord.size));
298 Log(("PATMStiRecord.size %d\n", PATMStiRecord.size));
299 Log(("PATMCheckIFRecord.size %d\n", PATMCheckIFRecord.size));
300
301 return rc;
302}
303
304/**
305 * Finalizes HMA page attributes.
306 *
307 * @returns VBox status code.
308 * @param pVM The VM handle.
309 */
310VMMR3DECL(int) PATMR3InitFinalize(PVM pVM)
311{
312 /* The GC state, stack and statistics must be read/write for the guest (supervisor only of course). */
313 int rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStateGC, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
314 if (RT_FAILURE(rc))
315 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
316
317 rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStackGC, PATM_STACK_TOTAL_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
318 if (RT_FAILURE(rc))
319 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
320
321 rc = PGMMapSetPage(pVM, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
322 if (RT_FAILURE(rc))
323 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
324
325 return rc;
326}
327
328/**
329 * (Re)initializes PATM
330 *
331 * @param pVM The VM.
332 */
333static int patmReinit(PVM pVM)
334{
335 int rc;
336
337 /*
338 * Assert alignment and sizes.
339 */
340 AssertRelease(!(RT_OFFSETOF(VM, patm.s) & 31));
341 AssertRelease(sizeof(pVM->patm.s) <= sizeof(pVM->patm.padding));
342
343 /*
344 * Setup any fixed pointers and offsets.
345 */
346 pVM->patm.s.offVM = RT_OFFSETOF(VM, patm);
347
348#ifndef RT_ARCH_AMD64 /* would be nice if this was changed everywhere. was driving me crazy on AMD64. */
349#ifndef PATM_DISABLE_ALL
350 pVM->fPATMEnabled = true;
351#endif
352#endif
353
354 Assert(pVM->patm.s.pGCStateHC);
355 memset(pVM->patm.s.pGCStateHC, 0, PAGE_SIZE);
356 AssertReleaseMsg(pVM->patm.s.pGCStateGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStateGC));
357
358 Log(("Patch memory allocated at %p - %RRv\n", pVM->patm.s.pPatchMemHC, pVM->patm.s.pPatchMemGC));
359 pVM->patm.s.pGCStateHC->uVMFlags = X86_EFL_IF;
360
361 Assert(pVM->patm.s.pGCStackHC);
362 memset(pVM->patm.s.pGCStackHC, 0, PAGE_SIZE);
363 AssertReleaseMsg(pVM->patm.s.pGCStackGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStackGC));
364 pVM->patm.s.pGCStateHC->Psp = PATM_STACK_SIZE;
365 pVM->patm.s.pGCStateHC->fPIF = 1; /* PATM Interrupt Flag */
366
367 Assert(pVM->patm.s.pStatsHC);
368 memset(pVM->patm.s.pStatsHC, 0, PATM_STAT_MEMSIZE);
369 AssertReleaseMsg(pVM->patm.s.pStatsGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pStatsGC));
370
371 Assert(pVM->patm.s.pPatchMemHC);
372 Assert(pVM->patm.s.pPatchMemGC == MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC));
373 memset(pVM->patm.s.pPatchMemHC, 0, PATCH_MEMORY_SIZE);
374 AssertReleaseMsg(pVM->patm.s.pPatchMemGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pPatchMemHC));
375
376 /* Needed for future patching of sldt/sgdt/sidt/str etc. */
377 pVM->patm.s.pCPUMCtxGC = VM_RC_ADDR(pVM, CPUMQueryGuestCtxPtr(VMMGetCpu(pVM)));
378
379 Assert(pVM->patm.s.PatchLookupTreeHC);
380 Assert(pVM->patm.s.PatchLookupTreeGC == MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC));
381
382 /*
383 * (Re)Initialize PATM structure
384 */
385 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
386 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr);
387 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
388 pVM->patm.s.offPatchMem = 16; /* don't start with zero here */
389 pVM->patm.s.uCurrentPatchIdx = 1; /* Index zero is a dummy */
390 pVM->patm.s.pvFaultMonitor = 0;
391 pVM->patm.s.deltaReloc = 0;
392
393 /* Lowest and highest patched instruction */
394 pVM->patm.s.pPatchedInstrGCLowest = ~0;
395 pVM->patm.s.pPatchedInstrGCHighest = 0;
396
397 pVM->patm.s.PatchLookupTreeHC->PatchTree = 0;
398 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
399 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
400
401 pVM->patm.s.pfnSysEnterPatchGC = 0;
402 pVM->patm.s.pfnSysEnterGC = 0;
403
404 pVM->patm.s.fOutOfMemory = false;
405
406 pVM->patm.s.pfnHelperCallGC = 0;
407
408 /* Generate all global functions to be used by future patches. */
409 /* We generate a fake patch in order to use the existing code for relocation. */
410 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pVM->patm.s.pGlobalPatchRec);
411 if (RT_FAILURE(rc))
412 {
413 Log(("Out of memory!!!!\n"));
414 return VERR_NO_MEMORY;
415 }
416 pVM->patm.s.pGlobalPatchRec->patch.flags = PATMFL_GLOBAL_FUNCTIONS;
417 pVM->patm.s.pGlobalPatchRec->patch.uState = PATCH_ENABLED;
418 pVM->patm.s.pGlobalPatchRec->patch.pPatchBlockOffset = pVM->patm.s.offPatchMem;
419
420 rc = patmPatchGenGlobalFunctions(pVM, &pVM->patm.s.pGlobalPatchRec->patch);
421 AssertRC(rc);
422
423 /* Update free pointer in patch memory. */
424 pVM->patm.s.offPatchMem += pVM->patm.s.pGlobalPatchRec->patch.uCurPatchOffset;
425 /* Round to next 8 byte boundary. */
426 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
427 return rc;
428}
429
430
431/**
432 * Applies relocations to data and code managed by this
433 * component. This function will be called at init and
434 * whenever the VMM needs to relocate itself inside the GC.
435 *
436 * The PATM will update the addresses used by the switcher.
437 *
438 * @param pVM The VM.
439 */
440VMMR3DECL(void) PATMR3Relocate(PVM pVM)
441{
442 RTRCPTR GCPtrNew = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
443 RTRCINTPTR delta = GCPtrNew - pVM->patm.s.pGCStateGC;
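 /* pGCState is used as the reference point: the whole PATM area in the hypervisor
    region moves as one block, so the same delta applies to all cached GC pointers
    updated below and to every fixup record walked by RelocatePatches(). */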
444
445 Log(("PATMR3Relocate from %RRv to %RRv - delta %08X\n", pVM->patm.s.pGCStateGC, GCPtrNew, delta));
446 if (delta)
447 {
448 PCPUMCTX pCtx;
449
450 /* Update CPUMCTX guest context pointer. */
451 pVM->patm.s.pCPUMCtxGC += delta;
452
453 pVM->patm.s.deltaReloc = delta;
454
455 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, RelocatePatches, (void *)pVM);
456
457 pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));
458
459 /* If we are running patch code right now, then also adjust EIP. */
460 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
461 pCtx->eip += delta;
462
463 pVM->patm.s.pGCStateGC = GCPtrNew;
464 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
465
466 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
467
468 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
469
470 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
471
472 if (pVM->patm.s.pfnSysEnterPatchGC)
473 pVM->patm.s.pfnSysEnterPatchGC += delta;
474
475 /* Deal with the global patch functions. */
476 pVM->patm.s.pfnHelperCallGC += delta;
477 pVM->patm.s.pfnHelperRetGC += delta;
478 pVM->patm.s.pfnHelperIretGC += delta;
479 pVM->patm.s.pfnHelperJumpGC += delta;
480
481 RelocatePatches(&pVM->patm.s.pGlobalPatchRec->Core, (void *)pVM);
482 }
483}
484
485
486/**
487 * Terminates the PATM.
488 *
489 * Termination means cleaning up and freeing all resources;
490 * the VM itself is at this point powered off or suspended.
491 *
492 * @returns VBox status code.
493 * @param pVM The VM to operate on.
494 */
495VMMR3DECL(int) PATMR3Term(PVM pVM)
496{
497 /* Memory was all allocated from the two MM heaps and requires no freeing. */
498 NOREF(pVM);
499 return VINF_SUCCESS;
500}
501
502
503/**
504 * PATM reset callback.
505 *
506 * @returns VBox status code.
507 * @param pVM The VM which is reset.
508 */
509VMMR3DECL(int) PATMR3Reset(PVM pVM)
510{
511 Log(("PATMR3Reset\n"));
512
513 /* Free all patches. */
514 while (true)
515 {
516 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32RemoveBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, 0, true);
517 if (pPatchRec)
518 {
519 PATMRemovePatch(pVM, pPatchRec, true);
520 }
521 else
522 break;
523 }
524 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
525 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
526 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
527 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
528
529 int rc = patmReinit(pVM);
530 if (RT_SUCCESS(rc))
531 rc = PATMR3InitFinalize(pVM); /* paranoia */
532
533 return rc;
534}
535
536DECLCALLBACK(int) patmReadBytes(PDISCPUSTATE pDisState, uint8_t *pbDst, RTUINTPTR uSrcAddr, uint32_t cbToRead)
537{
538 PATMDISASM *pDisInfo = (PATMDISASM *)pDisState->pvUser;
539 int orgsize = cbToRead;
540
541 Assert(cbToRead);
542 if (cbToRead == 0)
543 return VERR_INVALID_PARAMETER;
544
545 /*
546 * Trap/interrupt handlers typically call common code on entry, which might already have patches inserted.
547 * As we currently don't support calling patch code from patch code, we'll let it read the original opcode bytes instead.
548 */
549 /** @todo could change in the future! */
550 if (pDisInfo->fReadFlags & PATMREAD_ORGCODE)
551 {
552 for (int i = 0; i < orgsize; i++)
553 {
554 int rc = PATMR3QueryOpcode(pDisInfo->pVM, (RTRCPTR)uSrcAddr, pbDst);
555 if (RT_FAILURE(rc))
556 break;
557 uSrcAddr++;
558 pbDst++;
559 cbToRead--;
560 }
561 if (cbToRead == 0)
562 return VINF_SUCCESS;
563#ifdef VBOX_STRICT
564 if ( !(pDisInfo->pPatchInfo->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER))
565 && !(pDisInfo->fReadFlags & PATMREAD_NOCHECK))
566 {
567 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, uSrcAddr, NULL) == false);
568 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, uSrcAddr+cbToRead-1, NULL) == false);
569 }
570#endif
571 }
572
573 if ( !pDisInfo->pInstrHC
574 || ( PAGE_ADDRESS(pDisInfo->pInstrGC) != PAGE_ADDRESS(uSrcAddr + cbToRead - 1)
575 && !PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr)))
576 {
577 Assert(!PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr));
578 return PGMPhysSimpleReadGCPtr(&pDisInfo->pVM->aCpus[0], pbDst, uSrcAddr, cbToRead);
579 }
580
581 Assert(pDisInfo->pInstrHC);
582
583 uint8_t *pInstrHC = pDisInfo->pInstrHC;
584
585 Assert(pInstrHC);
586
587 /* pInstrHC is the base address; adjust according to the GC pointer. */
588 pInstrHC = pInstrHC + (uSrcAddr - pDisInfo->pInstrGC);
589
590 memcpy(pbDst, (void *)pInstrHC, cbToRead);
591
592 return VINF_SUCCESS;
593}
594
595
596DECLINLINE(bool) patmR3DisInstrToStr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
597 PDISCPUSTATE pCpu, uint32_t *pcbInstr, char *pszOutput, size_t cbOutput)
598{
599 PATMDISASM disinfo;
600 disinfo.pVM = pVM;
601 disinfo.pPatchInfo = pPatch;
602 disinfo.pInstrHC = pbInstrHC;
603 disinfo.pInstrGC = InstrGCPtr32;
604 disinfo.fReadFlags = fReadFlags;
605 return RT_SUCCESS(DISInstrToStrWithReader(InstrGCPtr32,
606 (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
607 patmReadBytes, &disinfo,
608 pCpu, pcbInstr, pszOutput, cbOutput));
609}
610
611
612DECLINLINE(bool) patmR3DisInstr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
613 PDISCPUSTATE pCpu, uint32_t *pcbInstr)
614{
615 PATMDISASM disinfo;
616 disinfo.pVM = pVM;
617 disinfo.pPatchInfo = pPatch;
618 disinfo.pInstrHC = pbInstrHC;
619 disinfo.pInstrGC = InstrGCPtr32;
620 disinfo.fReadFlags = fReadFlags;
621 return RT_SUCCESS(DISInstrWithReader(InstrGCPtr32,
622 (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
623 patmReadBytes, &disinfo,
624 pCpu, pcbInstr));
625}
626
627
628DECLINLINE(bool) patmR3DisInstrNoStrOpMode(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC,
629 uint32_t fReadFlags,
630 PDISCPUSTATE pCpu, uint32_t *pcbInstr)
631{
632 PATMDISASM disinfo;
633 disinfo.pVM = pVM;
634 disinfo.pPatchInfo = pPatch;
635 disinfo.pInstrHC = pbInstrHC;
636 disinfo.pInstrGC = InstrGCPtr32;
637 disinfo.fReadFlags = fReadFlags;
638 return RT_SUCCESS(DISInstrWithReader(InstrGCPtr32, pPatch->uOpMode, patmReadBytes, &disinfo,
639 pCpu, pcbInstr));
640}
641
642#ifdef LOG_ENABLED
643# define PATM_LOG_ORG_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) \
644 PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, PATMREAD_ORGCODE, a_szComment, " patch:")
645# define PATM_LOG_RAW_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) \
646 PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, PATMREAD_RAWCODE, a_szComment, " patch:")
647
648# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) \
649 do { \
650 if (LogIsEnabled()) \
651 patmLogRawPatchInstr(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2); \
652 } while (0)
653
654static void patmLogRawPatchInstr(PVM pVM, PPATCHINFO pPatch, uint32_t fFlags,
655 const char *pszComment1, const char *pszComment2)
656{
657 DISCPUSTATE DisState;
658 char szOutput[128];
659 szOutput[0] = '\0';
660 patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC, NULL, fFlags,
661 &DisState, NULL, szOutput, sizeof(szOutput));
662 Log(("%s%s %s", pszComment1, pszComment2, szOutput));
663}
664
665#else
666# define PATM_LOG_ORG_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) do { } while (0)
667# define PATM_LOG_RAW_PATCH_INSTR(a_pVM, a_pPatch, a_szComment) do { } while (0)
668# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) do { } while (0)
669#endif
670
671
672/**
673 * Callback function for RTAvloU32DoWithAll
674 *
675 * Updates all fixups in the patches
676 *
677 * @returns VBox status code.
678 * @param pNode Current node
679 * @param pParam The VM to operate on.
680 */
681static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam)
682{
683 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
684 PVM pVM = (PVM)pParam;
685 RTRCINTPTR delta;
686 int rc;
687
688 /* Nothing to do if the patch is not active. */
689 if (pPatch->patch.uState == PATCH_REFUSED)
690 return 0;
691
692 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
693 PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Org patch jump:", "");
694
695 Log(("Nr of fixups %d\n", pPatch->patch.nrFixups));
696 delta = (RTRCINTPTR)pVM->patm.s.deltaReloc;
697
698 /*
699 * Apply fixups
700 */
701 PRELOCREC pRec = 0;
702 AVLPVKEY key = 0;
703
704 while (true)
705 {
706 /* Get the record that's closest from above */
707 pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true);
708 if (pRec == 0)
709 break;
710
711 key = (AVLPVKEY)(pRec->pRelocPos + 1); /* search for the next record during the next round. */
712
713 switch (pRec->uType)
714 {
715 case FIXUP_ABSOLUTE:
716 Log(("Absolute fixup at %RRv %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
717 if ( !pRec->pSource
718 || PATMIsPatchGCAddr(pVM, pRec->pSource))
719 {
720 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
721 }
722 else
723 {
724 uint8_t curInstr[15];
725 uint8_t oldInstr[15];
726 Assert(pRec->pSource && pPatch->patch.cbPrivInstr <= 15);
727
728 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
729
730 memcpy(oldInstr, pPatch->patch.aPrivInstr, pPatch->patch.cbPrivInstr);
731 *(RTRCPTR *)&oldInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
732
733 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), curInstr, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPrivInstr);
734 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
735
736 pRec->pDest = (RTRCPTR)((RTRCUINTPTR)pRec->pDest + delta);
737
738 if ( rc == VERR_PAGE_NOT_PRESENT
739 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
740 {
741 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
742
743 Log(("PATM: Patch page not present -> check later!\n"));
744 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
745 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
746 }
747 else
748 if (memcmp(curInstr, oldInstr, pPatch->patch.cbPrivInstr))
749 {
750 Log(("PATM: Patch was overwritten -> disabling patch!!\n"));
751 /*
752 * Disable patch; this is not a good solution
753 */
754 /** @todo hopefully it was completely overwritten (if the read was successful)!!!! */
755 pPatch->patch.uState = PATCH_DISABLED;
756 }
757 else
758 if (RT_SUCCESS(rc))
759 {
760 *(RTRCPTR *)&curInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
761 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pRec->pSource, curInstr, pPatch->patch.cbPrivInstr);
762 AssertRC(rc);
763 }
764 }
765 break;
766
767 case FIXUP_REL_JMPTOPATCH:
768 {
769 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
770
771 if ( pPatch->patch.uState == PATCH_ENABLED
772 && (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE))
773 {
774 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
775 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
776 RTRCPTR pJumpOffGC;
777 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
778 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
779
780#if 0 /** @todo '*(int32_t*)pRec->pRelocPos' crashes on restore of an XP VM here. pRelocPos=0x8000dbe2180a (bird) */
781 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
782#else
783 Log(("Relative fixup (g2p) ???????? -> %08X at %08X (source=%08x, target=%08x)\n", displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
784#endif
785
786 Assert(pRec->pSource - pPatch->patch.cbPatchJump == pPatch->patch.pPrivInstrGC);
787#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
788 if (pPatch->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
789 {
790 Assert(pPatch->patch.flags & PATMFL_JUMP_CONFLICT);
791
792 pJumpOffGC = pPatch->patch.pPrivInstrGC + 2; //two byte opcode
793 oldJump[0] = pPatch->patch.aPrivInstr[0];
794 oldJump[1] = pPatch->patch.aPrivInstr[1];
795 *(RTRCUINTPTR *)&oldJump[2] = displOld;
796 }
797 else
798#endif
799 if (pPatch->patch.cbPatchJump == SIZEOF_NEARJUMP32)
800 {
801 pJumpOffGC = pPatch->patch.pPrivInstrGC + 1; //one byte opcode
802 oldJump[0] = 0xE9;
803 *(RTRCUINTPTR *)&oldJump[1] = displOld;
804 }
805 else
806 {
807 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->patch.cbPatchJump));
808 continue; //this should never happen!!
809 }
810 Assert(pPatch->patch.cbPatchJump <= sizeof(temp));
811
812 /*
813 * Read old patch jump and compare it to the one we previously installed
814 */
815 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPatchJump);
816 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
817
818 if ( rc == VERR_PAGE_NOT_PRESENT
819 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
820 {
821 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
822
823 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
824 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
825 }
826 else
827 if (memcmp(temp, oldJump, pPatch->patch.cbPatchJump))
828 {
829 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
830 /*
831 * Disable patch; this is not a good solution
832 */
833 /** @todo hopefully it was completely overwritten (if the read was successful)!!!! */
834 pPatch->patch.uState = PATCH_DISABLED;
835 }
836 else
837 if (RT_SUCCESS(rc))
838 {
839 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
840 AssertRC(rc);
841 }
842 else
843 AssertMsgFailed(("Unexpected error %d from MMR3PhysReadGCVirt\n", rc));
844 }
845 else
846 Log(("Skip the guest jump to patch code for this disabled patch %RGv - %08X\n", pPatch->patch.pPrivInstrGC, pRec->pRelocPos));
847
848 pRec->pDest = pTarget;
849 break;
850 }
851
852 case FIXUP_REL_JMPTOGUEST:
853 {
854 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
855 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
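 /* Only the patch-side end of this jump has moved (by 'delta'); the guest target
    is unchanged, so the relative displacement written back below is simply the
    old one minus delta. */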
856
857 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
858 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
859 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
860 pRec->pSource = pSource;
861 break;
862 }
863
864 default:
865 AssertMsg(0, ("Invalid fixup type!!\n"));
866 return VERR_INVALID_PARAMETER;
867 }
868 }
869
870 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
871 PATM_LOG_PATCH_INSTR(pVM, &pPatch->patch, PATMREAD_RAWCODE, "Rel patch jump:", "");
872 return 0;
873}
874
875/**
876 * \#PF Handler callback for virtual access handler ranges.
877 *
878 * Important to realize that a physical page in a range can have aliases, and
879 * for ALL and WRITE handlers these will also trigger.
880 *
881 * @returns VINF_SUCCESS if the handler has carried out the operation.
882 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
883 * @param pVM VM Handle.
884 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
885 * @param pvPtr The HC mapping of that address.
886 * @param pvBuf What the guest is reading/writing.
887 * @param cbBuf How much it's reading/writing.
888 * @param enmAccessType The access type.
889 * @param pvUser User argument.
890 */
891DECLCALLBACK(int) patmVirtPageHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
892 PGMACCESSTYPE enmAccessType, void *pvUser)
893{
894 Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
895 NOREF(pvPtr); NOREF(pvBuf); NOREF(cbBuf); NOREF(pvUser);
896
897 /** @todo could be the wrong virtual address (alias) */
898 pVM->patm.s.pvFaultMonitor = GCPtr;
899 PATMR3HandleMonitoredPage(pVM);
900 return VINF_PGM_HANDLER_DO_DEFAULT;
901}
902
903
904#ifdef VBOX_WITH_DEBUGGER
905/**
906 * Callback function for RTAvloU32DoWithAll
907 *
908 * Enables the patch that's being enumerated
909 *
910 * @returns 0 (continue enumeration).
911 * @param pNode Current node
912 * @param pVM The VM to operate on.
913 */
914static DECLCALLBACK(int) EnableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
915{
916 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
917
918 PATMR3EnablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
919 return 0;
920}
921#endif /* VBOX_WITH_DEBUGGER */
922
923
924#ifdef VBOX_WITH_DEBUGGER
925/**
926 * Callback function for RTAvloU32DoWithAll
927 *
928 * Disables the patch that's being enumerated
929 *
930 * @returns 0 (continue enumeration).
931 * @param pNode Current node
932 * @param pVM The VM to operate on.
933 */
934static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
935{
936 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
937
938 PATMR3DisablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
939 return 0;
940}
941#endif
942
943/**
944 * Returns the host context pointer and size of the patch memory block
945 *
946 * @returns Host context pointer to the patch memory block.
947 * @param pVM The VM to operate on.
948 * @param pcb Size of the patch memory block
949 */
950VMMR3DECL(void *) PATMR3QueryPatchMemHC(PVM pVM, uint32_t *pcb)
951{
952 if (pcb)
953 *pcb = pVM->patm.s.cbPatchMem;
954
955 return pVM->patm.s.pPatchMemHC;
956}
957
958
959/**
960 * Returns the guest context pointer and size of the patch memory block
961 *
962 * @returns Guest context pointer to the patch memory block.
963 * @param pVM The VM to operate on.
964 * @param pcb Size of the patch memory block
965 */
966VMMR3DECL(RTRCPTR) PATMR3QueryPatchMemGC(PVM pVM, uint32_t *pcb)
967{
968 if (pcb)
969 *pcb = pVM->patm.s.cbPatchMem;
970
971 return pVM->patm.s.pPatchMemGC;
972}
973
974
975/**
976 * Returns the host context pointer of the GC context structure
977 *
978 * @returns Host context pointer to the GC state structure.
979 * @param pVM The VM to operate on.
980 */
981VMMR3DECL(PPATMGCSTATE) PATMR3QueryGCStateHC(PVM pVM)
982{
983 return pVM->patm.s.pGCStateHC;
984}
985
986
987/**
988 * Checks whether the HC address is part of our patch region
989 *
990 * @returns true if the address lies within the patch memory block, false otherwise.
991 * @param pVM The VM to operate on.
992 * @param pAddrHC Host context address
993 */
994VMMR3DECL(bool) PATMR3IsPatchHCAddr(PVM pVM, R3PTRTYPE(uint8_t *) pAddrHC)
995{
996 return (pAddrHC >= pVM->patm.s.pPatchMemHC && pAddrHC < pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem) ? true : false;
997}
998
999
1000/**
1001 * Allows or disallow patching of privileged instructions executed by the guest OS
1002 *
1003 * @returns VBox status code.
1004 * @param pVM The VM to operate on.
1005 * @param fAllowPatching Allow/disallow patching
1006 */
1007VMMR3DECL(int) PATMR3AllowPatching(PVM pVM, uint32_t fAllowPatching)
1008{
1009 pVM->fPATMEnabled = (fAllowPatching) ? true : false;
1010 return VINF_SUCCESS;
1011}
1012
1013/**
1014 * Convert a GC patch block pointer to a HC patch pointer
1015 *
1016 * @returns HC pointer or NULL if it's not a GC patch pointer
1017 * @param pVM The VM to operate on.
1018 * @param pAddrGC GC pointer
1019 */
1020VMMR3DECL(R3PTRTYPE(void *)) PATMR3GCPtrToHCPtr(PVM pVM, RTRCPTR pAddrGC)
1021{
1022 if (pVM->patm.s.pPatchMemGC <= pAddrGC && pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem > pAddrGC)
1023 return pVM->patm.s.pPatchMemHC + (pAddrGC - pVM->patm.s.pPatchMemGC);
1024 else
1025 return NULL;
1026}
1027
1028/**
1029 * Query PATM state (enabled/disabled)
1030 *
1031 * @returns 0 - disabled, 1 - enabled
1032 * @param pVM The VM to operate on.
1033 */
1034VMMR3DECL(int) PATMR3IsEnabled(PVM pVM)
1035{
1036 return pVM->fPATMEnabled;
1037}
1038
1039
1040/**
1041 * Convert guest context address to host context pointer
1042 *
1043 * @returns Host context pointer or NULL in case of an error.
1044 * @param pVM The VM to operate on.
1045 * @param pCacheRec Address conversion cache record
1046 * @param pGCPtr Guest context pointer
1047 *
1050 */
1051R3PTRTYPE(uint8_t *) PATMGCVirtToHCVirt(PVM pVM, PPATMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr)
1052{
1053 int rc;
1054 R3PTRTYPE(uint8_t *) pHCPtr;
1055 uint32_t offset;
1056
1057 if (PATMIsPatchGCAddr(pVM, pGCPtr))
1058 {
1059 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1060 Assert(pPatch);
1061 return PATCHCODE_PTR_HC(pPatch) + (pGCPtr - PATCHCODE_PTR_GC(pPatch));
1062 }
1063
1064 offset = pGCPtr & PAGE_OFFSET_MASK;
1065 if (pCacheRec->pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
1066 return pCacheRec->pPageLocStartHC + offset;
1067
1068 /* Release previous lock if any. */
1069 if (pCacheRec->Lock.pvMap)
1070 {
1071 PGMPhysReleasePageMappingLock(pVM, &pCacheRec->Lock);
1072 pCacheRec->Lock.pvMap = NULL;
1073 }
1074
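 /* Map the guest page read-only and cache both the mapping and the page lock in
    pCacheRec; subsequent lookups that hit the same guest page are then served by
    the fast path above without re-mapping. */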
1075 rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pGCPtr, (const void **)&pHCPtr, &pCacheRec->Lock);
1076 if (rc != VINF_SUCCESS)
1077 {
1078 AssertMsg(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("MMR3PhysGCVirt2HCVirtEx failed for %08X\n", pGCPtr));
1079 return NULL;
1080 }
1081 pCacheRec->pPageLocStartHC = (R3PTRTYPE(uint8_t*))((RTHCUINTPTR)pHCPtr & PAGE_BASE_HC_MASK);
1082 pCacheRec->pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
1083 return pHCPtr;
1084}
1085
1086
1087/** Calculates and fills in all branch targets
1088 *
1089 * @returns VBox status code.
1090 * @param pVM The VM to operate on.
1091 * @param pPatch Current patch block pointer
1092 *
1093 */
1094static int patmr3SetBranchTargets(PVM pVM, PPATCHINFO pPatch)
1095{
1096 int32_t displ;
1097
1098 PJUMPREC pRec = 0;
1099 unsigned nrJumpRecs = 0;
1100
1101 /*
1102 * Set all branch targets inside the patch block.
1103 * We remove all jump records as they are no longer needed afterwards.
1104 */
1105 while (true)
1106 {
1107 RCPTRTYPE(uint8_t *) pInstrGC;
1108 RCPTRTYPE(uint8_t *) pBranchTargetGC = 0;
1109
1110 pRec = (PJUMPREC)RTAvlPVRemoveBestFit(&pPatch->JumpTree, 0, true);
1111 if (pRec == 0)
1112 break;
1113
1114 nrJumpRecs++;
1115
1116 /* HC in patch block to GC in patch block. */
1117 pInstrGC = patmPatchHCPtr2PatchGCPtr(pVM, pRec->pJumpHC);
1118
1119 if (pRec->opcode == OP_CALL)
1120 {
1121 /* Special case: call function replacement patch from this patch block.
1122 */
1123 PPATMPATCHREC pFunctionRec = PATMQueryFunctionPatch(pVM, pRec->pTargetGC);
1124 if (!pFunctionRec)
1125 {
1126 int rc;
1127
1128 if (PATMR3HasBeenPatched(pVM, pRec->pTargetGC) == false)
1129 rc = PATMR3InstallPatch(pVM, pRec->pTargetGC, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
1130 else
1131 rc = VERR_PATCHING_REFUSED; /* exists as a normal patch; can't use it */
1132
1133 if (RT_FAILURE(rc))
1134 {
1135 uint8_t *pPatchHC;
1136 RTRCPTR pPatchGC;
1137 RTRCPTR pOrgInstrGC;
1138
1139 pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pInstrGC, 0);
1140 Assert(pOrgInstrGC);
1141
1142 /* Failure for some reason -> mark exit point with int 3. */
1143 Log(("Failed to install function replacement patch (at %x) for reason %Rrc\n", pOrgInstrGC, rc));
1144
1145 pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pOrgInstrGC);
1146 Assert(pPatchGC);
1147
1148 pPatchHC = pVM->patm.s.pPatchMemHC + (pPatchGC - pVM->patm.s.pPatchMemGC);
1149
1150 /* Set a breakpoint at the very beginning of the recompiled instruction */
1151 *pPatchHC = 0xCC;
1152
1153 continue;
1154 }
1155 }
1156 else
1157 {
1158 Log(("Patch block %RRv called as function\n", pFunctionRec->patch.pPrivInstrGC));
1159 pFunctionRec->patch.flags |= PATMFL_CODE_REFERENCED;
1160 }
1161
1162 pBranchTargetGC = PATMR3QueryPatchGCPtr(pVM, pRec->pTargetGC);
1163 }
1164 else
1165 pBranchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pRec->pTargetGC);
1166
1167 if (pBranchTargetGC == 0)
1168 {
1169 AssertMsgFailed(("patmr3SetBranchTargets: patmGuestGCPtrToPatchGCPtr failed for %08X\n", pRec->pTargetGC));
1170 return VERR_PATCHING_REFUSED;
1171 }
1172 /* Our jumps *always* have a dword displacement (to make things easier). */
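 /* I.e. the stored value is an x86 rel32: the branch target minus the address of
    the byte following the 32-bit displacement field, hence the sizeof(RTRCPTR)
    term in the computation below. */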
1173 Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
1174 displ = pBranchTargetGC - (pInstrGC + pRec->offDispl + sizeof(RTRCPTR));
1175 *(RTRCPTR *)(pRec->pJumpHC + pRec->offDispl) = displ;
1176 Log(("Set branch target %d to %08X : %08x - (%08x + %d + %d)\n", nrJumpRecs, displ, pBranchTargetGC, pInstrGC, pRec->offDispl, sizeof(RTRCPTR)));
1177 }
1178 Assert(nrJumpRecs == pPatch->nrJumpRecs);
1179 Assert(pPatch->JumpTree == 0);
1180 return VINF_SUCCESS;
1181}
1182
1183/** Add an illegal instruction record
1184 *
1185 * @param pVM The VM to operate on.
1186 * @param pPatch Patch structure ptr
1187 * @param pInstrGC Guest context pointer to privileged instruction
1188 *
1189 */
1190static void patmAddIllegalInstrRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1191{
1192 PAVLPVNODECORE pRec;
1193
1194 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1195 Assert(pRec);
1196 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
1197
1198 bool ret = RTAvlPVInsert(&pPatch->pTempInfo->IllegalInstrTree, pRec);
1199 Assert(ret); NOREF(ret);
1200 pPatch->pTempInfo->nrIllegalInstr++;
1201}
1202
1203static bool patmIsIllegalInstr(PPATCHINFO pPatch, RTRCPTR pInstrGC)
1204{
1205 PAVLPVNODECORE pRec;
1206
1207 pRec = RTAvlPVGet(&pPatch->pTempInfo->IllegalInstrTree, (AVLPVKEY)(uintptr_t)pInstrGC);
1208 if (pRec)
1209 return true;
1210 else
1211 return false;
1212}
1213
1214/**
1215 * Add a patch to guest lookup record
1216 *
1217 * @param pVM The VM to operate on.
1218 * @param pPatch Patch structure ptr
1219 * @param pPatchInstrHC Guest context pointer to patch block
1220 * @param pInstrGC Guest context pointer to privileged instruction
1221 * @param enmType Lookup type
1222 * @param fDirty Dirty flag
1223 *
1224 */
1225 /** @note Be extremely careful with this function. Make absolutely sure the guest address is correct! (to avoid executing instructions twice!) */
1226void patmr3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC, PATM_LOOKUP_TYPE enmType, bool fDirty)
1227{
1228 bool ret;
1229 PRECPATCHTOGUEST pPatchToGuestRec;
1230 PRECGUESTTOPATCH pGuestToPatchRec;
1231 uint32_t PatchOffset = pPatchInstrHC - pVM->patm.s.pPatchMemHC; /* Offset in memory reserved for PATM. */
1232
1233 LogFlowFunc(("pVM=%#p pPatch=%#p pPatchInstrHC=%#p pInstrGC=%#x enmType=%d fDirty=%RTbool\n",
1234 pVM, pPatch, pPatchInstrHC, pInstrGC, enmType, fDirty));
1235
1236 if (enmType == PATM_LOOKUP_PATCH2GUEST)
1237 {
1238 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1239 if (pPatchToGuestRec && pPatchToGuestRec->Core.Key == PatchOffset)
1240 return; /* already there */
1241
1242 Assert(!pPatchToGuestRec);
1243 }
1244#ifdef VBOX_STRICT
1245 else
1246 {
1247 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1248 Assert(!pPatchToGuestRec);
1249 }
1250#endif
1251
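 /* Note: both lookup records come from a single allocation; the guest-to-patch
    record is placed right behind the patch-to-guest one (see the pPatchToGuestRec+1
    arithmetic below), so the single MMR3HeapFree() in patmr3RemoveP2GLookupRecord
    releases both. */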
1252 pPatchToGuestRec = (PRECPATCHTOGUEST)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(RECPATCHTOGUEST) + sizeof(RECGUESTTOPATCH));
1253 Assert(pPatchToGuestRec);
1254 pPatchToGuestRec->Core.Key = PatchOffset;
1255 pPatchToGuestRec->pOrgInstrGC = pInstrGC;
1256 pPatchToGuestRec->enmType = enmType;
1257 pPatchToGuestRec->fDirty = fDirty;
1258
1259 ret = RTAvlU32Insert(&pPatch->Patch2GuestAddrTree, &pPatchToGuestRec->Core);
1260 Assert(ret);
1261
1262 /* GC to patch address */
1263 if (enmType == PATM_LOOKUP_BOTHDIR)
1264 {
1265 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
1266 if (!pGuestToPatchRec)
1267 {
1268 pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1269 pGuestToPatchRec->Core.Key = pInstrGC;
1270 pGuestToPatchRec->PatchOffset = PatchOffset;
1271
1272 ret = RTAvlU32Insert(&pPatch->Guest2PatchAddrTree, &pGuestToPatchRec->Core);
1273 Assert(ret);
1274 }
1275 }
1276
1277 pPatch->nrPatch2GuestRecs++;
1278}
1279
1280
1281/**
1282 * Removes a patch to guest lookup record
1283 *
1284 * @param pVM The VM to operate on.
1285 * @param pPatch Patch structure ptr
1286 * @param pPatchInstrGC Guest context pointer to patch block
1287 */
1288void patmr3RemoveP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pPatchInstrGC)
1289{
1290 PAVLU32NODECORE pNode;
1291 PAVLU32NODECORE pNode2;
1292 PRECPATCHTOGUEST pPatchToGuestRec;
1293 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
1294
1295 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1296 Assert(pPatchToGuestRec);
1297 if (pPatchToGuestRec)
1298 {
1299 if (pPatchToGuestRec->enmType == PATM_LOOKUP_BOTHDIR)
1300 {
1301 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1302
1303 Assert(pGuestToPatchRec->Core.Key);
1304 pNode2 = RTAvlU32Remove(&pPatch->Guest2PatchAddrTree, pGuestToPatchRec->Core.Key);
1305 Assert(pNode2);
1306 }
1307 pNode = RTAvlU32Remove(&pPatch->Patch2GuestAddrTree, pPatchToGuestRec->Core.Key);
1308 Assert(pNode);
1309
1310 MMR3HeapFree(pPatchToGuestRec);
1311 pPatch->nrPatch2GuestRecs--;
1312 }
1313}
1314
1315
1316/**
1317 * RTAvlPVDestroy callback.
1318 */
1319static DECLCALLBACK(int) patmEmptyTreePVCallback(PAVLPVNODECORE pNode, void *)
1320{
1321 MMR3HeapFree(pNode);
1322 return 0;
1323}
1324
1325/**
1326 * Empty the specified tree (PV tree, MMR3 heap)
1327 *
1328 * @param pVM The VM to operate on.
1329 * @param ppTree Tree to empty
1330 */
1331void patmEmptyTree(PVM pVM, PAVLPVNODECORE *ppTree)
1332{
1333 NOREF(pVM);
1334 RTAvlPVDestroy(ppTree, patmEmptyTreePVCallback, NULL);
1335}
1336
1337
1338/**
1339 * RTAvlU32Destroy callback.
1340 */
1341static DECLCALLBACK(int) patmEmptyTreeU32Callback(PAVLU32NODECORE pNode, void *)
1342{
1343 MMR3HeapFree(pNode);
1344 return 0;
1345}
1346
1347/**
1348 * Empty the specified tree (U32 tree, MMR3 heap)
1349 *
1350 * @param pVM The VM to operate on.
1351 * @param ppTree Tree to empty
1352 */
1353void patmEmptyTreeU32(PVM pVM, PPAVLU32NODECORE ppTree)
1354{
1355 NOREF(pVM);
1356 RTAvlU32Destroy(ppTree, patmEmptyTreeU32Callback, NULL);
1357}
1358
1359
1360/**
1361 * Analyses the instructions following the cli for compliance with our heuristics for cli & pushf
1362 *
1363 * @returns VBox status code.
1364 * @param pVM The VM to operate on.
1365 * @param pCpu CPU disassembly state
1366 * @param pInstrGC Guest context pointer to privileged instruction
1367 * @param pCurInstrGC Guest context pointer to the current instruction
1368 * @param pCacheRec Cache record ptr
1369 *
1370 */
1371static int patmAnalyseBlockCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1372{
1373 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1374 bool fIllegalInstr = false;
1375
1376 /*
1377 * Preliminary heuristics:
1378 * - no call instructions without a fixed displacement between cli and sti/popf
1379 * - no jumps in the instructions following cli (4+ bytes; enough for the replacement jump (5 bytes))
1380 * - no nested pushf/cli
1381 * - sti/popf should be the (eventual) target of all branches
1382 * - no near or far returns; no int xx, no into
1383 *
1384 * Note: Later on we can impose less strict guidelines if the need arises.
1385 */
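 /* For instance, a sequence like
  *      cli
  *      mov [ebx+4], eax
  *      sti
  * passes these heuristics, while
  *      cli
  *      call [eax]        ; indirect call without a fixed displacement
  *      sti
  * gets the call flagged as an illegal instruction by the checks below
  * (unless PATMFL_SUPPORT_INDIRECT_CALLS is set on the patch). */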
1386
1387 /* Bail out if the patch gets too big. */
1388 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1389 {
1390 Log(("Code block too big (%x) for patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1391 fIllegalInstr = true;
1392 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1393 }
1394 else
1395 {
1396 /* No unconditional jumps or calls without fixed displacements. */
1397 if ( (pCpu->pCurInstr->optype & DISOPTYPE_CONTROLFLOW)
1398 && (pCpu->pCurInstr->opcode == OP_JMP || pCpu->pCurInstr->opcode == OP_CALL)
1399 )
1400 {
1401 Assert(pCpu->param1.cb <= 4 || pCpu->param1.cb == 6);
1402 if ( pCpu->param1.cb == 6 /* far call/jmp */
1403 || (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1404 || (OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1405 )
1406 {
1407 fIllegalInstr = true;
1408 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1409 }
1410 }
1411
1412 /* An unconditional (short) jump right after a cli is a potential problem; we will overwrite whichever function comes afterwards */
1413 if (pPatch->opcode == OP_CLI && pCpu->pCurInstr->opcode == OP_JMP)
1414 {
1415 if ( pCurInstrGC > pPatch->pPrivInstrGC
1416 && pCurInstrGC + pCpu->cbInstr < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1417 {
1418 Log(("Dangerous unconditional jump ends in our generated patch jump!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1419 /* We turn this one into a int 3 callable patch. */
1420 pPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
1421 }
1422 }
1423 else
1424 /* no nested pushfs just yet; nested cli is allowed for cli patches though. */
1425 if (pPatch->opcode == OP_PUSHF)
1426 {
1427 if (pCurInstrGC != pInstrGC && pCpu->pCurInstr->opcode == OP_PUSHF)
1428 {
1429 fIllegalInstr = true;
1430 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1431 }
1432 }
1433
1434 /* no far returns */
1435 if (pCpu->pCurInstr->opcode == OP_RETF)
1436 {
1437 pPatch->pTempInfo->nrRetInstr++;
1438 fIllegalInstr = true;
1439 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1440 }
1441 else if ( pCpu->pCurInstr->opcode == OP_INT3
1442 || pCpu->pCurInstr->opcode == OP_INT
1443 || pCpu->pCurInstr->opcode == OP_INTO)
1444 {
1445 /* No int xx or into either. */
1446 fIllegalInstr = true;
1447 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1448 }
1449 }
1450
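 /* Account for the size of this instruction in the analysed code block. */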
1451 pPatch->cbPatchBlockSize += pCpu->cbInstr;
1452
1453 /* Illegal instruction -> end of analysis phase for this code block */
1454 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1455 return VINF_SUCCESS;
1456
1457 /* Check for exit points. */
1458 switch (pCpu->pCurInstr->opcode)
1459 {
1460 case OP_SYSEXIT:
1461 return VINF_SUCCESS; /* duplicate it; will fault or be emulated in GC. */
1462
1463 case OP_SYSENTER:
1464 case OP_ILLUD2:
1465 /* This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further. */
1466 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1467 return VINF_SUCCESS;
1468
1469 case OP_STI:
1470 case OP_POPF:
1471 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION)));
1472 /* If our exit point lies within the generated patch jump, then we have to refuse!! */
1473 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1474 {
1475 Log(("Exit point within patch jump itself!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1476 return VERR_PATCHING_REFUSED;
1477 }
1478 if (pPatch->opcode == OP_PUSHF)
1479 {
1480 if (pCpu->pCurInstr->opcode == OP_POPF)
1481 {
1482 if (pPatch->cbPatchBlockSize >= SIZEOF_NEARJUMP32)
1483 return VINF_SUCCESS;
1484
1485 /* Or else we need to duplicate more instructions, because we can't jump back yet! */
1486 Log(("WARNING: End of block reached, but we need to duplicate some extra instruction to avoid a conflict with the patch jump\n"));
1487 pPatch->flags |= PATMFL_CHECK_SIZE;
1488 }
1489 break; /* sti doesn't mark the end of a pushf block; only popf does. */
1490 }
1491 /* else: fall through. */
1492 case OP_RETN: /* exit point for function replacement */
1493 return VINF_SUCCESS;
1494
1495 case OP_IRET:
1496 return VINF_SUCCESS; /* exitpoint */
1497
1498 case OP_CPUID:
1499 case OP_CALL:
1500 case OP_JMP:
1501 break;
1502
1503 default:
1504 if (pCpu->pCurInstr->optype & (DISOPTYPE_PRIVILEGED_NOTRAP))
1505 {
1506 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1507 return VINF_SUCCESS; /* exit point */
1508 }
1509 break;
1510 }
1511
1512 /* Single instruction patch: stop once we've copied enough instructions *and* the current instruction is not a relative jump. */
1513 if ((pPatch->flags & PATMFL_CHECK_SIZE) && pPatch->cbPatchBlockSize > SIZEOF_NEARJUMP32 && !(pCpu->pCurInstr->optype & DISOPTYPE_RELATIVE_CONTROLFLOW))
1514 {
1515 /* The end marker for this kind of patch is any instruction at a location outside our patch jump. */
1516 Log(("End of block at %RRv size %d\n", pCurInstrGC, pCpu->cbInstr));
1517 return VINF_SUCCESS;
1518 }
1519
1520 return VWRN_CONTINUE_ANALYSIS;
1521}
1522
1523/**
1524 * Analyses the instructions inside a function for compliance
1525 *
1526 * @returns VBox status code.
1527 * @param pVM The VM to operate on.
1528 * @param pCpu CPU disassembly state
1529 * @param pInstrGC Guest context pointer to privileged instruction
1530 * @param pCurInstrGC Guest context pointer to the current instruction
1531 * @param pCacheRec Cache record ptr
1532 *
1533 */
1534static int patmAnalyseFunctionCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1535{
1536 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1537 bool fIllegalInstr = false;
1538 NOREF(pInstrGC);
1539
1540 //Preliminary heuristics:
1541 //- no call instructions
1542 //- ret ends a block
1543
1544 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
1545
1546 // bail out if the patch gets too big
1547 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1548 {
1549 Log(("Code block too big (%x) for function patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1550 fIllegalInstr = true;
1551 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1552 }
1553 else
1554 {
1555 // no unconditional jumps or calls without fixed displacements
1556 if ( (pCpu->pCurInstr->optype & DISOPTYPE_CONTROLFLOW)
1557 && (pCpu->pCurInstr->opcode == OP_JMP || pCpu->pCurInstr->opcode == OP_CALL)
1558 )
1559 {
1560 Assert(pCpu->param1.cb <= 4 || pCpu->param1.cb == 6);
1561 if ( pCpu->param1.cb == 6 /* far call/jmp */
1562 || (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1563 || (OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1564 )
1565 {
1566 fIllegalInstr = true;
1567 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1568 }
1569 }
1570 else /* no far returns */
1571 if (pCpu->pCurInstr->opcode == OP_RETF)
1572 {
1573 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1574 fIllegalInstr = true;
1575 }
1576 else /* no int xx or into either */
1577 if (pCpu->pCurInstr->opcode == OP_INT3 || pCpu->pCurInstr->opcode == OP_INT || pCpu->pCurInstr->opcode == OP_INTO)
1578 {
1579 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1580 fIllegalInstr = true;
1581 }
1582
1583 #if 0
1584 ///@todo we can handle certain in/out and privileged instructions in the guest context
1585 if (pCpu->pCurInstr->optype & DISOPTYPE_PRIVILEGED && pCpu->pCurInstr->opcode != OP_STI)
1586 {
1587 Log(("Illegal instructions for function patch!!\n"));
1588 return VERR_PATCHING_REFUSED;
1589 }
1590 #endif
1591 }
1592
1593 pPatch->cbPatchBlockSize += pCpu->cbInstr;
1594
1595 /* Illegal instruction -> end of analysis phase for this code block */
1596 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1597 {
1598 return VINF_SUCCESS;
1599 }
1600
1601 // Check for exit points
1602 switch (pCpu->pCurInstr->opcode)
1603 {
1604 case OP_ILLUD2:
1605 // This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further
1606 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1607 return VINF_SUCCESS;
1608
1609 case OP_IRET:
1610 case OP_SYSEXIT: /* will fault or emulated in GC */
1611 case OP_RETN:
1612 return VINF_SUCCESS;
1613
1614 case OP_POPF:
1615 case OP_STI:
1616 return VWRN_CONTINUE_ANALYSIS;
1617 default:
1618 if (pCpu->pCurInstr->optype & (DISOPTYPE_PRIVILEGED_NOTRAP))
1619 {
1620 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1621 return VINF_SUCCESS; /* exit point */
1622 }
1623 return VWRN_CONTINUE_ANALYSIS;
1624 }
1625
1626 return VWRN_CONTINUE_ANALYSIS;
1627}
1628
1629/**
1630 * Recompiles the instructions in a code block
1631 *
1632 * @returns VBox status code.
1633 * @param pVM The VM to operate on.
1634 * @param pCpu CPU disassembly state
1635 * @param pInstrGC Guest context pointer to privileged instruction
1636 * @param pCurInstrGC Guest context pointer to the current instruction
1637 * @param pCacheRec Cache record ptr
1638 *
1639 */
1640static int patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1641{
1642 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1643 int rc = VINF_SUCCESS;
1644 bool fInhibitIRQInstr = false; /* did the instruction cause PATMFL_INHIBITIRQS to be set? */
1645
1646 LogFlow(("patmRecompileCallback %RRv %RRv\n", pInstrGC, pCurInstrGC));
1647
1648 if ( patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pCurInstrGC) != 0
1649 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT)) /* do not do this when the next instruction *must* be executed! */
1650 {
1651 /*
1652 * Been there, done that; so insert a jump (we don't want to duplicate code).
1653 * No need to record this instruction as it's glue code that never crashes (it had better not!).
1654 */
1655 Log(("patmRecompileCallback: jump to code we've recompiled before %RRv!\n", pCurInstrGC));
1656 return patmPatchGenRelJump(pVM, pPatch, pCurInstrGC, OP_JMP, !!(pCpu->prefix & DISPREFIX_OPSIZE));
1657 }
1658
1659 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1660 {
1661 rc = patmAnalyseFunctionCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1662 }
1663 else
1664 rc = patmAnalyseBlockCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1665
1666 if (RT_FAILURE(rc))
1667 return rc;
1668
1669 /* Note: Never do a direct return unless a failure is encountered! */
1670
1671 /* Clear recompilation of next instruction flag; we are doing that right here. */
1672 if (pPatch->flags & PATMFL_RECOMPILE_NEXT)
1673 pPatch->flags &= ~PATMFL_RECOMPILE_NEXT;
1674
1675 /* Add lookup record for patch to guest address translation */
1676 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
1677
1678 /* Update lowest and highest instruction address for this patch */
1679 if (pCurInstrGC < pPatch->pInstrGCLowest)
1680 pPatch->pInstrGCLowest = pCurInstrGC;
1681 else
1682 if (pCurInstrGC > pPatch->pInstrGCHighest)
1683 pPatch->pInstrGCHighest = pCurInstrGC + pCpu->cbInstr;
1684
1685 /* Illegal instruction -> end of recompile phase for this code block. */
1686 if (patmIsIllegalInstr(pPatch, pCurInstrGC))
1687 {
1688 Log(("Illegal instruction at %RRv -> mark with int 3\n", pCurInstrGC));
1689 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1690 goto end;
1691 }
1692
1693 /* For our first attempt, we'll handle only simple relative jumps (immediate offset coded in instruction).
1694 * Indirect calls are handled below.
1695 */
1696 if ( (pCpu->pCurInstr->optype & DISOPTYPE_CONTROLFLOW)
1697 && (pCpu->pCurInstr->opcode != OP_CALL || (pPatch->flags & PATMFL_SUPPORT_CALLS))
1698 && (OP_PARM_VTYPE(pCpu->pCurInstr->param1) == OP_PARM_J))
1699 {
1700 RCPTRTYPE(uint8_t *) pTargetGC = PATMResolveBranch(pCpu, pCurInstrGC);
1701 if (pTargetGC == 0)
1702 {
1703 Log(("We don't support far jumps here!! (%08X)\n", pCpu->param1.fUse));
1704 return VERR_PATCHING_REFUSED;
1705 }
1706
1707 if (pCpu->pCurInstr->opcode == OP_CALL)
1708 {
1709 Assert(!PATMIsPatchGCAddr(pVM, pTargetGC));
1710 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, pTargetGC, false);
1711 if (RT_FAILURE(rc))
1712 goto end;
1713 }
1714 else
1715 rc = patmPatchGenRelJump(pVM, pPatch, pTargetGC, pCpu->pCurInstr->opcode, !!(pCpu->prefix & DISPREFIX_OPSIZE));
1716
1717 if (RT_SUCCESS(rc))
1718 rc = VWRN_CONTINUE_RECOMPILE;
1719
1720 goto end;
1721 }
1722
1723 switch (pCpu->pCurInstr->opcode)
1724 {
1725 case OP_CLI:
1726 {
1727 /* If a cli is found while duplicating instructions for another patch, then it's of vital importance to continue
1728 * until we've found the proper exit point(s).
1729 */
1730 if ( pCurInstrGC != pInstrGC
1731 && !(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1732 )
1733 {
1734 Log(("cli instruction found in other instruction patch block; force it to continue & find an exit point\n"));
1735 pPatch->flags &= ~(PATMFL_CHECK_SIZE | PATMFL_SINGLE_INSTRUCTION);
1736 }
1737 /* Set by irq inhibition; no longer valid now. */
1738 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1739
1740 rc = patmPatchGenCli(pVM, pPatch);
1741 if (RT_SUCCESS(rc))
1742 rc = VWRN_CONTINUE_RECOMPILE;
1743 break;
1744 }
1745
1746 case OP_MOV:
1747 if (pCpu->pCurInstr->optype & DISOPTYPE_POTENTIALLY_DANGEROUS)
1748 {
1749 /* mov ss, src? */
1750 if ( (pCpu->param1.fUse & DISUSE_REG_SEG)
1751 && (pCpu->param1.base.reg_seg == DISSELREG_SS))
1752 {
1753 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1754 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1755 /** @todo this could cause a fault (ring 0 selector being loaded in ring 1) */
1756 }
1757#if 0 /* necessary for Haiku */
1758 else
1759 if ( (pCpu->param2.fUse & DISUSE_REG_SEG)
1760 && (pCpu->param2.base.reg_seg == USE_REG_SS)
1761 && (pCpu->param1.fUse & (DISUSE_REG_GEN32|DISUSE_REG_GEN16))) /** @todo memory operand must in theory be handled too */
1762 {
1763 /* mov GPR, ss */
1764 rc = patmPatchGenMovFromSS(pVM, pPatch, pCpu, pCurInstrGC);
1765 if (RT_SUCCESS(rc))
1766 rc = VWRN_CONTINUE_RECOMPILE;
1767 break;
1768 }
1769#endif
1770 }
1771 goto duplicate_instr;
1772
1773 case OP_POP:
1774 if (pCpu->pCurInstr->param1 == OP_PARM_REG_SS)
1775 {
1776 Assert(pCpu->pCurInstr->optype & DISOPTYPE_INHIBIT_IRQS);
1777
1778 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1779 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1780 }
1781 goto duplicate_instr;
1782
1783 case OP_STI:
1784 {
1785 RTRCPTR pNextInstrGC = 0; /* by default no inhibit irq */
1786
1787 /* In a sequence of instructions that inhibit irqs, only the first one actually inhibits irqs. */
1788 if (!(pPatch->flags & PATMFL_INHIBIT_IRQS))
1789 {
1790 pPatch->flags |= PATMFL_INHIBIT_IRQS | PATMFL_GENERATE_JUMPTOGUEST;
1791 fInhibitIRQInstr = true;
1792 pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
1793 Log(("Inhibit irqs for instruction OP_STI at %RRv\n", pCurInstrGC));
1794 }
1795 rc = patmPatchGenSti(pVM, pPatch, pCurInstrGC, pNextInstrGC);
1796
1797 if (RT_SUCCESS(rc))
1798 {
1799 DISCPUSTATE cpu = *pCpu;
1800 unsigned cbInstr;
1801 int disret;
1802 RCPTRTYPE(uint8_t *) pReturnInstrGC;
1803
1804 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1805
1806 pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
1807 { /* Force pNextInstrHC out of scope after using it */
1808 uint8_t *pNextInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
1809 if (pNextInstrHC == NULL)
1810 {
1811 AssertFailed();
1812 return VERR_PATCHING_REFUSED;
1813 }
1814
1815 // Disassemble the next instruction
1816 disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
1817 }
1818 if (disret == false)
1819 {
1820 AssertMsgFailed(("STI: Disassembly failed (probably page not present) -> return to caller\n"));
1821 return VERR_PATCHING_REFUSED;
1822 }
1823 pReturnInstrGC = pNextInstrGC + cbInstr;
1824
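 /* The point we would return to (after the sti and the instruction it guards) must not lie inside the 5-byte patch jump that overwrites the original code. */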
1825 if ( (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1826 || pReturnInstrGC <= pInstrGC
1827 || pReturnInstrGC - pInstrGC >= SIZEOF_NEARJUMP32
1828 )
1829 {
1830 /* Not an exit point for function duplication patches */
1831 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
1832 && RT_SUCCESS(rc))
1833 {
1834 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST; /* Don't generate a jump back */
1835 rc = VWRN_CONTINUE_RECOMPILE;
1836 }
1837 else
1838 rc = VINF_SUCCESS; //exit point
1839 }
1840 else {
1841 Log(("PATM: sti occurred too soon; refusing patch!\n"));
1842 rc = VERR_PATCHING_REFUSED; //not allowed!!
1843 }
1844 }
1845 break;
1846 }
1847
1848 case OP_POPF:
1849 {
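 /* Only jump back to the guest if this popf ends at or beyond the area overwritten by the 5-byte patch jump. */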
1850 bool fGenerateJmpBack = (pCurInstrGC + pCpu->cbInstr - pInstrGC >= SIZEOF_NEARJUMP32);
1851
1852 /* Not an exit point for IDT handler or function replacement patches */
1853 /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
1854 if (pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_DUPLICATE_FUNCTION))
1855 fGenerateJmpBack = false;
1856
1857 rc = patmPatchGenPopf(pVM, pPatch, pCurInstrGC + pCpu->cbInstr, !!(pCpu->prefix & DISPREFIX_OPSIZE), fGenerateJmpBack);
1858 if (RT_SUCCESS(rc))
1859 {
1860 if (fGenerateJmpBack == false)
1861 {
1862 /* Not an exit point for IDT handler or function replacement patches */
1863 rc = VWRN_CONTINUE_RECOMPILE;
1864 }
1865 else
1866 {
1867 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1868 rc = VINF_SUCCESS; /* exit point! */
1869 }
1870 }
1871 break;
1872 }
1873
1874 case OP_PUSHF:
1875 rc = patmPatchGenPushf(pVM, pPatch, !!(pCpu->prefix & DISPREFIX_OPSIZE));
1876 if (RT_SUCCESS(rc))
1877 rc = VWRN_CONTINUE_RECOMPILE;
1878 break;
1879
1880 case OP_PUSH:
1881 if (pCpu->pCurInstr->param1 == OP_PARM_REG_CS)
1882 {
1883 rc = patmPatchGenPushCS(pVM, pPatch);
1884 if (RT_SUCCESS(rc))
1885 rc = VWRN_CONTINUE_RECOMPILE;
1886 break;
1887 }
1888 goto duplicate_instr;
1889
1890 case OP_IRET:
1891 Log(("IRET at %RRv\n", pCurInstrGC));
1892 rc = patmPatchGenIret(pVM, pPatch, pCurInstrGC, !!(pCpu->prefix & DISPREFIX_OPSIZE));
1893 if (RT_SUCCESS(rc))
1894 {
1895 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1896 rc = VINF_SUCCESS; /* exit point by definition */
1897 }
1898 break;
1899
1900 case OP_ILLUD2:
1901 /* This appears to be some kind of kernel panic in Linux 2.4; no point in continuing. */
1902 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1903 if (RT_SUCCESS(rc))
1904 rc = VINF_SUCCESS; /* exit point by definition */
1905 Log(("Illegal opcode (0xf 0xb)\n"));
1906 break;
1907
1908 case OP_CPUID:
1909 rc = patmPatchGenCpuid(pVM, pPatch, pCurInstrGC);
1910 if (RT_SUCCESS(rc))
1911 rc = VWRN_CONTINUE_RECOMPILE;
1912 break;
1913
1914 case OP_STR:
1915 case OP_SLDT:
1916 rc = patmPatchGenSldtStr(pVM, pPatch, pCpu, pCurInstrGC);
1917 if (RT_SUCCESS(rc))
1918 rc = VWRN_CONTINUE_RECOMPILE;
1919 break;
1920
1921 case OP_SGDT:
1922 case OP_SIDT:
1923 rc = patmPatchGenSxDT(pVM, pPatch, pCpu, pCurInstrGC);
1924 if (RT_SUCCESS(rc))
1925 rc = VWRN_CONTINUE_RECOMPILE;
1926 break;
1927
1928 case OP_RETN:
1929 /* retn is an exit point for function patches */
1930 rc = patmPatchGenRet(pVM, pPatch, pCpu, pCurInstrGC);
1931 if (RT_SUCCESS(rc))
1932 rc = VINF_SUCCESS; /* exit point by definition */
1933 break;
1934
1935 case OP_SYSEXIT:
1936 /* Duplicate it, so it can be emulated in GC (or fault). */
1937 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
1938 if (RT_SUCCESS(rc))
1939 rc = VINF_SUCCESS; /* exit point by definition */
1940 break;
1941
1942 case OP_CALL:
1943 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
1944 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
1945 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
1946 */
1947 Assert(pCpu->param1.cb == 4 || pCpu->param1.cb == 6);
1948 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->param1.cb == 4 /* no far calls! */)
1949 {
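 /* 0xDEADBEEF is a dummy target address; the real destination of the indirect call is only known at run time. */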
1950 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, (RTRCPTR)0xDEADBEEF, true);
1951 if (RT_SUCCESS(rc))
1952 {
1953 rc = VWRN_CONTINUE_RECOMPILE;
1954 }
1955 break;
1956 }
1957 goto gen_illegal_instr;
1958
1959 case OP_JMP:
1960 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
1961 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
1962 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
1963 */
1964 Assert(pCpu->param1.cb == 4 || pCpu->param1.cb == 6);
1965 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->param1.cb == 4 /* no far jumps! */)
1966 {
1967 rc = patmPatchGenJump(pVM, pPatch, pCpu, pCurInstrGC);
1968 if (RT_SUCCESS(rc))
1969 rc = VINF_SUCCESS; /* end of branch */
1970 break;
1971 }
1972 goto gen_illegal_instr;
1973
1974 case OP_INT3:
1975 case OP_INT:
1976 case OP_INTO:
1977 goto gen_illegal_instr;
1978
1979 case OP_MOV_DR:
1980 /* Note: currently we let DRx writes cause a trap 0d (#GP); our trap handler will decide to interpret it or not. */
1981 if (pCpu->pCurInstr->param2 == OP_PARM_Dd)
1982 {
1983 rc = patmPatchGenMovDebug(pVM, pPatch, pCpu);
1984 if (RT_SUCCESS(rc))
1985 rc = VWRN_CONTINUE_RECOMPILE;
1986 break;
1987 }
1988 goto duplicate_instr;
1989
1990 case OP_MOV_CR:
1991 /* Note: currently we let CRx writes cause a trap 0d (#GP); our trap handler will decide to interpret it or not. */
1992 if (pCpu->pCurInstr->param2 == OP_PARM_Cd)
1993 {
1994 rc = patmPatchGenMovControl(pVM, pPatch, pCpu);
1995 if (RT_SUCCESS(rc))
1996 rc = VWRN_CONTINUE_RECOMPILE;
1997 break;
1998 }
1999 goto duplicate_instr;
2000
2001 default:
2002 if (pCpu->pCurInstr->optype & (DISOPTYPE_CONTROLFLOW | DISOPTYPE_PRIVILEGED_NOTRAP))
2003 {
2004gen_illegal_instr:
2005 rc = patmPatchGenIllegalInstr(pVM, pPatch);
2006 if (RT_SUCCESS(rc))
2007 rc = VINF_SUCCESS; /* exit point by definition */
2008 }
2009 else
2010 {
2011duplicate_instr:
2012 Log(("patmPatchGenDuplicate\n"));
2013 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
2014 if (RT_SUCCESS(rc))
2015 rc = VWRN_CONTINUE_RECOMPILE;
2016 }
2017 break;
2018 }
2019
2020end:
2021
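 /* A preceding instruction (e.g. sti) set the inhibit-irq flag; the instruction we just recompiled is the one that had to execute first, so the inhibit window ends here. */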
2022 if ( !fInhibitIRQInstr
2023 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2024 {
2025 int rc2;
2026 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
2027
2028 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2029 Log(("Clear inhibit IRQ flag at %RRv\n", pCurInstrGC));
2030 if (pPatch->flags & PATMFL_GENERATE_JUMPTOGUEST)
2031 {
2032 Log(("patmRecompileCallback: generate jump back to guest (%RRv) after fused instruction\n", pNextInstrGC));
2033
2034 rc2 = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2035 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
2036 rc = VINF_SUCCESS; /* end of the line */
2037 }
2038 else
2039 {
2040 rc2 = patmPatchGenClearInhibitIRQ(pVM, pPatch, pNextInstrGC);
2041 }
2042 if (RT_FAILURE(rc2))
2043 rc = rc2;
2044 }
2045
2046 if (RT_SUCCESS(rc))
2047 {
2048 // Single instruction patch: stop once we've copied enough instructions *and* the current instruction is not a relative jump
2049 if ( (pPatch->flags & PATMFL_CHECK_SIZE)
2050 && pCurInstrGC + pCpu->cbInstr - pInstrGC >= SIZEOF_NEARJUMP32
2051 && !(pCpu->pCurInstr->optype & DISOPTYPE_RELATIVE_CONTROLFLOW)
2052 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT) /* do not do this when the next instruction *must* be executed! */
2053 )
2054 {
2055 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->cbInstr;
2056
2057 // The end marker for this kind of patch is any instruction at a location outside our patch jump
2058 Log(("patmRecompileCallback: end found for single instruction patch at %RRv cbInstr %d\n", pNextInstrGC, pCpu->cbInstr));
2059
2060 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC);
2061 AssertRC(rc);
2062 }
2063 }
2064 return rc;
2065}
2066
2067
2068#ifdef LOG_ENABLED
2069
2070/** Adds a disasm jump record (temporary; used to prevent duplicate analysis of jump targets).
2071 *
2072 * @param pVM The VM to operate on.
2073 * @param pPatch Patch structure ptr
2074 * @param pInstrGC Guest context pointer to the jump target
2075 *
2076 */
2077static void patmPatchAddDisasmJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
2078{
2079 PAVLPVNODECORE pRec;
2080
2081 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
2082 Assert(pRec);
2083 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
2084
2085 int ret = RTAvlPVInsert(&pPatch->pTempInfo->DisasmJumpTree, pRec);
2086 Assert(ret);
2087}
2088
2089/**
2090 * Checks if jump target has been analysed before.
2091 *
2092 * @returns true if the jump target has been analysed before, false otherwise.
2093 * @param pPatch Patch struct
2094 * @param pInstrGC Jump target
2095 *
2096 */
2097static bool patmIsKnownDisasmJump(PPATCHINFO pPatch, RTRCPTR pInstrGC)
2098{
2099 PAVLPVNODECORE pRec;
2100
2101 pRec = RTAvlPVGet(&pPatch->pTempInfo->DisasmJumpTree, (AVLPVKEY)(uintptr_t)pInstrGC);
2102 if (pRec)
2103 return true;
2104 return false;
2105}
2106
2107/**
2108 * For proper disassembly of the final patch block
2109 *
2110 * @returns VBox status code.
2111 * @param pVM The VM to operate on.
2112 * @param pCpu CPU disassembly state
2113 * @param pInstrGC Guest context pointer to privileged instruction
2114 * @param pCurInstrGC Guest context pointer to the current instruction
2115 * @param pCacheRec Cache record ptr
2116 *
2117 */
2118int patmr3DisasmCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
2119{
2120 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2121 NOREF(pInstrGC);
2122
2123 if (pCpu->pCurInstr->opcode == OP_INT3)
2124 {
2125 /* Could be an int3 inserted in a call patch. Check to be sure */
2126 DISCPUSTATE cpu;
2127 RTRCPTR pOrgJumpGC;
2128
2129 pOrgJumpGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2130
2131 { /* Force pOrgJumpHC out of scope after using it */
2132 uint8_t *pOrgJumpHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pOrgJumpGC);
2133
2134 bool disret = patmR3DisInstr(pVM, pPatch, pOrgJumpGC, pOrgJumpHC, PATMREAD_ORGCODE, &cpu, NULL);
2135 if (!disret || cpu.pCurInstr->opcode != OP_CALL || cpu.param1.cb != 4 /* only near calls */)
2136 return VINF_SUCCESS;
2137 }
2138 return VWRN_CONTINUE_ANALYSIS;
2139 }
2140
2141 if ( pCpu->pCurInstr->opcode == OP_ILLUD2
2142 && PATMIsPatchGCAddr(pVM, pCurInstrGC))
2143 {
2144 /* the indirect call patch contains an 0xF/0xB illegal instr to call for assistance; check for this and continue */
2145 return VWRN_CONTINUE_ANALYSIS;
2146 }
2147
2148 if ( (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
2149 || pCpu->pCurInstr->opcode == OP_INT
2150 || pCpu->pCurInstr->opcode == OP_IRET
2151 || pCpu->pCurInstr->opcode == OP_RETN
2152 || pCpu->pCurInstr->opcode == OP_RETF
2153 )
2154 {
2155 return VINF_SUCCESS;
2156 }
2157
2158 if (pCpu->pCurInstr->opcode == OP_ILLUD2)
2159 return VINF_SUCCESS;
2160
2161 return VWRN_CONTINUE_ANALYSIS;
2162}
2163
2164
2165/**
2166 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2167 *
2168 * @returns VBox status code.
2169 * @param pVM The VM to operate on.
2170 * @param pInstrGC Guest context pointer to the initial privileged instruction
2171 * @param pCurInstrGC Guest context pointer to the current instruction
2172 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2173 * @param pCacheRec Cache record ptr
2174 *
2175 */
2176int patmr3DisasmCode(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2177{
2178 DISCPUSTATE cpu;
2179 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2180 int rc = VWRN_CONTINUE_ANALYSIS;
2181 uint32_t cbInstr, delta;
2182 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2183 bool disret;
2184 char szOutput[256];
2185
2186 Assert(pCurInstrHC != PATCHCODE_PTR_HC(pPatch) || pPatch->pTempInfo->DisasmJumpTree == 0);
2187
2188 /* We need this to determine branch targets (and for disassembling). */
2189 delta = pVM->patm.s.pPatchMemGC - (uintptr_t)pVM->patm.s.pPatchMemHC;
2190
2191 while (rc == VWRN_CONTINUE_ANALYSIS)
2192 {
2193 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2194 if (pCurInstrHC == NULL)
2195 {
2196 rc = VERR_PATCHING_REFUSED;
2197 goto end;
2198 }
2199
2200 disret = patmR3DisInstrToStr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_RAWCODE,
2201 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2202 if (PATMIsPatchGCAddr(pVM, pCurInstrGC))
2203 {
2204 RTRCPTR pOrgInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2205
2206 if (pOrgInstrGC != pPatch->pTempInfo->pLastDisasmInstrGC)
2207 Log(("DIS %RRv<-%s", pOrgInstrGC, szOutput));
2208 else
2209 Log(("DIS %s", szOutput));
2210
2211 pPatch->pTempInfo->pLastDisasmInstrGC = pOrgInstrGC;
2212 if (patmIsIllegalInstr(pPatch, pOrgInstrGC))
2213 {
2214 rc = VINF_SUCCESS;
2215 goto end;
2216 }
2217 }
2218 else
2219 Log(("DIS: %s", szOutput));
2220
2221 if (disret == false)
2222 {
2223 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2224 rc = VINF_SUCCESS;
2225 goto end;
2226 }
2227
2228 rc = pfnPATMR3Disasm(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2229 if (rc != VWRN_CONTINUE_ANALYSIS) {
2230 break; //done!
2231 }
2232
2233 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction) */
2234 if ( (cpu.pCurInstr->optype & DISOPTYPE_CONTROLFLOW)
2235 && (OP_PARM_VTYPE(cpu.pCurInstr->param1) == OP_PARM_J)
2236 && cpu.pCurInstr->opcode != OP_CALL /* complete functions are replaced; don't bother here. */
2237 )
2238 {
2239 RTRCPTR pTargetGC = PATMResolveBranch(&cpu, pCurInstrGC);
2240 RTRCPTR pOrgTargetGC;
2241
2242 if (pTargetGC == 0)
2243 {
2244 Log(("We don't support far jumps here!! (%08X)\n", cpu.param1.fUse));
2245 rc = VERR_PATCHING_REFUSED;
2246 break;
2247 }
2248
2249 if (!PATMIsPatchGCAddr(pVM, pTargetGC))
2250 {
2251 //jump back to guest code
2252 rc = VINF_SUCCESS;
2253 goto end;
2254 }
2255 pOrgTargetGC = PATMR3PatchToGCPtr(pVM, pTargetGC, 0);
2256
2257 if (patmIsCommonIDTHandlerPatch(pVM, pOrgTargetGC))
2258 {
2259 rc = VINF_SUCCESS;
2260 goto end;
2261 }
2262
2263 if (patmIsKnownDisasmJump(pPatch, pTargetGC) == false)
2264 {
2265 /* New jump, let's check it. */
2266 patmPatchAddDisasmJump(pVM, pPatch, pTargetGC);
2267
2268 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2269 rc = patmr3DisasmCode(pVM, pInstrGC, pTargetGC, pfnPATMR3Disasm, pCacheRec);
2270 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2271
2272 if (rc != VINF_SUCCESS) {
2273 break; //done!
2274 }
2275 }
2276 if (cpu.pCurInstr->opcode == OP_JMP)
2277 {
2278 /* Unconditional jump; return to caller. */
2279 rc = VINF_SUCCESS;
2280 goto end;
2281 }
2282
2283 rc = VWRN_CONTINUE_ANALYSIS;
2284 }
2285 pCurInstrGC += cbInstr;
2286 }
2287end:
2288 return rc;
2289}
2290
2291/**
2292 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2293 *
2294 * @returns VBox status code.
2295 * @param pVM The VM to operate on.
2296 * @param pInstrGC Guest context pointer to the initial privileged instruction
2297 * @param pCurInstrGC Guest context pointer to the current instruction
2298 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2299 * @param pCacheRec Cache record ptr
2300 *
2301 */
2302int patmr3DisasmCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2303{
2304 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2305
2306 int rc = patmr3DisasmCode(pVM, pInstrGC, pCurInstrGC, pfnPATMR3Disasm, pCacheRec);
2307 /* Free all disasm jump records. */
2308 patmEmptyTree(pVM, &pPatch->pTempInfo->DisasmJumpTree);
2309 return rc;
2310}
2311
2312#endif /* LOG_ENABLED */
2313
2314/**
2315 * Detects whether the specified address falls within the 5-byte jump generated for an active patch.
2316 * If so, this patch is permanently disabled.
2317 *
2318 * @param pVM The VM to operate on.
2319 * @param pInstrGC Guest context pointer to instruction
2320 * @param pConflictGC Guest context pointer to check
2321 *
2322 * @note also checks for patch hints to make sure they can never be enabled if a conflict is present.
2323 *
2324 */
2325VMMR3DECL(int) PATMR3DetectConflict(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictGC)
2326{
2327 PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, pConflictGC, true /* include patch hints */);
2328 if (pTargetPatch)
2329 {
2330 return patmDisableUnusablePatch(pVM, pInstrGC, pConflictGC, pTargetPatch);
2331 }
2332 return VERR_PATCH_NO_CONFLICT;
2333}
2334
2335/**
2336 * Recompile the code stream until the callback function detects a failure or decides everything is acceptable
2337 *
2338 * @returns VBox status code.
2339 * @param pVM The VM to operate on.
2340 * @param pInstrGC Guest context pointer to privileged instruction
2341 * @param pCurInstrGC Guest context pointer to the current instruction
2342 * @param pfnPATMR3Recompile Callback for testing the disassembled instruction
2343 * @param pCacheRec Cache record ptr
2344 *
2345 */
2346static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Recompile, PPATMP2GLOOKUPREC pCacheRec)
2347{
2348 DISCPUSTATE cpu;
2349 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2350 int rc = VWRN_CONTINUE_ANALYSIS;
2351 uint32_t cbInstr;
2352 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2353 bool disret;
2354#ifdef LOG_ENABLED
2355 char szOutput[256];
2356#endif
2357
2358 while (rc == VWRN_CONTINUE_RECOMPILE)
2359 {
2360 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2361 if (pCurInstrHC == NULL)
2362 {
2363 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2364 goto end;
2365 }
2366#ifdef LOG_ENABLED
2367 disret = patmR3DisInstrToStr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE,
2368 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2369 Log(("Recompile: %s", szOutput));
2370#else
2371 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
2372#endif
2373 if (disret == false)
2374 {
2375 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2376
2377 /* Add lookup record for patch to guest address translation */
2378 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
2379 patmPatchGenIllegalInstr(pVM, pPatch);
2380 rc = VINF_SUCCESS; /* Note: don't fail here; we might refuse an important patch!! */
2381 goto end;
2382 }
2383
2384 rc = pfnPATMR3Recompile(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2385 if (rc != VWRN_CONTINUE_RECOMPILE)
2386 {
2387 /* If irqs are inhibited because of the current instruction, then we must make sure the next one is executed! */
2388 if ( rc == VINF_SUCCESS
2389 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2390 {
2391 DISCPUSTATE cpunext;
2392 uint32_t opsizenext;
2393 uint8_t *pNextInstrHC;
2394 RTRCPTR pNextInstrGC = pCurInstrGC + cbInstr;
2395
2396 Log(("patmRecompileCodeStream: irqs inhibited by instruction %RRv\n", pNextInstrGC));
2397
2398 /* Certain instructions (e.g. sti) force the next instruction to be executed before any interrupts can occur.
2399 * Recompile the next instruction as well
2400 */
2401 pNextInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
2402 if (pNextInstrHC == NULL)
2403 {
2404 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2405 goto end;
2406 }
2407 disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpunext, &opsizenext);
2408 if (disret == false)
2409 {
2410 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2411 goto end;
2412 }
2413 switch (cpunext.pCurInstr->opcode)
2414 {
2415 case OP_IRET: /* inhibit cleared in generated code */
2416 case OP_SYSEXIT: /* faults; inhibit should be cleared in HC handling */
2417 case OP_HLT:
2418 break; /* recompile these */
2419
2420 default:
2421 if (cpunext.pCurInstr->optype & DISOPTYPE_CONTROLFLOW)
2422 {
2423 Log(("Unexpected control flow instruction after inhibit irq instruction\n"));
2424
2425 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2426 AssertRC(rc);
2427 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2428 goto end; /** @todo should be ok to ignore instruction fusing in this case */
2429 }
2430 break;
2431 }
2432
2433 /* Note: after a cli we must continue to a proper exit point */
2434 if (cpunext.pCurInstr->opcode != OP_CLI)
2435 {
2436 rc = pfnPATMR3Recompile(pVM, &cpunext, pInstrGC, pNextInstrGC, pCacheRec);
2437 if (RT_SUCCESS(rc))
2438 {
2439 rc = VINF_SUCCESS;
2440 goto end;
2441 }
2442 break;
2443 }
2444 else
2445 rc = VWRN_CONTINUE_RECOMPILE;
2446 }
2447 else
2448 break; /* done! */
2449 }
2450
2451 /** @todo continue with the instructions following the jump and then recompile the jump target code */
2452
2453
2454 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction). */
2455 if ( (cpu.pCurInstr->optype & DISOPTYPE_CONTROLFLOW)
2456 && (OP_PARM_VTYPE(cpu.pCurInstr->param1) == OP_PARM_J)
2457 && cpu.pCurInstr->opcode != OP_CALL /* complete functions are replaced; don't bother here. */
2458 )
2459 {
2460 RCPTRTYPE(uint8_t *) addr = PATMResolveBranch(&cpu, pCurInstrGC);
2461 if (addr == 0)
2462 {
2463 Log(("We don't support far jumps here!! (%08X)\n", cpu.param1.fUse));
2464 rc = VERR_PATCHING_REFUSED;
2465 break;
2466 }
2467
2468 Log(("Jump encountered target %RRv\n", addr));
2469
2470 /* We don't check if the branch target lies in a valid page as we've already done that in the analysis phase. */
2471 if (!(cpu.pCurInstr->optype & DISOPTYPE_UNCOND_CONTROLFLOW))
2472 {
2473 Log(("patmRecompileCodeStream continue passed conditional jump\n"));
2474 /* First we need to finish this linear code stream until the next exit point. */
2475 rc = patmRecompileCodeStream(pVM, pInstrGC, pCurInstrGC+cbInstr, pfnPATMR3Recompile, pCacheRec);
2476 if (RT_FAILURE(rc))
2477 {
2478 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2479 break; //fatal error
2480 }
2481 }
2482
2483 if (patmGuestGCPtrToPatchGCPtr(pVM, pPatch, addr) == 0)
2484 {
2485 /* New code; let's recompile it. */
2486 Log(("patmRecompileCodeStream continue with jump\n"));
2487
2488 /*
2489 * If we are jumping to an existing patch (or within 5 bytes of the entrypoint), then we must temporarily disable
2490 * this patch so we can continue our analysis
2491 *
2492 * We rely on CSAM to detect and resolve conflicts
2493 */
2494 PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, addr);
2495 if (pTargetPatch)
2496 {
2497 Log(("Found active patch at target %RRv (%RRv) -> temporarily disabling it!!\n", addr, pTargetPatch->pPrivInstrGC));
2498 PATMR3DisablePatch(pVM, pTargetPatch->pPrivInstrGC);
2499 }
2500
2501 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2502 rc = patmRecompileCodeStream(pVM, pInstrGC, addr, pfnPATMR3Recompile, pCacheRec);
2503 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2504
2505 if (pTargetPatch)
2506 PATMR3EnablePatch(pVM, pTargetPatch->pPrivInstrGC);
2507
2508 if (RT_FAILURE(rc))
2509 {
2510 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2511 break; //done!
2512 }
2513 }
2514 /* Always return to caller here; we're done! */
2515 rc = VINF_SUCCESS;
2516 goto end;
2517 }
2518 else
2519 if (cpu.pCurInstr->optype & DISOPTYPE_UNCOND_CONTROLFLOW)
2520 {
2521 rc = VINF_SUCCESS;
2522 goto end;
2523 }
2524 pCurInstrGC += cbInstr;
2525 }
2526end:
2527 Assert(!(pPatch->flags & PATMFL_RECOMPILE_NEXT));
2528 return rc;
2529}
2530
2531
2532/**
2533 * Generate the jump from guest to patch code
2534 *
2535 * @returns VBox status code.
2536 * @param pVM The VM to operate on.
2537 * @param pPatch Patch record
2538 * @param pCacheRec Guest translation lookup cache record
 * @param fAddFixup Whether to add a relocation fixup record for the jump (default: true)
2539 */
2540static int patmGenJumpToPatch(PVM pVM, PPATCHINFO pPatch, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2541{
2542 uint8_t temp[8];
2543 uint8_t *pPB;
2544 int rc;
2545
2546 Assert(pPatch->cbPatchJump <= sizeof(temp));
2547 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
2548
2549 pPB = PATMGCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2550 Assert(pPB);
2551
2552#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
2553 if (pPatch->flags & PATMFL_JUMP_CONFLICT)
2554 {
2555 Assert(pPatch->pPatchJumpDestGC);
2556
2557 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
2558 {
2559 // jmp [PatchCode]
2560 if (fAddFixup)
2561 {
2562 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2563 {
2564 Log(("Relocation failed for the jump in the guest code!!\n"));
2565 return VERR_PATCHING_REFUSED;
2566 }
2567 }
2568
2569 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2570 *(uint32_t *)&temp[1] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
2571 }
2572 else
2573 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
2574 {
2575 // jmp [PatchCode]
2576 if (fAddFixup)
2577 {
2578 if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2579 {
2580 Log(("Relocation failed for the jump in the guest code!!\n"));
2581 return VERR_PATCHING_REFUSED;
2582 }
2583 }
2584
2585 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2586 temp[1] = pPatch->aPrivInstr[1]; //jump opcode copied from original instruction
2587 *(uint32_t *)&temp[2] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
2588 }
2589 else
2590 {
2591 Assert(0);
2592 return VERR_PATCHING_REFUSED;
2593 }
2594 }
2595 else
2596#endif
2597 {
2598 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2599
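 // The guest's privileged instruction is overwritten with a 5-byte near jump (E9 rel32) into patch memory.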
2600 // jmp [PatchCode]
2601 if (fAddFixup)
2602 {
2603 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
2604 {
2605 Log(("Relocation failed for the jump in the guest code!!\n"));
2606 return VERR_PATCHING_REFUSED;
2607 }
2608 }
2609 temp[0] = 0xE9; //jmp
2610 *(uint32_t *)&temp[1] = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
2611 }
2612 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2613 AssertRC(rc);
2614
2615 if (rc == VINF_SUCCESS)
2616 pPatch->flags |= PATMFL_PATCHED_GUEST_CODE;
2617
2618 return rc;
2619}
2620
2621/**
2622 * Remove the jump from guest to patch code
2623 *
2624 * @returns VBox status code.
2625 * @param pVM The VM to operate on.
2626 * @param pPatch Patch record
2627 */
2628static int patmRemoveJumpToPatch(PVM pVM, PPATCHINFO pPatch)
2629{
2630#ifdef DEBUG
2631 DISCPUSTATE cpu;
2632 char szOutput[256];
2633 uint32_t cbInstr, i = 0;
2634 bool disret;
2635
2636 while (i < pPatch->cbPrivInstr)
2637 {
2638 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
2639 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2640 if (disret == false)
2641 break;
2642
2643 Log(("Org patch jump: %s", szOutput));
2644 Assert(cbInstr);
2645 i += cbInstr;
2646 }
2647#endif
2648
2649 /* Restore original code (privileged instruction + following instructions that were overwritten because of the 5/6 byte jmp). */
2650 int rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, pPatch->cbPatchJump);
2651#ifdef DEBUG
2652 if (rc == VINF_SUCCESS)
2653 {
2654 i = 0;
2655 while (i < pPatch->cbPrivInstr)
2656 {
2657 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
2658 &cpu, &cbInstr, szOutput, sizeof(szOutput));
2659 if (disret == false)
2660 break;
2661
2662 Log(("Org instr: %s", szOutput));
2663 Assert(cbInstr);
2664 i += cbInstr;
2665 }
2666 }
2667#endif
2668 pPatch->flags &= ~PATMFL_PATCHED_GUEST_CODE;
2669 return rc;
2670}
2671
2672/**
2673 * Generate the call from guest to patch code
2674 *
2675 * @returns VBox status code.
2676 * @param pVM The VM to operate on.
2677 * @param pPatch Patch record
2678 * @param pTargetGC Guest context address of the call target (patch code)
2679 * @param pCacheRec Guest translation cache record
 * @param fAddFixup Whether to add a relocation fixup record for the call (default: true)
2680 */
2681static int patmGenCallToPatch(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTargetGC, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2682{
2683 uint8_t temp[8];
2684 uint8_t *pPB;
2685 int rc;
2686
2687 Assert(pPatch->cbPatchJump <= sizeof(temp));
2688
2689 pPB = PATMGCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2690 Assert(pPB);
2691
2692 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2693
2694 // call/jmp [PatchCode]
2695 if (fAddFixup)
2696 {
2697 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
2698 {
2699 Log(("Relocation failed for the jump in the guest code!!\n"));
2700 return VERR_PATCHING_REFUSED;
2701 }
2702 }
2703
2704 Assert(pPatch->aPrivInstr[0] == 0xE8 || pPatch->aPrivInstr[0] == 0xE9); /* call or jmp */
2705 temp[0] = pPatch->aPrivInstr[0];
2706 *(uint32_t *)&temp[1] = (uint32_t)pTargetGC - ((uint32_t)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
2707
2708 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2709 AssertRC(rc);
2710
2711 return rc;
2712}
2713
2714
2715/**
2716 * Patch cli/sti pushf/popf instruction block at specified location
2717 *
2718 * @returns VBox status code.
2719 * @param pVM The VM to operate on.
2720 * @param pInstrGC Guest context pointer to the privileged instruction
2721 * @param pInstrHC Host context pointer to the privileged instruction
2722 * @param uOpcode Instruction opcode
2723 * @param uOpSize Size of starting instruction
2724 * @param pPatchRec Patch record
2725 *
2726 * @note Returns failure if patching is not allowed or not possible.
2727 *
2728 */
2729VMMR3DECL(int) PATMR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2730 uint32_t uOpcode, uint32_t uOpSize, PPATMPATCHREC pPatchRec)
2731{
2732 PPATCHINFO pPatch = &pPatchRec->patch;
2733 int rc = VERR_PATCHING_REFUSED;
2734 uint32_t orgOffsetPatchMem = ~0;
2735 RTRCPTR pInstrStart;
2736 bool fInserted;
2737 NOREF(pInstrHC); NOREF(uOpSize);
2738
2739 /* Save original offset (in case of failures later on) */
2740 /** @todo use the hypervisor heap (that has quite a few consequences for save/restore though) */
2741 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2742
2743 Assert(!(pPatch->flags & (PATMFL_GUEST_SPECIFIC|PATMFL_USER_MODE|PATMFL_TRAPHANDLER)));
2744 switch (uOpcode)
2745 {
2746 case OP_MOV:
2747 break;
2748
2749 case OP_CLI:
2750 case OP_PUSHF:
2751 /* We can 'call' a cli or pushf patch. It will either return to the original guest code when IF is set again, or fault. */
2752 /* Note: special precautions are taken when disabling and enabling such patches. */
2753 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
2754 break;
2755
2756 default:
2757 if (!(pPatch->flags & PATMFL_IDTHANDLER))
2758 {
2759 AssertMsg(0, ("PATMR3PatchBlock: Invalid opcode %x\n", uOpcode));
2760 return VERR_INVALID_PARAMETER;
2761 }
2762 }
2763
2764 if (!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)))
2765 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
2766
2767 /* If we're going to insert a patch jump, then the jump itself is not allowed to cross a page boundary. */
2768 if ( (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2769 && PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + SIZEOF_NEARJUMP32)
2770 )
2771 {
2772 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
2773 Log(("Patch jump would cross page boundary -> refuse!!\n"));
2774 rc = VERR_PATCHING_REFUSED;
2775 goto failure;
2776 }
2777
2778 pPatch->nrPatch2GuestRecs = 0;
2779 pInstrStart = pInstrGC;
2780
2781#ifdef PATM_ENABLE_CALL
2782 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
2783#endif
2784
2785 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2786 pPatch->uCurPatchOffset = 0;
2787
2788 if ((pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER)) == PATMFL_IDTHANDLER)
2789 {
2790 Assert(pPatch->flags & PATMFL_INTHANDLER);
2791
2792 /* Install fake cli patch (to clear the virtual IF and check int xx parameters) */
2793 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2794 if (RT_FAILURE(rc))
2795 goto failure;
2796 }
2797
2798 /***************************************************************************************************************************/
2799 /* Note: We can't insert *any* code before a sysenter handler; some linux guests have an invalid stack at this point!!!!! */
2800 /***************************************************************************************************************************/
2801#ifdef VBOX_WITH_STATISTICS
2802 if (!(pPatch->flags & PATMFL_SYSENTER))
2803 {
2804 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2805 if (RT_FAILURE(rc))
2806 goto failure;
2807 }
2808#endif
2809
2810 PATMP2GLOOKUPREC cacheRec;
2811 RT_ZERO(cacheRec);
2812 cacheRec.pPatch = pPatch;
2813
2814 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, &cacheRec);
2815 /* Free leftover lock if any. */
2816 if (cacheRec.Lock.pvMap)
2817 {
2818 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2819 cacheRec.Lock.pvMap = NULL;
2820 }
2821 if (rc != VINF_SUCCESS)
2822 {
2823 Log(("PATMR3PatchCli: patmRecompileCodeStream failed with %d\n", rc));
2824 goto failure;
2825 }
2826
2827 /* Calculated during analysis. */
2828 if (pPatch->cbPatchBlockSize < SIZEOF_NEARJUMP32)
2829 {
2830 /* Most likely cause: we encountered an illegal instruction very early on. */
2831 /** @todo could turn it into an int3 callable patch. */
2832 Log(("PATMR3PatchBlock: patch block too small -> refuse\n"));
2833 rc = VERR_PATCHING_REFUSED;
2834 goto failure;
2835 }
2836
2837 /* size of patch block */
2838 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2839
2840
2841 /* Update free pointer in patch memory. */
2842 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2843 /* Round to next 8 byte boundary. */
2844 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2845
2846 /*
2847 * Insert into patch to guest lookup tree
2848 */
2849 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2850 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2851 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2852 AssertMsg(fInserted, ("RTAvlULInsert failed for %x\n", pPatchRec->CoreOffset.Key));
2853 if (!fInserted)
2854 {
2855 rc = VERR_PATCHING_REFUSED;
2856 goto failure;
2857 }
2858
2859 /* Note that patmr3SetBranchTargets can install additional patches!! */
2860 rc = patmr3SetBranchTargets(pVM, pPatch);
2861 if (rc != VINF_SUCCESS)
2862 {
2863 Log(("PATMR3PatchCli: patmr3SetBranchTargets failed with %d\n", rc));
2864 goto failure;
2865 }
2866
2867#ifdef LOG_ENABLED
2868 Log(("Patch code ----------------------------------------------------------\n"));
2869 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, &cacheRec);
2870 /* Free leftover lock if any. */
2871 if (cacheRec.Lock.pvMap)
2872 {
2873 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2874 cacheRec.Lock.pvMap = NULL;
2875 }
2876 Log(("Patch code ends -----------------------------------------------------\n"));
2877#endif
2878
2879 /* make a copy of the guest code bytes that will be overwritten */
2880 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
2881
2882 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
2883 AssertRC(rc);
2884
2885 if (pPatch->flags & PATMFL_INT3_REPLACEMENT_BLOCK)
2886 {
2887 /*uint8_t bASMInt3 = 0xCC; - unused */
2888
2889 Log(("PATMR3PatchBlock %RRv -> int 3 callable patch.\n", pPatch->pPrivInstrGC));
2890 /* Replace first opcode byte with 'int 3'. */
2891 rc = patmActivateInt3Patch(pVM, pPatch);
2892 if (RT_FAILURE(rc))
2893 goto failure;
2894
2895 /* normal patch can be turned into an int3 patch -> clear patch jump installation flag. */
2896 pPatch->flags &= ~PATMFL_MUST_INSTALL_PATCHJMP;
2897
2898 pPatch->flags &= ~PATMFL_INSTR_HINT;
2899 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
2900 }
2901 else
2902 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2903 {
2904 Assert(!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)));
2905 /* now insert a jump in the guest code */
2906 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
2907 AssertRC(rc);
2908 if (RT_FAILURE(rc))
2909 goto failure;
2910
2911 }
2912
2913 PATM_LOG_RAW_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
2914
2915 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2916 pPatch->pTempInfo->nrIllegalInstr = 0;
2917
2918 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
2919
2920 pPatch->uState = PATCH_ENABLED;
2921 return VINF_SUCCESS;
2922
2923failure:
2924 if (pPatchRec->CoreOffset.Key)
2925 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
2926
2927 patmEmptyTree(pVM, &pPatch->FixupTree);
2928 pPatch->nrFixups = 0;
2929
2930 patmEmptyTree(pVM, &pPatch->JumpTree);
2931 pPatch->nrJumpRecs = 0;
2932
2933 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2934 pPatch->pTempInfo->nrIllegalInstr = 0;
2935
2936 /* Turn this patch into a dummy. */
2937 pPatch->uState = PATCH_REFUSED;
2938 pPatch->pPatchBlockOffset = 0;
2939
2940 // Give back the patch memory we no longer need
2941 Assert(orgOffsetPatchMem != (uint32_t)~0);
2942 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
2943
2944 return rc;
2945}
2946
2947/**
2948 * Patch IDT handler
2949 *
2950 * @returns VBox status code.
2951 * @param pVM The VM to operate on.
2952 * @param pInstrGC Guest context pointer to the privileged instruction
2953 * @param uOpSize Size of starting instruction
2954 * @param pPatchRec Patch record
2955 * @param pCacheRec Cache record ptr
2956 *
2957 * @note Returns failure if patching is not allowed or not possible.
2958 *
2959 */
2960static int patmIdtHandler(PVM pVM, RTRCPTR pInstrGC, uint32_t uOpSize, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
2961{
2962 PPATCHINFO pPatch = &pPatchRec->patch;
2963 bool disret;
2964 DISCPUSTATE cpuPush, cpuJmp;
2965 uint32_t cbInstr;
2966 RTRCPTR pCurInstrGC = pInstrGC;
2967 uint8_t *pCurInstrHC, *pInstrHC;
2968 uint32_t orgOffsetPatchMem = ~0;
2969
2970 pInstrHC = pCurInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2971 AssertReturn(pCurInstrHC, VERR_PAGE_NOT_PRESENT);
2972
2973 /*
2974 * In Linux it's often the case that many interrupt handlers push a predefined value onto the stack
2975 * and then jump to a common entrypoint. In order not to waste a lot of memory, we will check for this
2976 * condition here and only patch the common entrypoint once.
2977 */
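 /*
 * Typical (hypothetical) Linux interrupt stub matched here:
 *      push <vector>
 *      jmp  common_interrupt
 * i.e. a 'push imm' followed by a fixed-displacement jmp; the common target is
 * patched once and each stub merely gets a small patch of its own that re-does
 * the push and jumps to the shared entrypoint patch.
 */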
2978 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuPush, &cbInstr);
2979 Assert(disret);
2980 if (disret && cpuPush.pCurInstr->opcode == OP_PUSH)
2981 {
2982 RTRCPTR pJmpInstrGC;
2983 int rc;
2984 pCurInstrGC += cbInstr;
2985
2986 disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuJmp, &cbInstr);
2987 if ( disret
2988 && cpuJmp.pCurInstr->opcode == OP_JMP
2989 && (pJmpInstrGC = PATMResolveBranch(&cpuJmp, pCurInstrGC))
2990 )
2991 {
2992 bool fInserted;
2993 PPATMPATCHREC pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
2994 if (pJmpPatch == 0)
2995 {
2996 /* Patch it first! */
2997 rc = PATMR3InstallPatch(pVM, pJmpInstrGC, pPatch->flags | PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT);
2998 if (rc != VINF_SUCCESS)
2999 goto failure;
3000 pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
3001 Assert(pJmpPatch);
3002 }
3003 if (pJmpPatch->patch.uState != PATCH_ENABLED)
3004 goto failure;
3005
3006 /* save original offset (in case of failures later on) */
3007 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3008
3009 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3010 pPatch->uCurPatchOffset = 0;
3011 pPatch->nrPatch2GuestRecs = 0;
3012
3013#ifdef VBOX_WITH_STATISTICS
3014 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3015 if (RT_FAILURE(rc))
3016 goto failure;
3017#endif
3018
3019 /* Install fake cli patch (to clear the virtual IF) */
3020 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
3021 if (RT_FAILURE(rc))
3022 goto failure;
3023
3024 /* Add lookup record for patch to guest address translation (for the push) */
3025 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pInstrGC, PATM_LOOKUP_BOTHDIR);
3026
3027 /* Duplicate push. */
3028 rc = patmPatchGenDuplicate(pVM, pPatch, &cpuPush, pInstrGC);
3029 if (RT_FAILURE(rc))
3030 goto failure;
3031
3032 /* Generate jump to common entrypoint. */
3033 rc = patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, PATCHCODE_PTR_GC(&pJmpPatch->patch));
3034 if (RT_FAILURE(rc))
3035 goto failure;
3036
3037 /* size of patch block */
3038 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3039
3040 /* Update free pointer in patch memory. */
3041 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3042 /* Round to next 8 byte boundary */
3043 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3044
3045 /* There's no jump from guest to patch code. */
3046 pPatch->cbPatchJump = 0;
3047
3048
3049#ifdef LOG_ENABLED
3050 Log(("Patch code ----------------------------------------------------------\n"));
3051 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3052 Log(("Patch code ends -----------------------------------------------------\n"));
3053#endif
3054 Log(("Successfully installed IDT handler patch at %RRv\n", pInstrGC));
3055
3056 /*
3057 * Insert into patch to guest lookup tree
3058 */
3059 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3060 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3061 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3062 AssertMsg(fInserted, ("RTAvlULInsert failed for %x\n", pPatchRec->CoreOffset.Key));
3063
3064 pPatch->uState = PATCH_ENABLED;
3065
3066 return VINF_SUCCESS;
3067 }
3068 }
3069failure:
3070 /* Give back the patch memory we no longer need */
3071 if (orgOffsetPatchMem != (uint32_t)~0)
3072 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3073
3074 return PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, OP_CLI, uOpSize, pPatchRec);
3075}
3076
3077/**
3078 * Install a trampoline to call a guest trap handler directly
3079 *
3080 * @returns VBox status code.
3081 * @param pVM The VM to operate on.
3082 * @param pInstrGC   Guest context pointer to the privileged instruction
3083 * @param pPatchRec Patch record
3084 * @param pCacheRec Cache record ptr
3085 *
3086 */
3087static int patmInstallTrapTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3088{
3089 PPATCHINFO pPatch = &pPatchRec->patch;
3090 int rc = VERR_PATCHING_REFUSED;
3091 uint32_t orgOffsetPatchMem = ~0;
3092 bool fInserted;
3093
3094 // save original offset (in case of failures later on)
3095 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3096
3097 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3098 pPatch->uCurPatchOffset = 0;
3099 pPatch->nrPatch2GuestRecs = 0;
3100
3101#ifdef VBOX_WITH_STATISTICS
3102 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3103 if (RT_FAILURE(rc))
3104 goto failure;
3105#endif
3106
3107 rc = patmPatchGenTrapEntry(pVM, pPatch, pInstrGC);
3108 if (RT_FAILURE(rc))
3109 goto failure;
3110
3111 /* size of patch block */
3112 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3113
3114 /* Update free pointer in patch memory. */
3115 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3116 /* Round to next 8 byte boundary */
3117 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3118
3119 /* There's no jump from guest to patch code. */
3120 pPatch->cbPatchJump = 0;
3121
3122#ifdef LOG_ENABLED
3123 Log(("Patch code ----------------------------------------------------------\n"));
3124 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3125 Log(("Patch code ends -----------------------------------------------------\n"));
3126#endif
3127 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "TRAP handler");
3128 Log(("Successfully installed Trap Trampoline patch at %RRv\n", pInstrGC));
3129
3130 /*
3131 * Insert into patch to guest lookup tree
3132 */
3133 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3134 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3135 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3136    AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3137
3138 pPatch->uState = PATCH_ENABLED;
3139 return VINF_SUCCESS;
3140
3141failure:
3142 AssertMsgFailed(("Failed to install trap handler trampoline!!\n"));
3143
3144 /* Turn this cli patch into a dummy. */
3145 pPatch->uState = PATCH_REFUSED;
3146 pPatch->pPatchBlockOffset = 0;
3147
3148 /* Give back the patch memory we no longer need */
3149 Assert(orgOffsetPatchMem != (uint32_t)~0);
3150 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3151
3152 return rc;
3153}
3154
3155
3156#ifdef LOG_ENABLED
3157/**
3158 * Check if the instruction is patched as a common idt handler
3159 *
3160 * @returns true or false
3161 * @param pVM The VM to operate on.
3162 * @param pInstrGC   Guest context pointer to the instruction
3163 *
3164 */
3165static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC)
3166{
3167 PPATMPATCHREC pRec;
3168
3169 pRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
3170 if (pRec && pRec->patch.flags & PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT)
3171 return true;
3172 return false;
3173}
3174#endif //LOG_ENABLED
3175
3176
3177/**
3178 * Duplicates a complete function
3179 *
3180 * @returns VBox status code.
3181 * @param pVM The VM to operate on.
3182 * @param pInstrGC   Guest context pointer to the privileged instruction
3183 * @param pPatchRec Patch record
3184 * @param pCacheRec Cache record ptr
3185 *
3186 */
3187static int patmDuplicateFunction(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3188{
3189 PPATCHINFO pPatch = &pPatchRec->patch;
3190 int rc = VERR_PATCHING_REFUSED;
3191 DISCPUSTATE cpu;
3192 uint32_t orgOffsetPatchMem = ~0;
3193 bool fInserted;
3194
3195 Log(("patmDuplicateFunction %RRv\n", pInstrGC));
3196 /* Save original offset (in case of failures later on). */
3197 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3198
3199 /* We will not go on indefinitely with call instruction handling. */
3200 if (pVM->patm.s.ulCallDepth > PATM_MAX_CALL_DEPTH)
3201 {
3202 Log(("patmDuplicateFunction: maximum callback depth reached!!\n"));
3203 return VERR_PATCHING_REFUSED;
3204 }
3205
3206 pVM->patm.s.ulCallDepth++;
3207
3208#ifdef PATM_ENABLE_CALL
3209 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
3210#endif
3211
3212 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
3213
3214 pPatch->nrPatch2GuestRecs = 0;
3215 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3216 pPatch->uCurPatchOffset = 0;
3217
3218 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT;
3219
3220 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3221 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3222 if (RT_FAILURE(rc))
3223 goto failure;
3224
3225#ifdef VBOX_WITH_STATISTICS
3226 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3227 if (RT_FAILURE(rc))
3228 goto failure;
3229#endif
3230
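    /* Recompile the whole guest function into patch memory, starting at pInstrGC.
     * The recompiler follows the control flow; branch targets are resolved afterwards
     * by patmr3SetBranchTargets, which may in turn install additional function patches
     * (hence the call depth guard above). */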
3231 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pCacheRec);
3232 if (rc != VINF_SUCCESS)
3233 {
3234 Log(("PATMR3PatchCli: patmRecompileCodeStream failed with %d\n", rc));
3235 goto failure;
3236 }
3237
3238 //size of patch block
3239 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3240
3241 //update free pointer in patch memory
3242 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3243 /* Round to next 8 byte boundary. */
3244 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3245
3246 pPatch->uState = PATCH_ENABLED;
3247
3248 /*
3249 * Insert into patch to guest lookup tree
3250 */
3251 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3252 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3253 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3254 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3255 if (!fInserted)
3256 {
3257 rc = VERR_PATCHING_REFUSED;
3258 goto failure;
3259 }
3260
3261 /* Note that patmr3SetBranchTargets can install additional patches!! */
3262 rc = patmr3SetBranchTargets(pVM, pPatch);
3263 if (rc != VINF_SUCCESS)
3264 {
3265 Log(("PATMR3PatchCli: patmr3SetBranchTargets failed with %d\n", rc));
3266 goto failure;
3267 }
3268
3269#ifdef LOG_ENABLED
3270 Log(("Patch code ----------------------------------------------------------\n"));
3271 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3272 Log(("Patch code ends -----------------------------------------------------\n"));
3273#endif
3274
3275 Log(("Successfully installed function duplication patch at %RRv\n", pInstrGC));
3276
3277 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3278 pPatch->pTempInfo->nrIllegalInstr = 0;
3279
3280 pVM->patm.s.ulCallDepth--;
3281 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledFunctionPatches);
3282 return VINF_SUCCESS;
3283
3284failure:
3285 if (pPatchRec->CoreOffset.Key)
3286 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3287
3288 patmEmptyTree(pVM, &pPatch->FixupTree);
3289 pPatch->nrFixups = 0;
3290
3291 patmEmptyTree(pVM, &pPatch->JumpTree);
3292 pPatch->nrJumpRecs = 0;
3293
3294 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3295 pPatch->pTempInfo->nrIllegalInstr = 0;
3296
3297 /* Turn this cli patch into a dummy. */
3298 pPatch->uState = PATCH_REFUSED;
3299 pPatch->pPatchBlockOffset = 0;
3300
3301 // Give back the patch memory we no longer need
3302 Assert(orgOffsetPatchMem != (uint32_t)~0);
3303 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3304
3305 pVM->patm.s.ulCallDepth--;
3306 Log(("patmDupicateFunction %RRv failed!!\n", pInstrGC));
3307 return rc;
3308}
3309
3310/**
3311 * Creates trampoline code to jump inside an existing patch
3312 *
3313 * @returns VBox status code.
3314 * @param pVM The VM to operate on.
3315 * @param pInstrGC   Guest context pointer to the privileged instruction
3316 * @param pPatchRec Patch record
3317 *
3318 */
3319static int patmCreateTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3320{
3321 PPATCHINFO pPatch = &pPatchRec->patch;
3322 RTRCPTR pPage, pPatchTargetGC = 0;
3323 uint32_t orgOffsetPatchMem = ~0;
3324 int rc = VERR_PATCHING_REFUSED;
3325 PPATCHINFO pPatchToJmp = NULL; /**< Patch the trampoline jumps to. */
3326 PTRAMPREC pTrampRec = NULL; /**< Trampoline record used to find the patch. */
3327 bool fInserted = false;
3328
3329 Log(("patmCreateTrampoline %RRv\n", pInstrGC));
3330 /* Save original offset (in case of failures later on). */
3331 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3332
3333 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3334 /** @todo we already checked this before */
3335 pPage = pInstrGC & PAGE_BASE_GC_MASK;
3336
3337 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3338 if (pPatchPage)
3339 {
3340 uint32_t i;
3341
3342 for (i=0;i<pPatchPage->cCount;i++)
3343 {
3344 if (pPatchPage->aPatch[i])
3345 {
3346 pPatchToJmp = pPatchPage->aPatch[i];
3347
3348 if ( (pPatchToJmp->flags & PATMFL_DUPLICATE_FUNCTION)
3349 && pPatchToJmp->uState == PATCH_ENABLED)
3350 {
3351 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatchToJmp, pInstrGC);
3352 if (pPatchTargetGC)
3353 {
3354 uint32_t offsetPatch = pPatchTargetGC - pVM->patm.s.pPatchMemGC;
3355 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatchToJmp->Patch2GuestAddrTree, offsetPatch, false);
3356 Assert(pPatchToGuestRec);
3357
3358 pPatchToGuestRec->fJumpTarget = true;
3359 Assert(pPatchTargetGC != pPatchToJmp->pPrivInstrGC);
3360 Log(("patmCreateTrampoline: generating jump to code inside patch at %RRv (patch target %RRv)\n", pPatchToJmp->pPrivInstrGC, pPatchTargetGC));
3361 break;
3362 }
3363 }
3364 }
3365 }
3366 }
3367 AssertReturn(pPatchPage && pPatchTargetGC && pPatchToJmp, VERR_PATCHING_REFUSED);
3368
3369 /*
3370     * Only record the trampoline patch if this is the first patch to the target
3371     * or if we have already recorded other trampoline patches for it.
3372     * The goal is to refuse refreshing function duplicates if the guest
3373     * modifies code after a saved state was loaded, because it is not possible
3374     * to save the relation between trampoline and target without changing the
3375     * saved state version.
3376 */
3377 if ( !(pPatchToJmp->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
3378 || pPatchToJmp->pTrampolinePatchesHead)
3379 {
3380 pPatchToJmp->flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
3381 pTrampRec = (PTRAMPREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pTrampRec));
3382 if (!pTrampRec)
3383 return VERR_NO_MEMORY; /* or better return VERR_PATCHING_REFUSED to let the VM continue? */
3384
3385 pTrampRec->pPatchTrampoline = pPatchRec;
3386 }
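    /* The trampoline body generated below is small: it sets the PATM virtual interrupt
     * flag again (it was cleared before the patched call), optionally emits statistics
     * code, and then jumps straight to the matching location inside the existing
     * function patch (pPatchTargetGC). */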
3387
3388 pPatch->nrPatch2GuestRecs = 0;
3389 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3390 pPatch->uCurPatchOffset = 0;
3391
3392 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3393 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3394 if (RT_FAILURE(rc))
3395 goto failure;
3396
3397#ifdef VBOX_WITH_STATISTICS
3398 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3399 if (RT_FAILURE(rc))
3400 goto failure;
3401#endif
3402
3403 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC);
3404 if (RT_FAILURE(rc))
3405 goto failure;
3406
3407 /*
3408 * Insert into patch to guest lookup tree
3409 */
3410 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3411 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3412 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3413 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3414 if (!fInserted)
3415 {
3416 rc = VERR_PATCHING_REFUSED;
3417 goto failure;
3418 }
3419
3420 /* size of patch block */
3421 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3422
3423 /* Update free pointer in patch memory. */
3424 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3425 /* Round to next 8 byte boundary */
3426 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3427
3428 /* There's no jump from guest to patch code. */
3429 pPatch->cbPatchJump = 0;
3430
3431 /* Enable the patch. */
3432 pPatch->uState = PATCH_ENABLED;
3433 /* We allow this patch to be called as a function. */
3434 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
3435
3436 if (pTrampRec)
3437 {
3438 pTrampRec->pNext = pPatchToJmp->pTrampolinePatchesHead;
3439 pPatchToJmp->pTrampolinePatchesHead = pTrampRec;
3440 }
3441 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledTrampoline);
3442 return VINF_SUCCESS;
3443
3444failure:
3445 if (pPatchRec->CoreOffset.Key)
3446 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3447
3448 patmEmptyTree(pVM, &pPatch->FixupTree);
3449 pPatch->nrFixups = 0;
3450
3451 patmEmptyTree(pVM, &pPatch->JumpTree);
3452 pPatch->nrJumpRecs = 0;
3453
3454 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3455 pPatch->pTempInfo->nrIllegalInstr = 0;
3456
3457 /* Turn this cli patch into a dummy. */
3458 pPatch->uState = PATCH_REFUSED;
3459 pPatch->pPatchBlockOffset = 0;
3460
3461 // Give back the patch memory we no longer need
3462 Assert(orgOffsetPatchMem != (uint32_t)~0);
3463 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3464
3465 if (pTrampRec)
3466 MMR3HeapFree(pTrampRec);
3467
3468 return rc;
3469}
3470
3471
3472/**
3473 * Patch branch target function for call/jump at specified location.
3474 * (in response to a VINF_PATM_DUPLICATE_FUNCTION GC exit reason)
3475 *
3476 * @returns VBox status code.
3477 * @param pVM The VM to operate on.
3478 * @param pCtx Guest context
3479 *
3480 */
3481VMMR3DECL(int) PATMR3DuplicateFunctionRequest(PVM pVM, PCPUMCTX pCtx)
3482{
3483 RTRCPTR pBranchTarget, pPage;
3484 int rc;
3485 RTRCPTR pPatchTargetGC = 0;
3486
3487 pBranchTarget = pCtx->edx;
3488 pBranchTarget = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pBranchTarget);
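    /* Register interface used by the patch code for this request (as consumed below):
     * edx holds the branch target (made flat above), edi points at the patch's branch
     * lookup cache, and eax receives the patch-memory-relative address of the
     * duplicated function, or 0 if the request failed. */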
3489
3490 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3491 pPage = pBranchTarget & PAGE_BASE_GC_MASK;
3492
3493 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3494 if (pPatchPage)
3495 {
3496 uint32_t i;
3497
3498 for (i=0;i<pPatchPage->cCount;i++)
3499 {
3500 if (pPatchPage->aPatch[i])
3501 {
3502 PPATCHINFO pPatch = pPatchPage->aPatch[i];
3503
3504 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
3505 && pPatch->uState == PATCH_ENABLED)
3506 {
3507 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pBranchTarget);
3508 if (pPatchTargetGC)
3509 {
3510 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateUseExisting);
3511 break;
3512 }
3513 }
3514 }
3515 }
3516 }
3517
3518 if (pPatchTargetGC)
3519 {
3520 /* Create a trampoline that also sets PATM_INTERRUPTFLAG. */
3521 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_TRAMPOLINE);
3522 }
3523 else
3524 {
3525 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
3526 }
3527
3528 if (rc == VINF_SUCCESS)
3529 {
3530 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pBranchTarget);
3531 Assert(pPatchTargetGC);
3532 }
3533
3534 if (pPatchTargetGC)
3535 {
3536 pCtx->eax = pPatchTargetGC;
3537 pCtx->eax = pCtx->eax - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC; /* make it relative */
3538 }
3539 else
3540 {
3541 /* We add a dummy entry into the lookup cache so we won't get bombarded with the same requests over and over again. */
3542 pCtx->eax = 0;
3543 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQFailed);
3544 }
3545 Assert(PATMIsPatchGCAddr(pVM, pCtx->edi));
3546 rc = PATMAddBranchToLookupCache(pVM, pCtx->edi, pBranchTarget, pCtx->eax);
3547 AssertRC(rc);
3548
3549 pCtx->eip += PATM_ILLEGAL_INSTR_SIZE;
3550 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQSuccess);
3551 return VINF_SUCCESS;
3552}
3553
3554/**
3555 * Replaces a function call by a call to an existing function duplicate (or jmp -> jmp)
3556 *
3557 * @returns VBox status code.
3558 * @param pVM The VM to operate on.
3559 * @param pCpu Disassembly CPU structure ptr
3560 * @param pInstrGC   Guest context pointer to the privileged instruction
3561 * @param pCacheRec Cache record ptr
3562 *
3563 */
3564static int patmReplaceFunctionCall(PVM pVM, DISCPUSTATE *pCpu, RTRCPTR pInstrGC, PPATMP2GLOOKUPREC pCacheRec)
3565{
3566 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3567 int rc = VERR_PATCHING_REFUSED;
3568 DISCPUSTATE cpu;
3569 RTRCPTR pTargetGC;
3570 PPATMPATCHREC pPatchFunction;
3571 uint32_t cbInstr;
3572 bool disret;
3573
3574 Assert(pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL);
3575 Assert((pCpu->pCurInstr->opcode == OP_CALL || pCpu->pCurInstr->opcode == OP_JMP) && pCpu->cbInstr == SIZEOF_NEARJUMP32);
3576
3577 if ((pCpu->pCurInstr->opcode != OP_CALL && pCpu->pCurInstr->opcode != OP_JMP) || pCpu->cbInstr != SIZEOF_NEARJUMP32)
3578 {
3579 rc = VERR_PATCHING_REFUSED;
3580 goto failure;
3581 }
3582
3583 pTargetGC = PATMResolveBranch(pCpu, pInstrGC);
3584 if (pTargetGC == 0)
3585 {
3586 Log(("We don't support far jumps here!! (%08X)\n", pCpu->param1.fUse));
3587 rc = VERR_PATCHING_REFUSED;
3588 goto failure;
3589 }
3590
3591 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3592 if (pPatchFunction == NULL)
3593 {
3594 for(;;)
3595 {
3596 /* It could be an indirect call (call -> jmp dest).
3597 * Note that it's dangerous to assume the jump will never change...
3598 */
3599 uint8_t *pTmpInstrHC;
3600
3601 pTmpInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pTargetGC);
3602 Assert(pTmpInstrHC);
3603 if (pTmpInstrHC == 0)
3604 break;
3605
3606 disret = patmR3DisInstr(pVM, pPatch, pTargetGC, pTmpInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
3607 if (disret == false || cpu.pCurInstr->opcode != OP_JMP)
3608 break;
3609
3610 pTargetGC = PATMResolveBranch(&cpu, pTargetGC);
3611 if (pTargetGC == 0)
3612 {
3613 break;
3614 }
3615
3616 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3617 break;
3618 }
3619 if (pPatchFunction == 0)
3620 {
3621 AssertMsgFailed(("Unable to find duplicate function %RRv\n", pTargetGC));
3622 rc = VERR_PATCHING_REFUSED;
3623 goto failure;
3624 }
3625 }
3626
3627 // make a copy of the guest code bytes that will be overwritten
3628 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
3629
3630 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3631 AssertRC(rc);
3632
3633 /* Now replace the original call in the guest code */
3634 rc = patmGenCallToPatch(pVM, pPatch, PATCHCODE_PTR_GC(&pPatchFunction->patch), pCacheRec, true);
3635 AssertRC(rc);
3636 if (RT_FAILURE(rc))
3637 goto failure;
3638
3639 /* Lowest and highest address for write monitoring. */
3640 pPatch->pInstrGCLowest = pInstrGC;
3641 pPatch->pInstrGCHighest = pInstrGC + pCpu->cbInstr;
3642 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "Call");
3643
3644 Log(("Successfully installed function replacement patch at %RRv\n", pInstrGC));
3645
3646 pPatch->uState = PATCH_ENABLED;
3647 return VINF_SUCCESS;
3648
3649failure:
3650 /* Turn this patch into a dummy. */
3651 pPatch->uState = PATCH_REFUSED;
3652
3653 return rc;
3654}
3655
3656/**
3657 * Replace the address in an MMIO instruction with the cached version.
3658 *
3659 * @returns VBox status code.
3660 * @param pVM The VM to operate on.
3661 * @param pInstrGC   Guest context pointer to the privileged instruction
3662 * @param pCpu Disassembly CPU structure ptr
3663 * @param pCacheRec Cache record ptr
3664 *
3665 * @note returns failure if patching is not allowed or possible
3666 *
3667 */
3668static int patmPatchMMIOInstr(PVM pVM, RTRCPTR pInstrGC, DISCPUSTATE *pCpu, PPATMP2GLOOKUPREC pCacheRec)
3669{
3670 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3671 uint8_t *pPB;
3672 int rc = VERR_PATCHING_REFUSED;
3673
3674 Assert(pVM->patm.s.mmio.pCachedData);
3675 if (!pVM->patm.s.mmio.pCachedData)
3676 goto failure;
3677
3678 if (pCpu->param2.fUse != DISUSE_DISPLACEMENT32)
3679 goto failure;
3680
3681 pPB = PATMGCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
3682 if (pPB == 0)
3683 goto failure;
3684
3685 /* Add relocation record for cached data access. */
3686 if (patmPatchAddReloc32(pVM, pPatch, &pPB[pCpu->cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE, pPatch->pPrivInstrGC, pVM->patm.s.mmio.pCachedData) != VINF_SUCCESS)
3687 {
3688 Log(("Relocation failed for cached mmio address!!\n"));
3689 return VERR_PATCHING_REFUSED;
3690 }
3691 PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "MMIO patch old instruction:", "");
3692
3693 /* Save original instruction. */
3694 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3695 AssertRC(rc);
3696
3697 pPatch->cbPatchJump = pPatch->cbPrivInstr; /* bit of a misnomer in this case; size of replacement instruction. */
3698
3699 /* Replace address with that of the cached item. */
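    /* Assumption: the 32-bit displacement occupies the final 4 bytes of the instruction
     * (no trailing immediate); the DISUSE_DISPLACEMENT32 check above is presumably what
     * guarantees this for the instructions we accept here. */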
3700 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pInstrGC + pCpu->cbInstr - sizeof(RTRCPTR), &pVM->patm.s.mmio.pCachedData, sizeof(RTRCPTR));
3701 AssertRC(rc);
3702 if (RT_FAILURE(rc))
3703 {
3704 goto failure;
3705 }
3706
3707 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "MMIO");
3708 pVM->patm.s.mmio.pCachedData = 0;
3709 pVM->patm.s.mmio.GCPhys = 0;
3710 pPatch->uState = PATCH_ENABLED;
3711 return VINF_SUCCESS;
3712
3713failure:
3714 /* Turn this patch into a dummy. */
3715 pPatch->uState = PATCH_REFUSED;
3716
3717 return rc;
3718}
3719
3720
3721/**
3722 * Replace the address in an MMIO instruction with the cached version. (instruction is part of an existing patch)
3723 *
3724 * @returns VBox status code.
3725 * @param pVM The VM to operate on.
3726 * @param pInstrGC   Guest context pointer to the privileged instruction
3727 * @param pPatch Patch record
3728 *
3729 * @note returns failure if patching is not allowed or possible
3730 *
3731 */
3732static int patmPatchPATMMMIOInstr(PVM pVM, RTRCPTR pInstrGC, PPATCHINFO pPatch)
3733{
3734 DISCPUSTATE cpu;
3735 uint32_t cbInstr;
3736 bool disret;
3737 uint8_t *pInstrHC;
3738
3739 AssertReturn(pVM->patm.s.mmio.pCachedData, VERR_INVALID_PARAMETER);
3740
3741 /* Convert GC to HC address. */
3742 pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pInstrGC);
3743 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
3744
3745 /* Disassemble mmio instruction. */
3746 disret = patmR3DisInstrNoStrOpMode(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE,
3747 &cpu, &cbInstr);
3748 if (disret == false)
3749 {
3750 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
3751 return VERR_PATCHING_REFUSED;
3752 }
3753
3754 AssertMsg(cbInstr <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", cbInstr));
3755 if (cbInstr > MAX_INSTR_SIZE)
3756 return VERR_PATCHING_REFUSED;
3757 if (cpu.param2.fUse != DISUSE_DISPLACEMENT32)
3758 return VERR_PATCHING_REFUSED;
3759
3760 /* Add relocation record for cached data access. */
3761 if (patmPatchAddReloc32(pVM, pPatch, &pInstrHC[cpu.cbInstr - sizeof(RTRCPTR)], FIXUP_ABSOLUTE) != VINF_SUCCESS)
3762 {
3763 Log(("Relocation failed for cached mmio address!!\n"));
3764 return VERR_PATCHING_REFUSED;
3765 }
3766 /* Replace address with that of the cached item. */
3767 *(RTRCPTR *)&pInstrHC[cpu.cbInstr - sizeof(RTRCPTR)] = pVM->patm.s.mmio.pCachedData;
3768
3769 /* Lowest and highest address for write monitoring. */
3770 pPatch->pInstrGCLowest = pInstrGC;
3771 pPatch->pInstrGCHighest = pInstrGC + cpu.cbInstr;
3772
3773 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, "MMIO");
3774 pVM->patm.s.mmio.pCachedData = 0;
3775 pVM->patm.s.mmio.GCPhys = 0;
3776 return VINF_SUCCESS;
3777}
3778
3779/**
3780 * Activates an int3 patch
3781 *
3782 * @returns VBox status code.
3783 * @param pVM The VM to operate on.
3784 * @param pPatch Patch record
3785 */
3786static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3787{
3788 uint8_t bASMInt3 = 0xCC;
3789 int rc;
3790
3791 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3792 Assert(pPatch->uState != PATCH_ENABLED);
3793
3794 /* Replace first opcode byte with 'int 3'. */
3795 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, &bASMInt3, sizeof(bASMInt3));
3796 AssertRC(rc);
3797
3798 pPatch->cbPatchJump = sizeof(bASMInt3);
3799
3800 return rc;
3801}
3802
3803/**
3804 * Deactivates an int3 patch
3805 *
3806 * @returns VBox status code.
3807 * @param pVM The VM to operate on.
3808 * @param pPatch Patch record
3809 */
3810static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3811{
3812 uint8_t ASMInt3 = 0xCC;
3813 int rc;
3814
3815 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3816 Assert(pPatch->uState == PATCH_ENABLED || pPatch->uState == PATCH_DIRTY);
3817
3818 /* Restore first opcode byte. */
3819 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, sizeof(ASMInt3));
3820 AssertRC(rc);
3821 return rc;
3822}
3823
3824/**
3825 * Replace an instruction with a breakpoint (0xCC), that is handled dynamically
3826 * in the raw-mode context.
3827 *
3828 * @returns VBox status code.
3829 * @param pVM The VM to operate on.
3830 * @param pInstrGC   Guest context pointer to the privileged instruction
3831 * @param pInstrHC   Host context pointer to the privileged instruction
3832 * @param pCpu Disassembly CPU structure ptr
3833 * @param pPatch Patch record
3834 *
3835 * @note returns failure if patching is not allowed or possible
3836 *
3837 */
3838VMMR3DECL(int) PATMR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu,
3839 PPATCHINFO pPatch)
3840{
3841 uint8_t bASMInt3 = 0xCC;
3842 int rc;
3843
3844    /* Note: Do not use patch memory here! It might be called during patch installation too. */
3845 PATM_LOG_PATCH_INSTR(pVM, pPatch, PATMREAD_ORGCODE, "PATMR3PatchInstrInt3:", "");
3846
3847 /* Save the original instruction. */
3848 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3849 AssertRC(rc);
3850 pPatch->cbPatchJump = sizeof(bASMInt3); /* bit of a misnomer in this case; size of replacement instruction. */
3851
3852 pPatch->flags |= PATMFL_INT3_REPLACEMENT;
3853
3854 /* Replace first opcode byte with 'int 3'. */
3855 rc = patmActivateInt3Patch(pVM, pPatch);
3856 if (RT_FAILURE(rc))
3857 goto failure;
3858
3859 /* Lowest and highest address for write monitoring. */
3860 pPatch->pInstrGCLowest = pInstrGC;
3861 pPatch->pInstrGCHighest = pInstrGC + pCpu->cbInstr;
3862
3863 pPatch->uState = PATCH_ENABLED;
3864 return VINF_SUCCESS;
3865
3866failure:
3867 /* Turn this patch into a dummy. */
3868 return VERR_PATCHING_REFUSED;
3869}
3870
3871#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
3872/**
3873 * Patch a jump instruction at specified location
3874 *
3875 * @returns VBox status code.
3876 * @param pVM The VM to operate on.
3877 * @param pInstrGC   Guest context pointer to the privileged instruction
3878 * @param pInstrHC   Host context pointer to the privileged instruction
3879 * @param pCpu Disassembly CPU structure ptr
3880 * @param pPatchRec Patch record
3881 *
3882 * @note returns failure if patching is not allowed or possible
3883 *
3884 */
3885int patmPatchJump(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATMPATCHREC pPatchRec)
3886{
3887 PPATCHINFO pPatch = &pPatchRec->patch;
3888 int rc = VERR_PATCHING_REFUSED;
3889
3890 pPatch->pPatchBlockOffset = 0; /* doesn't use patch memory */
3891 pPatch->uCurPatchOffset = 0;
3892 pPatch->cbPatchBlockSize = 0;
3893 pPatch->flags |= PATMFL_SINGLE_INSTRUCTION;
3894
3895 /*
3896 * Instruction replacements such as these should never be interrupted. I've added code to EM.cpp to
3897 * make sure this never happens. (unless a trap is triggered (intentionally or not))
3898 */
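    /* Only relative near jumps (conditional or unconditional) are handled here; the cases
     * below also refuse any jump whose in-place replacement would cross a page boundary,
     * since the guest bytes are overwritten directly. */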
3899 switch (pCpu->pCurInstr->opcode)
3900 {
3901 case OP_JO:
3902 case OP_JNO:
3903 case OP_JC:
3904 case OP_JNC:
3905 case OP_JE:
3906 case OP_JNE:
3907 case OP_JBE:
3908 case OP_JNBE:
3909 case OP_JS:
3910 case OP_JNS:
3911 case OP_JP:
3912 case OP_JNP:
3913 case OP_JL:
3914 case OP_JNL:
3915 case OP_JLE:
3916 case OP_JNLE:
3917 case OP_JMP:
3918 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
3919 Assert(pCpu->param1.fUse & DISUSE_IMMEDIATE32_REL);
3920 if (!(pCpu->param1.fUse & DISUSE_IMMEDIATE32_REL))
3921 goto failure;
3922
3923 Assert(pCpu->cbInstr == SIZEOF_NEARJUMP32 || pCpu->cbInstr == SIZEOF_NEAR_COND_JUMP32);
3924 if (pCpu->cbInstr != SIZEOF_NEARJUMP32 && pCpu->cbInstr != SIZEOF_NEAR_COND_JUMP32)
3925 goto failure;
3926
3927 if (PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + pCpu->cbInstr))
3928 {
3929 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
3930 AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
3931 rc = VERR_PATCHING_REFUSED;
3932 goto failure;
3933 }
3934
3935 break;
3936
3937 default:
3938 goto failure;
3939 }
3940
3941 // make a copy of the guest code bytes that will be overwritten
3942 Assert(pCpu->cbInstr <= sizeof(pPatch->aPrivInstr));
3943 Assert(pCpu->cbInstr >= SIZEOF_NEARJUMP32);
3944 pPatch->cbPatchJump = pCpu->cbInstr;
3945
3946 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3947 AssertRC(rc);
3948
3949 /* Now insert a jump in the guest code. */
3950 /*
3951 * A conflict jump patch needs to be treated differently; we'll just replace the relative jump address with one that
3952 * references the target instruction in the conflict patch.
3953 */
3954 RTRCPTR pJmpDest = PATMR3GuestGCPtrToPatchGCPtr(pVM, pInstrGC + pCpu->cbInstr + (int32_t)pCpu->param1.parval);
3955
3956 AssertMsg(pJmpDest, ("PATMR3GuestGCPtrToPatchGCPtr failed for %RRv\n", pInstrGC + pCpu->cbInstr + (int32_t)pCpu->param1.parval));
3957 pPatch->pPatchJumpDestGC = pJmpDest;
3958
3959 PATMP2GLOOKUPREC cacheRec;
3960 RT_ZERO(cacheRec);
3961 cacheRec.pPatch = pPatch;
3962
3963    rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
3964 /* Free leftover lock if any. */
3965 if (cacheRec.Lock.pvMap)
3966 {
3967 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
3968 cacheRec.Lock.pvMap = NULL;
3969 }
3970 AssertRC(rc);
3971 if (RT_FAILURE(rc))
3972 goto failure;
3973
3974 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
3975
3976 PATM_LOG_ORG_PATCH_INSTR(pVM, pPatch, patmGetInstructionString(pPatch->opcode, pPatch->flags));
3977 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
3978
3979 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledJump);
3980
3981 /* Lowest and highest address for write monitoring. */
3982 pPatch->pInstrGCLowest = pInstrGC;
3983 pPatch->pInstrGCHighest = pInstrGC + pPatch->cbPatchJump;
3984
3985 pPatch->uState = PATCH_ENABLED;
3986 return VINF_SUCCESS;
3987
3988failure:
3989 /* Turn this cli patch into a dummy. */
3990 pPatch->uState = PATCH_REFUSED;
3991
3992 return rc;
3993}
3994#endif /* PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES */
3995
3996
3997/**
3998 * Gives hint to PATM about supervisor guest instructions
3999 *
4000 * @returns VBox status code.
4001 * @param pVM The VM to operate on.
4002 * @param pInstrGC   Guest context pointer to the privileged instruction
4003 * @param flags Patch flags
4004 */
4005VMMR3DECL(int) PATMR3AddHint(PVM pVM, RTRCPTR pInstrGC, uint32_t flags)
4006{
4007 Assert(pInstrGC);
4008 Assert(flags == PATMFL_CODE32);
4009
4010 Log(("PATMR3AddHint %RRv\n", pInstrGC));
4011 return PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_INSTR_HINT);
4012}
4013
4014/**
4015 * Patch privileged instruction at specified location
4016 *
4017 * @returns VBox status code.
4018 * @param pVM The VM to operate on.
4019 * @param pInstrGC   Guest context pointer to the privileged instruction (0:32 flat address)
4020 * @param flags Patch flags
4021 *
4022 * @note returns failure if patching is not allowed or possible
4023 */
4024VMMR3DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
4025{
4026 DISCPUSTATE cpu;
4027 R3PTRTYPE(uint8_t *) pInstrHC;
4028 uint32_t cbInstr;
4029 PPATMPATCHREC pPatchRec;
4030 PCPUMCTX pCtx = 0;
4031 bool disret;
4032 int rc;
4033 PVMCPU pVCpu = VMMGetCpu0(pVM);
4034
4035 if ( !pVM
4036 || pInstrGC == 0
4037 || (flags & ~(PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_SYSENTER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_REPLACE_FUNCTION_CALL|PATMFL_GUEST_SPECIFIC|PATMFL_INT3_REPLACEMENT|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_MMIO_ACCESS|PATMFL_TRAMPOLINE|PATMFL_INSTR_HINT|PATMFL_JUMP_CONFLICT)))
4038 {
4039 AssertFailed();
4040 return VERR_INVALID_PARAMETER;
4041 }
4042
4043 if (PATMIsEnabled(pVM) == false)
4044 return VERR_PATCHING_REFUSED;
4045
4046 /* Test for patch conflict only with patches that actually change guest code. */
4047 if (!(flags & (PATMFL_GUEST_SPECIFIC|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAMPOLINE)))
4048 {
4049 PPATCHINFO pConflictPatch = PATMFindActivePatchByEntrypoint(pVM, pInstrGC);
4050 AssertReleaseMsg(pConflictPatch == 0, ("Unable to patch overwritten instruction at %RRv (%RRv)\n", pInstrGC, pConflictPatch->pPrivInstrGC));
4051 if (pConflictPatch != 0)
4052 return VERR_PATCHING_REFUSED;
4053 }
4054
4055 if (!(flags & PATMFL_CODE32))
4056 {
4057 /** @todo Only 32 bits code right now */
4058 AssertMsgFailed(("PATMR3InstallPatch: We don't support 16 bits code at this moment!!\n"));
4059 return VERR_NOT_IMPLEMENTED;
4060 }
4061
4062 /* We ran out of patch memory; don't bother anymore. */
4063 if (pVM->patm.s.fOutOfMemory == true)
4064 return VERR_PATCHING_REFUSED;
4065
4066 /* Make sure the code selector is wide open; otherwise refuse. */
4067 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
4068 if (CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)) == 0)
4069 {
4070 RTRCPTR pInstrGCFlat = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pInstrGC);
4071 if (pInstrGCFlat != pInstrGC)
4072 {
4073 Log(("PATMR3InstallPatch: code selector not wide open: %04x:%RRv != %RRv eflags=%08x\n", pCtx->cs, pInstrGCFlat, pInstrGC, pCtx->eflags.u32));
4074 return VERR_PATCHING_REFUSED;
4075 }
4076 }
4077
4078    /* Note: the OpenBSD specific check will break if we allow additional patches to be installed (int 3) */
4079 if (!(flags & PATMFL_GUEST_SPECIFIC))
4080 {
4081 /* New code. Make sure CSAM has a go at it first. */
4082 CSAMR3CheckCode(pVM, pInstrGC);
4083 }
4084
4085 /* Note: obsolete */
4086 if ( PATMIsPatchGCAddr(pVM, pInstrGC)
4087 && (flags & PATMFL_MMIO_ACCESS))
4088 {
4089 RTRCUINTPTR offset;
4090 void *pvPatchCoreOffset;
4091
4092 /* Find the patch record. */
4093 offset = pInstrGC - pVM->patm.s.pPatchMemGC;
4094 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
4095 if (pvPatchCoreOffset == NULL)
4096 {
4097 AssertMsgFailed(("PATMR3InstallPatch: patch not found at address %RRv!!\n", pInstrGC));
4098 return VERR_PATCH_NOT_FOUND; //fatal error
4099 }
4100 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
4101
4102 return patmPatchPATMMMIOInstr(pVM, pInstrGC, &pPatchRec->patch);
4103 }
4104
4105 AssertReturn(!PATMIsPatchGCAddr(pVM, pInstrGC), VERR_PATCHING_REFUSED);
4106
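    /* If a patch record already exists for this address: re-enable it if it was merely
     * disabled (or installed as a hint), remove and re-install it if it was enabled or
     * dirty (the guest code was probably overwritten), and refuse if a previous attempt
     * was already refused or found unusable. */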
4107 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4108 if (pPatchRec)
4109 {
4110 Assert(!(flags & PATMFL_TRAMPOLINE));
4111
4112 /* Hints about existing patches are ignored. */
4113 if (flags & PATMFL_INSTR_HINT)
4114 return VERR_PATCHING_REFUSED;
4115
4116 if (pPatchRec->patch.uState == PATCH_DISABLE_PENDING)
4117 {
4118 Log(("PATMR3InstallPatch: disable operation is pending for patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
4119 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
4120 Assert(pPatchRec->patch.uState == PATCH_DISABLED);
4121 }
4122
4123 if (pPatchRec->patch.uState == PATCH_DISABLED)
4124 {
4125 /* A patch, for which we previously received a hint, will be enabled and turned into a normal patch. */
4126 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4127 {
4128 Log(("Enabling HINTED patch %RRv\n", pInstrGC));
4129 pPatchRec->patch.flags &= ~PATMFL_INSTR_HINT;
4130 }
4131 else
4132 Log(("Enabling patch %RRv again\n", pInstrGC));
4133
4134 /** @todo we shouldn't disable and enable patches too often (it's relatively cheap, but pointless if it always happens) */
4135 rc = PATMR3EnablePatch(pVM, pInstrGC);
4136 if (RT_SUCCESS(rc))
4137 return VWRN_PATCH_ENABLED;
4138
4139 return rc;
4140 }
4141 if ( pPatchRec->patch.uState == PATCH_ENABLED
4142 || pPatchRec->patch.uState == PATCH_DIRTY)
4143 {
4144 /*
4145 * The patch might have been overwritten.
4146 */
4147 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
4148 if (pPatchRec->patch.uState != PATCH_REFUSED && pPatchRec->patch.uState != PATCH_UNUSABLE)
4149 {
4150 /* Patch must have been overwritten; remove it and pretend nothing happened. */
4151 Log(("Patch an existing patched instruction?!? (%RRv)\n", pInstrGC));
4152 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_MMIO_ACCESS|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
4153 {
4154 if (flags & PATMFL_IDTHANDLER)
4155 pPatchRec->patch.flags |= (flags & (PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER|PATMFL_INTHANDLER)); /* update the type */
4156
4157 return VERR_PATM_ALREADY_PATCHED; /* already done once */
4158 }
4159 }
4160 rc = PATMR3RemovePatch(pVM, pInstrGC);
4161 if (RT_FAILURE(rc))
4162 return VERR_PATCHING_REFUSED;
4163 }
4164 else
4165 {
4166 AssertMsg(pPatchRec->patch.uState == PATCH_REFUSED || pPatchRec->patch.uState == PATCH_UNUSABLE, ("Patch an existing patched instruction?!? (%RRv, state=%d)\n", pInstrGC, pPatchRec->patch.uState));
4167 /* already tried it once! */
4168 return VERR_PATCHING_REFUSED;
4169 }
4170 }
4171
4172 RTGCPHYS GCPhys;
4173 rc = PGMGstGetPage(pVCpu, pInstrGC, NULL, &GCPhys);
4174 if (rc != VINF_SUCCESS)
4175 {
4176 Log(("PGMGstGetPage failed with %Rrc\n", rc));
4177 return rc;
4178 }
4179 /* Disallow patching instructions inside ROM code; complete function duplication is allowed though. */
4180 if ( !(flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAMPOLINE))
4181 && !PGMPhysIsGCPhysNormal(pVM, GCPhys))
4182 {
4183 Log(("Code at %RGv (phys %RGp) is in a ROM, MMIO or invalid page - refused\n", pInstrGC, GCPhys));
4184 return VERR_PATCHING_REFUSED;
4185 }
4186
4187 /* Initialize cache record for guest address translations. */
4188 bool fInserted;
4189 PATMP2GLOOKUPREC cacheRec;
4190 RT_ZERO(cacheRec);
4191
4192 pInstrHC = PATMGCVirtToHCVirt(pVM, &cacheRec, pInstrGC);
4193 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
4194
4195 /* Allocate patch record. */
4196 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
4197 if (RT_FAILURE(rc))
4198 {
4199 Log(("Out of memory!!!!\n"));
4200 return VERR_NO_MEMORY;
4201 }
4202 pPatchRec->Core.Key = pInstrGC;
4203 pPatchRec->patch.uState = PATCH_REFUSED; /* default value */
4204 /* Insert patch record into the lookup tree. */
4205 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
4206 Assert(fInserted);
4207
4208 pPatchRec->patch.pPrivInstrGC = pInstrGC;
4209 pPatchRec->patch.flags = flags;
4210 pPatchRec->patch.uOpMode = (flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT;
4211 pPatchRec->patch.pTrampolinePatchesHead = NULL;
4212
4213 pPatchRec->patch.pInstrGCLowest = pInstrGC;
4214 pPatchRec->patch.pInstrGCHighest = pInstrGC;
4215
4216 if (!(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION | PATMFL_IDTHANDLER | PATMFL_SYSENTER | PATMFL_TRAMPOLINE)))
4217 {
4218 /*
4219 * Close proximity to an unusable patch is a possible hint that this patch would turn out to be dangerous too!
4220 */
4221 PPATMPATCHREC pPatchNear = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, (pInstrGC + SIZEOF_NEARJUMP32 - 1), false);
4222 if (pPatchNear)
4223 {
4224 if (pPatchNear->patch.uState == PATCH_UNUSABLE && pInstrGC < pPatchNear->patch.pPrivInstrGC && pInstrGC + SIZEOF_NEARJUMP32 > pPatchNear->patch.pPrivInstrGC)
4225 {
4226 Log(("Dangerous patch; would overwrite the unusable patch at %RRv\n", pPatchNear->patch.pPrivInstrGC));
4227
4228 pPatchRec->patch.uState = PATCH_UNUSABLE;
4229 /*
4230             * Leave the new patch record in place (marked unusable) to prevent us from checking it over and over again.
4231 */
4232 return VERR_PATCHING_REFUSED;
4233 }
4234 }
4235 }
4236
4237 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
4238 if (pPatchRec->patch.pTempInfo == 0)
4239 {
4240 Log(("Out of memory!!!!\n"));
4241 return VERR_NO_MEMORY;
4242 }
4243
4244 disret = patmR3DisInstrNoStrOpMode(pVM, &pPatchRec->patch, pInstrGC, NULL, PATMREAD_ORGCODE, &cpu, &cbInstr);
4245 if (disret == false)
4246 {
4247 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
4248 return VERR_PATCHING_REFUSED;
4249 }
4250
4251 AssertMsg(cbInstr <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", cbInstr));
4252 if (cbInstr > MAX_INSTR_SIZE)
4253 return VERR_PATCHING_REFUSED;
4254
4255 pPatchRec->patch.cbPrivInstr = cbInstr;
4256 pPatchRec->patch.opcode = cpu.pCurInstr->opcode;
4257
4258 /* Restricted hinting for now. */
4259 Assert(!(flags & PATMFL_INSTR_HINT) || cpu.pCurInstr->opcode == OP_CLI);
4260
4261 /* Initialize cache record patch pointer. */
4262 cacheRec.pPatch = &pPatchRec->patch;
4263
4264 /* Allocate statistics slot */
4265 if (pVM->patm.s.uCurrentPatchIdx < PATM_STAT_MAX_COUNTERS)
4266 {
4267 pPatchRec->patch.uPatchIdx = pVM->patm.s.uCurrentPatchIdx++;
4268 }
4269 else
4270 {
4271 Log(("WARNING: Patch index wrap around!!\n"));
4272 pPatchRec->patch.uPatchIdx = PATM_STAT_INDEX_DUMMY;
4273 }
4274
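    /* Dispatch on the patch type: each flag below selects a specialized installer
     * (trap trampoline, function duplication, trampoline, function call replacement,
     * int3 replacement, MMIO cache patch, IDT/sysenter handler, guest specific patch,
     * or the generic block patcher for everything else). */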
4275 if (pPatchRec->patch.flags & PATMFL_TRAPHANDLER)
4276 {
4277 rc = patmInstallTrapTrampoline(pVM, pInstrGC, pPatchRec, &cacheRec);
4278 }
4279 else
4280 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION ))
4281 {
4282 rc = patmDuplicateFunction(pVM, pInstrGC, pPatchRec, &cacheRec);
4283 }
4284 else
4285 if (pPatchRec->patch.flags & PATMFL_TRAMPOLINE)
4286 {
4287 rc = patmCreateTrampoline(pVM, pInstrGC, pPatchRec);
4288 }
4289 else
4290 if (pPatchRec->patch.flags & PATMFL_REPLACE_FUNCTION_CALL)
4291 {
4292 rc = patmReplaceFunctionCall(pVM, &cpu, pInstrGC, &cacheRec);
4293 }
4294 else
4295 if (pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT)
4296 {
4297 rc = PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4298 }
4299 else
4300 if (pPatchRec->patch.flags & PATMFL_MMIO_ACCESS)
4301 {
4302 rc = patmPatchMMIOInstr(pVM, pInstrGC, &cpu, &cacheRec);
4303 }
4304 else
4305 if (pPatchRec->patch.flags & (PATMFL_IDTHANDLER|PATMFL_SYSENTER))
4306 {
4307 if (pPatchRec->patch.flags & PATMFL_SYSENTER)
4308 pPatchRec->patch.flags |= PATMFL_IDTHANDLER; /* we treat a sysenter handler as an IDT handler */
4309
4310 rc = patmIdtHandler(pVM, pInstrGC, cbInstr, pPatchRec, &cacheRec);
4311#ifdef VBOX_WITH_STATISTICS
4312 if ( rc == VINF_SUCCESS
4313 && (pPatchRec->patch.flags & PATMFL_SYSENTER))
4314 {
4315 pVM->patm.s.uSysEnterPatchIdx = pPatchRec->patch.uPatchIdx;
4316 }
4317#endif
4318 }
4319 else
4320 if (pPatchRec->patch.flags & PATMFL_GUEST_SPECIFIC)
4321 {
4322 switch (cpu.pCurInstr->opcode)
4323 {
4324 case OP_SYSENTER:
4325 case OP_PUSH:
4326 rc = PATMInstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4327 if (rc == VINF_SUCCESS)
4328 {
4329                Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4331 return rc;
4332 }
4333 break;
4334
4335 default:
4336 rc = VERR_NOT_IMPLEMENTED;
4337 break;
4338 }
4339 }
4340 else
4341 {
4342 switch (cpu.pCurInstr->opcode)
4343 {
4344 case OP_SYSENTER:
4345 rc = PATMInstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4346 if (rc == VINF_SUCCESS)
4347 {
4348 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4349 return VINF_SUCCESS;
4350 }
4351 break;
4352
4353#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
4354 case OP_JO:
4355 case OP_JNO:
4356 case OP_JC:
4357 case OP_JNC:
4358 case OP_JE:
4359 case OP_JNE:
4360 case OP_JBE:
4361 case OP_JNBE:
4362 case OP_JS:
4363 case OP_JNS:
4364 case OP_JP:
4365 case OP_JNP:
4366 case OP_JL:
4367 case OP_JNL:
4368 case OP_JLE:
4369 case OP_JNLE:
4370 case OP_JECXZ:
4371 case OP_LOOP:
4372 case OP_LOOPNE:
4373 case OP_LOOPE:
4374 case OP_JMP:
4375 if (pPatchRec->patch.flags & PATMFL_JUMP_CONFLICT)
4376 {
4377 rc = patmPatchJump(pVM, pInstrGC, pInstrHC, &cpu, pPatchRec);
4378 break;
4379 }
4380 return VERR_NOT_IMPLEMENTED;
4381#endif
4382
4383 case OP_PUSHF:
4384 case OP_CLI:
4385 Log(("PATMR3InstallPatch %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4386 rc = PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, cpu.pCurInstr->opcode, cbInstr, pPatchRec);
4387 break;
4388
4389 case OP_STR:
4390 case OP_SGDT:
4391 case OP_SLDT:
4392 case OP_SIDT:
4393 case OP_CPUID:
4394 case OP_LSL:
4395 case OP_LAR:
4396 case OP_SMSW:
4397 case OP_VERW:
4398 case OP_VERR:
4399 case OP_IRET:
4400 rc = PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4401 break;
4402
4403 default:
4404 return VERR_NOT_IMPLEMENTED;
4405 }
4406 }
4407
4408 if (rc != VINF_SUCCESS)
4409 {
4410 if (pPatchRec && pPatchRec->patch.nrPatch2GuestRecs)
4411 {
4412 patmEmptyTreeU32(pVM, &pPatchRec->patch.Patch2GuestAddrTree);
4413 pPatchRec->patch.nrPatch2GuestRecs = 0;
4414 }
4415 pVM->patm.s.uCurrentPatchIdx--;
4416 }
4417 else
4418 {
4419 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
4420 AssertRCReturn(rc, rc);
4421
4422        /* Keep track of the upper and lower boundaries of patched instructions */
4423 if (pPatchRec->patch.pInstrGCLowest < pVM->patm.s.pPatchedInstrGCLowest)
4424 pVM->patm.s.pPatchedInstrGCLowest = pPatchRec->patch.pInstrGCLowest;
4425 if (pPatchRec->patch.pInstrGCHighest > pVM->patm.s.pPatchedInstrGCHighest)
4426 pVM->patm.s.pPatchedInstrGCHighest = pPatchRec->patch.pInstrGCHighest;
4427
4428 Log(("Patch lowest %RRv highest %RRv\n", pPatchRec->patch.pInstrGCLowest, pPatchRec->patch.pInstrGCHighest));
4429 Log(("Global lowest %RRv highest %RRv\n", pVM->patm.s.pPatchedInstrGCLowest, pVM->patm.s.pPatchedInstrGCHighest));
4430
4431 STAM_COUNTER_ADD(&pVM->patm.s.StatInstalled, 1);
4432 STAM_COUNTER_ADD(&pVM->patm.s.StatPATMMemoryUsed, pPatchRec->patch.cbPatchBlockSize);
4433
4434 rc = VINF_SUCCESS;
4435
4436        /* Patch hints are not enabled by default; they are only enabled when actually encountered. */
4437 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4438 {
4439 rc = PATMR3DisablePatch(pVM, pInstrGC);
4440 AssertRCReturn(rc, rc);
4441 }
4442
4443#ifdef VBOX_WITH_STATISTICS
4444 /* Register statistics counter */
4445 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
4446 {
4447 STAMR3RegisterCallback(pVM, &pPatchRec->patch, STAMVISIBILITY_NOT_GUI, STAMUNIT_GOOD_BAD, patmResetStat, patmPrintStat, "Patch statistics",
4448 "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4449#ifndef DEBUG_sandervl
4450 /* Full breakdown for the GUI. */
4451 STAMR3RegisterF(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx], STAMTYPE_RATIO_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_GOOD_BAD, PATMPatchType(pVM, &pPatchRec->patch),
4452 "/PATM/Stats/PatchBD/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4453 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchBlockSize,STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchBlockSize", pPatchRec->patch.pPrivInstrGC);
4454 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchJump, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchJump", pPatchRec->patch.pPrivInstrGC);
4455 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPrivInstr, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPrivInstr", pPatchRec->patch.pPrivInstrGC);
4456 STAMR3RegisterF(pVM, &pPatchRec->patch.cCodeWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cCodeWrites", pPatchRec->patch.pPrivInstrGC);
4457 STAMR3RegisterF(pVM, &pPatchRec->patch.cInvalidWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cInvalidWrites", pPatchRec->patch.pPrivInstrGC);
4458 STAMR3RegisterF(pVM, &pPatchRec->patch.cTraps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cTraps", pPatchRec->patch.pPrivInstrGC);
4459 STAMR3RegisterF(pVM, &pPatchRec->patch.flags, STAMTYPE_X32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/flags", pPatchRec->patch.pPrivInstrGC);
4460 STAMR3RegisterF(pVM, &pPatchRec->patch.nrJumpRecs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrJumpRecs", pPatchRec->patch.pPrivInstrGC);
4461 STAMR3RegisterF(pVM, &pPatchRec->patch.nrFixups, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrFixups", pPatchRec->patch.pPrivInstrGC);
4462 STAMR3RegisterF(pVM, &pPatchRec->patch.opcode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/opcode", pPatchRec->patch.pPrivInstrGC);
4463 STAMR3RegisterF(pVM, &pPatchRec->patch.uOldState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOldState", pPatchRec->patch.pPrivInstrGC);
4464 STAMR3RegisterF(pVM, &pPatchRec->patch.uOpMode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOpMode", pPatchRec->patch.pPrivInstrGC);
4465 /// @todo change the state to be a callback so we can get a state mnemonic instead.
4466 STAMR3RegisterF(pVM, &pPatchRec->patch.uState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uState", pPatchRec->patch.pPrivInstrGC);
4467#endif
4468 }
4469#endif
4470 }
4471 /* Free leftover lock if any. */
4472 if (cacheRec.Lock.pvMap)
4473 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4474 return rc;
4475}
4476
4477/**
4478 * Query instruction size
4479 *
4480 * @returns VBox status code.
4481 * @param pVM The VM to operate on.
4482 * @param pPatch Patch record
4483 * @param pInstrGC Instruction address
4484 */
4485static uint32_t patmGetInstrSize(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
4486{
4487 uint8_t *pInstrHC;
4488 PGMPAGEMAPLOCK Lock;
4489
4490 int rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pInstrGC, (const void **)&pInstrHC, &Lock);
4491 if (rc == VINF_SUCCESS)
4492 {
4493 DISCPUSTATE cpu;
4494 bool disret;
4495 uint32_t cbInstr;
4496
4497 disret = patmR3DisInstr(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE | PATMREAD_NOCHECK, &cpu, &cbInstr);
4498 PGMPhysReleasePageMappingLock(pVM, &Lock);
4499 if (disret)
4500 return cbInstr;
4501 }
4502 return 0;
4503}
4504
4505/**
4506 * Add patch to page record
4507 *
4508 * @returns VBox status code.
4509 * @param pVM The VM to operate on.
4510 * @param pPage Page address
4511 * @param pPatch Patch record
4512 */
4513int patmAddPatchToPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4514{
4515 PPATMPATCHPAGE pPatchPage;
4516 int rc;
4517
4518 Log(("patmAddPatchToPage: insert patch %RHv to page %RRv\n", pPatch, pPage));
4519
4520 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4521 if (pPatchPage)
4522 {
4523 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4524 if (pPatchPage->cCount == pPatchPage->cMaxPatches)
4525 {
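            /* The per-page patch array is full: grow it by PATMPATCHPAGE_PREALLOC_INCREMENT
             * entries and copy over the existing patch pointers. */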
4526 uint32_t cMaxPatchesOld = pPatchPage->cMaxPatches;
4527 PPATCHINFO *paPatchOld = pPatchPage->aPatch;
4528
4529 pPatchPage->cMaxPatches += PATMPATCHPAGE_PREALLOC_INCREMENT;
4530 rc = MMHyperAlloc(pVM, sizeof(PPATCHINFO)*pPatchPage->cMaxPatches, 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage->aPatch);
4531 if (RT_FAILURE(rc))
4532 {
4533 Log(("Out of memory!!!!\n"));
4534 return VERR_NO_MEMORY;
4535 }
4536 memcpy(pPatchPage->aPatch, paPatchOld, cMaxPatchesOld*sizeof(PPATCHINFO));
4537 MMHyperFree(pVM, paPatchOld);
4538 }
4539 pPatchPage->aPatch[pPatchPage->cCount] = pPatch;
4540 pPatchPage->cCount++;
4541 }
4542 else
4543 {
4544 bool fInserted;
4545
4546 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHPAGE), 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage);
4547 if (RT_FAILURE(rc))
4548 {
4549 Log(("Out of memory!!!!\n"));
4550 return VERR_NO_MEMORY;
4551 }
4552 pPatchPage->Core.Key = pPage;
4553 pPatchPage->cCount = 1;
4554 pPatchPage->cMaxPatches = PATMPATCHPAGE_PREALLOC_INCREMENT;
4555
4556 rc = MMHyperAlloc(pVM, sizeof(PPATCHINFO)*PATMPATCHPAGE_PREALLOC_INCREMENT, 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage->aPatch);
4557 if (RT_FAILURE(rc))
4558 {
4559 Log(("Out of memory!!!!\n"));
4560 MMHyperFree(pVM, pPatchPage);
4561 return VERR_NO_MEMORY;
4562 }
4563 pPatchPage->aPatch[0] = pPatch;
4564
4565 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, &pPatchPage->Core);
4566 Assert(fInserted);
4567 pVM->patm.s.cPageRecords++;
4568
4569 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageInserted);
4570 }
4571 CSAMR3MonitorPage(pVM, pPage, CSAM_TAG_PATM);
4572
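    /* The remainder of this function tracks the lowest and highest patched guest address
     * inside this page (pLowestAddrGC/pHighestAddrGC), so that writes to that range can
     * later be recognized as modifications of patched code. */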
4573 /* Get the closest guest instruction (from below) */
4574 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4575 Assert(pGuestToPatchRec);
4576 if (pGuestToPatchRec)
4577 {
4578 LogFlow(("patmAddPatchToPage: lowest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pLowestAddrGC));
4579 if ( pPatchPage->pLowestAddrGC == 0
4580 || pPatchPage->pLowestAddrGC > (RTRCPTR)pGuestToPatchRec->Core.Key)
4581 {
4582 RTRCUINTPTR offset;
4583
4584 pPatchPage->pLowestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4585
4586 offset = pPatchPage->pLowestAddrGC & PAGE_OFFSET_MASK;
4587 /* If we're too close to the page boundary, then make sure an instruction from the previous page doesn't cross the boundary itself. */
4588 if (offset && offset < MAX_INSTR_SIZE)
4589 {
4590 /* Get the closest guest instruction (from above) */
4591 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage-1, false);
4592
4593 if (pGuestToPatchRec)
4594 {
4595 uint32_t size = patmGetInstrSize(pVM, pPatch, (RTRCPTR)pGuestToPatchRec->Core.Key);
4596 if ((RTRCUINTPTR)pGuestToPatchRec->Core.Key + size > pPage)
4597 {
4598 pPatchPage->pLowestAddrGC = pPage;
4599 LogFlow(("patmAddPatchToPage: new lowest %RRv\n", pPatchPage->pLowestAddrGC));
4600 }
4601 }
4602 }
4603 }
4604 }
4605
4606 /* Get the closest guest instruction (from above) */
4607 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage+PAGE_SIZE-1, false);
4608 Assert(pGuestToPatchRec);
4609 if (pGuestToPatchRec)
4610 {
4611 LogFlow(("patmAddPatchToPage: highest patch page address %RRv current highest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pHighestAddrGC));
4612 if ( pPatchPage->pHighestAddrGC == 0
4613 || pPatchPage->pHighestAddrGC <= (RTRCPTR)pGuestToPatchRec->Core.Key)
4614 {
4615 pPatchPage->pHighestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4616 /* Increase by instruction size. */
4617 uint32_t size = patmGetInstrSize(pVM, pPatch, pPatchPage->pHighestAddrGC);
4618//// Assert(size);
4619 pPatchPage->pHighestAddrGC += size;
4620 LogFlow(("patmAddPatchToPage: new highest %RRv\n", pPatchPage->pHighestAddrGC));
4621 }
4622 }
4623
4624 return VINF_SUCCESS;
4625}
4626
4627/**
4628 * Remove patch from page record
4629 *
4630 * @returns VBox status code.
4631 * @param pVM The VM to operate on.
4632 * @param pPage Page address
4633 * @param pPatch Patch record
4634 */
4635int patmRemovePatchFromPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4636{
4637 PPATMPATCHPAGE pPatchPage;
4638 int rc;
4639
4640 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4641 Assert(pPatchPage);
4642
4643 if (!pPatchPage)
4644 return VERR_INVALID_PARAMETER;
4645
4646 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4647
4648    Log(("patmRemovePatchFromPage: remove patch %RHv from page %RRv\n", pPatch, pPage));
4649 if (pPatchPage->cCount > 1)
4650 {
4651 uint32_t i;
4652
4653 /* Used by multiple patches */
4654 for (i=0;i<pPatchPage->cCount;i++)
4655 {
4656 if (pPatchPage->aPatch[i] == pPatch)
4657 {
4658 pPatchPage->aPatch[i] = 0;
4659 break;
4660 }
4661 }
4662 /* close the gap between the remaining pointers. */
4663 if (i < pPatchPage->cCount - 1)
4664 {
4665 memcpy(&pPatchPage->aPatch[i], &pPatchPage->aPatch[i+1], sizeof(PPATCHINFO)*(pPatchPage->cCount - (i+1)));
4666 }
4667 AssertMsg(i < pPatchPage->cCount, ("Unable to find patch %RHv in page %RRv\n", pPatch, pPage));
4668
4669 pPatchPage->cCount--;
4670 }
4671 else
4672 {
4673 PPATMPATCHPAGE pPatchNode;
4674
4675 Log(("patmRemovePatchFromPage %RRv\n", pPage));
4676
4677 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageRemoved);
4678 pPatchNode = (PPATMPATCHPAGE)RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4679 Assert(pPatchNode && pPatchNode == pPatchPage);
4680
4681 Assert(pPatchPage->aPatch);
4682 rc = MMHyperFree(pVM, pPatchPage->aPatch);
4683 AssertRC(rc);
4684 rc = MMHyperFree(pVM, pPatchPage);
4685 AssertRC(rc);
4686 pVM->patm.s.cPageRecords--;
4687 }
4688 return VINF_SUCCESS;
4689}
4690
4691/**
4692 * Insert page records for all guest pages that contain instructions that were recompiled for this patch
4693 *
4694 * @returns VBox status code.
4695 * @param pVM The VM to operate on.
4696 * @param pPatch Patch record
4697 */
4698int patmInsertPatchPages(PVM pVM, PPATCHINFO pPatch)
4699{
4700 int rc;
4701 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4702
4703 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4704 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4705 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4706
4707 /** @todo optimize better (large gaps between current and next used page) */
4708 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4709 {
4710 /* Get the closest guest instruction (from above) */
4711 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4712 if ( pGuestToPatchRec
4713 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage)
4714 )
4715 {
4716 /* Code in page really patched -> add record */
4717 rc = patmAddPatchToPage(pVM, pPage, pPatch);
4718 AssertRC(rc);
4719 }
4720 }
4721 pPatch->flags |= PATMFL_CODE_MONITORED;
4722 return VINF_SUCCESS;
4723}
4724
4725/**
4726 * Remove page records for all guest pages that contain instructions that were recompiled for this patch
4727 *
4728 * @returns VBox status code.
4729 * @param pVM The VM to operate on.
4730 * @param pPatch Patch record
4731 */
4732int patmRemovePatchPages(PVM pVM, PPATCHINFO pPatch)
4733{
4734 int rc;
4735 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4736
4737 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4738 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4739 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4740
4741 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4742 {
4743 /* Get the closest guest instruction (from above) */
4744 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4745 if ( pGuestToPatchRec
4746 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage) /** @todo bird: PAGE_ADDRESS is for the current context really. check out these. */
4747 )
4748 {
4749 /* Code in page really patched -> remove record */
4750 rc = patmRemovePatchFromPage(pVM, pPage, pPatch);
4751 AssertRC(rc);
4752 }
4753 }
4754 pPatch->flags &= ~PATMFL_CODE_MONITORED;
4755 return VINF_SUCCESS;
4756}
4757
4758/**
4759 * Notifies PATM about a (potential) write to code that has been patched.
4760 *
4761 * @returns VBox status code.
4762 * @param pVM The VM to operate on.
4763 * @param GCPtr GC pointer to write address
4764 * @param cbWrite Number of bytes to write
4765 *
4766 */
4767VMMR3DECL(int) PATMR3PatchWrite(PVM pVM, RTRCPTR GCPtr, uint32_t cbWrite)
4768{
4769 RTRCUINTPTR pWritePageStart, pWritePageEnd, pPage;
4770
4771 Log(("PATMR3PatchWrite %RRv %x\n", GCPtr, cbWrite));
4772
4773 Assert(VM_IS_EMT(pVM));
4774
4775 /* Quick boundary check */
4776 if ( GCPtr < pVM->patm.s.pPatchedInstrGCLowest
4777 || GCPtr > pVM->patm.s.pPatchedInstrGCHighest
4778 )
4779 return VINF_SUCCESS;
4780
4781 STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWrite, a);
4782
4783 pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
4784 pWritePageEnd = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;
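    /* Example (hypothetical addresses): a 4 byte write to 0x80000ffe crosses a page boundary,
       so pWritePageStart = 0x80000000 and pWritePageEnd = 0x80001000 and the loop below visits
       both page records. */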
4785
4786 for (pPage = pWritePageStart; pPage <= pWritePageEnd; pPage += PAGE_SIZE)
4787 {
4788loop_start:
4789 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4790 if (pPatchPage)
4791 {
4792 uint32_t i;
4793 bool fValidPatchWrite = false;
4794
4795 /* Quick check to see if the write is in the patched part of the page */
4796 if ( pPatchPage->pLowestAddrGC > (RTRCPTR)((RTRCUINTPTR)GCPtr + cbWrite - 1)
4797 || pPatchPage->pHighestAddrGC < GCPtr)
4798 {
4799 break;
4800 }
4801
4802 for (i=0;i<pPatchPage->cCount;i++)
4803 {
4804 if (pPatchPage->aPatch[i])
4805 {
4806 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4807 RTRCPTR pPatchInstrGC;
4808 //unused: bool fForceBreak = false;
4809
4810 Assert(pPatchPage->aPatch[i]->flags & PATMFL_CODE_MONITORED);
4811 /** @todo inefficient and includes redundant checks for multiple pages. */
4812 for (uint32_t j=0; j<cbWrite; j++)
4813 {
4814 RTRCPTR pGuestPtrGC = (RTRCPTR)((RTRCUINTPTR)GCPtr + j);
4815
4816 if ( pPatch->cbPatchJump
4817 && pGuestPtrGC >= pPatch->pPrivInstrGC
4818 && pGuestPtrGC < pPatch->pPrivInstrGC + pPatch->cbPatchJump)
4819 {
4820 /* The guest is about to overwrite the 5 byte jump to patch code. Remove the patch. */
4821 Log(("PATMR3PatchWrite: overwriting jump to patch code -> remove patch.\n"));
4822 int rc = PATMR3RemovePatch(pVM, pPatch->pPrivInstrGC);
4823 if (rc == VINF_SUCCESS)
4824 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4825 goto loop_start;
4826
4827 continue;
4828 }
4829
4830 /* Find the closest instruction from below; the above quick check ensured that we are indeed in patched code */
4831 pPatchInstrGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4832 if (!pPatchInstrGC)
4833 {
4834 RTRCPTR pClosestInstrGC;
4835 uint32_t size;
4836
4837 pPatchInstrGC = patmGuestGCPtrToClosestPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4838 if (pPatchInstrGC)
4839 {
4840 pClosestInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchInstrGC);
4841 Assert(pClosestInstrGC <= pGuestPtrGC);
4842 size = patmGetInstrSize(pVM, pPatch, pClosestInstrGC);
4843 /* Check if this is not a write into a gap between two patches */
4844 if (pClosestInstrGC + size - 1 < pGuestPtrGC)
4845 pPatchInstrGC = 0;
4846 }
4847 }
4848 if (pPatchInstrGC)
4849 {
4850 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
4851
4852 fValidPatchWrite = true;
4853
4854 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
4855 Assert(pPatchToGuestRec);
4856 if (pPatchToGuestRec && !pPatchToGuestRec->fDirty)
4857 {
4858 Log(("PATMR3PatchWrite: Found patched instruction %RRv -> %RRv\n", pGuestPtrGC, pPatchInstrGC));
4859
4860 if (++pPatch->cCodeWrites > PATM_MAX_CODE_WRITES)
4861 {
4862 LogRel(("PATM: Disable block at %RRv - write %RRv-%RRv\n", pPatch->pPrivInstrGC, pGuestPtrGC, pGuestPtrGC+cbWrite));
4863
4864 PATMR3MarkDirtyPatch(pVM, pPatch);
4865
4866 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4867 goto loop_start;
4868 }
4869 else
4870 {
4871 /* Replace the patch instruction with a breakpoint; when it's hit, then we'll attempt to recompile the instruction again. */
4872 uint8_t *pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pPatchInstrGC);
4873
4874 pPatchToGuestRec->u8DirtyOpcode = *pInstrHC;
4875 pPatchToGuestRec->fDirty = true;
4876
4877 *pInstrHC = 0xCC;
4878
4879 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirty);
4880 }
4881 }
4882 /* else already marked dirty */
4883 }
4884 }
4885 }
4886 } /* for each patch */
4887
4888 if (fValidPatchWrite == false)
4889 {
4890 /* Write to a part of the page that either:
4891 * - doesn't contain any code (shared code/data); rather unlikely
4892 * - old code page that's no longer in active use.
4893 */
4894invalid_write_loop_start:
4895 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4896
4897 if (pPatchPage)
4898 {
4899 for (i=0;i<pPatchPage->cCount;i++)
4900 {
4901 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4902
4903 if (pPatch->cInvalidWrites > PATM_MAX_INVALID_WRITES)
4904 {
4905 /* Note: possibly dangerous assumption that all future writes will be harmless. */
4906 if (pPatch->flags & PATMFL_IDTHANDLER)
4907 {
4908 LogRel(("PATM: Stop monitoring IDT handler pages at %RRv - invalid write %RRv-%RRv (this is not a fatal error)\n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
4909
4910 Assert(pPatch->flags & PATMFL_CODE_MONITORED);
4911 int rc = patmRemovePatchPages(pVM, pPatch);
4912 AssertRC(rc);
4913 }
4914 else
4915 {
4916 LogRel(("PATM: Disable block at %RRv - invalid write %RRv-%RRv \n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
4917 PATMR3MarkDirtyPatch(pVM, pPatch);
4918 }
4919 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4920 goto invalid_write_loop_start;
4921 }
4922 } /* for */
4923 }
4924 }
4925 }
4926 }
4927 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWrite, a);
4928 return VINF_SUCCESS;
4929
4930}
4931
4932/**
4933 * Disable all patches in a flushed page
4934 *
4935 * @returns VBox status code
4936 * @param pVM The VM to operate on.
4937 * @param addr GC address of the page to flush
4938 *
4939 * @note Currently only called by CSAMR3FlushPage; optimization to avoid having to double check if the physical address has changed.
4940 */
4941VMMR3DECL(int) PATMR3FlushPage(PVM pVM, RTRCPTR addr)
4942{
4943 addr &= PAGE_BASE_GC_MASK;
4944
4945 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, addr);
4946 if (pPatchPage)
4947 {
4948 int i;
4949
4950 /* From top to bottom as the array is modified by PATMR3MarkDirtyPatch. */
4951 for (i=(int)pPatchPage->cCount-1;i>=0;i--)
4952 {
4953 if (pPatchPage->aPatch[i])
4954 {
4955 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4956
4957 Log(("PATMR3FlushPage %RRv remove patch at %RRv\n", addr, pPatch->pPrivInstrGC));
4958 PATMR3MarkDirtyPatch(pVM, pPatch);
4959 }
4960 }
4961 STAM_COUNTER_INC(&pVM->patm.s.StatFlushed);
4962 }
4963 return VINF_SUCCESS;
4964}
4965
4966/**
4967 * Checks whether the instruction at the specified address has already been patched.
4968 *
4969 * @returns boolean, patched or not
4970 * @param pVM The VM to operate on.
4971 * @param pInstrGC Guest context pointer to instruction
4972 */
4973VMMR3DECL(bool) PATMR3HasBeenPatched(PVM pVM, RTRCPTR pInstrGC)
4974{
4975 PPATMPATCHREC pPatchRec;
4976 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4977 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
4978 return true;
4979 return false;
4980}
4981
4982/**
4983 * Query the opcode of the original code that was overwritten by the 5 byte patch jump
4984 *
4985 * @returns VBox status code.
4986 * @param pVM The VM to operate on.
4987 * @param pInstrGC GC address of instr
4988 * @param pByte opcode byte pointer (OUT)
4989 *
4990 */
4991VMMR3DECL(int) PATMR3QueryOpcode(PVM pVM, RTRCPTR pInstrGC, uint8_t *pByte)
4992{
4993 PPATMPATCHREC pPatchRec;
4994
4995 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
4996
4997 /* Shortcut. */
4998 if ( !PATMIsEnabled(pVM)
4999 || pInstrGC < pVM->patm.s.pPatchedInstrGCLowest
5000 || pInstrGC > pVM->patm.s.pPatchedInstrGCHighest)
5001 {
5002 return VERR_PATCH_NOT_FOUND;
5003 }
5004
5005 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5006 // if the patch is enabled and the pointer lies within 5 bytes of this priv instr ptr, then we've got a hit!
5007 if ( pPatchRec
5008 && pPatchRec->patch.uState == PATCH_ENABLED
5009 && pInstrGC >= pPatchRec->patch.pPrivInstrGC
5010 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5011 {
5012 RTRCPTR offset = pInstrGC - pPatchRec->patch.pPrivInstrGC;
5013 *pByte = pPatchRec->patch.aPrivInstr[offset];
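        /* Example (hypothetical): for a 5 byte patch jump at 0x1000, a query for 0x1002 returns
           aPrivInstr[2], i.e. the third byte of the original, unpatched instruction stream. */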
5014
5015 if (pPatchRec->patch.cbPatchJump == 1)
5016 {
5017 Log(("PATMR3QueryOpcode: returning opcode %2X for instruction at %RRv\n", *pByte, pInstrGC));
5018 }
5019 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5020 return VINF_SUCCESS;
5021 }
5022 return VERR_PATCH_NOT_FOUND;
5023}
5024
5025/**
5026 * Disable patch for privileged instruction at specified location
5027 *
5028 * @returns VBox status code.
5029 * @param pVM The VM to operate on.
5030 * @param pInstrGC Guest context pointer to the privileged instruction
5031 *
5032 * @note returns failure if patching is not allowed or possible
5033 *
5034 */
5035VMMR3DECL(int) PATMR3DisablePatch(PVM pVM, RTRCPTR pInstrGC)
5036{
5037 PPATMPATCHREC pPatchRec;
5038 PPATCHINFO pPatch;
5039
5040 Log(("PATMR3DisablePatch: %RRv\n", pInstrGC));
5041 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5042 if (pPatchRec)
5043 {
5044 int rc = VINF_SUCCESS;
5045
5046 pPatch = &pPatchRec->patch;
5047
5048 /* Already disabled? */
5049 if (pPatch->uState == PATCH_DISABLED)
5050 return VINF_SUCCESS;
5051
5052 /* Clear the IDT entries for the patch we're disabling. */
5053 /* Note: very important as we clear IF in the patch itself */
5054 /** @todo this needs to be changed */
5055 if (pPatch->flags & PATMFL_IDTHANDLER)
5056 {
5057 uint32_t iGate;
5058
5059 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
5060 if (iGate != (uint32_t)~0)
5061 {
5062 TRPMR3SetGuestTrapHandler(pVM, iGate, TRPM_INVALID_HANDLER);
5063 if (++cIDTHandlersDisabled < 256)
5064 LogRel(("PATM: Disabling IDT %x patch handler %RRv\n", iGate, pInstrGC));
5065 }
5066 }
5067
5068        /* Mark the entry with a breakpoint in case somebody else calls it later on (cli patch used as a function, or function/trampoline/IDT patches) */
5069 if ( pPatch->pPatchBlockOffset
5070 && pPatch->uState == PATCH_ENABLED)
5071 {
5072 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5073 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5074 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5075 }
5076
5077 /* IDT or function patches haven't changed any guest code. */
5078 if (pPatch->flags & PATMFL_PATCHED_GUEST_CODE)
5079 {
5080 Assert(pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP);
5081 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK)));
5082
5083 if (pPatch->uState != PATCH_REFUSED)
5084 {
5085 uint8_t temp[16];
5086
5087 Assert(pPatch->cbPatchJump < sizeof(temp));
5088
5089 /* Let's first check if the guest code is still the same. */
5090 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5091 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5092 if (rc == VINF_SUCCESS)
5093 {
5094 RTRCINTPTR displ = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);
5095
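                    /* The guest code should still contain our near jump: opcode 0xE9 followed by a
                       32 bit displacement relative to the byte after the 5 byte jump instruction. */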
5096 if ( temp[0] != 0xE9 /* jmp opcode */
5097 || *(RTRCINTPTR *)(&temp[1]) != displ
5098 )
5099 {
5100                        Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5101 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5102 /* Remove it completely */
5103 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5104 rc = PATMR3RemovePatch(pVM, pInstrGC);
5105 AssertRC(rc);
5106 return VWRN_PATCH_REMOVED;
5107 }
5108 patmRemoveJumpToPatch(pVM, pPatch);
5109 }
5110 else
5111 {
5112 Log(("PATMR3DisablePatch: unable to disable patch -> mark PATCH_DISABLE_PENDING\n"));
5113 pPatch->uState = PATCH_DISABLE_PENDING;
5114 }
5115 }
5116 else
5117 {
5118 AssertMsgFailed(("Patch was refused!\n"));
5119 return VERR_PATCH_ALREADY_DISABLED;
5120 }
5121 }
5122 else
5123 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5124 {
5125 uint8_t temp[16];
5126
5127 Assert(pPatch->cbPatchJump < sizeof(temp));
5128
5129 /* Let's first check if the guest code is still the same. */
5130 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5131 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5132 if (rc == VINF_SUCCESS)
5133 {
5134 if (temp[0] != 0xCC)
5135 {
5136                        Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5137 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5138 /* Remove it completely */
5139 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5140 rc = PATMR3RemovePatch(pVM, pInstrGC);
5141 AssertRC(rc);
5142 return VWRN_PATCH_REMOVED;
5143 }
5144 patmDeactivateInt3Patch(pVM, pPatch);
5145 }
5146 }
5147
5148 if (rc == VINF_SUCCESS)
5149 {
5150 /* Save old state and mark this one as disabled (so it can be enabled later on). */
5151 if (pPatch->uState == PATCH_DISABLE_PENDING)
5152 {
5153 /* Just to be safe, let's make sure this one can never be reused; the patch might be marked dirty already (int3 at start) */
5154 pPatch->uState = PATCH_UNUSABLE;
5155 }
5156 else
5157 if (pPatch->uState != PATCH_DIRTY)
5158 {
5159 pPatch->uOldState = pPatch->uState;
5160 pPatch->uState = PATCH_DISABLED;
5161 }
5162 STAM_COUNTER_ADD(&pVM->patm.s.StatDisabled, 1);
5163 }
5164
5165 Log(("PATMR3DisablePatch: disabled patch at %RRv\n", pInstrGC));
5166 return VINF_SUCCESS;
5167 }
5168 Log(("Patch not found!\n"));
5169 return VERR_PATCH_NOT_FOUND;
5170}
5171
5172/**
5173 * Permanently disable patch for privileged instruction at specified location
5174 *
5175 * @returns VBox status code.
5176 * @param pVM The VM to operate on.
5177 * @param pInstrGC Guest context instruction pointer
5178 * @param pConflictAddr Guest context pointer which conflicts with specified patch
5179 * @param pConflictPatch Conflicting patch
5180 *
5181 */
5182static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pConflictPatch)
5183{
5184 NOREF(pConflictAddr);
5185#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
5186 PATCHINFO patch;
5187 DISCPUSTATE cpu;
5188 R3PTRTYPE(uint8_t *) pInstrHC;
5189 uint32_t cbInstr;
5190 bool disret;
5191 int rc;
5192
5193 RT_ZERO(patch);
5194 pInstrHC = PATMGCVirtToHCVirt(pVM, &patch, pInstrGC);
5195 disret = patmR3DisInstr(pVM, &patch, pInstrGC, pInstrHC, PATMREAD_ORGCODE, &cpu, &cbInstr);
5196 /*
5197 * If it's a 5 byte relative jump, then we can work around the problem by replacing the 32 bits relative offset
5198 * with one that jumps right into the conflict patch.
5199 * Otherwise we must disable the conflicting patch to avoid serious problems.
5200 */
5201 if ( disret == true
5202 && (pConflictPatch->flags & PATMFL_CODE32)
5203 && (cpu.pCurInstr->opcode == OP_JMP || (cpu.pCurInstr->optype & DISOPTYPE_COND_CONTROLFLOW))
5204 && (cpu.param1.fUse & DISUSE_IMMEDIATE32_REL))
5205 {
5206 /* Hint patches must be enabled first. */
5207 if (pConflictPatch->flags & PATMFL_INSTR_HINT)
5208 {
5209 Log(("Enabling HINTED patch %RRv\n", pConflictPatch->pPrivInstrGC));
5210 pConflictPatch->flags &= ~PATMFL_INSTR_HINT;
5211 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5212 Assert(rc == VINF_SUCCESS || rc == VERR_PATCH_NOT_FOUND);
5213 /* Enabling might fail if the patched code has changed in the meantime. */
5214 if (rc != VINF_SUCCESS)
5215 return rc;
5216 }
5217
5218 rc = PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_JUMP_CONFLICT);
5219 if (RT_SUCCESS(rc))
5220 {
5221 Log(("PATM -> CONFLICT: Installed JMP patch for patch conflict at %RRv\n", pInstrGC));
5222 STAM_COUNTER_INC(&pVM->patm.s.StatFixedConflicts);
5223 return VINF_SUCCESS;
5224 }
5225 }
5226#endif
5227
5228 if (pConflictPatch->opcode == OP_CLI)
5229 {
5230 /* Turn it into an int3 patch; our GC trap handler will call the generated code manually. */
5231 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> turn into int 3 patch!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5232 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5233 if (rc == VWRN_PATCH_REMOVED)
5234 return VINF_SUCCESS;
5235 if (RT_SUCCESS(rc))
5236 {
5237 pConflictPatch->flags &= ~(PATMFL_MUST_INSTALL_PATCHJMP|PATMFL_INSTR_HINT);
5238 pConflictPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
5239 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5240 if (rc == VERR_PATCH_NOT_FOUND)
5241 return VINF_SUCCESS; /* removed already */
5242
5243 AssertRC(rc);
5244 if (RT_SUCCESS(rc))
5245 {
5246 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
5247 return VINF_SUCCESS;
5248 }
5249 }
5250 /* else turned into unusable patch (see below) */
5251 }
5252 else
5253 {
5254 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> DISABLING it!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5255 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5256 if (rc == VWRN_PATCH_REMOVED)
5257 return VINF_SUCCESS;
5258 }
5259
5260 /* No need to monitor the code anymore. */
5261 if (pConflictPatch->flags & PATMFL_CODE_MONITORED)
5262 {
5263 int rc = patmRemovePatchPages(pVM, pConflictPatch);
5264 AssertRC(rc);
5265 }
5266 pConflictPatch->uState = PATCH_UNUSABLE;
5267 STAM_COUNTER_INC(&pVM->patm.s.StatUnusable);
5268 return VERR_PATCH_DISABLED;
5269}
5270
5271/**
5272 * Enable patch for privileged instruction at specified location
5273 *
5274 * @returns VBox status code.
5275 * @param pVM The VM to operate on.
5276 * @param pInstrGC Guest context pointer to the privileged instruction
5277 *
5278 * @note returns failure if patching is not allowed or possible
5279 *
5280 */
5281VMMR3DECL(int) PATMR3EnablePatch(PVM pVM, RTRCPTR pInstrGC)
5282{
5283 PPATMPATCHREC pPatchRec;
5284 PPATCHINFO pPatch;
5285
5286 Log(("PATMR3EnablePatch %RRv\n", pInstrGC));
5287 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5288 if (pPatchRec)
5289 {
5290 int rc = VINF_SUCCESS;
5291
5292 pPatch = &pPatchRec->patch;
5293
5294 if (pPatch->uState == PATCH_DISABLED)
5295 {
5296 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
5297 {
5298 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
5299 uint8_t temp[16];
5300
5301 Assert(pPatch->cbPatchJump < sizeof(temp));
5302
5303 /* Let's first check if the guest code is still the same. */
5304 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5305 AssertRC(rc2);
5306 if (rc2 == VINF_SUCCESS)
5307 {
5308 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5309 {
5310                        Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5311 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5312 /* Remove it completely */
5313 rc = PATMR3RemovePatch(pVM, pInstrGC);
5314 AssertRC(rc);
5315 return VERR_PATCH_NOT_FOUND;
5316 }
5317
5318 PATMP2GLOOKUPREC cacheRec;
5319 RT_ZERO(cacheRec);
5320 cacheRec.pPatch = pPatch;
5321
5322 rc2 = patmGenJumpToPatch(pVM, pPatch, &cacheRec, false);
5323 /* Free leftover lock if any. */
5324 if (cacheRec.Lock.pvMap)
5325 {
5326 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
5327 cacheRec.Lock.pvMap = NULL;
5328 }
5329 AssertRC(rc2);
5330 if (RT_FAILURE(rc2))
5331 return rc2;
5332
5333#ifdef DEBUG
5334 {
5335 DISCPUSTATE cpu;
5336 char szOutput[256];
5337 uint32_t cbInstr;
5338 uint32_t i = 0;
5339 bool disret;
5340 while(i < pPatch->cbPatchJump)
5341 {
5342 disret = patmR3DisInstrToStr(pVM, pPatch, pPatch->pPrivInstrGC + i, NULL, PATMREAD_ORGCODE,
5343 &cpu, &cbInstr, szOutput, sizeof(szOutput));
5344 Log(("Renewed patch instr: %s", szOutput));
5345 i += cbInstr;
5346 }
5347 }
5348#endif
5349 }
5350 }
5351 else
5352 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5353 {
5354 uint8_t temp[16];
5355
5356 Assert(pPatch->cbPatchJump < sizeof(temp));
5357
5358 /* Let's first check if the guest code is still the same. */
5359 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5360 AssertRC(rc2);
5361
5362 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5363 {
5364                Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5365 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5366 rc = PATMR3RemovePatch(pVM, pInstrGC);
5367 AssertRC(rc);
5368 return VERR_PATCH_NOT_FOUND;
5369 }
5370
5371 rc2 = patmActivateInt3Patch(pVM, pPatch);
5372 if (RT_FAILURE(rc2))
5373 return rc2;
5374 }
5375
5376 pPatch->uState = pPatch->uOldState; //restore state
5377
5378 /* Restore the entry breakpoint with the original opcode (see PATMR3DisablePatch). */
5379 if (pPatch->pPatchBlockOffset)
5380 *PATCHCODE_PTR_HC(pPatch) = pPatch->bDirtyOpcode;
5381
5382 STAM_COUNTER_ADD(&pVM->patm.s.StatEnabled, 1);
5383 }
5384 else
5385 Log(("PATMR3EnablePatch: Unable to enable patch %RRv with state %d\n", pInstrGC, pPatch->uState));
5386
5387 return rc;
5388 }
5389 return VERR_PATCH_NOT_FOUND;
5390}
5391
5392/**
5393 * Remove patch for privileged instruction at specified location
5394 *
5395 * @returns VBox status code.
5396 * @param pVM The VM to operate on.
5397 * @param pPatchRec Patch record
5398 * @param fForceRemove Force removal even for referenced or duplicated function patches
5399 */
5400int PATMRemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove)
5401{
5402 PPATCHINFO pPatch;
5403
5404 pPatch = &pPatchRec->patch;
5405
5406 /* Strictly forbidden to remove such patches. There can be dependencies!! */
5407 if (!fForceRemove && (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED)))
5408 {
5409 Log(("PATMRemovePatch %RRv REFUSED!\n", pPatch->pPrivInstrGC));
5410 return VERR_ACCESS_DENIED;
5411 }
5412 Log(("PATMRemovePatch %RRv\n", pPatch->pPrivInstrGC));
5413
5414 /* Note: NEVER EVER REUSE PATCH MEMORY */
5415 /* Note: PATMR3DisablePatch puts a breakpoint (0xCC) at the entry of this patch */
5416
5417 if (pPatchRec->patch.pPatchBlockOffset)
5418 {
5419 PAVLOU32NODECORE pNode;
5420
5421 pNode = RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->patch.pPatchBlockOffset);
5422 Assert(pNode);
5423 }
5424
5425 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
5426 {
5427 int rc = patmRemovePatchPages(pVM, &pPatchRec->patch);
5428 AssertRC(rc);
5429 }
5430
5431#ifdef VBOX_WITH_STATISTICS
5432 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5433 {
5434 STAMR3Deregister(pVM, &pPatchRec->patch);
5435#ifndef DEBUG_sandervl
5436 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5437 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5438 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5439 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5440 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5441 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5442 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5443 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5444 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5445 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5446 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5447 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5448 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5449 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5450#endif
5451 }
5452#endif
5453
5454 /* Note: no need to free Guest2PatchAddrTree as those records share memory with Patch2GuestAddrTree records. */
5455 patmEmptyTreeU32(pVM, &pPatch->Patch2GuestAddrTree);
5456 pPatch->nrPatch2GuestRecs = 0;
5457 Assert(pPatch->Patch2GuestAddrTree == 0);
5458
5459 patmEmptyTree(pVM, &pPatch->FixupTree);
5460 pPatch->nrFixups = 0;
5461 Assert(pPatch->FixupTree == 0);
5462
5463 if (pPatchRec->patch.pTempInfo)
5464 MMR3HeapFree(pPatchRec->patch.pTempInfo);
5465
5466 /* Note: might fail, because it has already been removed (e.g. during reset). */
5467 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5468
5469 /* Free the patch record */
5470 MMHyperFree(pVM, pPatchRec);
5471 return VINF_SUCCESS;
5472}
5473
5474/**
5475 * RTAvlU32DoWithAll() worker.
5476 * Checks whether the current trampoline instruction is the jump to the target patch
5477 * and updates the displacement to jump to the new target.
5478 *
5479 * @returns VBox status code.
5480 * @retval VERR_ALREADY_EXISTS if the jump was found.
5481 * @param pNode The current patch to guest record to check.
5482 * @param pvUser The refresh state.
5483 */
5484static int patmR3PatchRefreshFindTrampolinePatch(PAVLU32NODECORE pNode, void *pvUser)
5485{
5486 PRECPATCHTOGUEST pPatch2GuestRec = (PRECPATCHTOGUEST)pNode;
5487 PPATMREFRESHPATCH pRefreshPatchState = (PPATMREFRESHPATCH)pvUser;
5488 PVM pVM = pRefreshPatchState->pVM;
5489
5490 uint8_t *pPatchInstr = (uint8_t *)(pVM->patm.s.pPatchMemHC + pPatch2GuestRec->Core.Key);
5491
5492 /*
5493 * Check if the patch instruction starts with a jump.
5494 * ASSUMES that there is no other patch to guest record that starts
5495 * with a jump.
5496 */
5497 if (*pPatchInstr == 0xE9)
5498 {
5499 /* Jump found, update the displacement. */
5500 RTRCPTR pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pRefreshPatchState->pPatchRec,
5501 pRefreshPatchState->pPatchTrampoline->pPrivInstrGC);
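        /* jmp rel32: the displacement is relative to the first byte after the 5 byte jump instruction. */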
5502 int32_t displ = pPatchTargetGC - (pVM->patm.s.pPatchMemGC + pPatch2GuestRec->Core.Key + SIZEOF_NEARJUMP32);
5503
5504        LogFlow(("Updating trampoline patch new patch target %RRv, new displacement %d (old was %d)\n",
5505 pPatchTargetGC, displ, *(uint32_t *)&pPatchInstr[1]));
5506
5507 *(uint32_t *)&pPatchInstr[1] = displ;
5508 return VERR_ALREADY_EXISTS; /** @todo better return code */
5509 }
5510
5511 return VINF_SUCCESS;
5512}
5513
5514/**
5515 * Attempt to refresh the patch by recompiling its entire code block
5516 *
5517 * @returns VBox status code.
5518 * @param pVM The VM to operate on.
5519 * @param pPatchRec Patch record
5520 */
5521int patmR3RefreshPatch(PVM pVM, PPATMPATCHREC pPatchRec)
5522{
5523 PPATCHINFO pPatch;
5524 int rc;
5525 RTRCPTR pInstrGC = pPatchRec->patch.pPrivInstrGC;
5526 PTRAMPREC pTrampolinePatchesHead = NULL;
5527
5528 Log(("patmR3RefreshPatch: attempt to refresh patch at %RRv\n", pInstrGC));
5529
5530 pPatch = &pPatchRec->patch;
5531 AssertReturn(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER), VERR_PATCHING_REFUSED);
5532 if (pPatch->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
5533 {
5534 if (!pPatch->pTrampolinePatchesHead)
5535 {
5536 /*
5537 * It is sometimes possible that there are trampoline patches to this patch
5538 * but they are not recorded (after a saved state load for example).
5539 * Refuse to refresh those patches.
5540 * Can hurt performance in theory if the patched code is modified by the guest
5541 * and is executed often. However most of the time states are saved after the guest
5542 * code was modified and is not updated anymore afterwards so this shouldn't be a
5543 * big problem.
5544 */
5545 Log(("patmR3RefreshPatch: refused because external jumps to this patch exist but the jumps are not recorded\n"));
5546 return VERR_PATCHING_REFUSED;
5547 }
5548 Log(("patmR3RefreshPatch: external jumps to this patch exist, updating\n"));
5549 pTrampolinePatchesHead = pPatch->pTrampolinePatchesHead;
5550 }
5551
5552 /** Note: quite ugly to enable/disable/remove/insert old and new patches, but there's no easy way around it. */
5553
5554 rc = PATMR3DisablePatch(pVM, pInstrGC);
5555 AssertRC(rc);
5556
5557 /** Kick it out of the lookup tree to make sure PATMR3InstallPatch doesn't fail (hack alert) */
5558 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5559#ifdef VBOX_WITH_STATISTICS
5560 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5561 {
5562 STAMR3Deregister(pVM, &pPatchRec->patch);
5563#ifndef DEBUG_sandervl
5564 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5565 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5566 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5567 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5568 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5569 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5570 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5571 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5572 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5573 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5574 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5575 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5576 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5577 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5578#endif
5579 }
5580#endif
5581
5582 /** Note: We don't attempt to reuse patch memory here as it's quite common that the new code block requires more memory. */
5583
5584 /* Attempt to install a new patch. */
5585 rc = PATMR3InstallPatch(pVM, pInstrGC, pPatch->flags & (PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT));
5586 if (RT_SUCCESS(rc))
5587 {
5588 RTRCPTR pPatchTargetGC;
5589 PPATMPATCHREC pNewPatchRec;
5590
5591 /* Determine target address in new patch */
5592 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pInstrGC);
5593 Assert(pPatchTargetGC);
5594 if (!pPatchTargetGC)
5595 {
5596 rc = VERR_PATCHING_REFUSED;
5597 goto failure;
5598 }
5599
5600 /* Reset offset into patch memory to put the next code blocks right at the beginning. */
5601 pPatch->uCurPatchOffset = 0;
5602
5603 /* insert jump to new patch in old patch block */
5604 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC, false /* no lookup record */);
5605 if (RT_FAILURE(rc))
5606 goto failure;
5607
5608 pNewPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5609 Assert(pNewPatchRec); /* can't fail */
5610
5611 /* Remove old patch (only do that when everything is finished) */
5612 int rc2 = PATMRemovePatch(pVM, pPatchRec, true /* force removal */);
5613 AssertRC(rc2);
5614
5615 /* Put the new patch back into the tree, because removing the old one kicked this one out. (hack alert) */
5616 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pNewPatchRec->Core);
5617 Assert(fInserted); NOREF(fInserted);
5618
5619        Log(("PATM: patmR3RefreshPatch: successfully refreshed patch at %RRv\n", pInstrGC));
5620 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshSuccess);
5621
5622 /* Used by another patch, so don't remove it! */
5623 pNewPatchRec->patch.flags |= PATMFL_CODE_REFERENCED;
5624
5625 if (pTrampolinePatchesHead)
5626 {
5627 /* Update all trampoline patches to jump to the new patch. */
5628 PTRAMPREC pTrampRec = NULL;
5629 PATMREFRESHPATCH RefreshPatch;
5630
5631 RefreshPatch.pVM = pVM;
5632 RefreshPatch.pPatchRec = &pNewPatchRec->patch;
5633
5634 pTrampRec = pTrampolinePatchesHead;
5635
5636 while (pTrampRec)
5637 {
5638 PPATCHINFO pPatchTrampoline = &pTrampRec->pPatchTrampoline->patch;
5639
5640 RefreshPatch.pPatchTrampoline = pPatchTrampoline;
5641 /*
5642 * We have to find the right patch2guest record because there might be others
5643 * for statistics.
5644 */
5645 rc = RTAvlU32DoWithAll(&pPatchTrampoline->Patch2GuestAddrTree, true,
5646 patmR3PatchRefreshFindTrampolinePatch, &RefreshPatch);
5647 Assert(rc == VERR_ALREADY_EXISTS);
5648 rc = VINF_SUCCESS;
5649 pTrampRec = pTrampRec->pNext;
5650 }
5651 pNewPatchRec->patch.pTrampolinePatchesHead = pTrampolinePatchesHead;
5652 pNewPatchRec->patch.flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
5653 /* Clear the list of trampoline patches for the old patch (safety precaution). */
5654 pPatchRec->patch.pTrampolinePatchesHead = NULL;
5655 }
5656 }
5657
5658failure:
5659 if (RT_FAILURE(rc))
5660 {
5661        LogRel(("PATM: patmR3RefreshPatch: failed to refresh patch at %RRv. Reactivating the old one.\n", pInstrGC));
5662
5663 /* Remove the new inactive patch */
5664 rc = PATMR3RemovePatch(pVM, pInstrGC);
5665 AssertRC(rc);
5666
5667 /* Put the old patch back into the tree (or else it won't be saved) (hack alert) */
5668 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
5669 Assert(fInserted); NOREF(fInserted);
5670
5671 /* Enable again in case the dirty instruction is near the end and there are safe code paths. */
5672 int rc2 = PATMR3EnablePatch(pVM, pInstrGC);
5673 AssertRC(rc2);
5674
5675 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshFailed);
5676 }
5677 return rc;
5678}
5679
5680/**
5681 * Find patch for privileged instruction at specified location
5682 *
5683 * @returns Patch structure pointer if found; else NULL
5684 * @param pVM The VM to operate on.
5685 * @param pInstrGC Guest context pointer to an instruction that might lie within 5 bytes of an existing patch jump
5686 * @param fIncludeHints Include hinted patches or not
5687 *
5688 */
5689PPATCHINFO PATMFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncludeHints)
5690{
5691 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5692 /* if the patch is enabled, the pointer is not identical to the privileged patch ptr and it lies within 5 bytes of this priv instr ptr, then we've got a hit! */
5693 if (pPatchRec)
5694 {
5695 if ( pPatchRec->patch.uState == PATCH_ENABLED
5696 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE)
5697 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5698 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5699 {
5700 Log(("Found active patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5701 return &pPatchRec->patch;
5702 }
5703 else
5704 if ( fIncludeHints
5705 && pPatchRec->patch.uState == PATCH_DISABLED
5706 && (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
5707 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5708 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5709 {
5710 Log(("Found HINT patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5711 return &pPatchRec->patch;
5712 }
5713 }
5714 return NULL;
5715}
5716
5717/**
5718 * Checks whether the GC address is inside a generated patch jump
5719 *
5720 * @returns true -> yes, false -> no
5721 * @param pVM The VM to operate on.
5722 * @param pAddr Guest context address
5723 * @param pPatchAddr Guest context patch address (if true)
5724 */
5725VMMR3DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatchAddr)
5726{
5727 RTRCPTR addr;
5728 PPATCHINFO pPatch;
5729
5730 if (PATMIsEnabled(pVM) == false)
5731 return false;
5732
5733 if (pPatchAddr == NULL)
5734 pPatchAddr = &addr;
5735
5736 *pPatchAddr = 0;
5737
5738 pPatch = PATMFindActivePatchByEntrypoint(pVM, pAddr);
5739 if (pPatch)
5740 *pPatchAddr = pPatch->pPrivInstrGC;
5741
5742 return *pPatchAddr == 0 ? false : true;
5743}
5744
5745/**
5746 * Remove patch for privileged instruction at specified location
5747 *
5748 * @returns VBox status code.
5749 * @param pVM The VM to operate on.
5750 * @param pInstrGC Guest context pointer to the privileged instruction
5751 *
5752 * @note returns failure if patching is not allowed or possible
5753 *
5754 */
5755VMMR3DECL(int) PATMR3RemovePatch(PVM pVM, RTRCPTR pInstrGC)
5756{
5757 PPATMPATCHREC pPatchRec;
5758
5759 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5760 if (pPatchRec)
5761 {
5762 int rc = PATMR3DisablePatch(pVM, pInstrGC);
5763 if (rc == VWRN_PATCH_REMOVED)
5764 return VINF_SUCCESS;
5765
5766 return PATMRemovePatch(pVM, pPatchRec, false);
5767 }
5768 AssertFailed();
5769 return VERR_PATCH_NOT_FOUND;
5770}
5771
5772/**
5773 * Mark patch as dirty
5774 *
5775 * @returns VBox status code.
5776 * @param pVM The VM to operate on.
5777 * @param pPatch Patch record
5778 *
5779 * @note returns failure if patching is not allowed or possible
5780 *
5781 */
5782VMMR3DECL(int) PATMR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch)
5783{
5784 if (pPatch->pPatchBlockOffset)
5785 {
5786 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
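        /* Save the first opcode byte and put an int3 at the patch entry, so any stale jump or
           call into this block traps into PATMR3HandleTrap instead of running outdated code. */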
5787 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5788 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5789 }
5790
5791 STAM_COUNTER_INC(&pVM->patm.s.StatDirty);
5792 /* Put back the replaced instruction. */
5793 int rc = PATMR3DisablePatch(pVM, pPatch->pPrivInstrGC);
5794 if (rc == VWRN_PATCH_REMOVED)
5795 return VINF_SUCCESS;
5796
5797 /* Note: we don't restore patch pages for patches that are not enabled! */
5798 /* Note: be careful when changing this behaviour!! */
5799
5800 /* The patch pages are no longer marked for self-modifying code detection */
5801 if (pPatch->flags & PATMFL_CODE_MONITORED)
5802 {
5803 rc = patmRemovePatchPages(pVM, pPatch);
5804 AssertRCReturn(rc, rc);
5805 }
5806 pPatch->uState = PATCH_DIRTY;
5807
5808 /* Paranoia; make sure this patch is not somewhere in the callchain, so prevent ret instructions from succeeding. */
5809 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
5810
5811 return VINF_SUCCESS;
5812}
5813
5814/**
5815 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5816 *
5817 * @returns VBox status code.
5818 * @param pVM The VM to operate on.
5819 * @param pPatch Patch block structure pointer
5820 * @param pPatchGC GC address in patch block
5821 */
5822RTRCPTR patmPatchGCPtr2GuestGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pPatchGC)
5823{
5824 Assert(pPatch->Patch2GuestAddrTree);
5825 /* Get the closest record from below. */
5826 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->Patch2GuestAddrTree, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5827 if (pPatchToGuestRec)
5828 return pPatchToGuestRec->pOrgInstrGC;
5829
5830 return 0;
5831}
5832
5833/** Converts Guest code GC ptr to Patch code GC ptr (if found)
5834 *
5835 * @returns corresponding GC pointer in patch block
5836 * @param pVM The VM to operate on.
5837 * @param pPatch Current patch block pointer
5838 * @param pInstrGC Guest context pointer to privileged instruction
5839 *
5840 */
5841RTRCPTR patmGuestGCPtrToPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5842{
5843 if (pPatch->Guest2PatchAddrTree)
5844 {
5845 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
5846 if (pGuestToPatchRec)
5847 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
5848 }
5849
5850 return 0;
5851}
5852
5853/** Converts Guest code GC ptr to Patch code GC ptr (or nearest from below if no identical match)
5854 *
5855 * @returns corresponding GC pointer in patch block
5856 * @param pVM The VM to operate on.
5857 * @param pPatch Current patch block pointer
5858 * @param pInstrGC Guest context pointer to privileged instruction
5859 *
5860 */
5861RTRCPTR patmGuestGCPtrToClosestPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5862{
5863 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pInstrGC, false);
5864 if (pGuestToPatchRec)
5865 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
5866
5867 return 0;
5868}
5869
5870/** Converts Guest code GC ptr to Patch code GC ptr (if found)
5871 *
5872 * @returns corresponding GC pointer in patch block
5873 * @param pVM The VM to operate on.
5874 * @param pInstrGC Guest context pointer to privileged instruction
5875 *
5876 */
5877VMMR3DECL(RTRCPTR) PATMR3GuestGCPtrToPatchGCPtr(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC)
5878{
5879 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5880 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && pInstrGC >= pPatchRec->patch.pPrivInstrGC)
5881 return patmGuestGCPtrToPatchGCPtr(pVM, &pPatchRec->patch, pInstrGC);
5882 else
5883 return 0;
5884}
5885
5886/**
5887 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5888 *
5889 * @returns original GC instruction pointer or 0 if not found
5890 * @param pVM The VM to operate on.
5891 * @param pPatchGC GC address in patch block
5892 * @param pEnmState State of the translated address (out)
5893 *
5894 */
5895VMMR3DECL(RTRCPTR) PATMR3PatchToGCPtr(PVM pVM, RTRCPTR pPatchGC, PATMTRANSSTATE *pEnmState)
5896{
5897 PPATMPATCHREC pPatchRec;
5898 void *pvPatchCoreOffset;
5899 RTRCPTR pPrivInstrGC;
5900
5901 Assert(PATMIsPatchGCAddr(pVM, pPatchGC));
5902 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5903 if (pvPatchCoreOffset == 0)
5904 {
5905 Log(("PATMR3PatchToGCPtr failed for %RRv offset %x\n", pPatchGC, pPatchGC - pVM->patm.s.pPatchMemGC));
5906 return 0;
5907 }
5908 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
5909 pPrivInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, &pPatchRec->patch, pPatchGC);
5910 if (pEnmState)
5911 {
5912 AssertMsg(pPrivInstrGC && ( pPatchRec->patch.uState == PATCH_ENABLED
5913 || pPatchRec->patch.uState == PATCH_DIRTY
5914 || pPatchRec->patch.uState == PATCH_DISABLE_PENDING
5915 || pPatchRec->patch.uState == PATCH_UNUSABLE),
5916 ("pPrivInstrGC=%RRv uState=%d\n", pPrivInstrGC, pPatchRec->patch.uState));
5917
5918 if ( !pPrivInstrGC
5919 || pPatchRec->patch.uState == PATCH_UNUSABLE
5920 || pPatchRec->patch.uState == PATCH_REFUSED)
5921 {
5922 pPrivInstrGC = 0;
5923 *pEnmState = PATMTRANS_FAILED;
5924 }
5925 else
5926 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pPrivInstrGC)
5927 {
5928 *pEnmState = PATMTRANS_INHIBITIRQ;
5929 }
5930 else
5931 if ( pPatchRec->patch.uState == PATCH_ENABLED
5932 && !(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE))
5933 && pPrivInstrGC > pPatchRec->patch.pPrivInstrGC
5934 && pPrivInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5935 {
5936 *pEnmState = PATMTRANS_OVERWRITTEN;
5937 }
5938 else
5939 if (PATMFindActivePatchByEntrypoint(pVM, pPrivInstrGC))
5940 {
5941 *pEnmState = PATMTRANS_OVERWRITTEN;
5942 }
5943 else
5944 if (pPrivInstrGC == pPatchRec->patch.pPrivInstrGC)
5945 {
5946 *pEnmState = PATMTRANS_PATCHSTART;
5947 }
5948 else
5949 *pEnmState = PATMTRANS_SAFE;
5950 }
5951 return pPrivInstrGC;
5952}
5953
5954/**
5955 * Returns the GC pointer of the patch for the specified GC address
5956 *
5957 * @returns Patch code GC pointer, or 0 if there is no enabled or dirty patch for the address.
5958 * @param pVM The VM to operate on.
5959 * @param pAddrGC Guest context address
5960 */
5961VMMR3DECL(RTRCPTR) PATMR3QueryPatchGCPtr(PVM pVM, RTRCPTR pAddrGC)
5962{
5963 PPATMPATCHREC pPatchRec;
5964
5965 /* Find the patch record. */
5966 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pAddrGC);
5967 /** @todo we should only use patches that are enabled! always did this, but it's incorrect! */
5968 if (pPatchRec && (pPatchRec->patch.uState == PATCH_ENABLED || pPatchRec->patch.uState == PATCH_DIRTY))
5969 return PATCHCODE_PTR_GC(&pPatchRec->patch);
5970 else
5971 return 0;
5972}
5973
5974/**
5975 * Attempt to recover dirty instructions
5976 *
5977 * @returns VBox status code.
5978 * @param pVM The VM to operate on.
5979 * @param pCtx CPU context
5980 * @param pPatch Patch record
5981 * @param pPatchToGuestRec Patch to guest address record
5982 * @param pEip GC pointer of trapping instruction
5983 */
5984static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch, PRECPATCHTOGUEST pPatchToGuestRec, RTRCPTR pEip)
5985{
5986 DISCPUSTATE CpuOld, CpuNew;
5987 uint8_t *pPatchInstrHC, *pCurPatchInstrHC;
5988 int rc;
5989 RTRCPTR pCurInstrGC, pCurPatchInstrGC;
5990 uint32_t cbDirty;
5991 PRECPATCHTOGUEST pRec;
5992 RTRCPTR const pOrgInstrGC = pPatchToGuestRec->pOrgInstrGC;
5993 PVMCPU pVCpu = VMMGetCpu0(pVM);
5994 Log(("patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv)\n", pEip, pOrgInstrGC));
5995
5996 pRec = pPatchToGuestRec;
5997 pCurInstrGC = pOrgInstrGC;
5998 pCurPatchInstrGC = pEip;
5999 cbDirty = 0;
6000 pPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6001
6002 /* Find all adjacent dirty instructions */
6003 while (true)
6004 {
6005 if (pRec->fJumpTarget)
6006 {
6007 LogRel(("PATM: patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv) ignored, because instruction in function was reused as target of jump\n", pEip, pOrgInstrGC));
6008 pRec->fDirty = false;
6009 return VERR_PATCHING_REFUSED;
6010 }
6011
6012 /* Restore original instruction opcode byte so we can check if the write was indeed safe. */
6013 pCurPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6014 *pCurPatchInstrHC = pRec->u8DirtyOpcode;
6015
6016 /* Only harmless instructions are acceptable. */
6017 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurPatchInstrGC, &CpuOld, 0);
6018 if ( RT_FAILURE(rc)
6019 || !(CpuOld.pCurInstr->optype & DISOPTYPE_HARMLESS))
6020 {
6021 if (RT_SUCCESS(rc))
6022 cbDirty += CpuOld.cbInstr;
6023 else
6024 if (!cbDirty)
6025 cbDirty = 1;
6026 break;
6027 }
6028
6029#ifdef DEBUG
6030 char szBuf[256];
6031 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6032 szBuf, sizeof(szBuf), NULL);
6033 Log(("DIRTY: %s\n", szBuf));
6034#endif
6035 /* Mark as clean; if we fail we'll let it always fault. */
6036 pRec->fDirty = false;
6037
6038 /* Remove old lookup record. */
6039 patmr3RemoveP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrGC);
6040 pPatchToGuestRec = NULL;
6041
6042 pCurPatchInstrGC += CpuOld.cbInstr;
6043 cbDirty += CpuOld.cbInstr;
6044
6045 /* Let's see if there's another dirty instruction right after. */
6046 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6047 if (!pRec || !pRec->fDirty)
6048 break; /* no more dirty instructions */
6049
6050 /* In case of complex instructions the next guest instruction could be quite far off. */
6051 pCurPatchInstrGC = pRec->Core.Key + pVM->patm.s.pPatchMemGC;
6052 }
6053
6054 if ( RT_SUCCESS(rc)
6055 && (CpuOld.pCurInstr->optype & DISOPTYPE_HARMLESS)
6056 )
6057 {
6058 uint32_t cbLeft;
6059
6060 pCurPatchInstrHC = pPatchInstrHC;
6061 pCurPatchInstrGC = pEip;
6062 cbLeft = cbDirty;
6063
6064 while (cbLeft && RT_SUCCESS(rc))
6065 {
6066 bool fValidInstr;
6067
6068 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurInstrGC, &CpuNew, 0);
6069
6070 fValidInstr = !!(CpuNew.pCurInstr->optype & DISOPTYPE_HARMLESS);
6071 if ( !fValidInstr
6072 && (CpuNew.pCurInstr->optype & DISOPTYPE_RELATIVE_CONTROLFLOW)
6073 )
6074 {
6075 RTRCPTR pTargetGC = PATMResolveBranch(&CpuNew, pCurInstrGC);
6076
6077 if ( pTargetGC >= pOrgInstrGC
6078 && pTargetGC <= pOrgInstrGC + cbDirty
6079 )
6080 {
6081 /* A relative jump to an instruction inside or to the end of the dirty block is acceptable. */
6082 fValidInstr = true;
6083 }
6084 }
6085
6086 /* If the instruction is completely harmless (which implies a 1:1 patch copy). */
6087 if ( rc == VINF_SUCCESS
6088 && CpuNew.cbInstr <= cbLeft /* must still fit */
6089 && fValidInstr
6090 )
6091 {
6092#ifdef DEBUG
6093 char szBuf[256];
6094 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6095 szBuf, sizeof(szBuf), NULL);
6096 Log(("NEW: %s\n", szBuf));
6097#endif
6098
6099 /* Copy the new instruction. */
6100 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pCurPatchInstrHC, pCurInstrGC, CpuNew.cbInstr);
6101 AssertRC(rc);
6102
6103 /* Add a new lookup record for the duplicated instruction. */
6104 patmr3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6105 }
6106 else
6107 {
6108#ifdef DEBUG
6109 char szBuf[256];
6110 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6111 szBuf, sizeof(szBuf), NULL);
6112 Log(("NEW: %s (FAILED)\n", szBuf));
6113#endif
6114 /* Restore the old lookup record for the duplicated instruction. */
6115 patmr3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6116
6117 /** @todo in theory we need to restore the lookup records for the remaining dirty instructions too! */
6118 rc = VERR_PATCHING_REFUSED;
6119 break;
6120 }
6121 pCurInstrGC += CpuNew.cbInstr;
6122 pCurPatchInstrHC += CpuNew.cbInstr;
6123 pCurPatchInstrGC += CpuNew.cbInstr;
6124 cbLeft -= CpuNew.cbInstr;
6125
6126 /* Check if we expanded a complex guest instruction into a patch stream (e.g. call) */
6127 if (!cbLeft)
6128 {
6129 /* If the next patch instruction doesn't correspond to the next guest instruction, then we have some extra room to fill. */
6130 if (RTAvlU32Get(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC) == NULL)
6131 {
6132 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6133 if (pRec)
6134 {
6135 unsigned cbFiller = pRec->Core.Key + pVM->patm.s.pPatchMemGC - pCurPatchInstrGC;
6136 uint8_t *pPatchFillHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6137
6138 Assert(!pRec->fDirty);
6139
6140 Log(("Room left in patched instruction stream (%d bytes)\n", cbFiller));
6141 if (cbFiller >= SIZEOF_NEARJUMP32)
6142 {
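                            /* Emit a near jmp (0xE9 + rel32) over the unused gap; the displacement
                               excludes the 5 bytes of the jump itself, so execution continues at
                               the next real patch instruction. */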
6143 pPatchFillHC[0] = 0xE9;
6144 *(uint32_t *)&pPatchFillHC[1] = cbFiller - SIZEOF_NEARJUMP32;
6145#ifdef DEBUG
6146 char szBuf[256];
6147 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6148 szBuf, sizeof(szBuf), NULL);
6149 Log(("FILL: %s\n", szBuf));
6150#endif
6151 }
6152 else
6153 {
6154 for (unsigned i = 0; i < cbFiller; i++)
6155 {
6156 pPatchFillHC[i] = 0x90; /* NOP */
6157#ifdef DEBUG
6158 char szBuf[256];
6159 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC + i,
6160 DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6161 Log(("FILL: %s\n", szBuf));
6162#endif
6163 }
6164 }
6165 }
6166 }
6167 }
6168 }
6169 }
6170 else
6171 rc = VERR_PATCHING_REFUSED;
6172
6173 if (RT_SUCCESS(rc))
6174 {
6175 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyGood);
6176 }
6177 else
6178 {
6179 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyBad);
6180 Assert(cbDirty);
6181
6182 /* Mark the whole instruction stream with breakpoints. */
6183 if (cbDirty)
6184 memset(pPatchInstrHC, 0xCC, cbDirty);
6185
6186 if ( pVM->patm.s.fOutOfMemory == false
6187 && (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER)))
6188 {
6189 rc = patmR3RefreshPatch(pVM, pPatch);
6190 if (RT_FAILURE(rc))
6191 {
6192 LogRel(("PATM: Failed to refresh dirty patch at %RRv. Disabling it.\n", pPatch->patch.pPrivInstrGC));
6193 }
6194 /* Even if we succeed, we must go back to the original instruction as the patched one could be invalid. */
6195 rc = VERR_PATCHING_REFUSED;
6196 }
6197 }
6198 return rc;
6199}
6200
6201/**
6202 * Handle trap inside patch code
6203 *
6204 * @returns VBox status code.
6205 * @param pVM The VM to operate on.
6206 * @param pCtx CPU context
6207 * @param pEip GC pointer of trapping instruction
6208 * @param ppNewEip GC pointer to new instruction
6209 */
6210VMMR3DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *ppNewEip)
6211{
6212 PPATMPATCHREC pPatch = 0;
6213 void *pvPatchCoreOffset;
6214 RTRCUINTPTR offset;
6215 RTRCPTR pNewEip;
6216    int rc;
6217 PRECPATCHTOGUEST pPatchToGuestRec = 0;
6218 PVMCPU pVCpu = VMMGetCpu0(pVM);
6219
6220 Assert(pVM->cCpus == 1);
6221
6222 pNewEip = 0;
6223 *ppNewEip = 0;
6224
6225 STAM_PROFILE_ADV_START(&pVM->patm.s.StatHandleTrap, a);
6226
6227 /* Find the patch record. */
6228 /* Note: there might not be a patch to guest translation record (global function) */
6229 offset = pEip - pVM->patm.s.pPatchMemGC;
6230 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
6231 if (pvPatchCoreOffset)
6232 {
6233 pPatch = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6234
6235 Assert(offset >= pPatch->patch.pPatchBlockOffset && offset < pPatch->patch.pPatchBlockOffset + pPatch->patch.cbPatchBlockSize);
6236
6237 if (pPatch->patch.uState == PATCH_DIRTY)
6238 {
6239 Log(("PATMR3HandleTrap: trap in dirty patch at %RRv\n", pEip));
6240 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6241 {
6242 /* Function duplication patches set fPIF to 1 on entry */
6243 pVM->patm.s.pGCStateHC->fPIF = 1;
6244 }
6245 }
6246 else
6247 if (pPatch->patch.uState == PATCH_DISABLED)
6248 {
6249 Log(("PATMR3HandleTrap: trap in disabled patch at %RRv\n", pEip));
6250 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6251 {
6252 /* Function duplication patches set fPIF to 1 on entry */
6253 pVM->patm.s.pGCStateHC->fPIF = 1;
6254 }
6255 }
6256 else
6257 if (pPatch->patch.uState == PATCH_DISABLE_PENDING)
6258 {
6259 RTRCPTR pPrivInstrGC = pPatch->patch.pPrivInstrGC;
6260
6261 Log(("PATMR3HandleTrap: disable operation is pending for patch at %RRv\n", pPatch->patch.pPrivInstrGC));
6262 rc = PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6263 AssertReleaseMsg(rc != VWRN_PATCH_REMOVED, ("PATMR3DisablePatch removed patch at %RRv\n", pPrivInstrGC));
6264 AssertMsg(pPatch->patch.uState == PATCH_DISABLED || pPatch->patch.uState == PATCH_UNUSABLE, ("Unexpected failure to disable patch state=%d rc=%Rrc\n", pPatch->patch.uState, rc));
6265 }
6266
6267 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, offset, false);
6268 AssertReleaseMsg(pPatchToGuestRec, ("PATMR3HandleTrap: Unable to find corresponding guest address for %RRv (offset %x)\n", pEip, offset));
6269
6270 pNewEip = pPatchToGuestRec->pOrgInstrGC;
6271 pPatch->patch.cTraps++;
6272 PATM_STAT_FAULT_INC(&pPatch->patch);
6273 }
6274 else
6275 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 0, ("PATMR3HandleTrap: Unable to find translation record for %RRv (PIF=0)\n", pEip));
6276
6277 /* Check if we were interrupted in PATM generated instruction code. */
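    /* fPIF is 1 whenever it is safe to interrupt the generated code; a trap while it is 0 is only
       tolerated for pushf/push/call instructions faulting on the guest stack page, anything else is fatal. */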
6278 if (pVM->patm.s.pGCStateHC->fPIF == 0)
6279 {
6280 DISCPUSTATE Cpu;
6281 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pEip, &Cpu, "PIF Trap: ");
6282 AssertRC(rc);
6283
6284 if ( rc == VINF_SUCCESS
6285 && ( Cpu.pCurInstr->opcode == OP_PUSHF
6286 || Cpu.pCurInstr->opcode == OP_PUSH
6287 || Cpu.pCurInstr->opcode == OP_CALL)
6288 )
6289 {
6290 uint64_t fFlags;
6291
6292 STAM_COUNTER_INC(&pVM->patm.s.StatPushTrap);
6293
6294 if (Cpu.pCurInstr->opcode == OP_PUSH)
6295 {
6296 rc = PGMShwGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6297 if ( rc == VINF_SUCCESS
6298 && ((fFlags & (X86_PTE_P|X86_PTE_RW)) == (X86_PTE_P|X86_PTE_RW)) )
6299 {
6300 /* The stack address is fine, so the push argument is a pointer -> emulate this instruction */
6301
6302 /* Reset the PATM stack. */
6303 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6304
6305 pVM->patm.s.pGCStateHC->fPIF = 1;
6306
6307 Log(("Faulting push -> go back to the original instruction\n"));
6308
6309 /* continue at the original instruction */
6310 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6311 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6312 return VINF_SUCCESS;
6313 }
6314 }
6315
6316 /* Typical pushf (most patches)/push (call patch) trap because of a monitored page. */
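            /* Make the shadow page at ESP writable again; returning VINF_PATCH_CONTINUE below lets the patch code retry the write. */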
6317 rc = PGMShwMakePageWritable(pVCpu, pCtx->esp, 0 /*fFlags*/);
6318 AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
6319 if (rc == VINF_SUCCESS)
6320 {
6321 /* The guest page *must* be present. */
6322 rc = PGMGstGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6323 if ( rc == VINF_SUCCESS
6324 && (fFlags & X86_PTE_P))
6325 {
6326 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6327 return VINF_PATCH_CONTINUE;
6328 }
6329 }
6330 }
6331 else
6332 if (pPatch->patch.pPrivInstrGC == pNewEip)
6333 {
6334 /* Invalidated patch or first instruction overwritten.
6335 * We can ignore the fPIF state in this case.
6336 */
6337 /* Reset the PATM stack. */
6338 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6339
6340 Log(("Call to invalidated patch -> go back to the original instruction\n"));
6341
6342 pVM->patm.s.pGCStateHC->fPIF = 1;
6343
6344 /* continue at the original instruction */
6345 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6346 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6347 return VINF_SUCCESS;
6348 }
6349
6350 char szBuf[256];
6351 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pEip, DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6352
6353 /* Very bad. We crashed in emitted code. Probably stack? */
6354 if (pPatch)
6355 {
6356 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6357 ("Crash in patch code %RRv (%RRv) esp=%RX32\nPatch state=%x flags=%RX64 fDirty=%d\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), pPatch->patch.uState, pPatch->patch.flags, pPatchToGuestRec->fDirty, szBuf));
6358 }
6359 else
6360 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6361 ("Crash in patch code %RRv (%RRv) esp=%RX32\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), szBuf));
6362 EMR3FatalError(pVCpu, VERR_INTERNAL_ERROR);
6363 }
6364
6365 /* From here on, we must have a valid patch to guest translation. */
6366 if (pvPatchCoreOffset == 0)
6367 {
6368 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6369 AssertMsgFailed(("PATMR3HandleTrap: patch not found at address %RRv!!\n", pEip));
6370 return VERR_PATCH_NOT_FOUND;
6371 }
6372
6373 /* Take care of dirty/changed instructions. */
6374 if (pPatchToGuestRec->fDirty)
6375 {
6376 Assert(pPatchToGuestRec->Core.Key == offset);
6377 Assert(pVM->patm.s.pGCStateHC->fPIF == 1);
6378
6379 rc = patmR3HandleDirtyInstr(pVM, pCtx, pPatch, pPatchToGuestRec, pEip);
6380 if (RT_SUCCESS(rc))
6381 {
6382 /* Retry the current instruction. */
6383 pNewEip = pEip;
6384 rc = VINF_PATCH_CONTINUE; /* Continue at current patch instruction. */
6385 }
6386 else
6387 {
6388 /* Reset the PATM stack. */
6389 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6390
6391 rc = VINF_SUCCESS; /* Continue at original instruction. */
6392 }
6393
6394 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6395 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6396 return rc;
6397 }
6398
6399#ifdef VBOX_STRICT
6400 if (pPatch->patch.flags & PATMFL_DUPLICATE_FUNCTION)
6401 {
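        /* Diagnostic only: if the trapping instruction turns out to be a 'retn', log the return address the
           PATM stack expected next to the one actually found on the guest stack. */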
6402 DISCPUSTATE cpu;
6403 bool disret;
6404 uint32_t cbInstr;
6405 PATMP2GLOOKUPREC cacheRec;
6406 RT_ZERO(cacheRec);
6407 cacheRec.pPatch = &pPatch->patch;
6408
6409 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
6410 &cpu, &cbInstr);
6411 if (cacheRec.Lock.pvMap)
6412 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6413
6414 if (disret && cpu.pCurInstr->opcode == OP_RETN)
6415 {
6416 RTRCPTR retaddr;
6417 PCPUMCTX pCtx2;
6418
6419 pCtx2 = CPUMQueryGuestCtxPtr(pVCpu);
6420
6421 rc = PGMPhysSimpleReadGCPtr(pVCpu, &retaddr, pCtx2->esp, sizeof(retaddr));
6422 AssertRC(rc);
6423
6424 Log(("Return failed at %RRv (%RRv)\n", pEip, pNewEip));
6425 Log(("Expected return address %RRv found address %RRv Psp=%x\n", pVM->patm.s.pGCStackHC[(pVM->patm.s.pGCStateHC->Psp+PATM_STACK_SIZE)/sizeof(RTRCPTR)], retaddr, pVM->patm.s.pGCStateHC->Psp));
6426 }
6427 }
6428#endif
6429
6430    /* Return the original address, corrected by subtracting the CS base address. */
6431 *ppNewEip = pNewEip - SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), 0);
6432
6433 /* Reset the PATM stack. */
6434 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6435
6436 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pNewEip)
6437 {
6438 /* Must be a faulting instruction after sti; currently only sysexit, hlt or iret */
6439 Log(("PATMR3HandleTrap %RRv -> inhibit irqs set!\n", pEip));
6440#ifdef VBOX_STRICT
6441 DISCPUSTATE cpu;
6442 bool disret;
6443 uint32_t cbInstr;
6444 PATMP2GLOOKUPREC cacheRec;
6445 RT_ZERO(cacheRec);
6446 cacheRec.pPatch = &pPatch->patch;
6447
6448 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_ORGCODE,
6449 &cpu, &cbInstr);
6450 if (cacheRec.Lock.pvMap)
6451 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6452
6453 if (disret && (cpu.pCurInstr->opcode == OP_SYSEXIT || cpu.pCurInstr->opcode == OP_HLT || cpu.pCurInstr->opcode == OP_INT3))
6454 {
6455 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT;
6456 disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
6457 &cpu, &cbInstr);
6458 if (cacheRec.Lock.pvMap)
6459 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6460
6461 Assert(cpu.pCurInstr->opcode == OP_SYSEXIT || cpu.pCurInstr->opcode == OP_HLT || cpu.pCurInstr->opcode == OP_IRET);
6462 }
6463#endif
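        /* Re-arm the interrupt shadow for the instruction we are returning to, mirroring the guest's sti, then clear the pending record. */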
6464 EMSetInhibitInterruptsPC(pVCpu, pNewEip);
6465 pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts = 0;
6466 }
6467
6468 Log2(("pPatchBlockGC %RRv - pEip %RRv corresponding GC address %RRv\n", PATCHCODE_PTR_GC(&pPatch->patch), pEip, pNewEip));
6469#ifdef LOG_ENABLED
6470 CPUMR3DisasmInstr(pVM, pVCpu, pCtx, pNewEip, "PATCHRET: ");
6471#endif
6472 if (pNewEip >= pPatch->patch.pPrivInstrGC && pNewEip < pPatch->patch.pPrivInstrGC + pPatch->patch.cbPatchJump)
6473 {
6474 /* We can't jump back to code that we've overwritten with a 5 byte jump! */
6475 Log(("Disabling patch at location %RRv due to trap too close to the privileged instruction \n", pPatch->patch.pPrivInstrGC));
6476 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6477 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6478 return VERR_PATCH_DISABLED;
6479 }
6480
6481#ifdef PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
6482    /** @todo Compare to the number of successful runs; add an aging algorithm and determine the best time to disable the patch. */
6483 if (pPatch->patch.cTraps > MAX_PATCH_TRAPS)
6484 {
6485 Log(("Disabling patch at location %RRv due to too many traps inside patch code\n", pPatch->patch.pPrivInstrGC));
6486        // We are only wasting time; back out the patch.
6487 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6488 pTrapRec->pNextPatchInstr = 0;
6489 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6490 return VERR_PATCH_DISABLED;
6491 }
6492#endif
6493
6494 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6495 return VINF_SUCCESS;
6496}
6497
6498
6499/**
6500 * Handle a page fault in a monitored page.
6501 *
6502 * @returns VBox status code.
6503 * @param pVM The VM to operate on.
6504 */
6505VMMR3DECL(int) PATMR3HandleMonitoredPage(PVM pVM)
6506{
6507 RTRCPTR addr = pVM->patm.s.pvFaultMonitor;
6508
6509 addr &= PAGE_BASE_GC_MASK;
6510
6511 int rc = PGMHandlerVirtualDeregister(pVM, addr);
6512 AssertRC(rc); NOREF(rc);
6513
6514 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, false);
6515 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) == PAGE_ADDRESS(addr))
6516 {
6517 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6518 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6519 rc = PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6520 if (rc == VWRN_PATCH_REMOVED)
6521 return VINF_SUCCESS;
6522
6523 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6524
6525 if (addr == pPatchRec->patch.pPrivInstrGC)
6526 addr++;
6527 }
6528
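    /* Walk all other patches whose private instruction lives on this page and renew each one by disabling and re-enabling it. */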
6529    for (;;)
6530 {
6531 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, true);
6532
6533 if (!pPatchRec || PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) != PAGE_ADDRESS(addr))
6534 break;
6535
6536 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
6537 {
6538 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6539 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6540 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6541 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6542 }
6543 addr = pPatchRec->patch.pPrivInstrGC + 1;
6544 }
6545
6546 pVM->patm.s.pvFaultMonitor = 0;
6547 return VINF_SUCCESS;
6548}
6549
6550
6551#ifdef VBOX_WITH_STATISTICS
6552
6553static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch)
6554{
6555 if (pPatch->flags & PATMFL_SYSENTER)
6556 {
6557 return "SYSENT";
6558 }
6559 else
6560 if (pPatch->flags & (PATMFL_TRAPHANDLER|PATMFL_INTHANDLER))
6561 {
6562 static char szTrap[16];
6563 uint32_t iGate;
6564
6565 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
6566 if (iGate < 256)
6567 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-%2X" : "TRAP-%2X", iGate);
6568 else
6569 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-??" : "TRAP-??");
6570 return szTrap;
6571 }
6572 else
6573 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
6574 return "DUPFUNC";
6575 else
6576 if (pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL)
6577 return "FUNCCALL";
6578 else
6579 if (pPatch->flags & PATMFL_TRAMPOLINE)
6580 return "TRAMP";
6581 else
6582 return patmGetInstructionString(pPatch->opcode, pPatch->flags);
6583}
6584
6585static const char *PATMPatchState(PVM pVM, PPATCHINFO pPatch)
6586{
6587 NOREF(pVM);
6588    switch (pPatch->uState)
6589 {
6590 case PATCH_ENABLED:
6591 return "ENA";
6592 case PATCH_DISABLED:
6593 return "DIS";
6594 case PATCH_DIRTY:
6595 return "DIR";
6596 case PATCH_UNUSABLE:
6597 return "UNU";
6598 case PATCH_REFUSED:
6599 return "REF";
6600 case PATCH_DISABLE_PENDING:
6601 return "DIP";
6602 default:
6603 AssertFailed();
6604 return " ";
6605 }
6606}
6607
6608/**
6609 * Resets the sample.
6610 * @param pVM The VM handle.
6611 * @param pvSample The sample registered using STAMR3RegisterCallback.
6612 */
6613static void patmResetStat(PVM pVM, void *pvSample)
6614{
6615 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6616 Assert(pPatch);
6617
6618 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A = 0;
6619 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B = 0;
6620}
6621
6622/**
6623 * Prints the sample into the buffer.
6624 *
6625 * @param pVM The VM handle.
6626 * @param pvSample The sample registered using STAMR3RegisterCallback.
6627 * @param pszBuf The buffer to print into.
6628 * @param cchBuf The size of the buffer.
6629 */
6630static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf)
6631{
6632 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6633 Assert(pPatch);
6634
6635 Assert(pPatch->uState != PATCH_REFUSED);
6636 Assert(!(pPatch->flags & (PATMFL_REPLACE_FUNCTION_CALL|PATMFL_MMIO_ACCESS)));
6637
6638 RTStrPrintf(pszBuf, cchBuf, "size %04x ->%3s %8s - %08d - %08d",
6639 pPatch->cbPatchBlockSize, PATMPatchState(pVM, pPatch), PATMPatchType(pVM, pPatch),
6640 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A, pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B);
6641}
6642
6643/**
6644 * Returns the GC address of the corresponding patch statistics counter
6645 *
6646 * @returns The GC address of the statistics counter.
6647 * @param pVM The VM to operate on.
6648 * @param pPatch Patch structure
6649 */
6650RTRCPTR patmPatchQueryStatAddress(PVM pVM, PPATCHINFO pPatch)
6651{
6652 Assert(pPatch->uPatchIdx != PATM_STAT_INDEX_NONE);
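    /* Each patch owns one STAMRATIOU32 entry in the guest context statistics array; u32A is its first field. */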
6653 return pVM->patm.s.pStatsGC + sizeof(STAMRATIOU32) * pPatch->uPatchIdx + RT_OFFSETOF(STAMRATIOU32, u32A);
6654}
6655
6656#endif /* VBOX_WITH_STATISTICS */
6657
6658#ifdef VBOX_WITH_DEBUGGER
6659/**
6660 * The '.patmoff' command.
6661 *
6662 * @returns VBox status code.
6663 * @param pCmd Pointer to the command descriptor (as registered).
6664 * @param pCmdHlp Pointer to command helper functions.
6665 * @param pVM Pointer to the current VM (if any).
6666 * @param paArgs Pointer to (readonly) array of arguments.
6667 * @param cArgs Number of arguments in the array.
6668 */
6669static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
6670{
6671 /*
6672 * Validate input.
6673 */
6674 NOREF(pCmd); NOREF(cArgs); NOREF(paArgs);
6675 if (!pVM)
6676        return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
6677
6678 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, DisableAllPatches, pVM);
6679 PATMR3AllowPatching(pVM, false);
6680 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching disabled\n");
6681}
6682
6683/**
6684 * The '.patmon' command.
6685 *
6686 * @returns VBox status code.
6687 * @param pCmd Pointer to the command descriptor (as registered).
6688 * @param pCmdHlp Pointer to command helper functions.
6689 * @param pVM Pointer to the current VM (if any).
6690 * @param paArgs Pointer to (readonly) array of arguments.
6691 * @param cArgs Number of arguments in the array.
6692 */
6693static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
6694{
6695 /*
6696 * Validate input.
6697 */
6698 NOREF(pCmd); NOREF(cArgs); NOREF(paArgs);
6699 if (!pVM)
6700        return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
6701
6702 PATMR3AllowPatching(pVM, true);
6703 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, EnableAllPatches, pVM);
6704 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching enabled\n");
6705}
6706#endif