VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PATM.cpp@ 41658

Last change on this file since 41658 was 41658, checked in by vboxsync, 12 years ago

DIS,VMM,REM,IPRT: Disassembler API adjustments.

1/* $Id: PATM.cpp 41658 2012-06-11 22:21:44Z vboxsync $ */
2/** @file
3 * PATM - Dynamic Guest OS Patching Manager
4 *
5 * NOTE: Never ever reuse patch memory!!
6 */
7
8/*
9 * Copyright (C) 2006-2007 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_PATM
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/stam.h>
26#include <VBox/vmm/pgm.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/vmm/cpumdis.h>
29#include <VBox/vmm/iom.h>
30#include <VBox/vmm/mm.h>
31#include <VBox/vmm/em.h>
32#include <VBox/vmm/ssm.h>
33#include <VBox/vmm/trpm.h>
34#include <VBox/vmm/cfgm.h>
35#include <VBox/param.h>
36#include <VBox/vmm/selm.h>
37#include <iprt/avl.h>
38#include "PATMInternal.h"
39#include "PATMPatch.h"
40#include <VBox/vmm/vm.h>
41#include <VBox/vmm/csam.h>
42#include <VBox/dbg.h>
43#include <VBox/err.h>
44#include <VBox/log.h>
45#include <iprt/assert.h>
46#include <iprt/asm.h>
47#include <VBox/dis.h>
48#include <VBox/disopcode.h>
49#include "internal/pgm.h"
50
51#include <iprt/string.h>
52#include "PATMA.h"
53
54//#define PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
55//#define PATM_DISABLE_ALL
56
57/**
58 * Refresh trampoline patch state.
59 */
60typedef struct PATMREFRESHPATCH
61{
62 /** Pointer to the VM structure. */
63 PVM pVM;
64 /** The trampoline patch record. */
65 PPATCHINFO pPatchTrampoline;
66 /** The new patch we want to jump to. */
67 PPATCHINFO pPatchRec;
68} PATMREFRESHPATCH, *PPATMREFRESHPATCH;
69
70
71/*******************************************************************************
72* Internal Functions *
73*******************************************************************************/
74
75static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pPatch);
76static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
77static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
78
79#ifdef LOG_ENABLED // keep gcc quiet
80static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC);
81#endif
82#ifdef VBOX_WITH_STATISTICS
83static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch);
84static void patmResetStat(PVM pVM, void *pvSample);
85static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf);
86#endif
87
88#define patmPatchHCPtr2PatchGCPtr(pVM, pHC) (pVM->patm.s.pPatchMemGC + (pHC - pVM->patm.s.pPatchMemHC))
89#define patmPatchGCPtr2PatchHCPtr(pVM, pGC) (pVM->patm.s.pPatchMemHC + (pGC - pVM->patm.s.pPatchMemGC))
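/* Illustration (hypothetical usage, not taken from this file): both macros translate
 * between the host (R3) and guest (RC) views of the single patch memory block by plain
 * offset arithmetic, e.g.:
 *
 *     uint8_t *pbPatchHC  = pVM->patm.s.pPatchMemHC + off;
 *     RTRCPTR  GCPtrPatch = patmPatchHCPtr2PatchGCPtr(pVM, pbPatchHC);
 *     Assert(GCPtrPatch == pVM->patm.s.pPatchMemGC + off);
 */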
90
91static int patmReinit(PVM pVM);
92static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam);
93
94#ifdef VBOX_WITH_DEBUGGER
95static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM);
96static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
97static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
98
99/** Command descriptors. */
100static const DBGCCMD g_aCmds[] =
101{
102 /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, fFlags, pfnHandler, pszSyntax, pszDescription */
103 { "patmon", 0, 0, NULL, 0, 0, patmr3CmdOn, "", "Enable patching." },
104 { "patmoff", 0, 0, NULL, 0, 0, patmr3CmdOff, "", "Disable patching." },
105};
106#endif
107
108/* Don't want to break saved states, so put it here as a global variable. */
109static unsigned int cIDTHandlersDisabled = 0;
110
111/**
112 * Initializes the PATM.
113 *
114 * @returns VBox status code.
115 * @param pVM The VM to operate on.
116 */
117VMMR3DECL(int) PATMR3Init(PVM pVM)
118{
119 int rc;
120
121 Log(("PATMR3Init: Patch record size %d\n", sizeof(PATCHINFO)));
122
123 /* These values can't change as they are hardcoded in patch code (old saved states!) */
124 AssertCompile(VMCPU_FF_TIMER == RT_BIT_32(2));
125 AssertCompile(VM_FF_REQUEST == VMCPU_FF_REQUEST);
126 AssertCompile(VMCPU_FF_INTERRUPT_APIC == RT_BIT_32(0));
127 AssertCompile(VMCPU_FF_INTERRUPT_PIC == RT_BIT_32(1));
128
129 AssertReleaseMsg(PATMInterruptFlag == (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST),
130 ("Interrupt flags out of sync!! PATMInterruptFlag=%#x expected %#x. broken assembler?\n", PATMInterruptFlag, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST));
131
132 /* Allocate patch memory and GC patch state memory. */
133 pVM->patm.s.cbPatchMem = PATCH_MEMORY_SIZE;
134 /* Add another page in case the generated code is much larger than expected. */
135 /** @todo bad safety precaution */
136 rc = MMR3HyperAllocOnceNoRel(pVM, PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE + PATM_STAT_MEMSIZE, PAGE_SIZE, MM_TAG_PATM, (void **)&pVM->patm.s.pPatchMemHC);
137 if (RT_FAILURE(rc))
138 {
139 Log(("MMHyperAlloc failed with %Rrc\n", rc));
140 return rc;
141 }
142 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
143
144 /* PATM stack page for call instruction execution. (2 parts: one for our private stack and one to store the original return address.) */
145 pVM->patm.s.pGCStackHC = (RTRCPTR *)(pVM->patm.s.pPatchMemHC + PATCH_MEMORY_SIZE + PAGE_SIZE);
146 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
147
148 /*
149 * Hypervisor memory for GC status data (read/write)
150 *
151 * Note1: This is non-critical data; if trashed by the guest, then it will only cause problems for itself
152 * Note2: This doesn't really belong here, but we need access to it for relocation purposes
153 *
154 */
155 Assert(sizeof(PATMGCSTATE) < PAGE_SIZE); /* Note: hardcoded dependencies on this exist. */
156 pVM->patm.s.pGCStateHC = (PPATMGCSTATE)((uint8_t *)pVM->patm.s.pGCStackHC + PATM_STACK_TOTAL_SIZE);
157 pVM->patm.s.pGCStateGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
158
159 /* Hypervisor memory for patch statistics */
160 pVM->patm.s.pStatsHC = (PSTAMRATIOU32)((uint8_t *)pVM->patm.s.pGCStateHC + PAGE_SIZE);
161 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
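 /* At this point the hyper allocation made above is carved up as follows:
  * patch code (PATCH_MEMORY_SIZE) + one spare page, the PATM stack
  * (PATM_STACK_TOTAL_SIZE), the guest visible state page (PATMGCSTATE) and
  * finally the statistics area (PATM_STAT_MEMSIZE). */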
162
163 /* Memory for patch lookup trees. */
164 rc = MMHyperAlloc(pVM, sizeof(*pVM->patm.s.PatchLookupTreeHC), 0, MM_TAG_PATM, (void **)&pVM->patm.s.PatchLookupTreeHC);
165 AssertRCReturn(rc, rc);
166 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
167
168#ifdef RT_ARCH_AMD64 /* see patmReinit(). */
169 /* Check CFGM option. */
170 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "PATMEnabled", &pVM->fPATMEnabled);
171 if (RT_FAILURE(rc))
172# ifdef PATM_DISABLE_ALL
173 pVM->fPATMEnabled = false;
174# else
175 pVM->fPATMEnabled = true;
176# endif
177#endif
178
179 rc = patmReinit(pVM);
180 AssertRC(rc);
181 if (RT_FAILURE(rc))
182 return rc;
183
184 /*
185 * Register save and load state notifiers.
186 */
187 rc = SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SSM_VERSION, sizeof(pVM->patm.s) + PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE,
188 NULL, NULL, NULL,
189 NULL, patmR3Save, NULL,
190 NULL, patmR3Load, NULL);
191 AssertRCReturn(rc, rc);
192
193#ifdef VBOX_WITH_DEBUGGER
194 /*
195 * Debugger commands.
196 */
197 static bool s_fRegisteredCmds = false;
198 if (!s_fRegisteredCmds)
199 {
200 int rc2 = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
201 if (RT_SUCCESS(rc2))
202 s_fRegisteredCmds = true;
203 }
204#endif
205
206#ifdef VBOX_WITH_STATISTICS
207 STAM_REG(pVM, &pVM->patm.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/PATM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
208 STAM_REG(pVM, &pVM->patm.s.StatPATMMemoryUsed,STAMTYPE_COUNTER, "/PATM/MemoryUsed", STAMUNIT_OCCURENCES, "The amount of hypervisor heap used for patches.");
209 STAM_REG(pVM, &pVM->patm.s.StatDisabled, STAMTYPE_COUNTER, "/PATM/Patch/Disabled", STAMUNIT_OCCURENCES, "Number of times patches were disabled.");
210 STAM_REG(pVM, &pVM->patm.s.StatEnabled, STAMTYPE_COUNTER, "/PATM/Patch/Enabled", STAMUNIT_OCCURENCES, "Number of times patches were enabled.");
211 STAM_REG(pVM, &pVM->patm.s.StatDirty, STAMTYPE_COUNTER, "/PATM/Patch/Dirty", STAMUNIT_OCCURENCES, "Number of times patches were marked dirty.");
212 STAM_REG(pVM, &pVM->patm.s.StatUnusable, STAMTYPE_COUNTER, "/PATM/Patch/Unusable", STAMUNIT_OCCURENCES, "Number of unusable patches (conflicts).");
213 STAM_REG(pVM, &pVM->patm.s.StatInstalled, STAMTYPE_COUNTER, "/PATM/Patch/Installed", STAMUNIT_OCCURENCES, "Number of installed patches.");
214 STAM_REG(pVM, &pVM->patm.s.StatInt3Callable, STAMTYPE_COUNTER, "/PATM/Patch/Int3Callable", STAMUNIT_OCCURENCES, "Number of cli patches turned into int3 patches.");
215
216 STAM_REG(pVM, &pVM->patm.s.StatInt3BlockRun, STAMTYPE_COUNTER, "/PATM/Patch/Run/Int3", STAMUNIT_OCCURENCES, "Number of times an int3 block patch was executed.");
217 STAMR3RegisterF(pVM, &pVM->patm.s.pGCStateHC->uPatchCalls, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Patch/Run/Normal");
218
219 STAM_REG(pVM, &pVM->patm.s.StatInstalledFunctionPatches, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Function", STAMUNIT_OCCURENCES, "Number of installed function duplication patches.");
220 STAM_REG(pVM, &pVM->patm.s.StatInstalledTrampoline, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Trampoline", STAMUNIT_OCCURENCES, "Number of installed trampoline patches.");
221 STAM_REG(pVM, &pVM->patm.s.StatInstalledJump, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Jump", STAMUNIT_OCCURENCES, "Number of installed jump patches.");
222
223 STAM_REG(pVM, &pVM->patm.s.StatOverwritten, STAMTYPE_COUNTER, "/PATM/Patch/Overwritten", STAMUNIT_OCCURENCES, "Number of overwritten patches.");
224 STAM_REG(pVM, &pVM->patm.s.StatFixedConflicts,STAMTYPE_COUNTER, "/PATM/Patch/ConflictFixed", STAMUNIT_OCCURENCES, "Number of fixed conflicts.");
225 STAM_REG(pVM, &pVM->patm.s.StatFlushed, STAMTYPE_COUNTER, "/PATM/Patch/Flushed", STAMUNIT_OCCURENCES, "Number of flushes of pages with patch jumps.");
226 STAM_REG(pVM, &pVM->patm.s.StatMonitored, STAMTYPE_COUNTER, "/PATM/Patch/Monitored", STAMUNIT_OCCURENCES, "Number of patches in monitored patch pages.");
227 STAM_REG(pVM, &pVM->patm.s.StatPageBoundaryCrossed, STAMTYPE_COUNTER, "/PATM/Patch/BoundaryCross", STAMUNIT_OCCURENCES, "Number of refused patches due to patch jump crossing page boundary.");
228
229 STAM_REG(pVM, &pVM->patm.s.StatHandleTrap, STAMTYPE_PROFILE, "/PATM/HandleTrap", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3HandleTrap");
230 STAM_REG(pVM, &pVM->patm.s.StatPushTrap, STAMTYPE_COUNTER, "/PATM/HandleTrap/PushWP", STAMUNIT_OCCURENCES, "Number of traps due to monitored stack pages.");
231
232 STAM_REG(pVM, &pVM->patm.s.StatSwitchBack, STAMTYPE_COUNTER, "/PATM/SwitchBack", STAMUNIT_OCCURENCES, "Switch back to original guest code when IF=1 & executing PATM instructions");
233 STAM_REG(pVM, &pVM->patm.s.StatSwitchBackFail,STAMTYPE_COUNTER, "/PATM/SwitchBackFail", STAMUNIT_OCCURENCES, "Failed switch back to original guest code when IF=1 & executing PATM instructions");
234
235 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQFailed, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Failed", STAMUNIT_OCCURENCES, "Nr of failed PATMR3DuplicateFunctionRequest calls");
236 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQSuccess, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Success", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls");
237 STAM_REG(pVM, &pVM->patm.s.StatDuplicateUseExisting,STAMTYPE_COUNTER, "/PATM/Function/DupREQ/UseExist", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls when using an existing patch");
238
239 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupInsert, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Insert", STAMUNIT_OCCURENCES, "Nr of successful function address insertions");
240 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupReplace, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Replace", STAMUNIT_OCCURENCES, "Nr of successful function address replacements");
241 STAM_REG(pVM, &pVM->patm.s.StatU32FunctionMaxSlotsUsed, STAMTYPE_U32_RESET,"/PATM/Function/Lookup/MaxSlots", STAMUNIT_OCCURENCES, "Maximum nr of lookup slots used in all call patches");
242
243 STAM_REG(pVM, &pVM->patm.s.StatFunctionFound, STAMTYPE_COUNTER, "/PATM/Function/Found", STAMUNIT_OCCURENCES, "Nr of successful function patch lookups in GC");
244 STAM_REG(pVM, &pVM->patm.s.StatFunctionNotFound, STAMTYPE_COUNTER, "/PATM/Function/NotFound", STAMUNIT_OCCURENCES, "Nr of failed function patch lookups in GC");
245
246 STAM_REG(pVM, &pVM->patm.s.StatPatchWrite, STAMTYPE_PROFILE, "/PATM/Write/Handle", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3PatchWrite");
247 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteDetect, STAMTYPE_PROFILE, "/PATM/Write/Detect", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMIsWriteToPatchPage");
248 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpreted, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Success", STAMUNIT_OCCURENCES, "Nr of interpreted patch writes.");
249 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpretedFailed, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Failed", STAMUNIT_OCCURENCES, "Nr of failed interpreted patch writes.");
250
251 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshSuccess, STAMTYPE_COUNTER, "/PATM/Refresh/Success", STAMUNIT_OCCURENCES, "Successful patch refreshes");
252 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshFailed, STAMTYPE_COUNTER, "/PATM/Refresh/Failure", STAMUNIT_OCCURENCES, "Failed patch refreshes");
253
254 STAM_REG(pVM, &pVM->patm.s.StatPatchPageInserted, STAMTYPE_COUNTER, "/PATM/Page/Inserted", STAMUNIT_OCCURENCES, "Nr of inserted guest pages that were patched");
255 STAM_REG(pVM, &pVM->patm.s.StatPatchPageRemoved, STAMTYPE_COUNTER, "/PATM/Page/Removed", STAMUNIT_OCCURENCES, "Nr of removed guest pages that were patched");
256
257 STAM_REG(pVM, &pVM->patm.s.StatInstrDirty, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Detected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty.");
258 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyGood, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Corrected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and corrected later on.");
259 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyBad, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Failed", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and we were not able to correct them.");
260
261 STAM_REG(pVM, &pVM->patm.s.StatSysEnter, STAMTYPE_COUNTER, "/PATM/Emul/SysEnter", STAMUNIT_OCCURENCES, "Number of times sysenter was emulated.");
262 STAM_REG(pVM, &pVM->patm.s.StatSysExit, STAMTYPE_COUNTER, "/PATM/Emul/SysExit" , STAMUNIT_OCCURENCES, "Number of times sysexit was emulated.");
263 STAM_REG(pVM, &pVM->patm.s.StatEmulIret, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Success", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
264 STAM_REG(pVM, &pVM->patm.s.StatEmulIretFailed, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Failed", STAMUNIT_OCCURENCES, "Number of times iret emulation failed.");
265
266 STAM_REG(pVM, &pVM->patm.s.StatGenRet, STAMTYPE_COUNTER, "/PATM/Gen/Ret" , STAMUNIT_OCCURENCES, "Number of generated ret instructions.");
267 STAM_REG(pVM, &pVM->patm.s.StatGenRetReused, STAMTYPE_COUNTER, "/PATM/Gen/RetReused" , STAMUNIT_OCCURENCES, "Number of reused ret instructions.");
268 STAM_REG(pVM, &pVM->patm.s.StatGenCall, STAMTYPE_COUNTER, "/PATM/Gen/Call", STAMUNIT_OCCURENCES, "Number of generated call instructions.");
269 STAM_REG(pVM, &pVM->patm.s.StatGenJump, STAMTYPE_COUNTER, "/PATM/Gen/Jmp" , STAMUNIT_OCCURENCES, "Number of generated indirect jump instructions.");
270 STAM_REG(pVM, &pVM->patm.s.StatGenPopf, STAMTYPE_COUNTER, "/PATM/Gen/Popf" , STAMUNIT_OCCURENCES, "Number of generated popf instructions.");
271
272 STAM_REG(pVM, &pVM->patm.s.StatCheckPendingIRQ, STAMTYPE_COUNTER, "/PATM/GC/CheckIRQ" , STAMUNIT_OCCURENCES, "Number of traps that ask to check for pending irqs.");
273#endif /* VBOX_WITH_STATISTICS */
274
275 Log(("PATMCallRecord.size %d\n", PATMCallRecord.size));
276 Log(("PATMCallIndirectRecord.size %d\n", PATMCallIndirectRecord.size));
277 Log(("PATMRetRecord.size %d\n", PATMRetRecord.size));
278 Log(("PATMJumpIndirectRecord.size %d\n", PATMJumpIndirectRecord.size));
279 Log(("PATMPopf32Record.size %d\n", PATMPopf32Record.size));
280 Log(("PATMIretRecord.size %d\n", PATMIretRecord.size));
281 Log(("PATMStiRecord.size %d\n", PATMStiRecord.size));
282 Log(("PATMCheckIFRecord.size %d\n", PATMCheckIFRecord.size));
283
284 return rc;
285}
286
287/**
288 * Finalizes HMA page attributes.
289 *
290 * @returns VBox status code.
291 * @param pVM The VM handle.
292 */
293VMMR3DECL(int) PATMR3InitFinalize(PVM pVM)
294{
295 /* The GC state, stack and statistics must be read/write for the guest (supervisor only of course). */
296 int rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStateGC, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
297 if (RT_FAILURE(rc))
298 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
299
300 rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStackGC, PATM_STACK_TOTAL_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
301 if (RT_FAILURE(rc))
302 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
303
304 rc = PGMMapSetPage(pVM, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
305 if (RT_FAILURE(rc))
306 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
307
308 return rc;
309}
310
311/**
312 * (Re)initializes PATM
313 *
314 * @param pVM The VM.
315 */
316static int patmReinit(PVM pVM)
317{
318 int rc;
319
320 /*
321 * Assert alignment and sizes.
322 */
323 AssertRelease(!(RT_OFFSETOF(VM, patm.s) & 31));
324 AssertRelease(sizeof(pVM->patm.s) <= sizeof(pVM->patm.padding));
325
326 /*
327 * Setup any fixed pointers and offsets.
328 */
329 pVM->patm.s.offVM = RT_OFFSETOF(VM, patm);
330
331#ifndef RT_ARCH_AMD64 /* would be nice if this was changed everywhere. was driving me crazy on AMD64. */
332#ifndef PATM_DISABLE_ALL
333 pVM->fPATMEnabled = true;
334#endif
335#endif
336
337 Assert(pVM->patm.s.pGCStateHC);
338 memset(pVM->patm.s.pGCStateHC, 0, PAGE_SIZE);
339 AssertReleaseMsg(pVM->patm.s.pGCStateGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStateGC));
340
341 Log(("Patch memory allocated at %p - %RRv\n", pVM->patm.s.pPatchMemHC, pVM->patm.s.pPatchMemGC));
342 pVM->patm.s.pGCStateHC->uVMFlags = X86_EFL_IF;
343
344 Assert(pVM->patm.s.pGCStackHC);
345 memset(pVM->patm.s.pGCStackHC, 0, PAGE_SIZE);
346 AssertReleaseMsg(pVM->patm.s.pGCStackGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStackGC));
347 pVM->patm.s.pGCStateHC->Psp = PATM_STACK_SIZE;
348 pVM->patm.s.pGCStateHC->fPIF = 1; /* PATM Interrupt Flag */
349
350 Assert(pVM->patm.s.pStatsHC);
351 memset(pVM->patm.s.pStatsHC, 0, PATM_STAT_MEMSIZE);
352 AssertReleaseMsg(pVM->patm.s.pStatsGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pStatsGC));
353
354 Assert(pVM->patm.s.pPatchMemHC);
355 Assert(pVM->patm.s.pPatchMemGC == MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC));
356 memset(pVM->patm.s.pPatchMemHC, 0, PATCH_MEMORY_SIZE);
357 AssertReleaseMsg(pVM->patm.s.pPatchMemGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pPatchMemHC));
358
359 /* Needed for future patching of sldt/sgdt/sidt/str etc. */
360 pVM->patm.s.pCPUMCtxGC = VM_RC_ADDR(pVM, CPUMQueryGuestCtxPtr(VMMGetCpu(pVM)));
361
362 Assert(pVM->patm.s.PatchLookupTreeHC);
363 Assert(pVM->patm.s.PatchLookupTreeGC == MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC));
364
365 /*
366 * (Re)Initialize PATM structure
367 */
368 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
369 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr);
370 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
371 pVM->patm.s.offPatchMem = 16; /* don't start with zero here */
372 pVM->patm.s.uCurrentPatchIdx = 1; /* Index zero is a dummy */
373 pVM->patm.s.pvFaultMonitor = 0;
374 pVM->patm.s.deltaReloc = 0;
375
376 /* Lowest and highest patched instruction */
377 pVM->patm.s.pPatchedInstrGCLowest = ~0;
378 pVM->patm.s.pPatchedInstrGCHighest = 0;
379
380 pVM->patm.s.PatchLookupTreeHC->PatchTree = 0;
381 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
382 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
383
384 pVM->patm.s.pfnSysEnterPatchGC = 0;
385 pVM->patm.s.pfnSysEnterGC = 0;
386
387 pVM->patm.s.fOutOfMemory = false;
388
389 pVM->patm.s.pfnHelperCallGC = 0;
390
391 /* Generate all global functions to be used by future patches. */
392 /* We generate a fake patch in order to use the existing code for relocation. */
393 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pVM->patm.s.pGlobalPatchRec);
394 if (RT_FAILURE(rc))
395 {
396 Log(("Out of memory!!!!\n"));
397 return VERR_NO_MEMORY;
398 }
399 pVM->patm.s.pGlobalPatchRec->patch.flags = PATMFL_GLOBAL_FUNCTIONS;
400 pVM->patm.s.pGlobalPatchRec->patch.uState = PATCH_ENABLED;
401 pVM->patm.s.pGlobalPatchRec->patch.pPatchBlockOffset = pVM->patm.s.offPatchMem;
402
403 rc = patmPatchGenGlobalFunctions(pVM, &pVM->patm.s.pGlobalPatchRec->patch);
404 AssertRC(rc);
405
406 /* Update free pointer in patch memory. */
407 pVM->patm.s.offPatchMem += pVM->patm.s.pGlobalPatchRec->patch.uCurPatchOffset;
408 /* Round to next 8 byte boundary. */
409 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
410 return rc;
411}
412
413
414/**
415 * Applies relocations to data and code managed by this
416 * component. This function will be called at init and
417 * whenever the VMM needs to relocate itself inside the GC.
418 *
419 * The PATM will update the addresses used by the switcher.
420 *
421 * @param pVM The VM.
422 */
423VMMR3DECL(void) PATMR3Relocate(PVM pVM)
424{
425 RTRCPTR GCPtrNew = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
426 RTRCINTPTR delta = GCPtrNew - pVM->patm.s.pGCStateGC;
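 /* The hypervisor area moves as one block, so this one delta applies to every RC
  * pointer PATM hands out; it is cached in deltaReloc and added to all stored RC
  * pointers and fixups below. */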
427
428 Log(("PATMR3Relocate from %RRv to %RRv - delta %08X\n", pVM->patm.s.pGCStateGC, GCPtrNew, delta));
429 if (delta)
430 {
431 PCPUMCTX pCtx;
432
433 /* Update CPUMCTX guest context pointer. */
434 pVM->patm.s.pCPUMCtxGC += delta;
435
436 pVM->patm.s.deltaReloc = delta;
437
438 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, RelocatePatches, (void *)pVM);
439
440 pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));
441
442 /* If we are running patch code right now, then also adjust EIP. */
443 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
444 pCtx->eip += delta;
445
446 pVM->patm.s.pGCStateGC = GCPtrNew;
447 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
448
449 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
450
451 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
452
453 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
454
455 if (pVM->patm.s.pfnSysEnterPatchGC)
456 pVM->patm.s.pfnSysEnterPatchGC += delta;
457
458 /* Deal with the global patch functions. */
459 pVM->patm.s.pfnHelperCallGC += delta;
460 pVM->patm.s.pfnHelperRetGC += delta;
461 pVM->patm.s.pfnHelperIretGC += delta;
462 pVM->patm.s.pfnHelperJumpGC += delta;
463
464 RelocatePatches(&pVM->patm.s.pGlobalPatchRec->Core, (void *)pVM);
465 }
466}
467
468
469/**
470 * Terminates the PATM.
471 *
472 * Termination means cleaning up and freeing all resources;
473 * the VM itself is at this point powered off or suspended.
474 *
475 * @returns VBox status code.
476 * @param pVM The VM to operate on.
477 */
478VMMR3DECL(int) PATMR3Term(PVM pVM)
479{
480 /* Memory was all allocated from the two MM heaps and requires no freeing. */
481 NOREF(pVM);
482 return VINF_SUCCESS;
483}
484
485
486/**
487 * PATM reset callback.
488 *
489 * @returns VBox status code.
490 * @param pVM The VM which is reset.
491 */
492VMMR3DECL(int) PATMR3Reset(PVM pVM)
493{
494 Log(("PATMR3Reset\n"));
495
496 /* Free all patches. */
497 while (true)
498 {
499 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32RemoveBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, 0, true);
500 if (pPatchRec)
501 {
502 PATMRemovePatch(pVM, pPatchRec, true);
503 }
504 else
505 break;
506 }
507 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
508 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
509 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
510 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
511
512 int rc = patmReinit(pVM);
513 if (RT_SUCCESS(rc))
514 rc = PATMR3InitFinalize(pVM); /* paranoia */
515
516 return rc;
517}
518
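/**
 * Disassembler callback for reading instruction bytes.
 *
 * Depending on the PATMREAD_* flags and the address, this returns the original
 * (pre-patch) opcode bytes via PATMR3QueryOpcode, reads straight from guest memory,
 * or copies from the cached HC mapping of the instruction page.
 */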
519DECLCALLBACK(int) patmReadBytes(PDISCPUSTATE pDisState, uint8_t *pbDst, RTUINTPTR uSrcAddr, uint32_t cbToRead)
520{
521 PATMDISASM *pDisInfo = (PATMDISASM *)pDisState->apvUserData[0];
522 int orgsize = cbToRead;
523
524 Assert(cbToRead);
525 if (cbToRead == 0)
526 return VERR_INVALID_PARAMETER;
527
528 /*
529 * Trap/interrupt handlers typically call common code on entry, which might already have patches inserted.
530 * As we currently don't support calling patch code from patch code, we'll let it read the original opcode bytes instead.
531 */
532 /** @todo could change in the future! */
533 if (pDisInfo->fReadFlags & PATMREAD_ORGCODE)
534 {
535 for (int i = 0; i < orgsize; i++)
536 {
537 int rc = PATMR3QueryOpcode(pDisInfo->pVM, (RTRCPTR)uSrcAddr, pbDst);
538 if (RT_FAILURE(rc))
539 break;
540 uSrcAddr++;
541 pbDst++;
542 cbToRead--;
543 }
544 if (cbToRead == 0)
545 return VINF_SUCCESS;
546#ifdef VBOX_STRICT
547 if ( !(pDisInfo->pPatchInfo->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER))
548 && !(pDisInfo->fReadFlags & PATMREAD_NOCHECK))
549 {
550 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, uSrcAddr, NULL) == false);
551 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, uSrcAddr+cbToRead-1, NULL) == false);
552 }
553#endif
554 }
555
556 if ( !pDisInfo->pInstrHC
557 || ( PAGE_ADDRESS(pDisInfo->pInstrGC) != PAGE_ADDRESS(uSrcAddr + cbToRead - 1)
558 && !PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr)))
559 {
560 Assert(!PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr));
561 return PGMPhysSimpleReadGCPtr(&pDisInfo->pVM->aCpus[0], pbDst, uSrcAddr, cbToRead);
562 }
563
564 Assert(pDisInfo->pInstrHC);
565
566 uint8_t *pInstrHC = pDisInfo->pInstrHC;
567
568 Assert(pInstrHC);
569
570 /* pInstrHC is the base address; adjust according to the GC pointer. */
571 pInstrHC = pInstrHC + (uSrcAddr - pDisInfo->pInstrGC);
572
573 memcpy(pbDst, (void *)pInstrHC, cbToRead);
574
575 return VINF_SUCCESS;
576}
577
578/**
579 * Callback function for RTAvloU32DoWithAll
580 *
581 * Updates all fixups in the patches
582 *
583 * @returns VBox status code.
584 * @param pNode Current node
585 * @param pParam The VM to operate on.
586 */
587static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam)
588{
589 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
590 PVM pVM = (PVM)pParam;
591 RTRCINTPTR delta;
592#ifdef LOG_ENABLED
593 DISCPUSTATE cpu;
594 char szOutput[256];
595 uint32_t opsize;
596 bool disret;
597#endif
598 int rc;
599
600 /* Nothing to do if the patch is not active. */
601 if (pPatch->patch.uState == PATCH_REFUSED)
602 return 0;
603
604#ifdef LOG_ENABLED
605 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
606 {
607 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
608 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pPatch->patch.pPrivInstrGC, NULL, &opsize, szOutput, PATMREAD_RAWCODE);
609 Log(("Org patch jump: %s", szOutput));
610 }
611#endif
612
613 Log(("Nr of fixups %d\n", pPatch->patch.nrFixups));
614 delta = (RTRCINTPTR)pVM->patm.s.deltaReloc;
615
616 /*
617 * Apply fixups
618 */
619 PRELOCREC pRec = 0;
620 AVLPVKEY key = 0;
621
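 /* The loop below walks the per-patch fixup tree (keyed by relocation position) and
  * handles three kinds of records: FIXUP_ABSOLUTE (absolute RC addresses stored in
  * patch or guest code), FIXUP_REL_JMPTOPATCH (the relative jump written into guest
  * code to enter the patch) and FIXUP_REL_JMPTOGUEST (relative jumps from patch code
  * back to guest code). */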
622 while (true)
623 {
624 /* Get the record that's closest from above */
625 pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true);
626 if (pRec == 0)
627 break;
628
629 key = (AVLPVKEY)(pRec->pRelocPos + 1); /* search for the next record during the next round. */
630
631 switch (pRec->uType)
632 {
633 case FIXUP_ABSOLUTE:
634 Log(("Absolute fixup at %RRv %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
635 if ( !pRec->pSource
636 || PATMIsPatchGCAddr(pVM, pRec->pSource))
637 {
638 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
639 }
640 else
641 {
642 uint8_t curInstr[15];
643 uint8_t oldInstr[15];
644 Assert(pRec->pSource && pPatch->patch.cbPrivInstr <= 15);
645
646 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
647
648 memcpy(oldInstr, pPatch->patch.aPrivInstr, pPatch->patch.cbPrivInstr);
649 *(RTRCPTR *)&oldInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
650
651 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), curInstr, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPrivInstr);
652 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
653
654 pRec->pDest = (RTRCPTR)((RTRCUINTPTR)pRec->pDest + delta);
655
656 if ( rc == VERR_PAGE_NOT_PRESENT
657 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
658 {
659 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
660
661 Log(("PATM: Patch page not present -> check later!\n"));
662 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
663 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
664 }
665 else
666 if (memcmp(curInstr, oldInstr, pPatch->patch.cbPrivInstr))
667 {
668 Log(("PATM: Patch was overwritten -> disabling patch!!\n"));
669 /*
670 * Disable patch; this is not a good solution
671 */
672 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
673 pPatch->patch.uState = PATCH_DISABLED;
674 }
675 else
676 if (RT_SUCCESS(rc))
677 {
678 *(RTRCPTR *)&curInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
679 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pRec->pSource, curInstr, pPatch->patch.cbPrivInstr);
680 AssertRC(rc);
681 }
682 }
683 break;
684
685 case FIXUP_REL_JMPTOPATCH:
686 {
687 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
688
689 if ( pPatch->patch.uState == PATCH_ENABLED
690 && (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE))
691 {
692 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
693 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
694 RTRCPTR pJumpOffGC;
695 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
696 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
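 /* displ is the new rel32 operand for the guest side jump; as asserted below,
  * pRec->pSource points just past the patched jump (pPrivInstrGC + cbPatchJump),
  * which is what a relative jump's displacement is measured from. */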
697
698#if 0 /** @todo '*(int32_t*)pRec->pRelocPos' crashes on restore of an XP VM here. pRelocPos=0x8000dbe2180a (bird) */
699 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
700#else
701 Log(("Relative fixup (g2p) ???????? -> %08X at %08X (source=%08x, target=%08x)\n", displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
702#endif
703
704 Assert(pRec->pSource - pPatch->patch.cbPatchJump == pPatch->patch.pPrivInstrGC);
705#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
706 if (pPatch->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
707 {
708 Assert(pPatch->patch.flags & PATMFL_JUMP_CONFLICT);
709
710 pJumpOffGC = pPatch->patch.pPrivInstrGC + 2; //two byte opcode
711 oldJump[0] = pPatch->patch.aPrivInstr[0];
712 oldJump[1] = pPatch->patch.aPrivInstr[1];
713 *(RTRCUINTPTR *)&oldJump[2] = displOld;
714 }
715 else
716#endif
717 if (pPatch->patch.cbPatchJump == SIZEOF_NEARJUMP32)
718 {
719 pJumpOffGC = pPatch->patch.pPrivInstrGC + 1; //one byte opcode
720 oldJump[0] = 0xE9;
721 *(RTRCUINTPTR *)&oldJump[1] = displOld;
722 }
723 else
724 {
725 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->patch.cbPatchJump));
726 continue; //this should never happen!!
727 }
728 Assert(pPatch->patch.cbPatchJump <= sizeof(temp));
729
730 /*
731 * Read old patch jump and compare it to the one we previously installed
732 */
733 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPatchJump);
734 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
735
736 if ( rc == VERR_PAGE_NOT_PRESENT
737 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
738 {
739 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
740
741 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
742 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
743 }
744 else
745 if (memcmp(temp, oldJump, pPatch->patch.cbPatchJump))
746 {
747 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
748 /*
749 * Disable patch; this is not a good solution
750 */
751 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
752 pPatch->patch.uState = PATCH_DISABLED;
753 }
754 else
755 if (RT_SUCCESS(rc))
756 {
757 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
758 AssertRC(rc);
759 }
760 else
761 AssertMsgFailed(("Unexpected error %d from MMR3PhysReadGCVirt\n", rc));
762 }
763 else
764 Log(("Skip the guest jump to patch code for this disabled patch %RGv - %08X\n", pPatch->patch.pPrivInstrGC, pRec->pRelocPos));
765
766 pRec->pDest = pTarget;
767 break;
768 }
769
770 case FIXUP_REL_JMPTOGUEST:
771 {
772 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
773 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
774
775 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
776 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
777 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
778 pRec->pSource = pSource;
779 break;
780 }
781
782 default:
783 AssertMsg(0, ("Invalid fixup type!!\n"));
784 return VERR_INVALID_PARAMETER;
785 }
786 }
787
788#ifdef LOG_ENABLED
789 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
790 {
791 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
792 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pPatch->patch.pPrivInstrGC, NULL, &opsize, szOutput, PATMREAD_RAWCODE);
793 Log(("Rel patch jump: %s", szOutput));
794 }
795#endif
796 return 0;
797}
798
799/**
800 * \#PF Handler callback for virtual access handler ranges.
801 *
802 * Important to realize that a physical page in a range can have aliases, and
803 * for ALL and WRITE handlers these will also trigger.
804 *
805 * @returns VINF_SUCCESS if the handler has carried out the operation.
806 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
807 * @param pVM VM Handle.
808 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
809 * @param pvPtr The HC mapping of that address.
810 * @param pvBuf What the guest is reading/writing.
811 * @param cbBuf How much it's reading/writing.
812 * @param enmAccessType The access type.
813 * @param pvUser User argument.
814 */
815DECLCALLBACK(int) patmVirtPageHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
816 PGMACCESSTYPE enmAccessType, void *pvUser)
817{
818 Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
819 NOREF(pvPtr); NOREF(pvBuf); NOREF(cbBuf); NOREF(pvUser);
820
821 /** @todo could be the wrong virtual address (alias) */
822 pVM->patm.s.pvFaultMonitor = GCPtr;
823 PATMR3HandleMonitoredPage(pVM);
824 return VINF_PGM_HANDLER_DO_DEFAULT;
825}
826
827
828#ifdef VBOX_WITH_DEBUGGER
829/**
830 * Callback function for RTAvloU32DoWithAll
831 *
832 * Enables the patch that's being enumerated
833 *
834 * @returns 0 (continue enumeration).
835 * @param pNode Current node
836 * @param pVM The VM to operate on.
837 */
838static DECLCALLBACK(int) EnableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
839{
840 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
841
842 PATMR3EnablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
843 return 0;
844}
845#endif /* VBOX_WITH_DEBUGGER */
846
847
848#ifdef VBOX_WITH_DEBUGGER
849/**
850 * Callback function for RTAvloU32DoWithAll
851 *
852 * Disables the patch that's being enumerated
853 *
854 * @returns 0 (continue enumeration).
855 * @param pNode Current node
856 * @param pVM The VM to operate on.
857 */
858static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
859{
860 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
861
862 PATMR3DisablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
863 return 0;
864}
865#endif
866
867/**
868 * Returns the host context pointer and size of the patch memory block
869 *
870 * @returns Host context pointer to the patch memory block.
871 * @param pVM The VM to operate on.
872 * @param pcb Size of the patch memory block
873 */
874VMMR3DECL(void *) PATMR3QueryPatchMemHC(PVM pVM, uint32_t *pcb)
875{
876 if (pcb)
877 *pcb = pVM->patm.s.cbPatchMem;
878
879 return pVM->patm.s.pPatchMemHC;
880}
881
882
883/**
884 * Returns the guest context pointer and size of the patch memory block
885 *
886 * @returns Guest context pointer to the patch memory block.
887 * @param pVM The VM to operate on.
888 * @param pcb Size of the patch memory block
889 */
890VMMR3DECL(RTRCPTR) PATMR3QueryPatchMemGC(PVM pVM, uint32_t *pcb)
891{
892 if (pcb)
893 *pcb = pVM->patm.s.cbPatchMem;
894
895 return pVM->patm.s.pPatchMemGC;
896}
897
898
899/**
900 * Returns the host context pointer of the GC context structure
901 *
902 * @returns Host context pointer to the GC state structure.
903 * @param pVM The VM to operate on.
904 */
905VMMR3DECL(PPATMGCSTATE) PATMR3QueryGCStateHC(PVM pVM)
906{
907 return pVM->patm.s.pGCStateHC;
908}
909
910
911/**
912 * Checks whether the HC address is part of our patch region
913 *
914 * @returns true if the address lies within the patch memory block, false otherwise.
915 * @param pVM The VM to operate on.
916 * @param pAddrHC Host context address
917 */
918VMMR3DECL(bool) PATMR3IsPatchHCAddr(PVM pVM, R3PTRTYPE(uint8_t *) pAddrHC)
919{
920 return (pAddrHC >= pVM->patm.s.pPatchMemHC && pAddrHC < pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem) ? true : false;
921}
922
923
924/**
925 * Allows or disallows patching of privileged instructions executed by the guest OS
926 *
927 * @returns VBox status code.
928 * @param pVM The VM to operate on.
929 * @param fAllowPatching Allow/disallow patching
930 */
931VMMR3DECL(int) PATMR3AllowPatching(PVM pVM, uint32_t fAllowPatching)
932{
933 pVM->fPATMEnabled = (fAllowPatching) ? true : false;
934 return VINF_SUCCESS;
935}
936
937/**
938 * Convert a GC patch block pointer to a HC patch pointer
939 *
940 * @returns HC pointer or NULL if it's not a GC patch pointer
941 * @param pVM The VM to operate on.
942 * @param pAddrGC GC pointer
943 */
944VMMR3DECL(R3PTRTYPE(void *)) PATMR3GCPtrToHCPtr(PVM pVM, RTRCPTR pAddrGC)
945{
946 if (pVM->patm.s.pPatchMemGC <= pAddrGC && pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem > pAddrGC)
947 return pVM->patm.s.pPatchMemHC + (pAddrGC - pVM->patm.s.pPatchMemGC);
948 else
949 return NULL;
950}
951
952/**
953 * Query PATM state (enabled/disabled)
954 *
955 * @returns 0 - disabled, 1 - enabled
956 * @param pVM The VM to operate on.
957 */
958VMMR3DECL(int) PATMR3IsEnabled(PVM pVM)
959{
960 return pVM->fPATMEnabled;
961}
962
963
964/**
965 * Convert guest context address to host context pointer
966 *
968 * @param pVM The VM to operate on.
969 * @param pCacheRec Address conversion cache record
970 * @param pGCPtr Guest context pointer
971 *
972 * @returns Host context pointer or NULL in case of an error
973 *
974 */
975R3PTRTYPE(uint8_t *) PATMGCVirtToHCVirt(PVM pVM, PPATMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr)
976{
977 int rc;
978 R3PTRTYPE(uint8_t *) pHCPtr;
979 uint32_t offset;
980
981 if (PATMIsPatchGCAddr(pVM, pGCPtr))
982 {
983 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
984 Assert(pPatch);
985 return PATCHCODE_PTR_HC(pPatch) + (pGCPtr - PATCHCODE_PTR_GC(pPatch));
986 }
987
988 offset = pGCPtr & PAGE_OFFSET_MASK;
989 if (pCacheRec->pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
990 return pCacheRec->pPageLocStartHC + offset;
991
992 /* Release previous lock if any. */
993 if (pCacheRec->Lock.pvMap)
994 {
995 PGMPhysReleasePageMappingLock(pVM, &pCacheRec->Lock);
996 pCacheRec->Lock.pvMap = NULL;
997 }
998
999 rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pGCPtr, (const void **)&pHCPtr, &pCacheRec->Lock);
1000 if (rc != VINF_SUCCESS)
1001 {
1002 AssertMsg(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("MMR3PhysGCVirt2HCVirtEx failed for %08X\n", pGCPtr));
1003 return NULL;
1004 }
1005 pCacheRec->pPageLocStartHC = (R3PTRTYPE(uint8_t*))((RTHCUINTPTR)pHCPtr & PAGE_BASE_HC_MASK);
1006 pCacheRec->pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
1007 return pHCPtr;
1008}
1009
1010
1011/** Calculates and fills in all branch targets
1012 *
1013 * @returns VBox status code.
1014 * @param pVM The VM to operate on.
1015 * @param pPatch Current patch block pointer
1016 *
1017 */
1018static int patmr3SetBranchTargets(PVM pVM, PPATCHINFO pPatch)
1019{
1020 int32_t displ;
1021
1022 PJUMPREC pRec = 0;
1023 unsigned nrJumpRecs = 0;
1024
1025 /*
1026 * Set all branch targets inside the patch block.
1027 * We remove all jump records as they are no longer needed afterwards.
1028 */
1029 while (true)
1030 {
1031 RCPTRTYPE(uint8_t *) pInstrGC;
1032 RCPTRTYPE(uint8_t *) pBranchTargetGC = 0;
1033
1034 pRec = (PJUMPREC)RTAvlPVRemoveBestFit(&pPatch->JumpTree, 0, true);
1035 if (pRec == 0)
1036 break;
1037
1038 nrJumpRecs++;
1039
1040 /* HC in patch block to GC in patch block. */
1041 pInstrGC = patmPatchHCPtr2PatchGCPtr(pVM, pRec->pJumpHC);
1042
1043 if (pRec->opcode == OP_CALL)
1044 {
1045 /* Special case: call function replacement patch from this patch block.
1046 */
1047 PPATMPATCHREC pFunctionRec = PATMQueryFunctionPatch(pVM, pRec->pTargetGC);
1048 if (!pFunctionRec)
1049 {
1050 int rc;
1051
1052 if (PATMR3HasBeenPatched(pVM, pRec->pTargetGC) == false)
1053 rc = PATMR3InstallPatch(pVM, pRec->pTargetGC, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
1054 else
1055 rc = VERR_PATCHING_REFUSED; /* exists as a normal patch; can't use it */
1056
1057 if (RT_FAILURE(rc))
1058 {
1059 uint8_t *pPatchHC;
1060 RTRCPTR pPatchGC;
1061 RTRCPTR pOrgInstrGC;
1062
1063 pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pInstrGC, 0);
1064 Assert(pOrgInstrGC);
1065
1066 /* Failure for some reason -> mark exit point with int 3. */
1067 Log(("Failed to install function replacement patch (at %x) for reason %Rrc\n", pOrgInstrGC, rc));
1068
1069 pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pOrgInstrGC);
1070 Assert(pPatchGC);
1071
1072 pPatchHC = pVM->patm.s.pPatchMemHC + (pPatchGC - pVM->patm.s.pPatchMemGC);
1073
1074 /* Set a breakpoint at the very beginning of the recompiled instruction */
1075 *pPatchHC = 0xCC;
1076
1077 continue;
1078 }
1079 }
1080 else
1081 {
1082 Log(("Patch block %RRv called as function\n", pFunctionRec->patch.pPrivInstrGC));
1083 pFunctionRec->patch.flags |= PATMFL_CODE_REFERENCED;
1084 }
1085
1086 pBranchTargetGC = PATMR3QueryPatchGCPtr(pVM, pRec->pTargetGC);
1087 }
1088 else
1089 pBranchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pRec->pTargetGC);
1090
1091 if (pBranchTargetGC == 0)
1092 {
1093 AssertMsgFailed(("patmr3SetBranchTargets: patmGuestGCPtrToPatchGCPtr failed for %08X\n", pRec->pTargetGC));
1094 return VERR_PATCHING_REFUSED;
1095 }
1096 /* Our jumps *always* have a dword displacement (to make things easier). */
1097 Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
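 /* The displacement is measured from the first byte after the 32-bit displacement
  * field itself (pInstrGC + offDispl + sizeof(RTRCPTR)), matching the x86 rel32
  * encoding. */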
1098 displ = pBranchTargetGC - (pInstrGC + pRec->offDispl + sizeof(RTRCPTR));
1099 *(RTRCPTR *)(pRec->pJumpHC + pRec->offDispl) = displ;
1100 Log(("Set branch target %d to %08X : %08x - (%08x + %d + %d)\n", nrJumpRecs, displ, pBranchTargetGC, pInstrGC, pRec->offDispl, sizeof(RTRCPTR)));
1101 }
1102 Assert(nrJumpRecs == pPatch->nrJumpRecs);
1103 Assert(pPatch->JumpTree == 0);
1104 return VINF_SUCCESS;
1105}
1106
1107/** Add an illegal instruction record
1108 *
1109 * @param pVM The VM to operate on.
1110 * @param pPatch Patch structure ptr
1111 * @param pInstrGC Guest context pointer to privileged instruction
1112 *
1113 */
1114static void patmAddIllegalInstrRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1115{
1116 PAVLPVNODECORE pRec;
1117
1118 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1119 Assert(pRec);
1120 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
1121
1122 bool ret = RTAvlPVInsert(&pPatch->pTempInfo->IllegalInstrTree, pRec);
1123 Assert(ret); NOREF(ret);
1124 pPatch->pTempInfo->nrIllegalInstr++;
1125}
1126
1127static bool patmIsIllegalInstr(PPATCHINFO pPatch, RTRCPTR pInstrGC)
1128{
1129 PAVLPVNODECORE pRec;
1130
1131 pRec = RTAvlPVGet(&pPatch->pTempInfo->IllegalInstrTree, (AVLPVKEY)(uintptr_t)pInstrGC);
1132 if (pRec)
1133 return true;
1134 else
1135 return false;
1136}
1137
1138/**
1139 * Add a patch to guest lookup record
1140 *
1141 * @param pVM The VM to operate on.
1142 * @param pPatch Patch structure ptr
1143 * @param pPatchInstrHC Host context pointer to the patch instruction
1144 * @param pInstrGC Guest context pointer to privileged instruction
1145 * @param enmType Lookup type
1146 * @param fDirty Dirty flag
1147 *
1148 */
1149 /** @note Be extremely careful with this function. Make absolutely sure the guest address is correct! (to avoid executing instructions twice!) */
1150void patmr3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC, PATM_LOOKUP_TYPE enmType, bool fDirty)
1151{
1152 bool ret;
1153 PRECPATCHTOGUEST pPatchToGuestRec;
1154 PRECGUESTTOPATCH pGuestToPatchRec;
1155 uint32_t PatchOffset = pPatchInstrHC - pVM->patm.s.pPatchMemHC; /* Offset in memory reserved for PATM. */
1156
1157 LogFlowFunc(("pVM=%#p pPatch=%#p pPatchInstrHC=%#p pInstrGC=%#x enmType=%d fDirty=%RTbool\n",
1158 pVM, pPatch, pPatchInstrHC, pInstrGC, enmType, fDirty));
1159
1160 if (enmType == PATM_LOOKUP_PATCH2GUEST)
1161 {
1162 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1163 if (pPatchToGuestRec && pPatchToGuestRec->Core.Key == PatchOffset)
1164 return; /* already there */
1165
1166 Assert(!pPatchToGuestRec);
1167 }
1168#ifdef VBOX_STRICT
1169 else
1170 {
1171 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1172 Assert(!pPatchToGuestRec);
1173 }
1174#endif
1175
1176 pPatchToGuestRec = (PRECPATCHTOGUEST)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(RECPATCHTOGUEST) + sizeof(RECGUESTTOPATCH));
1177 Assert(pPatchToGuestRec);
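 /* Note: the allocation above holds both directions of the mapping; the
  * guest-to-patch record used further down is simply (pPatchToGuestRec + 1). */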
1178 pPatchToGuestRec->Core.Key = PatchOffset;
1179 pPatchToGuestRec->pOrgInstrGC = pInstrGC;
1180 pPatchToGuestRec->enmType = enmType;
1181 pPatchToGuestRec->fDirty = fDirty;
1182
1183 ret = RTAvlU32Insert(&pPatch->Patch2GuestAddrTree, &pPatchToGuestRec->Core);
1184 Assert(ret);
1185
1186 /* GC to patch address */
1187 if (enmType == PATM_LOOKUP_BOTHDIR)
1188 {
1189 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
1190 if (!pGuestToPatchRec)
1191 {
1192 pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1193 pGuestToPatchRec->Core.Key = pInstrGC;
1194 pGuestToPatchRec->PatchOffset = PatchOffset;
1195
1196 ret = RTAvlU32Insert(&pPatch->Guest2PatchAddrTree, &pGuestToPatchRec->Core);
1197 Assert(ret);
1198 }
1199 }
1200
1201 pPatch->nrPatch2GuestRecs++;
1202}
1203
1204
1205/**
1206 * Removes a patch to guest lookup record
1207 *
1208 * @param pVM The VM to operate on.
1209 * @param pPatch Patch structure ptr
1210 * @param pPatchInstrGC Guest context pointer to patch block
1211 */
1212void patmr3RemoveP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pPatchInstrGC)
1213{
1214 PAVLU32NODECORE pNode;
1215 PAVLU32NODECORE pNode2;
1216 PRECPATCHTOGUEST pPatchToGuestRec;
1217 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
1218
1219 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1220 Assert(pPatchToGuestRec);
1221 if (pPatchToGuestRec)
1222 {
1223 if (pPatchToGuestRec->enmType == PATM_LOOKUP_BOTHDIR)
1224 {
1225 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1226
1227 Assert(pGuestToPatchRec->Core.Key);
1228 pNode2 = RTAvlU32Remove(&pPatch->Guest2PatchAddrTree, pGuestToPatchRec->Core.Key);
1229 Assert(pNode2);
1230 }
1231 pNode = RTAvlU32Remove(&pPatch->Patch2GuestAddrTree, pPatchToGuestRec->Core.Key);
1232 Assert(pNode);
1233
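 /* Freeing the patch-to-guest record also releases the embedded guest-to-patch
  * record, as both were allocated in one block by patmr3AddP2GLookupRecord. */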
1234 MMR3HeapFree(pPatchToGuestRec);
1235 pPatch->nrPatch2GuestRecs--;
1236 }
1237}
1238
1239
1240/**
1241 * RTAvlPVDestroy callback.
1242 */
1243static DECLCALLBACK(int) patmEmptyTreePVCallback(PAVLPVNODECORE pNode, void *)
1244{
1245 MMR3HeapFree(pNode);
1246 return 0;
1247}
1248
1249/**
1250 * Empty the specified tree (PV tree, MMR3 heap)
1251 *
1252 * @param pVM The VM to operate on.
1253 * @param ppTree Tree to empty
1254 */
1255void patmEmptyTree(PVM pVM, PAVLPVNODECORE *ppTree)
1256{
1257 NOREF(pVM);
1258 RTAvlPVDestroy(ppTree, patmEmptyTreePVCallback, NULL);
1259}
1260
1261
1262/**
1263 * RTAvlU32Destroy callback.
1264 */
1265static DECLCALLBACK(int) patmEmptyTreeU32Callback(PAVLU32NODECORE pNode, void *)
1266{
1267 MMR3HeapFree(pNode);
1268 return 0;
1269}
1270
1271/**
1272 * Empty the specified tree (U32 tree, MMR3 heap)
1273 *
1274 * @param pVM The VM to operate on.
1275 * @param ppTree Tree to empty
1276 */
1277void patmEmptyTreeU32(PVM pVM, PPAVLU32NODECORE ppTree)
1278{
1279 NOREF(pVM);
1280 RTAvlU32Destroy(ppTree, patmEmptyTreeU32Callback, NULL);
1281}
1282
1283
1284/**
1285 * Analyses the instructions following the cli for compliance with our heuristics for cli & pushf
1286 *
1287 * @returns VBox status code.
1288 * @param pVM The VM to operate on.
1289 * @param pCpu CPU disassembly state
1290 * @param pInstrGC Guest context pointer to privileged instruction
1291 * @param pCurInstrGC Guest context pointer to the current instruction
1292 * @param pCacheRec Cache record ptr
1293 *
1294 */
1295static int patmAnalyseBlockCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1296{
1297 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1298 bool fIllegalInstr = false;
1299
1300 /*
1301 * Preliminary heuristics:
1302 * - no call instructions without a fixed displacement between cli and sti/popf
1303 * - no jumps in the instructions following cli (4+ bytes; enough for the replacement jump (5 bytes))
1304 * - no nested pushf/cli
1305 * - sti/popf should be the (eventual) target of all branches
1306 * - no near or far returns; no int xx, no into
1307 *
1308 * Note: Later on we can impose less strict guidelines if the need arises; an example block that passes these checks is sketched below.
1309 */
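 /* Sketch of a (hypothetical) guest code block that passes the heuristics above:
  *
  *     cli                     ; privileged instruction being patched
  *     mov   eax, [esi]        ; plain instructions are simply duplicated
  *     mov   [edi], eax
  *     sti                     ; exit point, ends the code block
  */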
1310
1311 /* Bail out if the patch gets too big. */
1312 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1313 {
1314 Log(("Code block too big (%x) for patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1315 fIllegalInstr = true;
1316 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1317 }
1318 else
1319 {
1320 /* No unconditional jumps or calls without fixed displacements. */
1321 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1322 && (pCpu->pCurInstr->opcode == OP_JMP || pCpu->pCurInstr->opcode == OP_CALL)
1323 )
1324 {
1325 Assert(pCpu->param1.size <= 4 || pCpu->param1.size == 6);
1326 if ( pCpu->param1.size == 6 /* far call/jmp */
1327 || (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1328 || (OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1329 )
1330 {
1331 fIllegalInstr = true;
1332 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1333 }
1334 }
1335
1336 /* An unconditional (short) jump right after a cli is a potential problem; we will overwrite whichever function comes afterwards */
1337 if (pPatch->opcode == OP_CLI && pCpu->pCurInstr->opcode == OP_JMP)
1338 {
1339 if ( pCurInstrGC > pPatch->pPrivInstrGC
1340 && pCurInstrGC + pCpu->opsize < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1341 {
1342 Log(("Dangerous unconditional jump ends in our generated patch jump!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1343 /* We turn this one into an int 3 callable patch. */
1344 pPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
1345 }
1346 }
1347 else
1348 /* no nested pushfs just yet; nested cli is allowed for cli patches though. */
1349 if (pPatch->opcode == OP_PUSHF)
1350 {
1351 if (pCurInstrGC != pInstrGC && pCpu->pCurInstr->opcode == OP_PUSHF)
1352 {
1353 fIllegalInstr = true;
1354 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1355 }
1356 }
1357
1358 /* no far returns */
1359 if (pCpu->pCurInstr->opcode == OP_RETF)
1360 {
1361 pPatch->pTempInfo->nrRetInstr++;
1362 fIllegalInstr = true;
1363 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1364 }
1365 else if ( pCpu->pCurInstr->opcode == OP_INT3
1366 || pCpu->pCurInstr->opcode == OP_INT
1367 || pCpu->pCurInstr->opcode == OP_INTO)
1368 {
1369 /* No int xx or into either. */
1370 fIllegalInstr = true;
1371 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1372 }
1373 }
1374
1375 pPatch->cbPatchBlockSize += pCpu->opsize;
1376
1377 /* Illegal instruction -> end of analysis phase for this code block */
1378 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1379 return VINF_SUCCESS;
1380
1381 /* Check for exit points. */
1382 switch (pCpu->pCurInstr->opcode)
1383 {
1384 case OP_SYSEXIT:
1385 return VINF_SUCCESS; /* duplicate it; will fault or be emulated in GC. */
1386
1387 case OP_SYSENTER:
1388 case OP_ILLUD2:
1389 /* This appears to be some kind of kernel panic in Linux 2.4; no point to analyse more. */
1390 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1391 return VINF_SUCCESS;
1392
1393 case OP_STI:
1394 case OP_POPF:
1395 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION)));
1396 /* If our exit point lies within the generated patch jump, then we have to refuse!! */
1397 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1398 {
1399 Log(("Exit point within patch jump itself!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1400 return VERR_PATCHING_REFUSED;
1401 }
1402 if (pPatch->opcode == OP_PUSHF)
1403 {
1404 if (pCpu->pCurInstr->opcode == OP_POPF)
1405 {
1406 if (pPatch->cbPatchBlockSize >= SIZEOF_NEARJUMP32)
1407 return VINF_SUCCESS;
1408
1409 /* Or else we need to duplicate more instructions, because we can't jump back yet! */
1410 Log(("WARNING: End of block reached, but we need to duplicate some extra instruction to avoid a conflict with the patch jump\n"));
1411 pPatch->flags |= PATMFL_CHECK_SIZE;
1412 }
1413 break; /* sti doesn't mark the end of a pushf block; only popf does. */
1414 }
1415 /* else: fall through. */
1416 case OP_RETN: /* exit point for function replacement */
1417 return VINF_SUCCESS;
1418
1419 case OP_IRET:
1420 return VINF_SUCCESS; /* exitpoint */
1421
1422 case OP_CPUID:
1423 case OP_CALL:
1424 case OP_JMP:
1425 break;
1426
1427 default:
1428 if (pCpu->pCurInstr->optype & (OPTYPE_PRIVILEGED_NOTRAP))
1429 {
1430 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1431 return VINF_SUCCESS; /* exit point */
1432 }
1433 break;
1434 }
1435
1436 /* If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump. */
1437 if ((pPatch->flags & PATMFL_CHECK_SIZE) && pPatch->cbPatchBlockSize > SIZEOF_NEARJUMP32 && !(pCpu->pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW))
1438 {
1439 /* The end marker for this kind of patch is any instruction at a location outside our patch jump. */
1440 Log(("End of block at %RRv size %d\n", pCurInstrGC, pCpu->opsize));
1441 return VINF_SUCCESS;
1442 }
1443
1444 return VWRN_CONTINUE_ANALYSIS;
1445}
1446
1447/**
1448 * Analyses the instructions inside a function for compliance
1449 *
1450 * @returns VBox status code.
1451 * @param pVM The VM to operate on.
1452 * @param pCpu CPU disassembly state
1453 * @param pInstrGC Guest context pointer to privileged instruction
1454 * @param pCurInstrGC Guest context pointer to the current instruction
1455 * @param pCacheRec Cache record ptr
1456 *
1457 */
1458static int patmAnalyseFunctionCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1459{
1460 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1461 bool fIllegalInstr = false;
1462 NOREF(pInstrGC);
1463
1464 //Preliminary heuristics:
1465 //- no call instructions
1466 //- ret ends a block
1467
1468 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
1469
1470 // bail out if the patch gets too big
1471 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1472 {
1473 Log(("Code block too big (%x) for function patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1474 fIllegalInstr = true;
1475 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1476 }
1477 else
1478 {
1479 // no unconditional jumps or calls without fixed displacements
1480 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1481 && (pCpu->pCurInstr->opcode == OP_JMP || pCpu->pCurInstr->opcode == OP_CALL)
1482 )
1483 {
1484 Assert(pCpu->param1.size <= 4 || pCpu->param1.size == 6);
1485 if ( pCpu->param1.size == 6 /* far call/jmp */
1486 || (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1487 || (OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1488 )
1489 {
1490 fIllegalInstr = true;
1491 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1492 }
1493 }
1494 else /* no far returns */
1495 if (pCpu->pCurInstr->opcode == OP_RETF)
1496 {
1497 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1498 fIllegalInstr = true;
1499 }
1500 else /* no int xx or into either */
1501 if (pCpu->pCurInstr->opcode == OP_INT3 || pCpu->pCurInstr->opcode == OP_INT || pCpu->pCurInstr->opcode == OP_INTO)
1502 {
1503 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1504 fIllegalInstr = true;
1505 }
1506
1507 #if 0
1508 ///@todo we can handle certain in/out and privileged instructions in the guest context
1509 if (pCpu->pCurInstr->optype & OPTYPE_PRIVILEGED && pCpu->pCurInstr->opcode != OP_STI)
1510 {
1511 Log(("Illegal instructions for function patch!!\n"));
1512 return VERR_PATCHING_REFUSED;
1513 }
1514 #endif
1515 }
1516
1517 pPatch->cbPatchBlockSize += pCpu->opsize;
1518
1519 /* Illegal instruction -> end of analysis phase for this code block */
1520 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1521 {
1522 return VINF_SUCCESS;
1523 }
1524
1525 // Check for exit points
1526 switch (pCpu->pCurInstr->opcode)
1527 {
1528 case OP_ILLUD2:
1529            //This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further
1530 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1531 return VINF_SUCCESS;
1532
1533 case OP_IRET:
1534        case OP_SYSEXIT: /* will fault or be emulated in GC */
1535 case OP_RETN:
1536 return VINF_SUCCESS;
1537
1538 case OP_POPF:
1539 case OP_STI:
1540 return VWRN_CONTINUE_ANALYSIS;
1541 default:
1542 if (pCpu->pCurInstr->optype & (OPTYPE_PRIVILEGED_NOTRAP))
1543 {
1544 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1545 return VINF_SUCCESS; /* exit point */
1546 }
1547 return VWRN_CONTINUE_ANALYSIS;
1548 }
1549
1550 return VWRN_CONTINUE_ANALYSIS;
1551}
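/*
 * Editor's sketch (hedged, not part of the original source): both analysis callbacks
 * above share the same return-code contract -- VWRN_CONTINUE_ANALYSIS keeps scanning,
 * VINF_SUCCESS ends the current block (an exit point or a recorded illegal instruction),
 * and VERR_PATCHING_REFUSED aborts the patch attempt. The standalone example below only
 * mimics that contract; every identifier in it is made up and nothing here is a PATM or
 * VBox API.
 */
#if 0 /* illustrative only, never compiled */
enum ExampleOp { EXOP_RETN, EXOP_RETF, EXOP_INT, EXOP_STI, EXOP_OTHER };
enum ExampleRc { EXRC_CONTINUE, EXRC_END_OF_BLOCK, EXRC_REFUSE };

static enum ExampleRc exampleAnalyseOne(enum ExampleOp enmOp, unsigned offFromPatchStart)
{
    if (enmOp == EXOP_RETF || enmOp == EXOP_INT)
        return EXRC_END_OF_BLOCK;   /* recorded as illegal above; analysis of this block ends */
    if (enmOp == EXOP_STI && offFromPatchStart < 5)
        return EXRC_REFUSE;         /* exit point inside the 5 byte patch jump -> refuse */
    if (enmOp == EXOP_RETN)
        return EXRC_END_OF_BLOCK;   /* legitimate exit point for a function patch */
    return EXRC_CONTINUE;           /* safe to duplicate; keep scanning */
}
#endif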
1552
1553/**
1554 * Recompiles the instructions in a code block
1555 *
1556 * @returns VBox status code.
1557 * @param pVM The VM to operate on.
1558 * @param pCpu CPU disassembly state
1559 * @param pInstrGC Guest context pointer to privileged instruction
1560 * @param pCurInstrGC Guest context pointer to the current instruction
1561 * @param pCacheRec Cache record ptr
1562 *
1563 */
1564static int patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
1565{
1566 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
1567 int rc = VINF_SUCCESS;
1568 bool fInhibitIRQInstr = false; /* did the instruction cause PATMFL_INHIBITIRQS to be set? */
1569
1570 LogFlow(("patmRecompileCallback %RRv %RRv\n", pInstrGC, pCurInstrGC));
1571
1572 if ( patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pCurInstrGC) != 0
1573 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT)) /* do not do this when the next instruction *must* be executed! */
1574 {
1575 /*
1576 * Been there, done that; so insert a jump (we don't want to duplicate code)
1577 * no need to record this instruction as it's glue code that never crashes (it had better not!)
1578 */
1579 Log(("patmRecompileCallback: jump to code we've recompiled before %RRv!\n", pCurInstrGC));
1580 return patmPatchGenRelJump(pVM, pPatch, pCurInstrGC, OP_JMP, !!(pCpu->prefix & PREFIX_OPSIZE));
1581 }
1582
1583 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1584 {
1585 rc = patmAnalyseFunctionCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1586 }
1587 else
1588 rc = patmAnalyseBlockCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pCacheRec);
1589
1590 if (RT_FAILURE(rc))
1591 return rc;
1592
1593 /* Note: Never do a direct return unless a failure is encountered! */
1594
1595 /* Clear recompilation of next instruction flag; we are doing that right here. */
1596 if (pPatch->flags & PATMFL_RECOMPILE_NEXT)
1597 pPatch->flags &= ~PATMFL_RECOMPILE_NEXT;
1598
1599 /* Add lookup record for patch to guest address translation */
1600 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
1601
1602 /* Update lowest and highest instruction address for this patch */
1603 if (pCurInstrGC < pPatch->pInstrGCLowest)
1604 pPatch->pInstrGCLowest = pCurInstrGC;
1605 else
1606 if (pCurInstrGC > pPatch->pInstrGCHighest)
1607 pPatch->pInstrGCHighest = pCurInstrGC + pCpu->opsize;
1608
1609 /* Illegal instruction -> end of recompile phase for this code block. */
1610 if (patmIsIllegalInstr(pPatch, pCurInstrGC))
1611 {
1612 Log(("Illegal instruction at %RRv -> mark with int 3\n", pCurInstrGC));
1613 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1614 goto end;
1615 }
1616
1617 /* For our first attempt, we'll handle only simple relative jumps (immediate offset coded in instruction).
1618 * Indirect calls are handled below.
1619 */
1620 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1621 && (pCpu->pCurInstr->opcode != OP_CALL || (pPatch->flags & PATMFL_SUPPORT_CALLS))
1622 && (OP_PARM_VTYPE(pCpu->pCurInstr->param1) == OP_PARM_J))
1623 {
1624 RCPTRTYPE(uint8_t *) pTargetGC = PATMResolveBranch(pCpu, pCurInstrGC);
1625 if (pTargetGC == 0)
1626 {
1627 Log(("We don't support far jumps here!! (%08X)\n", pCpu->param1.flags));
1628 return VERR_PATCHING_REFUSED;
1629 }
1630
1631 if (pCpu->pCurInstr->opcode == OP_CALL)
1632 {
1633 Assert(!PATMIsPatchGCAddr(pVM, pTargetGC));
1634 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, pTargetGC, false);
1635 if (RT_FAILURE(rc))
1636 goto end;
1637 }
1638 else
1639 rc = patmPatchGenRelJump(pVM, pPatch, pTargetGC, pCpu->pCurInstr->opcode, !!(pCpu->prefix & PREFIX_OPSIZE));
1640
1641 if (RT_SUCCESS(rc))
1642 rc = VWRN_CONTINUE_RECOMPILE;
1643
1644 goto end;
1645 }
1646
1647 switch (pCpu->pCurInstr->opcode)
1648 {
1649 case OP_CLI:
1650 {
1651 /* If a cli is found while duplicating instructions for another patch, then it's of vital importance to continue
1652 * until we've found the proper exit point(s).
1653 */
1654 if ( pCurInstrGC != pInstrGC
1655 && !(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1656 )
1657 {
1658 Log(("cli instruction found in other instruction patch block; force it to continue & find an exit point\n"));
1659 pPatch->flags &= ~(PATMFL_CHECK_SIZE | PATMFL_SINGLE_INSTRUCTION);
1660 }
1661 /* Set by irq inhibition; no longer valid now. */
1662 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1663
1664 rc = patmPatchGenCli(pVM, pPatch);
1665 if (RT_SUCCESS(rc))
1666 rc = VWRN_CONTINUE_RECOMPILE;
1667 break;
1668 }
1669
1670 case OP_MOV:
1671 if (pCpu->pCurInstr->optype & OPTYPE_POTENTIALLY_DANGEROUS)
1672 {
1673 /* mov ss, src? */
1674 if ( (pCpu->param1.flags & USE_REG_SEG)
1675 && (pCpu->param1.base.reg_seg == DIS_SELREG_SS))
1676 {
1677 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1678 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1679 /** @todo this could cause a fault (ring 0 selector being loaded in ring 1) */
1680 }
1681#if 0 /* necessary for Haiku */
1682 else
1683 if ( (pCpu->param2.flags & USE_REG_SEG)
1684 && (pCpu->param2.base.reg_seg == USE_REG_SS)
1685 && (pCpu->param1.flags & (USE_REG_GEN32|USE_REG_GEN16))) /** @todo memory operand must in theory be handled too */
1686 {
1687 /* mov GPR, ss */
1688 rc = patmPatchGenMovFromSS(pVM, pPatch, pCpu, pCurInstrGC);
1689 if (RT_SUCCESS(rc))
1690 rc = VWRN_CONTINUE_RECOMPILE;
1691 break;
1692 }
1693#endif
1694 }
1695 goto duplicate_instr;
1696
1697 case OP_POP:
1698 if (pCpu->pCurInstr->param1 == OP_PARM_REG_SS)
1699 {
1700 Assert(pCpu->pCurInstr->optype & OPTYPE_INHIBIT_IRQS);
1701
1702                Log(("Force recompilation of next instruction for OP_POP SS at %RRv\n", pCurInstrGC));
1703 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1704 }
1705 goto duplicate_instr;
1706
1707 case OP_STI:
1708 {
1709 RTRCPTR pNextInstrGC = 0; /* by default no inhibit irq */
1710
1711 /* In a sequence of instructions that inhibit irqs, only the first one actually inhibits irqs. */
1712 if (!(pPatch->flags & PATMFL_INHIBIT_IRQS))
1713 {
1714 pPatch->flags |= PATMFL_INHIBIT_IRQS | PATMFL_GENERATE_JUMPTOGUEST;
1715 fInhibitIRQInstr = true;
1716 pNextInstrGC = pCurInstrGC + pCpu->opsize;
1717 Log(("Inhibit irqs for instruction OP_STI at %RRv\n", pCurInstrGC));
1718 }
1719 rc = patmPatchGenSti(pVM, pPatch, pCurInstrGC, pNextInstrGC);
1720
1721 if (RT_SUCCESS(rc))
1722 {
1723 DISCPUSTATE cpu = *pCpu;
1724 unsigned opsize;
1725 int disret;
1726 RCPTRTYPE(uint8_t *) pReturnInstrGC;
1727
1728 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1729
1730 pNextInstrGC = pCurInstrGC + pCpu->opsize;
1731 { /* Force pNextInstrHC out of scope after using it */
1732 uint8_t *pNextInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
1733 if (pNextInstrHC == NULL)
1734 {
1735 AssertFailed();
1736 return VERR_PATCHING_REFUSED;
1737 }
1738
1739 // Disassemble the next instruction
1740 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pNextInstrGC, pNextInstrHC, &opsize, NULL);
1741 }
1742 if (disret == false)
1743 {
1744 AssertMsgFailed(("STI: Disassembly failed (probably page not present) -> return to caller\n"));
1745 return VERR_PATCHING_REFUSED;
1746 }
1747 pReturnInstrGC = pNextInstrGC + opsize;
1748
1749 if ( (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1750 || pReturnInstrGC <= pInstrGC
1751 || pReturnInstrGC - pInstrGC >= SIZEOF_NEARJUMP32
1752 )
1753 {
1754 /* Not an exit point for function duplication patches */
1755 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
1756 && RT_SUCCESS(rc))
1757 {
1758 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST; /* Don't generate a jump back */
1759 rc = VWRN_CONTINUE_RECOMPILE;
1760 }
1761 else
1762 rc = VINF_SUCCESS; //exit point
1763 }
1764 else {
1765 Log(("PATM: sti occurred too soon; refusing patch!\n"));
1766 rc = VERR_PATCHING_REFUSED; //not allowed!!
1767 }
1768 }
1769 break;
1770 }
1771
1772 case OP_POPF:
1773 {
1774 bool fGenerateJmpBack = (pCurInstrGC + pCpu->opsize - pInstrGC >= SIZEOF_NEARJUMP32);
1775
1776 /* Not an exit point for IDT handler or function replacement patches */
1777 /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
1778 if (pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_DUPLICATE_FUNCTION))
1779 fGenerateJmpBack = false;
1780
1781 rc = patmPatchGenPopf(pVM, pPatch, pCurInstrGC + pCpu->opsize, !!(pCpu->prefix & PREFIX_OPSIZE), fGenerateJmpBack);
1782 if (RT_SUCCESS(rc))
1783 {
1784 if (fGenerateJmpBack == false)
1785 {
1786 /* Not an exit point for IDT handler or function replacement patches */
1787 rc = VWRN_CONTINUE_RECOMPILE;
1788 }
1789 else
1790 {
1791 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1792 rc = VINF_SUCCESS; /* exit point! */
1793 }
1794 }
1795 break;
1796 }
1797
1798 case OP_PUSHF:
1799 rc = patmPatchGenPushf(pVM, pPatch, !!(pCpu->prefix & PREFIX_OPSIZE));
1800 if (RT_SUCCESS(rc))
1801 rc = VWRN_CONTINUE_RECOMPILE;
1802 break;
1803
1804 case OP_PUSH:
1805 if (pCpu->pCurInstr->param1 == OP_PARM_REG_CS)
1806 {
1807 rc = patmPatchGenPushCS(pVM, pPatch);
1808 if (RT_SUCCESS(rc))
1809 rc = VWRN_CONTINUE_RECOMPILE;
1810 break;
1811 }
1812 goto duplicate_instr;
1813
1814 case OP_IRET:
1815 Log(("IRET at %RRv\n", pCurInstrGC));
1816 rc = patmPatchGenIret(pVM, pPatch, pCurInstrGC, !!(pCpu->prefix & PREFIX_OPSIZE));
1817 if (RT_SUCCESS(rc))
1818 {
1819 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1820 rc = VINF_SUCCESS; /* exit point by definition */
1821 }
1822 break;
1823
1824 case OP_ILLUD2:
1825            /* This appears to be some kind of kernel panic in Linux 2.4; no point in continuing */
1826 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1827 if (RT_SUCCESS(rc))
1828 rc = VINF_SUCCESS; /* exit point by definition */
1829 Log(("Illegal opcode (0xf 0xb)\n"));
1830 break;
1831
1832 case OP_CPUID:
1833 rc = patmPatchGenCpuid(pVM, pPatch, pCurInstrGC);
1834 if (RT_SUCCESS(rc))
1835 rc = VWRN_CONTINUE_RECOMPILE;
1836 break;
1837
1838 case OP_STR:
1839 case OP_SLDT:
1840 rc = patmPatchGenSldtStr(pVM, pPatch, pCpu, pCurInstrGC);
1841 if (RT_SUCCESS(rc))
1842 rc = VWRN_CONTINUE_RECOMPILE;
1843 break;
1844
1845 case OP_SGDT:
1846 case OP_SIDT:
1847 rc = patmPatchGenSxDT(pVM, pPatch, pCpu, pCurInstrGC);
1848 if (RT_SUCCESS(rc))
1849 rc = VWRN_CONTINUE_RECOMPILE;
1850 break;
1851
1852 case OP_RETN:
1853 /* retn is an exit point for function patches */
1854 rc = patmPatchGenRet(pVM, pPatch, pCpu, pCurInstrGC);
1855 if (RT_SUCCESS(rc))
1856 rc = VINF_SUCCESS; /* exit point by definition */
1857 break;
1858
1859 case OP_SYSEXIT:
1860 /* Duplicate it, so it can be emulated in GC (or fault). */
1861 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
1862 if (RT_SUCCESS(rc))
1863 rc = VINF_SUCCESS; /* exit point by definition */
1864 break;
1865
1866 case OP_CALL:
1867 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
1868 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
1869 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
1870 */
1871 Assert(pCpu->param1.size == 4 || pCpu->param1.size == 6);
1872 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->param1.size == 4 /* no far calls! */)
1873 {
1874 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, (RTRCPTR)0xDEADBEEF, true);
1875 if (RT_SUCCESS(rc))
1876 {
1877 rc = VWRN_CONTINUE_RECOMPILE;
1878 }
1879 break;
1880 }
1881 goto gen_illegal_instr;
1882
1883 case OP_JMP:
1884 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
1885 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
1886 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
1887 */
1888 Assert(pCpu->param1.size == 4 || pCpu->param1.size == 6);
1889 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->param1.size == 4 /* no far jumps! */)
1890 {
1891 rc = patmPatchGenJump(pVM, pPatch, pCpu, pCurInstrGC);
1892 if (RT_SUCCESS(rc))
1893 rc = VINF_SUCCESS; /* end of branch */
1894 break;
1895 }
1896 goto gen_illegal_instr;
1897
1898 case OP_INT3:
1899 case OP_INT:
1900 case OP_INTO:
1901 goto gen_illegal_instr;
1902
1903 case OP_MOV_DR:
1904            /* Note: currently we let DRx writes cause a trap 0d (#GP); our trap handler will decide whether to interpret it or not. */
1905 if (pCpu->pCurInstr->param2 == OP_PARM_Dd)
1906 {
1907 rc = patmPatchGenMovDebug(pVM, pPatch, pCpu);
1908 if (RT_SUCCESS(rc))
1909 rc = VWRN_CONTINUE_RECOMPILE;
1910 break;
1911 }
1912 goto duplicate_instr;
1913
1914 case OP_MOV_CR:
1915            /* Note: currently we let CRx writes cause a trap 0d (#GP); our trap handler will decide whether to interpret it or not. */
1916 if (pCpu->pCurInstr->param2 == OP_PARM_Cd)
1917 {
1918 rc = patmPatchGenMovControl(pVM, pPatch, pCpu);
1919 if (RT_SUCCESS(rc))
1920 rc = VWRN_CONTINUE_RECOMPILE;
1921 break;
1922 }
1923 goto duplicate_instr;
1924
1925 default:
1926 if (pCpu->pCurInstr->optype & (OPTYPE_CONTROLFLOW | OPTYPE_PRIVILEGED_NOTRAP))
1927 {
1928gen_illegal_instr:
1929 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1930 if (RT_SUCCESS(rc))
1931 rc = VINF_SUCCESS; /* exit point by definition */
1932 }
1933 else
1934 {
1935duplicate_instr:
1936 Log(("patmPatchGenDuplicate\n"));
1937 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
1938 if (RT_SUCCESS(rc))
1939 rc = VWRN_CONTINUE_RECOMPILE;
1940 }
1941 break;
1942 }
1943
1944end:
1945
1946 if ( !fInhibitIRQInstr
1947 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
1948 {
1949 int rc2;
1950 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->opsize;
1951
1952 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
1953 Log(("Clear inhibit IRQ flag at %RRv\n", pCurInstrGC));
1954 if (pPatch->flags & PATMFL_GENERATE_JUMPTOGUEST)
1955 {
1956 Log(("patmRecompileCallback: generate jump back to guest (%RRv) after fused instruction\n", pNextInstrGC));
1957
1958 rc2 = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
1959 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1960 rc = VINF_SUCCESS; /* end of the line */
1961 }
1962 else
1963 {
1964 rc2 = patmPatchGenClearInhibitIRQ(pVM, pPatch, pNextInstrGC);
1965 }
1966 if (RT_FAILURE(rc2))
1967 rc = rc2;
1968 }
1969
1970 if (RT_SUCCESS(rc))
1971 {
1972        // For a single instruction patch we're done once we've copied enough instructions *and* the current instruction is not a relative jump
1973 if ( (pPatch->flags & PATMFL_CHECK_SIZE)
1974 && pCurInstrGC + pCpu->opsize - pInstrGC >= SIZEOF_NEARJUMP32
1975 && !(pCpu->pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW)
1976 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT) /* do not do this when the next instruction *must* be executed! */
1977 )
1978 {
1979 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->opsize;
1980
1981 // The end marker for this kind of patch is any instruction at a location outside our patch jump
1982 Log(("patmRecompileCallback: end found for single instruction patch at %RRv opsize %d\n", pNextInstrGC, pCpu->opsize));
1983
1984 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC);
1985 AssertRC(rc);
1986 }
1987 }
1988 return rc;
1989}
1990
1991
1992#ifdef LOG_ENABLED
1993
1994/** Adds a disasm jump record (temporary; used to prevent duplicate analysis of jump targets)
1995 *
1996 * @param pVM The VM to operate on.
1997 * @param pPatch Patch structure ptr
1998 * @param pInstrGC Guest context pointer to privileged instruction
1999 *
2000 */
2001static void patmPatchAddDisasmJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
2002{
2003 PAVLPVNODECORE pRec;
2004
2005 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
2006 Assert(pRec);
2007 pRec->Key = (AVLPVKEY)(uintptr_t)pInstrGC;
2008
2009 int ret = RTAvlPVInsert(&pPatch->pTempInfo->DisasmJumpTree, pRec);
2010 Assert(ret);
2011}
2012
2013/**
2014 * Checks if jump target has been analysed before.
2015 *
2016 * @returns true if the jump target has been analysed before, false otherwise.
2017 * @param pPatch Patch struct
2018 * @param pInstrGC Jump target
2019 *
2020 */
2021static bool patmIsKnownDisasmJump(PPATCHINFO pPatch, RTRCPTR pInstrGC)
2022{
2023 PAVLPVNODECORE pRec;
2024
2025 pRec = RTAvlPVGet(&pPatch->pTempInfo->DisasmJumpTree, (AVLPVKEY)(uintptr_t)pInstrGC);
2026 if (pRec)
2027 return true;
2028 return false;
2029}
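/*
 * Editor's note (hedged usage sketch, not original code): the two helpers above form a
 * simple visited set keyed by the jump target's guest address, so recursive disassembly
 * never analyses the same branch target twice. The fragment below mirrors how
 * patmr3DisasmCode uses them further down in this file.
 */
#if 0 /* illustrative only */
    if (!patmIsKnownDisasmJump(pPatch, pTargetGC))
    {
        patmPatchAddDisasmJump(pVM, pPatch, pTargetGC);
        rc = patmr3DisasmCode(pVM, pInstrGC, pTargetGC, pfnPATMR3Disasm, pCacheRec);
    }
#endif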
2030
2031/**
2032 * For proper disassembly of the final patch block
2033 *
2034 * @returns VBox status code.
2035 * @param pVM The VM to operate on.
2036 * @param pCpu CPU disassembly state
2037 * @param pInstrGC Guest context pointer to privileged instruction
2038 * @param pCurInstrGC Guest context pointer to the current instruction
2039 * @param pCacheRec Cache record ptr
2040 *
2041 */
2042int patmr3DisasmCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
2043{
2044 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2045 NOREF(pInstrGC);
2046
2047 if (pCpu->pCurInstr->opcode == OP_INT3)
2048 {
2049 /* Could be an int3 inserted in a call patch. Check to be sure */
2050 DISCPUSTATE cpu;
2051 RTRCPTR pOrgJumpGC;
2052 uint32_t dummy;
2053
2054 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2055 pOrgJumpGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2056
2057 { /* Force pOrgJumpHC out of scope after using it */
2058 uint8_t *pOrgJumpHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pOrgJumpGC);
2059
2060 bool disret = PATMR3DISInstr(pVM, pPatch, &cpu, pOrgJumpGC, pOrgJumpHC, &dummy, NULL);
2061 if (!disret || cpu.pCurInstr->opcode != OP_CALL || cpu.param1.size != 4 /* only near calls */)
2062 return VINF_SUCCESS;
2063 }
2064 return VWRN_CONTINUE_ANALYSIS;
2065 }
2066
2067 if ( pCpu->pCurInstr->opcode == OP_ILLUD2
2068 && PATMIsPatchGCAddr(pVM, pCurInstrGC))
2069 {
2070 /* the indirect call patch contains an 0xF/0xB illegal instr to call for assistance; check for this and continue */
2071 return VWRN_CONTINUE_ANALYSIS;
2072 }
2073
2074 if ( (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
2075 || pCpu->pCurInstr->opcode == OP_INT
2076 || pCpu->pCurInstr->opcode == OP_IRET
2077 || pCpu->pCurInstr->opcode == OP_RETN
2078 || pCpu->pCurInstr->opcode == OP_RETF
2079 )
2080 {
2081 return VINF_SUCCESS;
2082 }
2083
2084 if (pCpu->pCurInstr->opcode == OP_ILLUD2)
2085 return VINF_SUCCESS;
2086
2087 return VWRN_CONTINUE_ANALYSIS;
2088}
2089
2090
2091/**
2092 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2093 *
2094 * @returns VBox status code.
2095 * @param pVM The VM to operate on.
2096 * @param pInstrGC Guest context pointer to the initial privileged instruction
2097 * @param pCurInstrGC Guest context pointer to the current instruction
2098 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2099 * @param pCacheRec Cache record ptr
2100 *
2101 */
2102int patmr3DisasmCode(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2103{
2104 DISCPUSTATE cpu;
2105 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2106 int rc = VWRN_CONTINUE_ANALYSIS;
2107 uint32_t opsize, delta;
2108 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2109 bool disret;
2110 char szOutput[256];
2111
2112 Assert(pCurInstrHC != PATCHCODE_PTR_HC(pPatch) || pPatch->pTempInfo->DisasmJumpTree == 0);
2113
2114 /* We need this to determine branch targets (and for disassembling). */
2115 delta = pVM->patm.s.pPatchMemGC - (uintptr_t)pVM->patm.s.pPatchMemHC;
2116
2117 while(rc == VWRN_CONTINUE_ANALYSIS)
2118 {
2119 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2120
2121 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2122 if (pCurInstrHC == NULL)
2123 {
2124 rc = VERR_PATCHING_REFUSED;
2125 goto end;
2126 }
2127
2128 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, szOutput, PATMREAD_RAWCODE);
2129 if (PATMIsPatchGCAddr(pVM, pCurInstrGC))
2130 {
2131 RTRCPTR pOrgInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2132
2133 if (pOrgInstrGC != pPatch->pTempInfo->pLastDisasmInstrGC)
2134 Log(("DIS %RRv<-%s", pOrgInstrGC, szOutput));
2135 else
2136 Log(("DIS %s", szOutput));
2137
2138 pPatch->pTempInfo->pLastDisasmInstrGC = pOrgInstrGC;
2139 if (patmIsIllegalInstr(pPatch, pOrgInstrGC))
2140 {
2141 rc = VINF_SUCCESS;
2142 goto end;
2143 }
2144 }
2145 else
2146 Log(("DIS: %s", szOutput));
2147
2148 if (disret == false)
2149 {
2150 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2151 rc = VINF_SUCCESS;
2152 goto end;
2153 }
2154
2155 rc = pfnPATMR3Disasm(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2156 if (rc != VWRN_CONTINUE_ANALYSIS) {
2157 break; //done!
2158 }
2159
2160 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction) */
2161 if ( (cpu.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2162 && (OP_PARM_VTYPE(cpu.pCurInstr->param1) == OP_PARM_J)
2163 && cpu.pCurInstr->opcode != OP_CALL /* complete functions are replaced; don't bother here. */
2164 )
2165 {
2166 RTRCPTR pTargetGC = PATMResolveBranch(&cpu, pCurInstrGC);
2167 RTRCPTR pOrgTargetGC;
2168
2169 if (pTargetGC == 0)
2170 {
2171 Log(("We don't support far jumps here!! (%08X)\n", cpu.param1.flags));
2172 rc = VERR_PATCHING_REFUSED;
2173 break;
2174 }
2175
2176 if (!PATMIsPatchGCAddr(pVM, pTargetGC))
2177 {
2178 //jump back to guest code
2179 rc = VINF_SUCCESS;
2180 goto end;
2181 }
2182 pOrgTargetGC = PATMR3PatchToGCPtr(pVM, pTargetGC, 0);
2183
2184 if (patmIsCommonIDTHandlerPatch(pVM, pOrgTargetGC))
2185 {
2186 rc = VINF_SUCCESS;
2187 goto end;
2188 }
2189
2190 if (patmIsKnownDisasmJump(pPatch, pTargetGC) == false)
2191 {
2192 /* New jump, let's check it. */
2193 patmPatchAddDisasmJump(pVM, pPatch, pTargetGC);
2194
2195 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2196 rc = patmr3DisasmCode(pVM, pInstrGC, pTargetGC, pfnPATMR3Disasm, pCacheRec);
2197 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2198
2199 if (rc != VINF_SUCCESS) {
2200 break; //done!
2201 }
2202 }
2203 if (cpu.pCurInstr->opcode == OP_JMP)
2204 {
2205 /* Unconditional jump; return to caller. */
2206 rc = VINF_SUCCESS;
2207 goto end;
2208 }
2209
2210 rc = VWRN_CONTINUE_ANALYSIS;
2211 }
2212 pCurInstrGC += opsize;
2213 }
2214end:
2215 return rc;
2216}
2217
2218/**
2219 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2220 *
2221 * @returns VBox status code.
2222 * @param pVM The VM to operate on.
2223 * @param pInstrGC Guest context pointer to the initial privileged instruction
2224 * @param pCurInstrGC Guest context pointer to the current instruction
2225 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2226 * @param pCacheRec Cache record ptr
2227 *
2228 */
2229int patmr3DisasmCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
2230{
2231 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2232
2233 int rc = patmr3DisasmCode(pVM, pInstrGC, pCurInstrGC, pfnPATMR3Disasm, pCacheRec);
2234 /* Free all disasm jump records. */
2235 patmEmptyTree(pVM, &pPatch->pTempInfo->DisasmJumpTree);
2236 return rc;
2237}
2238
2239#endif /* LOG_ENABLED */
2240
2241/**
2242 * Detects if the specified address falls within a 5 byte jump generated for an active patch.
2243 * If so, this patch is permanently disabled.
2244 *
2245 * @param pVM The VM to operate on.
2246 * @param pInstrGC Guest context pointer to instruction
2247 * @param pConflictGC Guest context pointer to check
2248 *
2249 * @note also checks for patch hints to make sure they can never be enabled if a conflict is present.
2250 *
2251 */
2252VMMR3DECL(int) PATMR3DetectConflict(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictGC)
2253{
2254 PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, pConflictGC, true /* include patch hints */);
2255 if (pTargetPatch)
2256 {
2257 return patmDisableUnusablePatch(pVM, pInstrGC, pConflictGC, pTargetPatch);
2258 }
2259 return VERR_PATCH_NO_CONFLICT;
2260}
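/*
 * Editor's note (hedged usage sketch, hypothetical caller): VERR_PATCH_NO_CONFLICT means
 * pConflictGC does not fall inside another active patch's 5 byte jump; any other outcome
 * means a conflicting patch was found and permanently disabled by the call above.
 */
#if 0 /* illustrative only */
    rc = PATMR3DetectConflict(pVM, pInstrGC, pConflictGC);
    if (rc == VERR_PATCH_NO_CONFLICT)
    {
        /* Safe to proceed at pConflictGC; no active patch jump covers it. */
    }
#endif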
2261
2262/**
2263 * Recompile the code stream until the callback function detects a failure or decides everything is acceptable
2264 *
2265 * @returns VBox status code.
2266 * @param pVM The VM to operate on.
2267 * @param pInstrGC Guest context pointer to privileged instruction
2268 * @param pCurInstrGC Guest context pointer to the current instruction
2269 * @param pfnPATMR3Recompile Callback for testing the disassembled instruction
2270 * @param pCacheRec Cache record ptr
2271 *
2272 */
2273static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Recompile, PPATMP2GLOOKUPREC pCacheRec)
2274{
2275 DISCPUSTATE cpu;
2276 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
2277 int rc = VWRN_CONTINUE_ANALYSIS;
2278 uint32_t opsize;
2279 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2280 bool disret;
2281#ifdef LOG_ENABLED
2282 char szOutput[256];
2283#endif
2284
2285 while (rc == VWRN_CONTINUE_RECOMPILE)
2286 {
2287 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2288
2289 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2290 if (pCurInstrHC == NULL)
2291 {
2292 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2293 goto end;
2294 }
2295#ifdef LOG_ENABLED
2296 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, szOutput);
2297 Log(("Recompile: %s", szOutput));
2298#else
2299 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2300#endif
2301 if (disret == false)
2302 {
2303 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2304
2305 /* Add lookup record for patch to guest address translation */
2306 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
2307 patmPatchGenIllegalInstr(pVM, pPatch);
2308 rc = VINF_SUCCESS; /* Note: don't fail here; we might refuse an important patch!! */
2309 goto end;
2310 }
2311
2312 rc = pfnPATMR3Recompile(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec);
2313 if (rc != VWRN_CONTINUE_RECOMPILE)
2314 {
2315 /* If irqs are inhibited because of the current instruction, then we must make sure the next one is executed! */
2316 if ( rc == VINF_SUCCESS
2317 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2318 {
2319 DISCPUSTATE cpunext;
2320 uint32_t opsizenext;
2321 uint8_t *pNextInstrHC;
2322 RTRCPTR pNextInstrGC = pCurInstrGC + opsize;
2323
2324 Log(("patmRecompileCodeStream: irqs inhibited by instruction %RRv\n", pNextInstrGC));
2325
2326 /* Certain instructions (e.g. sti) force the next instruction to be executed before any interrupts can occur.
2327 * Recompile the next instruction as well
2328 */
2329 pNextInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pNextInstrGC);
2330 if (pNextInstrHC == NULL)
2331 {
2332 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2333 goto end;
2334 }
2335 cpunext.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2336 disret = PATMR3DISInstr(pVM, pPatch, &cpunext, pNextInstrGC, pNextInstrHC, &opsizenext, NULL);
2337 if (disret == false)
2338 {
2339 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2340 goto end;
2341 }
2342 switch(cpunext.pCurInstr->opcode)
2343 {
2344 case OP_IRET: /* inhibit cleared in generated code */
2345 case OP_SYSEXIT: /* faults; inhibit should be cleared in HC handling */
2346 case OP_HLT:
2347 break; /* recompile these */
2348
2349 default:
2350 if (cpunext.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2351 {
2352 Log(("Unexpected control flow instruction after inhibit irq instruction\n"));
2353
2354 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2355 AssertRC(rc);
2356 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2357 goto end; /** @todo should be ok to ignore instruction fusing in this case */
2358 }
2359 break;
2360 }
2361
2362 /* Note: after a cli we must continue to a proper exit point */
2363 if (cpunext.pCurInstr->opcode != OP_CLI)
2364 {
2365 rc = pfnPATMR3Recompile(pVM, &cpunext, pInstrGC, pNextInstrGC, pCacheRec);
2366 if (RT_SUCCESS(rc))
2367 {
2368 rc = VINF_SUCCESS;
2369 goto end;
2370 }
2371 break;
2372 }
2373 else
2374 rc = VWRN_CONTINUE_RECOMPILE;
2375 }
2376 else
2377 break; /* done! */
2378 }
2379
2380 /** @todo continue with the instructions following the jump and then recompile the jump target code */
2381
2382
2383 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction). */
2384 if ( (cpu.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2385 && (OP_PARM_VTYPE(cpu.pCurInstr->param1) == OP_PARM_J)
2386 && cpu.pCurInstr->opcode != OP_CALL /* complete functions are replaced; don't bother here. */
2387 )
2388 {
2389 RCPTRTYPE(uint8_t *) addr = PATMResolveBranch(&cpu, pCurInstrGC);
2390 if (addr == 0)
2391 {
2392 Log(("We don't support far jumps here!! (%08X)\n", cpu.param1.flags));
2393 rc = VERR_PATCHING_REFUSED;
2394 break;
2395 }
2396
2397 Log(("Jump encountered target %RRv\n", addr));
2398
2399 /* We don't check if the branch target lies in a valid page as we've already done that in the analysis phase. */
2400 if (!(cpu.pCurInstr->optype & OPTYPE_UNCOND_CONTROLFLOW))
2401 {
2402 Log(("patmRecompileCodeStream continue passed conditional jump\n"));
2403 /* First we need to finish this linear code stream until the next exit point. */
2404 rc = patmRecompileCodeStream(pVM, pInstrGC, pCurInstrGC+opsize, pfnPATMR3Recompile, pCacheRec);
2405 if (RT_FAILURE(rc))
2406 {
2407 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2408 break; //fatal error
2409 }
2410 }
2411
2412 if (patmGuestGCPtrToPatchGCPtr(pVM, pPatch, addr) == 0)
2413 {
2414 /* New code; let's recompile it. */
2415 Log(("patmRecompileCodeStream continue with jump\n"));
2416
2417 /*
2418 * If we are jumping to an existing patch (or within 5 bytes of the entrypoint), then we must temporarily disable
2419 * this patch so we can continue our analysis
2420 *
2421 * We rely on CSAM to detect and resolve conflicts
2422 */
2423 PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, addr);
2424 if(pTargetPatch)
2425 {
2426 Log(("Found active patch at target %RRv (%RRv) -> temporarily disabling it!!\n", addr, pTargetPatch->pPrivInstrGC));
2427 PATMR3DisablePatch(pVM, pTargetPatch->pPrivInstrGC);
2428 }
2429
2430 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2431 rc = patmRecompileCodeStream(pVM, pInstrGC, addr, pfnPATMR3Recompile, pCacheRec);
2432 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2433
2434 if(pTargetPatch)
2435 PATMR3EnablePatch(pVM, pTargetPatch->pPrivInstrGC);
2436
2437 if (RT_FAILURE(rc))
2438 {
2439 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2440 break; //done!
2441 }
2442 }
2443 /* Always return to caller here; we're done! */
2444 rc = VINF_SUCCESS;
2445 goto end;
2446 }
2447 else
2448 if (cpu.pCurInstr->optype & OPTYPE_UNCOND_CONTROLFLOW)
2449 {
2450 rc = VINF_SUCCESS;
2451 goto end;
2452 }
2453 pCurInstrGC += opsize;
2454 }
2455end:
2456 Assert(!(pPatch->flags & PATMFL_RECOMPILE_NEXT));
2457 return rc;
2458}
2459
2460
2461/**
2462 * Generate the jump from guest to patch code
2463 *
2464 * @returns VBox status code.
2465 * @param pVM The VM to operate on.
2466 * @param pPatch Patch record
2467 * @param pCacheRec Guest translation lookup cache record
2468 */
2469static int patmGenJumpToPatch(PVM pVM, PPATCHINFO pPatch, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2470{
2471 uint8_t temp[8];
2472 uint8_t *pPB;
2473 int rc;
2474
2475 Assert(pPatch->cbPatchJump <= sizeof(temp));
2476 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
2477
2478 pPB = PATMGCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2479 Assert(pPB);
2480
2481#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
2482 if (pPatch->flags & PATMFL_JUMP_CONFLICT)
2483 {
2484 Assert(pPatch->pPatchJumpDestGC);
2485
2486 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
2487 {
2488 // jmp [PatchCode]
2489 if (fAddFixup)
2490 {
2491 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2492 {
2493 Log(("Relocation failed for the jump in the guest code!!\n"));
2494 return VERR_PATCHING_REFUSED;
2495 }
2496 }
2497
2498 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2499            *(uint32_t *)&temp[1] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //relative displacement to the jump target
2500 }
2501 else
2502 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
2503 {
2504 // jmp [PatchCode]
2505 if (fAddFixup)
2506 {
2507 if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2508 {
2509 Log(("Relocation failed for the jump in the guest code!!\n"));
2510 return VERR_PATCHING_REFUSED;
2511 }
2512 }
2513
2514 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2515 temp[1] = pPatch->aPrivInstr[1]; //jump opcode copied from original instruction
2516            *(uint32_t *)&temp[2] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //relative displacement to the jump target
2517 }
2518 else
2519 {
2520 Assert(0);
2521 return VERR_PATCHING_REFUSED;
2522 }
2523 }
2524 else
2525#endif
2526 {
2527 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2528
2529 // jmp [PatchCode]
2530 if (fAddFixup)
2531 {
2532 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
2533 {
2534 Log(("Relocation failed for the jump in the guest code!!\n"));
2535 return VERR_PATCHING_REFUSED;
2536 }
2537 }
2538 temp[0] = 0xE9; //jmp
2539        *(uint32_t *)&temp[1] = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //relative displacement to the patch code
2540 }
2541 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2542 AssertRC(rc);
2543
2544 if (rc == VINF_SUCCESS)
2545 pPatch->flags |= PATMFL_PATCHED_GUEST_CODE;
2546
2547 return rc;
2548}
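/*
 * Editor's note (hedged, standalone sketch): the 5 byte patch jump written above is a
 * plain near jmp (0xE9 + rel32) whose displacement is measured from the end of the jump
 * instruction; PATM additionally registers a FIXUP_REL_JMPTOPATCH relocation so the
 * displacement stays valid if addresses move. The helper below is hypothetical (plain C,
 * not a PATM API) and only shows the encoding arithmetic.
 */
#if 0 /* illustrative only */
#include <stdint.h>
#include <string.h>

static void exampleEncodeNearJmp32(uint8_t abJmp[5], uint32_t uJmpAddr, uint32_t uTargetAddr)
{
    uint32_t uRel32 = uTargetAddr - (uJmpAddr + 5); /* rel32 is relative to the next instruction */
    abJmp[0] = 0xE9;                                /* jmp rel32 */
    memcpy(&abJmp[1], &uRel32, sizeof(uRel32));     /* x86 is little endian */
}
#endif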
2549
2550/**
2551 * Remove the jump from guest to patch code
2552 *
2553 * @returns VBox status code.
2554 * @param pVM The VM to operate on.
2555 * @param pPatch Patch record
2556 */
2557static int patmRemoveJumpToPatch(PVM pVM, PPATCHINFO pPatch)
2558{
2559#ifdef DEBUG
2560 DISCPUSTATE cpu;
2561 char szOutput[256];
2562 uint32_t opsize, i = 0;
2563 bool disret;
2564
2565 while (i < pPatch->cbPrivInstr)
2566 {
2567 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2568 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, NULL, &opsize, szOutput);
2569 if (disret == false)
2570 break;
2571
2572 Log(("Org patch jump: %s", szOutput));
2573 Assert(opsize);
2574 i += opsize;
2575 }
2576#endif
2577
2578 /* Restore original code (privileged instruction + following instructions that were overwritten because of the 5/6 byte jmp). */
2579 int rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, pPatch->cbPatchJump);
2580#ifdef DEBUG
2581 if (rc == VINF_SUCCESS)
2582 {
2583 i = 0;
2584 while(i < pPatch->cbPrivInstr)
2585 {
2586 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2587 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, NULL, &opsize, szOutput);
2588 if (disret == false)
2589 break;
2590
2591 Log(("Org instr: %s", szOutput));
2592 Assert(opsize);
2593 i += opsize;
2594 }
2595 }
2596#endif
2597 pPatch->flags &= ~PATMFL_PATCHED_GUEST_CODE;
2598 return rc;
2599}
2600
2601/**
2602 * Generate the call from guest to patch code
2603 *
2604 * @returns VBox status code.
2605 * @param pVM The VM to operate on.
2606 * @param pPatch Patch record
2607 * @param pTargetGC Guest context address of the call/jump target (patch code)
2608 * @param pCacheRec Guest translation cache record
2609 */
2610static int patmGenCallToPatch(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTargetGC, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
2611{
2612 uint8_t temp[8];
2613 uint8_t *pPB;
2614 int rc;
2615
2616 Assert(pPatch->cbPatchJump <= sizeof(temp));
2617
2618 pPB = PATMGCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
2619 Assert(pPB);
2620
2621 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2622
2623 // jmp [PatchCode]
2624 if (fAddFixup)
2625 {
2626 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
2627 {
2628 Log(("Relocation failed for the jump in the guest code!!\n"));
2629 return VERR_PATCHING_REFUSED;
2630 }
2631 }
2632
2633 Assert(pPatch->aPrivInstr[0] == 0xE8 || pPatch->aPrivInstr[0] == 0xE9); /* call or jmp */
2634 temp[0] = pPatch->aPrivInstr[0];
2635    *(uint32_t *)&temp[1] = (uint32_t)pTargetGC - ((uint32_t)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //relative displacement to the target
2636
2637 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2638 AssertRC(rc);
2639
2640 return rc;
2641}
2642
2643
2644/**
2645 * Patch cli/sti pushf/popf instruction block at specified location
2646 *
2647 * @returns VBox status code.
2648 * @param pVM The VM to operate on.
2649 * @param pInstrGC Guest context pointer to privileged instruction
2650 * @param pInstrHC Host context pointer to privileged instruction
2651 * @param uOpcode Instruction opcode
2652 * @param uOpSize Size of starting instruction
2653 * @param pPatchRec Patch record
2654 *
2655 * @note Returns failure if patching is not allowed or not possible.
2656 *
2657 */
2658VMMR3DECL(int) PATMR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2659 uint32_t uOpcode, uint32_t uOpSize, PPATMPATCHREC pPatchRec)
2660{
2661 PPATCHINFO pPatch = &pPatchRec->patch;
2662 int rc = VERR_PATCHING_REFUSED;
2663 DISCPUSTATE cpu;
2664 uint32_t orgOffsetPatchMem = ~0;
2665 RTRCPTR pInstrStart;
2666 bool fInserted;
2667#ifdef LOG_ENABLED
2668 uint32_t opsize;
2669 char szOutput[256];
2670 bool disret;
2671#endif
2672 NOREF(pInstrHC); NOREF(uOpSize);
2673
2674 /* Save original offset (in case of failures later on) */
2675 /** @todo use the hypervisor heap (that has quite a few consequences for save/restore though) */
2676 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2677
2678 Assert(!(pPatch->flags & (PATMFL_GUEST_SPECIFIC|PATMFL_USER_MODE|PATMFL_TRAPHANDLER)));
2679 switch (uOpcode)
2680 {
2681 case OP_MOV:
2682 break;
2683
2684 case OP_CLI:
2685 case OP_PUSHF:
2686 /* We can 'call' a cli or pushf patch. It will either return to the original guest code when IF is set again, or fault. */
2687 /* Note: special precautions are taken when disabling and enabling such patches. */
2688 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
2689 break;
2690
2691 default:
2692 if (!(pPatch->flags & PATMFL_IDTHANDLER))
2693 {
2694 AssertMsg(0, ("PATMR3PatchBlock: Invalid opcode %x\n", uOpcode));
2695 return VERR_INVALID_PARAMETER;
2696 }
2697 }
2698
2699 if (!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)))
2700 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
2701
2702 /* If we're going to insert a patch jump, then the jump itself is not allowed to cross a page boundary. */
2703 if ( (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2704 && PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + SIZEOF_NEARJUMP32)
2705 )
2706 {
2707 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
2708 Log(("Patch jump would cross page boundary -> refuse!!\n"));
2709 rc = VERR_PATCHING_REFUSED;
2710 goto failure;
2711 }
2712
2713 pPatch->nrPatch2GuestRecs = 0;
2714 pInstrStart = pInstrGC;
2715
2716#ifdef PATM_ENABLE_CALL
2717 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
2718#endif
2719
2720 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2721 pPatch->uCurPatchOffset = 0;
2722
2723 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2724
2725 if ((pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER)) == PATMFL_IDTHANDLER)
2726 {
2727 Assert(pPatch->flags & PATMFL_INTHANDLER);
2728
2729 /* Install fake cli patch (to clear the virtual IF and check int xx parameters) */
2730 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2731 if (RT_FAILURE(rc))
2732 goto failure;
2733 }
2734
2735 /***************************************************************************************************************************/
2736 /* Note: We can't insert *any* code before a sysenter handler; some linux guests have an invalid stack at this point!!!!! */
2737 /***************************************************************************************************************************/
2738#ifdef VBOX_WITH_STATISTICS
2739 if (!(pPatch->flags & PATMFL_SYSENTER))
2740 {
2741 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2742 if (RT_FAILURE(rc))
2743 goto failure;
2744 }
2745#endif
2746
2747 PATMP2GLOOKUPREC cacheRec;
2748 RT_ZERO(cacheRec);
2749 cacheRec.pPatch = pPatch;
2750
2751 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, &cacheRec);
2752 /* Free leftover lock if any. */
2753 if (cacheRec.Lock.pvMap)
2754 {
2755 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2756 cacheRec.Lock.pvMap = NULL;
2757 }
2758 if (rc != VINF_SUCCESS)
2759 {
2760        Log(("PATMR3PatchBlock: patmRecompileCodeStream failed with %d\n", rc));
2761 goto failure;
2762 }
2763
2764 /* Calculated during analysis. */
2765 if (pPatch->cbPatchBlockSize < SIZEOF_NEARJUMP32)
2766 {
2767 /* Most likely cause: we encountered an illegal instruction very early on. */
2768 /** @todo could turn it into an int3 callable patch. */
2769 Log(("PATMR3PatchBlock: patch block too small -> refuse\n"));
2770 rc = VERR_PATCHING_REFUSED;
2771 goto failure;
2772 }
2773
2774 /* size of patch block */
2775 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2776
2777
2778 /* Update free pointer in patch memory. */
2779 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2780 /* Round to next 8 byte boundary. */
2781 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2782
2783 /*
2784 * Insert into patch to guest lookup tree
2785 */
2786 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2787 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2788 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2789    AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
2790 if (!fInserted)
2791 {
2792 rc = VERR_PATCHING_REFUSED;
2793 goto failure;
2794 }
2795
2796 /* Note that patmr3SetBranchTargets can install additional patches!! */
2797 rc = patmr3SetBranchTargets(pVM, pPatch);
2798 if (rc != VINF_SUCCESS)
2799 {
2800        Log(("PATMR3PatchBlock: patmr3SetBranchTargets failed with %d\n", rc));
2801 goto failure;
2802 }
2803
2804#ifdef LOG_ENABLED
2805 Log(("Patch code ----------------------------------------------------------\n"));
2806 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, &cacheRec);
2807 /* Free leftover lock if any. */
2808 if (cacheRec.Lock.pvMap)
2809 {
2810 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2811 cacheRec.Lock.pvMap = NULL;
2812 }
2813 Log(("Patch code ends -----------------------------------------------------\n"));
2814#endif
2815
2816 /* make a copy of the guest code bytes that will be overwritten */
2817 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
2818
2819 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
2820 AssertRC(rc);
2821
2822 if (pPatch->flags & PATMFL_INT3_REPLACEMENT_BLOCK)
2823 {
2824 /*uint8_t bASMInt3 = 0xCC; - unused */
2825
2826 Log(("PATMR3PatchBlock %RRv -> int 3 callable patch.\n", pPatch->pPrivInstrGC));
2827 /* Replace first opcode byte with 'int 3'. */
2828 rc = patmActivateInt3Patch(pVM, pPatch);
2829 if (RT_FAILURE(rc))
2830 goto failure;
2831
2832 /* normal patch can be turned into an int3 patch -> clear patch jump installation flag. */
2833 pPatch->flags &= ~PATMFL_MUST_INSTALL_PATCHJMP;
2834
2835 pPatch->flags &= ~PATMFL_INSTR_HINT;
2836 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
2837 }
2838 else
2839 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2840 {
2841 Assert(!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)));
2842 /* now insert a jump in the guest code */
2843 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
2844 AssertRC(rc);
2845 if (RT_FAILURE(rc))
2846 goto failure;
2847
2848 }
2849
2850#ifdef LOG_ENABLED
2851 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2852 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput, PATMREAD_RAWCODE);
2853 Log(("%s patch: %s", patmGetInstructionString(pPatch->opcode, pPatch->flags), szOutput));
2854#endif
2855
2856 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2857 pPatch->pTempInfo->nrIllegalInstr = 0;
2858
2859 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
2860
2861 pPatch->uState = PATCH_ENABLED;
2862 return VINF_SUCCESS;
2863
2864failure:
2865 if (pPatchRec->CoreOffset.Key)
2866 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
2867
2868 patmEmptyTree(pVM, &pPatch->FixupTree);
2869 pPatch->nrFixups = 0;
2870
2871 patmEmptyTree(pVM, &pPatch->JumpTree);
2872 pPatch->nrJumpRecs = 0;
2873
2874 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2875 pPatch->pTempInfo->nrIllegalInstr = 0;
2876
2877    /* Turn this patch into a dummy. */
2878 pPatch->uState = PATCH_REFUSED;
2879 pPatch->pPatchBlockOffset = 0;
2880
2881 // Give back the patch memory we no longer need
2882 Assert(orgOffsetPatchMem != (uint32_t)~0);
2883 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
2884
2885 return rc;
2886}
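/*
 * Editor's note (hedged sketch, not part of the original source): patch memory is handed
 * out by a simple linear allocator -- offPatchMem is advanced by the final block size and
 * rounded up to the next 8 byte boundary, while failure paths merely restore the offset
 * saved at the start. The hypothetical helper below just mirrors that arithmetic.
 */
#if 0 /* illustrative only */
#include <stdint.h>

static uint32_t exampleAllocPatchBlock(uint32_t *poffPatchMem, uint32_t cbBlock)
{
    uint32_t offBlock = *poffPatchMem;              /* block starts at the current free offset */
    *poffPatchMem = (offBlock + cbBlock + 7) & ~7U; /* bump and align to 8 bytes (RT_ALIGN_32(x, 8)) */
    return offBlock;
}
#endif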
2887
2888/**
2889 * Patch IDT handler
2890 *
2891 * @returns VBox status code.
2892 * @param pVM The VM to operate on.
2893 * @param pInstrGC Guest context pointer to privileged instruction
2894 * @param uOpSize Size of starting instruction
2895 * @param pPatchRec Patch record
2896 * @param pCacheRec Cache record ptr
2897 *
2898 * @note Returns failure if patching is not allowed or not possible.
2899 *
2900 */
2901static int patmIdtHandler(PVM pVM, RTRCPTR pInstrGC, uint32_t uOpSize, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
2902{
2903 PPATCHINFO pPatch = &pPatchRec->patch;
2904 bool disret;
2905 DISCPUSTATE cpuPush, cpuJmp;
2906 uint32_t opsize;
2907 RTRCPTR pCurInstrGC = pInstrGC;
2908 uint8_t *pCurInstrHC, *pInstrHC;
2909 uint32_t orgOffsetPatchMem = ~0;
2910
2911 pInstrHC = pCurInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
2912 AssertReturn(pCurInstrHC, VERR_PAGE_NOT_PRESENT);
2913
2914 /*
2915 * In Linux it's often the case that many interrupt handlers push a predefined value onto the stack
2916 * and then jump to a common entrypoint. In order not to waste a lot of memory, we will check for this
2917 * condition here and only patch the common entrypoint once.
2918 */
2919 cpuPush.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2920 disret = PATMR3DISInstr(pVM, pPatch, &cpuPush, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2921 Assert(disret);
2922 if (disret && cpuPush.pCurInstr->opcode == OP_PUSH)
2923 {
2924 RTRCPTR pJmpInstrGC;
2925 int rc;
2926 pCurInstrGC += opsize;
2927
2928 cpuJmp.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2929 disret = PATMR3DISInstr(pVM, pPatch, &cpuJmp, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2930 if ( disret
2931 && cpuJmp.pCurInstr->opcode == OP_JMP
2932 && (pJmpInstrGC = PATMResolveBranch(&cpuJmp, pCurInstrGC))
2933 )
2934 {
2935 bool fInserted;
2936 PPATMPATCHREC pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
2937 if (pJmpPatch == 0)
2938 {
2939 /* Patch it first! */
2940 rc = PATMR3InstallPatch(pVM, pJmpInstrGC, pPatch->flags | PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT);
2941 if (rc != VINF_SUCCESS)
2942 goto failure;
2943 pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
2944 Assert(pJmpPatch);
2945 }
2946 if (pJmpPatch->patch.uState != PATCH_ENABLED)
2947 goto failure;
2948
2949 /* save original offset (in case of failures later on) */
2950 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2951
2952 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2953 pPatch->uCurPatchOffset = 0;
2954 pPatch->nrPatch2GuestRecs = 0;
2955
2956#ifdef VBOX_WITH_STATISTICS
2957 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2958 if (RT_FAILURE(rc))
2959 goto failure;
2960#endif
2961
2962 /* Install fake cli patch (to clear the virtual IF) */
2963 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2964 if (RT_FAILURE(rc))
2965 goto failure;
2966
2967 /* Add lookup record for patch to guest address translation (for the push) */
2968 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pInstrGC, PATM_LOOKUP_BOTHDIR);
2969
2970 /* Duplicate push. */
2971 rc = patmPatchGenDuplicate(pVM, pPatch, &cpuPush, pInstrGC);
2972 if (RT_FAILURE(rc))
2973 goto failure;
2974
2975 /* Generate jump to common entrypoint. */
2976 rc = patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, PATCHCODE_PTR_GC(&pJmpPatch->patch));
2977 if (RT_FAILURE(rc))
2978 goto failure;
2979
2980 /* size of patch block */
2981 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2982
2983 /* Update free pointer in patch memory. */
2984 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2985 /* Round to next 8 byte boundary */
2986 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2987
2988 /* There's no jump from guest to patch code. */
2989 pPatch->cbPatchJump = 0;
2990
2991
2992#ifdef LOG_ENABLED
2993 Log(("Patch code ----------------------------------------------------------\n"));
2994 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
2995 Log(("Patch code ends -----------------------------------------------------\n"));
2996#endif
2997 Log(("Successfully installed IDT handler patch at %RRv\n", pInstrGC));
2998
2999 /*
3000 * Insert into patch to guest lookup tree
3001 */
3002 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3003 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3004 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3005            AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3006
3007 pPatch->uState = PATCH_ENABLED;
3008
3009 return VINF_SUCCESS;
3010 }
3011 }
3012failure:
3013 /* Give back the patch memory we no longer need */
3014 if (orgOffsetPatchMem != (uint32_t)~0)
3015 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3016
3017 return PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, OP_CLI, uOpSize, pPatchRec);
3018}
3019
3020/**
3021 * Install a trampoline to call a guest trap handler directly
3022 *
3023 * @returns VBox status code.
3024 * @param pVM The VM to operate on.
3025 * @param pInstrGC Guest context pointer to privileged instruction
3026 * @param pPatchRec Patch record
3027 * @param pCacheRec Cache record ptr
3028 *
3029 */
3030static int patmInstallTrapTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3031{
3032 PPATCHINFO pPatch = &pPatchRec->patch;
3033 int rc = VERR_PATCHING_REFUSED;
3034 uint32_t orgOffsetPatchMem = ~0;
3035 bool fInserted;
3036#ifdef LOG_ENABLED
3037 bool disret;
3038 DISCPUSTATE cpu;
3039 uint32_t opsize;
3040 char szOutput[256];
3041#endif
3042
3043 // save original offset (in case of failures later on)
3044 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3045
3046 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3047 pPatch->uCurPatchOffset = 0;
3048 pPatch->nrPatch2GuestRecs = 0;
3049
3050#ifdef VBOX_WITH_STATISTICS
3051 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3052 if (RT_FAILURE(rc))
3053 goto failure;
3054#endif
3055
3056 rc = patmPatchGenTrapEntry(pVM, pPatch, pInstrGC);
3057 if (RT_FAILURE(rc))
3058 goto failure;
3059
3060 /* size of patch block */
3061 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3062
3063 /* Update free pointer in patch memory. */
3064 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3065 /* Round to next 8 byte boundary */
3066 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3067
3068 /* There's no jump from guest to patch code. */
3069 pPatch->cbPatchJump = 0;
3070
3071#ifdef LOG_ENABLED
3072 Log(("Patch code ----------------------------------------------------------\n"));
3073 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3074 Log(("Patch code ends -----------------------------------------------------\n"));
3075#endif
3076
3077#ifdef LOG_ENABLED
3078 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3079 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput);
3080 Log(("TRAP handler patch: %s", szOutput));
3081#endif
3082 Log(("Successfully installed Trap Trampoline patch at %RRv\n", pInstrGC));
3083
3084 /*
3085 * Insert into patch to guest lookup tree
3086 */
3087 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3088 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3089 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3090 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3091
3092 pPatch->uState = PATCH_ENABLED;
3093 return VINF_SUCCESS;
3094
3095failure:
3096 AssertMsgFailed(("Failed to install trap handler trampoline!!\n"));
3097
3098 /* Turn this patch into a dummy. */
3099 pPatch->uState = PATCH_REFUSED;
3100 pPatch->pPatchBlockOffset = 0;
3101
3102 /* Give back the patch memory we no longer need */
3103 Assert(orgOffsetPatchMem != (uint32_t)~0);
3104 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3105
3106 return rc;
3107}
3108
3109
3110#ifdef LOG_ENABLED
3111/**
3112 * Check if the instruction is patched as a common idt handler
3113 *
3114 * @returns true or false
3115 * @param pVM The VM to operate on.
3116 * @param pInstrGC Guest context pointer to the instruction
3117 *
3118 */
3119static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC)
3120{
3121 PPATMPATCHREC pRec;
3122
3123 pRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
3124 if (pRec && pRec->patch.flags & PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT)
3125 return true;
3126 return false;
3127}
3128#endif //LOG_ENABLED
3129
3130
3131/**
3132 * Duplicates a complete function
3133 *
3134 * @returns VBox status code.
3135 * @param pVM The VM to operate on.
3136 * @param pInstrGC Guest context pointer to privileged instruction
3137 * @param pPatchRec Patch record
3138 * @param pCacheRec Cache record ptr
3139 *
3140 */
3141static int patmDuplicateFunction(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
3142{
3143 PPATCHINFO pPatch = &pPatchRec->patch;
3144 int rc = VERR_PATCHING_REFUSED;
3145 DISCPUSTATE cpu;
3146 uint32_t orgOffsetPatchMem = ~0;
3147 bool fInserted;
3148
3149 Log(("patmDuplicateFunction %RRv\n", pInstrGC));
3150 /* Save original offset (in case of failures later on). */
3151 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3152
3153 /* We will not go on indefinitely with call instruction handling. */
3154 if (pVM->patm.s.ulCallDepth > PATM_MAX_CALL_DEPTH)
3155 {
3156 Log(("patmDuplicateFunction: maximum call depth reached!!\n"));
3157 return VERR_PATCHING_REFUSED;
3158 }
3159
3160 pVM->patm.s.ulCallDepth++;
3161
3162#ifdef PATM_ENABLE_CALL
3163 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
3164#endif
3165
3166 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
3167
3168 pPatch->nrPatch2GuestRecs = 0;
3169 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3170 pPatch->uCurPatchOffset = 0;
3171
3172 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3173
3174 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3175 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3176 if (RT_FAILURE(rc))
3177 goto failure;
3178
3179#ifdef VBOX_WITH_STATISTICS
3180 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3181 if (RT_FAILURE(rc))
3182 goto failure;
3183#endif
3184
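    /* Recompile the guest function into patch memory, following the code stream from the entry point. */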
3185 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pCacheRec);
3186 if (rc != VINF_SUCCESS)
3187 {
3188 Log(("patmDuplicateFunction: patmRecompileCodeStream failed with %d\n", rc));
3189 goto failure;
3190 }
3191
3192 //size of patch block
3193 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3194
3195 //update free pointer in patch memory
3196 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3197 /* Round to next 8 byte boundary. */
3198 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3199
3200 pPatch->uState = PATCH_ENABLED;
3201
3202 /*
3203 * Insert into patch to guest lookup tree
3204 */
3205 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3206 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3207 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3208 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3209 if (!fInserted)
3210 {
3211 rc = VERR_PATCHING_REFUSED;
3212 goto failure;
3213 }
3214
3215 /* Note that patmr3SetBranchTargets can install additional patches!! */
3216 rc = patmr3SetBranchTargets(pVM, pPatch);
3217 if (rc != VINF_SUCCESS)
3218 {
3219 Log(("patmDuplicateFunction: patmr3SetBranchTargets failed with %d\n", rc));
3220 goto failure;
3221 }
3222
3223#ifdef LOG_ENABLED
3224 Log(("Patch code ----------------------------------------------------------\n"));
3225 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
3226 Log(("Patch code ends -----------------------------------------------------\n"));
3227#endif
3228
3229 Log(("Successfully installed function duplication patch at %RRv\n", pInstrGC));
3230
3231 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3232 pPatch->pTempInfo->nrIllegalInstr = 0;
3233
3234 pVM->patm.s.ulCallDepth--;
3235 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledFunctionPatches);
3236 return VINF_SUCCESS;
3237
3238failure:
3239 if (pPatchRec->CoreOffset.Key)
3240 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3241
3242 patmEmptyTree(pVM, &pPatch->FixupTree);
3243 pPatch->nrFixups = 0;
3244
3245 patmEmptyTree(pVM, &pPatch->JumpTree);
3246 pPatch->nrJumpRecs = 0;
3247
3248 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3249 pPatch->pTempInfo->nrIllegalInstr = 0;
3250
3251 /* Turn this patch into a dummy. */
3252 pPatch->uState = PATCH_REFUSED;
3253 pPatch->pPatchBlockOffset = 0;
3254
3255 // Give back the patch memory we no longer need
3256 Assert(orgOffsetPatchMem != (uint32_t)~0);
3257 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3258
3259 pVM->patm.s.ulCallDepth--;
3260 Log(("patmDuplicateFunction %RRv failed!!\n", pInstrGC));
3261 return rc;
3262}
3263
3264/**
3265 * Creates trampoline code to jump inside an existing patch
3266 *
3267 * @returns VBox status code.
3268 * @param pVM The VM to operate on.
3269 * @param pInstrGC Guest context pointer to privileged instruction
3270 * @param pPatchRec Patch record
3271 *
3272 */
3273static int patmCreateTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3274{
3275 PPATCHINFO pPatch = &pPatchRec->patch;
3276 RTRCPTR pPage, pPatchTargetGC = 0;
3277 uint32_t orgOffsetPatchMem = ~0;
3278 int rc = VERR_PATCHING_REFUSED;
3279 PPATCHINFO pPatchToJmp = NULL; /**< Patch the trampoline jumps to. */
3280 PTRAMPREC pTrampRec = NULL; /**< Trampoline record used to find the patch. */
3281 bool fInserted = false;
3282
3283 Log(("patmCreateTrampoline %RRv\n", pInstrGC));
3284 /* Save original offset (in case of failures later on). */
3285 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3286
3287 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3288 /** @todo we already checked this before */
3289 pPage = pInstrGC & PAGE_BASE_GC_MASK;
3290
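    /* Walk all patches registered for this guest page and pick an enabled function duplicate that already contains recompiled code for pInstrGC. */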
3291 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3292 if (pPatchPage)
3293 {
3294 uint32_t i;
3295
3296 for (i=0;i<pPatchPage->cCount;i++)
3297 {
3298 if (pPatchPage->aPatch[i])
3299 {
3300 pPatchToJmp = pPatchPage->aPatch[i];
3301
3302 if ( (pPatchToJmp->flags & PATMFL_DUPLICATE_FUNCTION)
3303 && pPatchToJmp->uState == PATCH_ENABLED)
3304 {
3305 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatchToJmp, pInstrGC);
3306 if (pPatchTargetGC)
3307 {
3308 uint32_t offsetPatch = pPatchTargetGC - pVM->patm.s.pPatchMemGC;
3309 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatchToJmp->Patch2GuestAddrTree, offsetPatch, false);
3310 Assert(pPatchToGuestRec);
3311
3312 pPatchToGuestRec->fJumpTarget = true;
3313 Assert(pPatchTargetGC != pPatchToJmp->pPrivInstrGC);
3314 Log(("patmCreateTrampoline: generating jump to code inside patch at %RRv (patch target %RRv)\n", pPatchToJmp->pPrivInstrGC, pPatchTargetGC));
3315 break;
3316 }
3317 }
3318 }
3319 }
3320 }
3321 AssertReturn(pPatchPage && pPatchTargetGC && pPatchToJmp, VERR_PATCHING_REFUSED);
3322
3323 /*
3324 * Only record the trampoline patch if this is the first patch to the target
3325 * or we recorded other patches already.
3326 * The goal is to refuse refreshing function duplicates if the guest
3327 * modifies code after a saved state was loaded because it is not possible
3328 * to save the relation between trampoline and target without changing the
3329 * saved state version.
3330 */
3331 if ( !(pPatchToJmp->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
3332 || pPatchToJmp->pTrampolinePatchesHead)
3333 {
3334 pPatchToJmp->flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
3335 pTrampRec = (PTRAMPREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pTrampRec));
3336 if (!pTrampRec)
3337 return VERR_NO_MEMORY; /* or better return VERR_PATCHING_REFUSED to let the VM continue? */
3338
3339 pTrampRec->pPatchTrampoline = pPatchRec;
3340 }
3341
3342 pPatch->nrPatch2GuestRecs = 0;
3343 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3344 pPatch->uCurPatchOffset = 0;
3345
3346 /* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3347 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3348 if (RT_FAILURE(rc))
3349 goto failure;
3350
3351#ifdef VBOX_WITH_STATISTICS
3352 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3353 if (RT_FAILURE(rc))
3354 goto failure;
3355#endif
3356
3357 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC);
3358 if (RT_FAILURE(rc))
3359 goto failure;
3360
3361 /*
3362 * Insert into patch to guest lookup tree
3363 */
3364 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3365 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3366 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3367 AssertMsg(fInserted, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3368 if (!fInserted)
3369 {
3370 rc = VERR_PATCHING_REFUSED;
3371 goto failure;
3372 }
3373
3374 /* size of patch block */
3375 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3376
3377 /* Update free pointer in patch memory. */
3378 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3379 /* Round to next 8 byte boundary */
3380 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3381
3382 /* There's no jump from guest to patch code. */
3383 pPatch->cbPatchJump = 0;
3384
3385 /* Enable the patch. */
3386 pPatch->uState = PATCH_ENABLED;
3387 /* We allow this patch to be called as a function. */
3388 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
3389
3390 if (pTrampRec)
3391 {
3392 pTrampRec->pNext = pPatchToJmp->pTrampolinePatchesHead;
3393 pPatchToJmp->pTrampolinePatchesHead = pTrampRec;
3394 }
3395 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledTrampoline);
3396 return VINF_SUCCESS;
3397
3398failure:
3399 if (pPatchRec->CoreOffset.Key)
3400 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3401
3402 patmEmptyTree(pVM, &pPatch->FixupTree);
3403 pPatch->nrFixups = 0;
3404
3405 patmEmptyTree(pVM, &pPatch->JumpTree);
3406 pPatch->nrJumpRecs = 0;
3407
3408 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3409 pPatch->pTempInfo->nrIllegalInstr = 0;
3410
3411 /* Turn this patch into a dummy. */
3412 pPatch->uState = PATCH_REFUSED;
3413 pPatch->pPatchBlockOffset = 0;
3414
3415 // Give back the patch memory we no longer need
3416 Assert(orgOffsetPatchMem != (uint32_t)~0);
3417 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3418
3419 if (pTrampRec)
3420 MMR3HeapFree(pTrampRec);
3421
3422 return rc;
3423}
3424
3425
3426/**
3427 * Patch branch target function for call/jump at specified location.
3428 * (in response to a VINF_PATM_DUPLICATE_FUNCTION GC exit reason)
3429 *
3430 * @returns VBox status code.
3431 * @param pVM The VM to operate on.
3432 * @param pCtx Guest context
3433 *
3434 */
3435VMMR3DECL(int) PATMR3DuplicateFunctionRequest(PVM pVM, PCPUMCTX pCtx)
3436{
3437 RTRCPTR pBranchTarget, pPage;
3438 int rc;
3439 RTRCPTR pPatchTargetGC = 0;
3440
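    /* Register interface used by the patch code: EDX holds the branch target (converted to a flat
     * address below), EDI points into the patch lookup cache, and the result is returned in EAX as
     * an offset relative to the start of patch memory (0 if no patch could be installed). */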
3441 pBranchTarget = pCtx->edx;
3442 pBranchTarget = SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pBranchTarget);
3443
3444 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3445 pPage = pBranchTarget & PAGE_BASE_GC_MASK;
3446
3447 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3448 if (pPatchPage)
3449 {
3450 uint32_t i;
3451
3452 for (i=0;i<pPatchPage->cCount;i++)
3453 {
3454 if (pPatchPage->aPatch[i])
3455 {
3456 PPATCHINFO pPatch = pPatchPage->aPatch[i];
3457
3458 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
3459 && pPatch->uState == PATCH_ENABLED)
3460 {
3461 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pBranchTarget);
3462 if (pPatchTargetGC)
3463 {
3464 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateUseExisting);
3465 break;
3466 }
3467 }
3468 }
3469 }
3470 }
3471
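    /* If the target is already covered by an existing function patch, a trampoline into that patch
     * is sufficient; otherwise the whole function has to be duplicated. */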
3472 if (pPatchTargetGC)
3473 {
3474 /* Create a trampoline that also sets PATM_INTERRUPTFLAG. */
3475 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_TRAMPOLINE);
3476 }
3477 else
3478 {
3479 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
3480 }
3481
3482 if (rc == VINF_SUCCESS)
3483 {
3484 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pBranchTarget);
3485 Assert(pPatchTargetGC);
3486 }
3487
3488 if (pPatchTargetGC)
3489 {
3490 pCtx->eax = pPatchTargetGC;
3491 pCtx->eax = pCtx->eax - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC; /* make it relative */
3492 }
3493 else
3494 {
3495 /* We add a dummy entry into the lookup cache so we won't get bombarded with the same requests over and over again. */
3496 pCtx->eax = 0;
3497 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQFailed);
3498 }
3499 Assert(PATMIsPatchGCAddr(pVM, pCtx->edi));
3500 rc = PATMAddBranchToLookupCache(pVM, pCtx->edi, pBranchTarget, pCtx->eax);
3501 AssertRC(rc);
3502
3503 pCtx->eip += PATM_ILLEGAL_INSTR_SIZE;
3504 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQSuccess);
3505 return VINF_SUCCESS;
3506}
3507
3508/**
3509 * Replaces a function call by a call to an existing function duplicate (or jmp -> jmp)
3510 *
3511 * @returns VBox status code.
3512 * @param pVM The VM to operate on.
3513 * @param pCpu Disassembly CPU structure ptr
3514 * @param pInstrGC Guest context pointer to privileged instruction
3515 * @param pCacheRec Cache record ptr
3516 *
3517 */
3518static int patmReplaceFunctionCall(PVM pVM, DISCPUSTATE *pCpu, RTRCPTR pInstrGC, PPATMP2GLOOKUPREC pCacheRec)
3519{
3520 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3521 int rc = VERR_PATCHING_REFUSED;
3522 DISCPUSTATE cpu;
3523 RTRCPTR pTargetGC;
3524 PPATMPATCHREC pPatchFunction;
3525 uint32_t opsize;
3526 bool disret;
3527#ifdef LOG_ENABLED
3528 char szOutput[256];
3529#endif
3530
3531 Assert(pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL);
3532 Assert((pCpu->pCurInstr->opcode == OP_CALL || pCpu->pCurInstr->opcode == OP_JMP) && pCpu->opsize == SIZEOF_NEARJUMP32);
3533
3534 if ((pCpu->pCurInstr->opcode != OP_CALL && pCpu->pCurInstr->opcode != OP_JMP) || pCpu->opsize != SIZEOF_NEARJUMP32)
3535 {
3536 rc = VERR_PATCHING_REFUSED;
3537 goto failure;
3538 }
3539
3540 pTargetGC = PATMResolveBranch(pCpu, pInstrGC);
3541 if (pTargetGC == 0)
3542 {
3543 Log(("We don't support far jumps here!! (%08X)\n", pCpu->param1.flags));
3544 rc = VERR_PATCHING_REFUSED;
3545 goto failure;
3546 }
3547
3548 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3549 if (pPatchFunction == NULL)
3550 {
3551 for(;;)
3552 {
3553 /* It could be an indirect call (call -> jmp dest).
3554 * Note that it's dangerous to assume the jump will never change...
3555 */
3556 uint8_t *pTmpInstrHC;
3557
3558 pTmpInstrHC = PATMGCVirtToHCVirt(pVM, pCacheRec, pTargetGC);
3559 Assert(pTmpInstrHC);
3560 if (pTmpInstrHC == 0)
3561 break;
3562
3563 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3564 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pTargetGC, pTmpInstrHC, &opsize, NULL);
3565 if (disret == false || cpu.pCurInstr->opcode != OP_JMP)
3566 break;
3567
3568 pTargetGC = PATMResolveBranch(&cpu, pTargetGC);
3569 if (pTargetGC == 0)
3570 {
3571 break;
3572 }
3573
3574 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3575 break;
3576 }
3577 if (pPatchFunction == 0)
3578 {
3579 AssertMsgFailed(("Unable to find duplicate function %RRv\n", pTargetGC));
3580 rc = VERR_PATCHING_REFUSED;
3581 goto failure;
3582 }
3583 }
3584
3585 // make a copy of the guest code bytes that will be overwritten
3586 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
3587
3588 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3589 AssertRC(rc);
3590
3591 /* Now replace the original call in the guest code */
3592 rc = patmGenCallToPatch(pVM, pPatch, PATCHCODE_PTR_GC(&pPatchFunction->patch), pCacheRec, true);
3593 AssertRC(rc);
3594 if (RT_FAILURE(rc))
3595 goto failure;
3596
3597 /* Lowest and highest address for write monitoring. */
3598 pPatch->pInstrGCLowest = pInstrGC;
3599 pPatch->pInstrGCHighest = pInstrGC + pCpu->opsize;
3600
3601#ifdef LOG_ENABLED
3602 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3603 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput);
3604 Log(("Call patch: %s", szOutput));
3605#endif
3606
3607 Log(("Successfully installed function replacement patch at %RRv\n", pInstrGC));
3608
3609 pPatch->uState = PATCH_ENABLED;
3610 return VINF_SUCCESS;
3611
3612failure:
3613 /* Turn this patch into a dummy. */
3614 pPatch->uState = PATCH_REFUSED;
3615
3616 return rc;
3617}
3618
3619/**
3620 * Replace the address in an MMIO instruction with the cached version.
3621 *
3622 * @returns VBox status code.
3623 * @param pVM The VM to operate on.
3624 * @param pInstrGC Guest context pointer to privileged instruction
3625 * @param pCpu Disassembly CPU structure ptr
3626 * @param pCacheRec Cache record ptr
3627 *
3628 * @note returns failure if patching is not allowed or possible
3629 *
3630 */
3631static int patmPatchMMIOInstr(PVM pVM, RTRCPTR pInstrGC, DISCPUSTATE *pCpu, PPATMP2GLOOKUPREC pCacheRec)
3632{
3633 PPATCHINFO pPatch = (PPATCHINFO)pCacheRec->pPatch;
3634 uint8_t *pPB;
3635 int rc = VERR_PATCHING_REFUSED;
3636#ifdef LOG_ENABLED
3637 DISCPUSTATE cpu;
3638 uint32_t opsize;
3639 bool disret;
3640 char szOutput[256];
3641#endif
3642
3643 Assert(pVM->patm.s.mmio.pCachedData);
3644 if (!pVM->patm.s.mmio.pCachedData)
3645 goto failure;
3646
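    /* Only instructions whose second operand is a plain 32-bit displacement (the MMIO address) are
     * handled here; that displacement occupies the last 4 bytes of the instruction and is replaced below. */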
3647 if (pCpu->param2.flags != USE_DISPLACEMENT32)
3648 goto failure;
3649
3650 pPB = PATMGCVirtToHCVirt(pVM, pCacheRec, pPatch->pPrivInstrGC);
3651 if (pPB == 0)
3652 goto failure;
3653
3654 /* Add relocation record for cached data access. */
3655 if (patmPatchAddReloc32(pVM, pPatch, &pPB[pCpu->opsize - sizeof(RTRCPTR)], FIXUP_ABSOLUTE, pPatch->pPrivInstrGC, pVM->patm.s.mmio.pCachedData) != VINF_SUCCESS)
3656 {
3657 Log(("Relocation failed for cached mmio address!!\n"));
3658 return VERR_PATCHING_REFUSED;
3659 }
3660#ifdef LOG_ENABLED
3661 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3662 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput);
3663 Log(("MMIO patch old instruction: %s", szOutput));
3664#endif
3665
3666 /* Save original instruction. */
3667 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3668 AssertRC(rc);
3669
3670 pPatch->cbPatchJump = pPatch->cbPrivInstr; /* bit of a misnomer in this case; size of replacement instruction. */
3671
3672 /* Replace address with that of the cached item. */
3673 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pInstrGC + pCpu->opsize - sizeof(RTRCPTR), &pVM->patm.s.mmio.pCachedData, sizeof(RTRCPTR));
3674 AssertRC(rc);
3675 if (RT_FAILURE(rc))
3676 {
3677 goto failure;
3678 }
3679
3680#ifdef LOG_ENABLED
3681 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3682 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput);
3683 Log(("MMIO patch: %s", szOutput));
3684#endif
3685 pVM->patm.s.mmio.pCachedData = 0;
3686 pVM->patm.s.mmio.GCPhys = 0;
3687 pPatch->uState = PATCH_ENABLED;
3688 return VINF_SUCCESS;
3689
3690failure:
3691 /* Turn this patch into a dummy. */
3692 pPatch->uState = PATCH_REFUSED;
3693
3694 return rc;
3695}
3696
3697
3698/**
3699 * Replace the address in an MMIO instruction with the cached version. (instruction is part of an existing patch)
3700 *
3701 * @returns VBox status code.
3702 * @param pVM The VM to operate on.
3703 * @param pInstrGC Guest context pointer to privileged instruction
3704 * @param pPatch Patch record
3705 *
3706 * @note returns failure if patching is not allowed or possible
3707 *
3708 */
3709static int patmPatchPATMMMIOInstr(PVM pVM, RTRCPTR pInstrGC, PPATCHINFO pPatch)
3710{
3711 DISCPUSTATE cpu;
3712 uint32_t opsize;
3713 bool disret;
3714 uint8_t *pInstrHC;
3715#ifdef LOG_ENABLED
3716 char szOutput[256];
3717#endif
3718
3719 AssertReturn(pVM->patm.s.mmio.pCachedData, VERR_INVALID_PARAMETER);
3720
3721 /* Convert GC to HC address. */
3722 pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pInstrGC);
3723 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
3724
3725 /* Disassemble mmio instruction. */
3726 cpu.mode = pPatch->uOpMode;
3727 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, NULL);
3728 if (disret == false)
3729 {
3730 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
3731 return VERR_PATCHING_REFUSED;
3732 }
3733
3734 AssertMsg(opsize <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", opsize));
3735 if (opsize > MAX_INSTR_SIZE)
3736 return VERR_PATCHING_REFUSED;
3737 if (cpu.param2.flags != USE_DISPLACEMENT32)
3738 return VERR_PATCHING_REFUSED;
3739
3740 /* Add relocation record for cached data access. */
3741 if (patmPatchAddReloc32(pVM, pPatch, &pInstrHC[cpu.opsize - sizeof(RTRCPTR)], FIXUP_ABSOLUTE) != VINF_SUCCESS)
3742 {
3743 Log(("Relocation failed for cached mmio address!!\n"));
3744 return VERR_PATCHING_REFUSED;
3745 }
3746 /* Replace address with that of the cached item. */
3747 *(RTRCPTR *)&pInstrHC[cpu.opsize - sizeof(RTRCPTR)] = pVM->patm.s.mmio.pCachedData;
3748
3749 /* Lowest and highest address for write monitoring. */
3750 pPatch->pInstrGCLowest = pInstrGC;
3751 pPatch->pInstrGCHighest = pInstrGC + cpu.opsize;
3752
3753#ifdef LOG_ENABLED
3754 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3755 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, szOutput);
3756 Log(("MMIO patch: %s", szOutput));
3757#endif
3758
3759 pVM->patm.s.mmio.pCachedData = 0;
3760 pVM->patm.s.mmio.GCPhys = 0;
3761 return VINF_SUCCESS;
3762}
3763
3764/**
3765 * Activates an int3 patch
3766 *
3767 * @returns VBox status code.
3768 * @param pVM The VM to operate on.
3769 * @param pPatch Patch record
3770 */
3771static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3772{
3773 uint8_t bASMInt3 = 0xCC;
3774 int rc;
3775
3776 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3777 Assert(pPatch->uState != PATCH_ENABLED);
3778
3779 /* Replace first opcode byte with 'int 3'. */
3780 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, &bASMInt3, sizeof(bASMInt3));
3781 AssertRC(rc);
3782
3783 pPatch->cbPatchJump = sizeof(bASMInt3);
3784
3785 return rc;
3786}
3787
3788/**
3789 * Deactivates an int3 patch
3790 *
3791 * @returns VBox status code.
3792 * @param pVM The VM to operate on.
3793 * @param pPatch Patch record
3794 */
3795static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3796{
3797 uint8_t ASMInt3 = 0xCC;
3798 int rc;
3799
3800 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3801 Assert(pPatch->uState == PATCH_ENABLED || pPatch->uState == PATCH_DIRTY);
3802
3803 /* Restore first opcode byte. */
3804 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, sizeof(ASMInt3));
3805 AssertRC(rc);
3806 return rc;
3807}
3808
3809/**
3810 * Replace an instruction with a breakpoint (0xCC), that is handled dynamically
3811 * in the raw-mode context.
3812 *
3813 * @returns VBox status code.
3814 * @param pVM The VM to operate on.
3815 * @param pInstrGC Guest context pointer to privileged instruction
3816 * @param pInstrHC Host context pointer to privileged instruction
3817 * @param pCpu Disassembly CPU structure ptr
3818 * @param pPatch Patch record
3819 *
3820 * @note returns failure if patching is not allowed or possible
3821 *
3822 */
3823VMMR3DECL(int) PATMR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu,
3824 PPATCHINFO pPatch)
3825{
3826 uint8_t bASMInt3 = 0xCC;
3827 int rc;
3828
3829 /* Note: Do not use patch memory here! It might be called during patch installation too. */
3830
3831#ifdef LOG_ENABLED
3832 DISCPUSTATE cpu;
3833 char szOutput[256];
3834 uint32_t opsize;
3835
3836 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3837 PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, szOutput);
3838 Log(("PATMR3PatchInstrInt3: %s", szOutput));
3839#endif
3840
3841 /* Save the original instruction. */
3842 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3843 AssertRC(rc);
3844 pPatch->cbPatchJump = sizeof(bASMInt3); /* bit of a misnomer in this case; size of replacement instruction. */
3845
3846 pPatch->flags |= PATMFL_INT3_REPLACEMENT;
3847
3848 /* Replace first opcode byte with 'int 3'. */
3849 rc = patmActivateInt3Patch(pVM, pPatch);
3850 if (RT_FAILURE(rc))
3851 goto failure;
3852
3853 /* Lowest and highest address for write monitoring. */
3854 pPatch->pInstrGCLowest = pInstrGC;
3855 pPatch->pInstrGCHighest = pInstrGC + pCpu->opsize;
3856
3857 pPatch->uState = PATCH_ENABLED;
3858 return VINF_SUCCESS;
3859
3860failure:
3861 /* Turn this patch into a dummy. */
3862 return VERR_PATCHING_REFUSED;
3863}
3864
3865#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
3866/**
3867 * Patch a jump instruction at specified location
3868 *
3869 * @returns VBox status code.
3870 * @param pVM The VM to operate on.
3871 * @param pInstrGC Guest context pointer to privileged instruction
3872 * @param pInstrHC Host context pointer to privileged instruction
3873 * @param pCpu Disassembly CPU structure ptr
3874 * @param pPatchRec Patch record
3875 *
3876 * @note returns failure if patching is not allowed or possible
3877 *
3878 */
3879int patmPatchJump(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATMPATCHREC pPatchRec)
3880{
3881 PPATCHINFO pPatch = &pPatchRec->patch;
3882 int rc = VERR_PATCHING_REFUSED;
3883#ifdef LOG_ENABLED
3884 bool disret;
3885 DISCPUSTATE cpu;
3886 uint32_t opsize;
3887 char szOutput[256];
3888#endif
3889
3890 pPatch->pPatchBlockOffset = 0; /* doesn't use patch memory */
3891 pPatch->uCurPatchOffset = 0;
3892 pPatch->cbPatchBlockSize = 0;
3893 pPatch->flags |= PATMFL_SINGLE_INSTRUCTION;
3894
3895 /*
3896 * Instruction replacements such as these should never be interrupted. I've added code to EM.cpp to
3897 * make sure this never happens. (unless a trap is triggered (intentionally or not))
3898 */
3899 switch (pCpu->pCurInstr->opcode)
3900 {
3901 case OP_JO:
3902 case OP_JNO:
3903 case OP_JC:
3904 case OP_JNC:
3905 case OP_JE:
3906 case OP_JNE:
3907 case OP_JBE:
3908 case OP_JNBE:
3909 case OP_JS:
3910 case OP_JNS:
3911 case OP_JP:
3912 case OP_JNP:
3913 case OP_JL:
3914 case OP_JNL:
3915 case OP_JLE:
3916 case OP_JNLE:
3917 case OP_JMP:
3918 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
3919 Assert(pCpu->param1.flags & USE_IMMEDIATE32_REL);
3920 if (!(pCpu->param1.flags & USE_IMMEDIATE32_REL))
3921 goto failure;
3922
3923 Assert(pCpu->opsize == SIZEOF_NEARJUMP32 || pCpu->opsize == SIZEOF_NEAR_COND_JUMP32);
3924 if (pCpu->opsize != SIZEOF_NEARJUMP32 && pCpu->opsize != SIZEOF_NEAR_COND_JUMP32)
3925 goto failure;
3926
3927 if (PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + pCpu->opsize))
3928 {
3929 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
3930 AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
3931 rc = VERR_PATCHING_REFUSED;
3932 goto failure;
3933 }
3934
3935 break;
3936
3937 default:
3938 goto failure;
3939 }
3940
3941 // make a copy of the guest code bytes that will be overwritten
3942 Assert(pCpu->opsize <= sizeof(pPatch->aPrivInstr));
3943 Assert(pCpu->opsize >= SIZEOF_NEARJUMP32);
3944 pPatch->cbPatchJump = pCpu->opsize;
3945
3946 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3947 AssertRC(rc);
3948
3949 /* Now insert a jump in the guest code. */
3950 /*
3951 * A conflict jump patch needs to be treated differently; we'll just replace the relative jump address with one that
3952 * references the target instruction in the conflict patch.
3953 */
3954 RTRCPTR pJmpDest = PATMR3GuestGCPtrToPatchGCPtr(pVM, pInstrGC + pCpu->opsize + (int32_t)pCpu->param1.parval);
3955
3956 AssertMsg(pJmpDest, ("PATMR3GuestGCPtrToPatchGCPtr failed for %RRv\n", pInstrGC + pCpu->opsize + (int32_t)pCpu->param1.parval));
3957 pPatch->pPatchJumpDestGC = pJmpDest;
3958
3959 PATMP2GLOOKUPREC cacheRec;
3960 RT_ZERO(cacheRec);
3961 cacheRec.pPatch = pPatch;
3962
3963 rc = patmGenJumpToPatch(pVM, pPatch, &cacheRec, true);
3964 /* Free leftover lock if any. */
3965 if (cacheRec.Lock.pvMap)
3966 {
3967 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
3968 cacheRec.Lock.pvMap = NULL;
3969 }
3970 AssertRC(rc);
3971 if (RT_FAILURE(rc))
3972 goto failure;
3973
3974 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
3975
3976#ifdef LOG_ENABLED
3977 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3978 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, NULL, &opsize, szOutput);
3979 Log(("%s patch: %s", patmGetInstructionString(pPatch->opcode, pPatch->flags), szOutput));
3980#endif
3981
3982 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
3983
3984 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledJump);
3985
3986 /* Lowest and highest address for write monitoring. */
3987 pPatch->pInstrGCLowest = pInstrGC;
3988 pPatch->pInstrGCHighest = pInstrGC + pPatch->cbPatchJump;
3989
3990 pPatch->uState = PATCH_ENABLED;
3991 return VINF_SUCCESS;
3992
3993failure:
3994 /* Turn this patch into a dummy. */
3995 pPatch->uState = PATCH_REFUSED;
3996
3997 return rc;
3998}
3999#endif /* PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES */
4000
4001
4002/**
4003 * Gives hint to PATM about supervisor guest instructions
4004 *
4005 * @returns VBox status code.
4006 * @param pVM The VM to operate on.
4007 * @param pInstrGC Guest context pointer to privileged instruction
4008 * @param flags Patch flags
4009 */
4010VMMR3DECL(int) PATMR3AddHint(PVM pVM, RTRCPTR pInstrGC, uint32_t flags)
4011{
4012 Assert(pInstrGC);
4013 Assert(flags == PATMFL_CODE32);
4014
4015 Log(("PATMR3AddHint %RRv\n", pInstrGC));
4016 return PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_INSTR_HINT);
4017}
4018
4019/**
4020 * Patch privileged instruction at specified location
4021 *
4022 * @returns VBox status code.
4023 * @param pVM The VM to operate on.
4024 * @param pInstrGC Guest context pointer to privileged instruction (0:32 flat address)
4025 * @param flags Patch flags
4026 *
4027 * @note returns failure if patching is not allowed or possible
4028 */
4029VMMR3DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
4030{
4031 DISCPUSTATE cpu;
4032 R3PTRTYPE(uint8_t *) pInstrHC;
4033 uint32_t opsize;
4034 PPATMPATCHREC pPatchRec;
4035 PCPUMCTX pCtx = 0;
4036 bool disret;
4037 int rc;
4038 PVMCPU pVCpu = VMMGetCpu0(pVM);
4039
4040 if ( !pVM
4041 || pInstrGC == 0
4042 || (flags & ~(PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_SYSENTER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_REPLACE_FUNCTION_CALL|PATMFL_GUEST_SPECIFIC|PATMFL_INT3_REPLACEMENT|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_MMIO_ACCESS|PATMFL_TRAMPOLINE|PATMFL_INSTR_HINT|PATMFL_JUMP_CONFLICT)))
4043 {
4044 AssertFailed();
4045 return VERR_INVALID_PARAMETER;
4046 }
4047
4048 if (PATMIsEnabled(pVM) == false)
4049 return VERR_PATCHING_REFUSED;
4050
4051 /* Test for patch conflict only with patches that actually change guest code. */
4052 if (!(flags & (PATMFL_GUEST_SPECIFIC|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAMPOLINE)))
4053 {
4054 PPATCHINFO pConflictPatch = PATMFindActivePatchByEntrypoint(pVM, pInstrGC);
4055 AssertReleaseMsg(pConflictPatch == 0, ("Unable to patch overwritten instruction at %RRv (%RRv)\n", pInstrGC, pConflictPatch->pPrivInstrGC));
4056 if (pConflictPatch != 0)
4057 return VERR_PATCHING_REFUSED;
4058 }
4059
4060 if (!(flags & PATMFL_CODE32))
4061 {
4062 /** @todo Only 32 bits code right now */
4063 AssertMsgFailed(("PATMR3InstallPatch: We don't support 16 bits code at this moment!!\n"));
4064 return VERR_NOT_IMPLEMENTED;
4065 }
4066
4067 /* We ran out of patch memory; don't bother anymore. */
4068 if (pVM->patm.s.fOutOfMemory == true)
4069 return VERR_PATCHING_REFUSED;
4070
4071 /* Make sure the code selector is wide open; otherwise refuse. */
4072 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
4073 if (CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)) == 0)
4074 {
4075 RTRCPTR pInstrGCFlat = SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pInstrGC);
4076 if (pInstrGCFlat != pInstrGC)
4077 {
4078 Log(("PATMR3InstallPatch: code selector not wide open: %04x:%RRv != %RRv eflags=%08x\n", pCtx->cs, pInstrGCFlat, pInstrGC, pCtx->eflags.u32));
4079 return VERR_PATCHING_REFUSED;
4080 }
4081 }
4082
4083 /* Note: the OpenBSD specific check will break if we allow additional patches to be installed (int 3). */
4084 if (!(flags & PATMFL_GUEST_SPECIFIC))
4085 {
4086 /* New code. Make sure CSAM has a go at it first. */
4087 CSAMR3CheckCode(pVM, pInstrGC);
4088 }
4089
4090 /* Note: obsolete */
4091 if ( PATMIsPatchGCAddr(pVM, pInstrGC)
4092 && (flags & PATMFL_MMIO_ACCESS))
4093 {
4094 RTRCUINTPTR offset;
4095 void *pvPatchCoreOffset;
4096
4097 /* Find the patch record. */
4098 offset = pInstrGC - pVM->patm.s.pPatchMemGC;
4099 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
4100 if (pvPatchCoreOffset == NULL)
4101 {
4102 AssertMsgFailed(("PATMR3InstallPatch: patch not found at address %RRv!!\n", pInstrGC));
4103 return VERR_PATCH_NOT_FOUND; //fatal error
4104 }
4105 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
4106
4107 return patmPatchPATMMMIOInstr(pVM, pInstrGC, &pPatchRec->patch);
4108 }
4109
4110 AssertReturn(!PATMIsPatchGCAddr(pVM, pInstrGC), VERR_PATCHING_REFUSED);
4111
4112 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4113 if (pPatchRec)
4114 {
4115 Assert(!(flags & PATMFL_TRAMPOLINE));
4116
4117 /* Hints about existing patches are ignored. */
4118 if (flags & PATMFL_INSTR_HINT)
4119 return VERR_PATCHING_REFUSED;
4120
4121 if (pPatchRec->patch.uState == PATCH_DISABLE_PENDING)
4122 {
4123 Log(("PATMR3InstallPatch: disable operation is pending for patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
4124 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
4125 Assert(pPatchRec->patch.uState == PATCH_DISABLED);
4126 }
4127
4128 if (pPatchRec->patch.uState == PATCH_DISABLED)
4129 {
4130 /* A patch, for which we previously received a hint, will be enabled and turned into a normal patch. */
4131 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4132 {
4133 Log(("Enabling HINTED patch %RRv\n", pInstrGC));
4134 pPatchRec->patch.flags &= ~PATMFL_INSTR_HINT;
4135 }
4136 else
4137 Log(("Enabling patch %RRv again\n", pInstrGC));
4138
4139 /** @todo we shouldn't disable and enable patches too often (it's relatively cheap, but pointless if it always happens) */
4140 rc = PATMR3EnablePatch(pVM, pInstrGC);
4141 if (RT_SUCCESS(rc))
4142 return VWRN_PATCH_ENABLED;
4143
4144 return rc;
4145 }
4146 if ( pPatchRec->patch.uState == PATCH_ENABLED
4147 || pPatchRec->patch.uState == PATCH_DIRTY)
4148 {
4149 /*
4150 * The patch might have been overwritten.
4151 */
4152 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
4153 if (pPatchRec->patch.uState != PATCH_REFUSED && pPatchRec->patch.uState != PATCH_UNUSABLE)
4154 {
4155 /* Patch must have been overwritten; remove it and pretend nothing happened. */
4156 Log(("Patch an existing patched instruction?!? (%RRv)\n", pInstrGC));
4157 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_MMIO_ACCESS|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
4158 {
4159 if (flags & PATMFL_IDTHANDLER)
4160 pPatchRec->patch.flags |= (flags & (PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER|PATMFL_INTHANDLER)); /* update the type */
4161
4162 return VERR_PATM_ALREADY_PATCHED; /* already done once */
4163 }
4164 }
4165 rc = PATMR3RemovePatch(pVM, pInstrGC);
4166 if (RT_FAILURE(rc))
4167 return VERR_PATCHING_REFUSED;
4168 }
4169 else
4170 {
4171 AssertMsg(pPatchRec->patch.uState == PATCH_REFUSED || pPatchRec->patch.uState == PATCH_UNUSABLE, ("Patch an existing patched instruction?!? (%RRv, state=%d)\n", pInstrGC, pPatchRec->patch.uState));
4172 /* already tried it once! */
4173 return VERR_PATCHING_REFUSED;
4174 }
4175 }
4176
4177 RTGCPHYS GCPhys;
4178 rc = PGMGstGetPage(pVCpu, pInstrGC, NULL, &GCPhys);
4179 if (rc != VINF_SUCCESS)
4180 {
4181 Log(("PGMGstGetPage failed with %Rrc\n", rc));
4182 return rc;
4183 }
4184 /* Disallow patching instructions inside ROM code; complete function duplication is allowed though. */
4185 if ( !(flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAMPOLINE))
4186 && !PGMPhysIsGCPhysNormal(pVM, GCPhys))
4187 {
4188 Log(("Code at %RGv (phys %RGp) is in a ROM, MMIO or invalid page - refused\n", pInstrGC, GCPhys));
4189 return VERR_PATCHING_REFUSED;
4190 }
4191
4192 /* Initialize cache record for guest address translations. */
4193 bool fInserted;
4194 PATMP2GLOOKUPREC cacheRec;
4195 RT_ZERO(cacheRec);
4196
4197 pInstrHC = PATMGCVirtToHCVirt(pVM, &cacheRec, pInstrGC);
4198 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
4199
4200 /* Allocate patch record. */
4201 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
4202 if (RT_FAILURE(rc))
4203 {
4204 Log(("Out of memory!!!!\n"));
4205 return VERR_NO_MEMORY;
4206 }
4207 pPatchRec->Core.Key = pInstrGC;
4208 pPatchRec->patch.uState = PATCH_REFUSED; /* default value */
4209 /* Insert patch record into the lookup tree. */
4210 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
4211 Assert(fInserted);
4212
4213 pPatchRec->patch.pPrivInstrGC = pInstrGC;
4214 pPatchRec->patch.flags = flags;
4215 pPatchRec->patch.uOpMode = (flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
4216 pPatchRec->patch.pTrampolinePatchesHead = NULL;
4217
4218 pPatchRec->patch.pInstrGCLowest = pInstrGC;
4219 pPatchRec->patch.pInstrGCHighest = pInstrGC;
4220
4221 if (!(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION | PATMFL_IDTHANDLER | PATMFL_SYSENTER | PATMFL_TRAMPOLINE)))
4222 {
4223 /*
4224 * Close proximity to an unusable patch is a possible hint that this patch would turn out to be dangerous too!
4225 */
4226 PPATMPATCHREC pPatchNear = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, (pInstrGC + SIZEOF_NEARJUMP32 - 1), false);
4227 if (pPatchNear)
4228 {
4229 if (pPatchNear->patch.uState == PATCH_UNUSABLE && pInstrGC < pPatchNear->patch.pPrivInstrGC && pInstrGC + SIZEOF_NEARJUMP32 > pPatchNear->patch.pPrivInstrGC)
4230 {
4231 Log(("Dangerous patch; would overwrite the unusable patch at %RRv\n", pPatchNear->patch.pPrivInstrGC));
4232
4233 pPatchRec->patch.uState = PATCH_UNUSABLE;
4234 /*
4235 * Leave the new patch record in place, marked unusable, so we don't check this location over and over again.
4236 */
4237 return VERR_PATCHING_REFUSED;
4238 }
4239 }
4240 }
4241
4242 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
4243 if (pPatchRec->patch.pTempInfo == 0)
4244 {
4245 Log(("Out of memory!!!!\n"));
4246 return VERR_NO_MEMORY;
4247 }
4248
4249 cpu.mode = pPatchRec->patch.uOpMode;
4250 disret = PATMR3DISInstr(pVM, &pPatchRec->patch, &cpu, pInstrGC, NULL, &opsize, NULL);
4251 if (disret == false)
4252 {
4253 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
4254 return VERR_PATCHING_REFUSED;
4255 }
4256
4257 AssertMsg(opsize <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", opsize));
4258 if (opsize > MAX_INSTR_SIZE)
4259 return VERR_PATCHING_REFUSED;
4260
4261 pPatchRec->patch.cbPrivInstr = opsize;
4262 pPatchRec->patch.opcode = cpu.pCurInstr->opcode;
4263
4264 /* Restricted hinting for now. */
4265 Assert(!(flags & PATMFL_INSTR_HINT) || cpu.pCurInstr->opcode == OP_CLI);
4266
4267 /* Initialize cache record patch pointer. */
4268 cacheRec.pPatch = &pPatchRec->patch;
4269
4270 /* Allocate statistics slot */
4271 if (pVM->patm.s.uCurrentPatchIdx < PATM_STAT_MAX_COUNTERS)
4272 {
4273 pPatchRec->patch.uPatchIdx = pVM->patm.s.uCurrentPatchIdx++;
4274 }
4275 else
4276 {
4277 Log(("WARNING: Patch index wrap around!!\n"));
4278 pPatchRec->patch.uPatchIdx = PATM_STAT_INDEX_DUMMY;
4279 }
4280
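    /* Dispatch to the installation routine that matches the patch type flags. */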
4281 if (pPatchRec->patch.flags & PATMFL_TRAPHANDLER)
4282 {
4283 rc = patmInstallTrapTrampoline(pVM, pInstrGC, pPatchRec, &cacheRec);
4284 }
4285 else
4286 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION ))
4287 {
4288 rc = patmDuplicateFunction(pVM, pInstrGC, pPatchRec, &cacheRec);
4289 }
4290 else
4291 if (pPatchRec->patch.flags & PATMFL_TRAMPOLINE)
4292 {
4293 rc = patmCreateTrampoline(pVM, pInstrGC, pPatchRec);
4294 }
4295 else
4296 if (pPatchRec->patch.flags & PATMFL_REPLACE_FUNCTION_CALL)
4297 {
4298 rc = patmReplaceFunctionCall(pVM, &cpu, pInstrGC, &cacheRec);
4299 }
4300 else
4301 if (pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT)
4302 {
4303 rc = PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4304 }
4305 else
4306 if (pPatchRec->patch.flags & PATMFL_MMIO_ACCESS)
4307 {
4308 rc = patmPatchMMIOInstr(pVM, pInstrGC, &cpu, &cacheRec);
4309 }
4310 else
4311 if (pPatchRec->patch.flags & (PATMFL_IDTHANDLER|PATMFL_SYSENTER))
4312 {
4313 if (pPatchRec->patch.flags & PATMFL_SYSENTER)
4314 pPatchRec->patch.flags |= PATMFL_IDTHANDLER; /* we treat a sysenter handler as an IDT handler */
4315
4316 rc = patmIdtHandler(pVM, pInstrGC, opsize, pPatchRec, &cacheRec);
4317#ifdef VBOX_WITH_STATISTICS
4318 if ( rc == VINF_SUCCESS
4319 && (pPatchRec->patch.flags & PATMFL_SYSENTER))
4320 {
4321 pVM->patm.s.uSysEnterPatchIdx = pPatchRec->patch.uPatchIdx;
4322 }
4323#endif
4324 }
4325 else
4326 if (pPatchRec->patch.flags & PATMFL_GUEST_SPECIFIC)
4327 {
4328 switch (cpu.pCurInstr->opcode)
4329 {
4330 case OP_SYSENTER:
4331 case OP_PUSH:
4332 rc = PATMInstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4333 if (rc == VINF_SUCCESS)
4334 {
4336 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4337 return rc;
4338 }
4339 break;
4340
4341 default:
4342 rc = VERR_NOT_IMPLEMENTED;
4343 break;
4344 }
4345 }
4346 else
4347 {
4348 switch (cpu.pCurInstr->opcode)
4349 {
4350 case OP_SYSENTER:
4351 rc = PATMInstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4352 if (rc == VINF_SUCCESS)
4353 {
4354 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4355 return VINF_SUCCESS;
4356 }
4357 break;
4358
4359#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
4360 case OP_JO:
4361 case OP_JNO:
4362 case OP_JC:
4363 case OP_JNC:
4364 case OP_JE:
4365 case OP_JNE:
4366 case OP_JBE:
4367 case OP_JNBE:
4368 case OP_JS:
4369 case OP_JNS:
4370 case OP_JP:
4371 case OP_JNP:
4372 case OP_JL:
4373 case OP_JNL:
4374 case OP_JLE:
4375 case OP_JNLE:
4376 case OP_JECXZ:
4377 case OP_LOOP:
4378 case OP_LOOPNE:
4379 case OP_LOOPE:
4380 case OP_JMP:
4381 if (pPatchRec->patch.flags & PATMFL_JUMP_CONFLICT)
4382 {
4383 rc = patmPatchJump(pVM, pInstrGC, pInstrHC, &cpu, pPatchRec);
4384 break;
4385 }
4386 return VERR_NOT_IMPLEMENTED;
4387#endif
4388
4389 case OP_PUSHF:
4390 case OP_CLI:
4391 Log(("PATMR3InstallPatch %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4392 rc = PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, cpu.pCurInstr->opcode, opsize, pPatchRec);
4393 break;
4394
4395 case OP_STR:
4396 case OP_SGDT:
4397 case OP_SLDT:
4398 case OP_SIDT:
4399 case OP_CPUID:
4400 case OP_LSL:
4401 case OP_LAR:
4402 case OP_SMSW:
4403 case OP_VERW:
4404 case OP_VERR:
4405 case OP_IRET:
4406 rc = PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4407 break;
4408
4409 default:
4410 return VERR_NOT_IMPLEMENTED;
4411 }
4412 }
4413
4414 if (rc != VINF_SUCCESS)
4415 {
4416 if (pPatchRec && pPatchRec->patch.nrPatch2GuestRecs)
4417 {
4418 patmEmptyTreeU32(pVM, &pPatchRec->patch.Patch2GuestAddrTree);
4419 pPatchRec->patch.nrPatch2GuestRecs = 0;
4420 }
4421 pVM->patm.s.uCurrentPatchIdx--;
4422 }
4423 else
4424 {
4425 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
4426 AssertRCReturn(rc, rc);
4427
4428 /* Keep track of the upper and lower boundaries of patched instructions. */
4429 if (pPatchRec->patch.pInstrGCLowest < pVM->patm.s.pPatchedInstrGCLowest)
4430 pVM->patm.s.pPatchedInstrGCLowest = pPatchRec->patch.pInstrGCLowest;
4431 if (pPatchRec->patch.pInstrGCHighest > pVM->patm.s.pPatchedInstrGCHighest)
4432 pVM->patm.s.pPatchedInstrGCHighest = pPatchRec->patch.pInstrGCHighest;
4433
4434 Log(("Patch lowest %RRv highest %RRv\n", pPatchRec->patch.pInstrGCLowest, pPatchRec->patch.pInstrGCHighest));
4435 Log(("Global lowest %RRv highest %RRv\n", pVM->patm.s.pPatchedInstrGCLowest, pVM->patm.s.pPatchedInstrGCHighest));
4436
4437 STAM_COUNTER_ADD(&pVM->patm.s.StatInstalled, 1);
4438 STAM_COUNTER_ADD(&pVM->patm.s.StatPATMMemoryUsed, pPatchRec->patch.cbPatchBlockSize);
4439
4440 rc = VINF_SUCCESS;
4441
4442 /* Patch hints are not enabled by default; they are only enabled when actually encountered. */
4443 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4444 {
4445 rc = PATMR3DisablePatch(pVM, pInstrGC);
4446 AssertRCReturn(rc, rc);
4447 }
4448
4449#ifdef VBOX_WITH_STATISTICS
4450 /* Register statistics counter */
4451 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
4452 {
4453 STAMR3RegisterCallback(pVM, &pPatchRec->patch, STAMVISIBILITY_NOT_GUI, STAMUNIT_GOOD_BAD, patmResetStat, patmPrintStat, "Patch statistics",
4454 "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4455#ifndef DEBUG_sandervl
4456 /* Full breakdown for the GUI. */
4457 STAMR3RegisterF(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx], STAMTYPE_RATIO_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_GOOD_BAD, PATMPatchType(pVM, &pPatchRec->patch),
4458 "/PATM/Stats/PatchBD/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4459 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchBlockSize,STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchBlockSize", pPatchRec->patch.pPrivInstrGC);
4460 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchJump, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchJump", pPatchRec->patch.pPrivInstrGC);
4461 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPrivInstr, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPrivInstr", pPatchRec->patch.pPrivInstrGC);
4462 STAMR3RegisterF(pVM, &pPatchRec->patch.cCodeWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cCodeWrites", pPatchRec->patch.pPrivInstrGC);
4463 STAMR3RegisterF(pVM, &pPatchRec->patch.cInvalidWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cInvalidWrites", pPatchRec->patch.pPrivInstrGC);
4464 STAMR3RegisterF(pVM, &pPatchRec->patch.cTraps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cTraps", pPatchRec->patch.pPrivInstrGC);
4465 STAMR3RegisterF(pVM, &pPatchRec->patch.flags, STAMTYPE_X32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/flags", pPatchRec->patch.pPrivInstrGC);
4466 STAMR3RegisterF(pVM, &pPatchRec->patch.nrJumpRecs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrJumpRecs", pPatchRec->patch.pPrivInstrGC);
4467 STAMR3RegisterF(pVM, &pPatchRec->patch.nrFixups, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrFixups", pPatchRec->patch.pPrivInstrGC);
4468 STAMR3RegisterF(pVM, &pPatchRec->patch.opcode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/opcode", pPatchRec->patch.pPrivInstrGC);
4469 STAMR3RegisterF(pVM, &pPatchRec->patch.uOldState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOldState", pPatchRec->patch.pPrivInstrGC);
4470 STAMR3RegisterF(pVM, &pPatchRec->patch.uOpMode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOpMode", pPatchRec->patch.pPrivInstrGC);
4471 /// @todo change the state to be a callback so we can get a state mnemonic instead.
4472 STAMR3RegisterF(pVM, &pPatchRec->patch.uState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uState", pPatchRec->patch.pPrivInstrGC);
4473#endif
4474 }
4475#endif
4476 }
4477 /* Free leftover lock if any. */
4478 if (cacheRec.Lock.pvMap)
4479 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
4480 return rc;
4481}
4482
4483/**
4484 * Query instruction size
4485 *
4486 * @returns VBox status code.
4487 * @param pVM The VM to operate on.
4488 * @param pPatch Patch record
4489 * @param pInstrGC Instruction address
4490 */
4491static uint32_t patmGetInstrSize(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
4492{
4493 uint8_t *pInstrHC;
4494 PGMPAGEMAPLOCK Lock;
4495
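    /* Map the guest page read-only and disassemble the instruction to determine its size;
     * PATMREAD_ORGCODE requests the original guest bytes rather than any patched-in bytes. */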
4496 int rc = PGMPhysGCPtr2CCPtrReadOnly(VMMGetCpu(pVM), pInstrGC, (const void **)&pInstrHC, &Lock);
4497 if (rc == VINF_SUCCESS)
4498 {
4499 DISCPUSTATE cpu;
4500 bool disret;
4501 uint32_t opsize;
4502
4503 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
4504 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, NULL, PATMREAD_ORGCODE | PATMREAD_NOCHECK);
4505 PGMPhysReleasePageMappingLock(pVM, &Lock);
4506 if (disret)
4507 return opsize;
4508 }
4509 return 0;
4510}
4511
4512/**
4513 * Add patch to page record
4514 *
4515 * @returns VBox status code.
4516 * @param pVM The VM to operate on.
4517 * @param pPage Page address
4518 * @param pPatch Patch record
4519 */
4520int patmAddPatchToPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4521{
4522 PPATMPATCHPAGE pPatchPage;
4523 int rc;
4524
4525 Log(("patmAddPatchToPage: insert patch %RHv to page %RRv\n", pPatch, pPage));
4526
4527 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4528 if (pPatchPage)
4529 {
4530 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4531 if (pPatchPage->cCount == pPatchPage->cMaxPatches)
4532 {
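            /* The per-page patch array is full; grow it by PATMPATCHPAGE_PREALLOC_INCREMENT entries and copy the old contents over. */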
4533 uint32_t cMaxPatchesOld = pPatchPage->cMaxPatches;
4534 PPATCHINFO *paPatchOld = pPatchPage->aPatch;
4535
4536 pPatchPage->cMaxPatches += PATMPATCHPAGE_PREALLOC_INCREMENT;
4537 rc = MMHyperAlloc(pVM, sizeof(PPATCHINFO)*pPatchPage->cMaxPatches, 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage->aPatch);
4538 if (RT_FAILURE(rc))
4539 {
4540 Log(("Out of memory!!!!\n"));
4541 return VERR_NO_MEMORY;
4542 }
4543 memcpy(pPatchPage->aPatch, paPatchOld, cMaxPatchesOld*sizeof(PPATCHINFO));
4544 MMHyperFree(pVM, paPatchOld);
4545 }
4546 pPatchPage->aPatch[pPatchPage->cCount] = pPatch;
4547 pPatchPage->cCount++;
4548 }
4549 else
4550 {
4551 bool fInserted;
4552
4553 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHPAGE), 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage);
4554 if (RT_FAILURE(rc))
4555 {
4556 Log(("Out of memory!!!!\n"));
4557 return VERR_NO_MEMORY;
4558 }
4559 pPatchPage->Core.Key = pPage;
4560 pPatchPage->cCount = 1;
4561 pPatchPage->cMaxPatches = PATMPATCHPAGE_PREALLOC_INCREMENT;
4562
4563 rc = MMHyperAlloc(pVM, sizeof(PPATCHINFO)*PATMPATCHPAGE_PREALLOC_INCREMENT, 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage->aPatch);
4564 if (RT_FAILURE(rc))
4565 {
4566 Log(("Out of memory!!!!\n"));
4567 MMHyperFree(pVM, pPatchPage);
4568 return VERR_NO_MEMORY;
4569 }
4570 pPatchPage->aPatch[0] = pPatch;
4571
4572 fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, &pPatchPage->Core);
4573 Assert(fInserted);
4574 pVM->patm.s.cPageRecords++;
4575
4576 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageInserted);
4577 }
4578 CSAMR3MonitorPage(pVM, pPage, CSAM_TAG_PATM);
4579
4580 /* Get the closest guest instruction (from below) */
4581 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4582 Assert(pGuestToPatchRec);
4583 if (pGuestToPatchRec)
4584 {
4585 LogFlow(("patmAddPatchToPage: lowest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pLowestAddrGC));
4586 if ( pPatchPage->pLowestAddrGC == 0
4587 || pPatchPage->pLowestAddrGC > (RTRCPTR)pGuestToPatchRec->Core.Key)
4588 {
4589 RTRCUINTPTR offset;
4590
4591 pPatchPage->pLowestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4592
4593 offset = pPatchPage->pLowestAddrGC & PAGE_OFFSET_MASK;
4594 /* If we're too close to the page boundary, then make sure an instruction from the previous page doesn't cross the boundary itself. */
4595 if (offset && offset < MAX_INSTR_SIZE)
4596 {
4597 /* Get the closest guest instruction (from above) */
4598 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage-1, false);
4599
4600 if (pGuestToPatchRec)
4601 {
4602 uint32_t size = patmGetInstrSize(pVM, pPatch, (RTRCPTR)pGuestToPatchRec->Core.Key);
4603 if ((RTRCUINTPTR)pGuestToPatchRec->Core.Key + size > pPage)
4604 {
4605 pPatchPage->pLowestAddrGC = pPage;
4606 LogFlow(("patmAddPatchToPage: new lowest %RRv\n", pPatchPage->pLowestAddrGC));
4607 }
4608 }
4609 }
4610 }
4611 }
4612
4613 /* Get the closest guest instruction (from above) */
4614 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage+PAGE_SIZE-1, false);
4615 Assert(pGuestToPatchRec);
4616 if (pGuestToPatchRec)
4617 {
4618 LogFlow(("patmAddPatchToPage: highest patch page address %RRv current highest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pHighestAddrGC));
4619 if ( pPatchPage->pHighestAddrGC == 0
4620 || pPatchPage->pHighestAddrGC <= (RTRCPTR)pGuestToPatchRec->Core.Key)
4621 {
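            /* Remember the end address (start + size) of the highest patched instruction on this page. */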
4622 pPatchPage->pHighestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4623 /* Increase by instruction size. */
4624 uint32_t size = patmGetInstrSize(pVM, pPatch, pPatchPage->pHighestAddrGC);
4625//// Assert(size);
4626 pPatchPage->pHighestAddrGC += size;
4627 LogFlow(("patmAddPatchToPage: new highest %RRv\n", pPatchPage->pHighestAddrGC));
4628 }
4629 }
4630
4631 return VINF_SUCCESS;
4632}
4633
4634/**
4635 * Remove patch from page record
4636 *
4637 * @returns VBox status code.
4638 * @param pVM The VM to operate on.
4639 * @param pPage Page address
4640 * @param pPatch Patch record
4641 */
4642int patmRemovePatchFromPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4643{
4644 PPATMPATCHPAGE pPatchPage;
4645 int rc;
4646
4647 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4648 Assert(pPatchPage);
4649
4650 if (!pPatchPage)
4651 return VERR_INVALID_PARAMETER;
4652
4653 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4654
4655    Log(("patmRemovePatchFromPage: remove patch %RHv from page %RRv\n", pPatch, pPage));
4656 if (pPatchPage->cCount > 1)
4657 {
4658 uint32_t i;
4659
4660 /* Used by multiple patches */
4661 for (i=0;i<pPatchPage->cCount;i++)
4662 {
4663 if (pPatchPage->aPatch[i] == pPatch)
4664 {
4665 pPatchPage->aPatch[i] = 0;
4666 break;
4667 }
4668 }
4669 /* close the gap between the remaining pointers. */
4670 if (i < pPatchPage->cCount - 1)
4671 {
4672 memcpy(&pPatchPage->aPatch[i], &pPatchPage->aPatch[i+1], sizeof(PPATCHINFO)*(pPatchPage->cCount - (i+1)));
4673 }
4674 AssertMsg(i < pPatchPage->cCount, ("Unable to find patch %RHv in page %RRv\n", pPatch, pPage));
4675
4676 pPatchPage->cCount--;
4677 }
4678 else
4679 {
4680 PPATMPATCHPAGE pPatchNode;
4681
4682 Log(("patmRemovePatchFromPage %RRv\n", pPage));
4683
4684 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageRemoved);
4685 pPatchNode = (PPATMPATCHPAGE)RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4686 Assert(pPatchNode && pPatchNode == pPatchPage);
4687
4688 Assert(pPatchPage->aPatch);
4689 rc = MMHyperFree(pVM, pPatchPage->aPatch);
4690 AssertRC(rc);
4691 rc = MMHyperFree(pVM, pPatchPage);
4692 AssertRC(rc);
4693 pVM->patm.s.cPageRecords--;
4694 }
4695 return VINF_SUCCESS;
4696}
4697
4698/**
4699 * Insert page records for all guest pages that contain instructions that were recompiled for this patch
4700 *
4701 * @returns VBox status code.
4702 * @param pVM The VM to operate on.
4703 * @param pPatch Patch record
4704 */
4705int patmInsertPatchPages(PVM pVM, PPATCHINFO pPatch)
4706{
4707 int rc;
4708 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4709
4710 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4711 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4712 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4713
4714 /** @todo optimize better (large gaps between current and next used page) */
4715 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4716 {
4717        /* Get the first patched guest instruction at or after the page start. */
4718 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4719 if ( pGuestToPatchRec
4720 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage)
4721 )
4722 {
4723 /* Code in page really patched -> add record */
4724 rc = patmAddPatchToPage(pVM, pPage, pPatch);
4725 AssertRC(rc);
4726 }
4727 }
4728 pPatch->flags |= PATMFL_CODE_MONITORED;
4729 return VINF_SUCCESS;
4730}
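
/* Example of the page iteration above (hypothetical addresses): with pInstrGCLowest = 0x80012f80
 * and pInstrGCHighest = 0x80013010, PAGE_BASE_GC_MASK yields pPatchPageStart = 0x80012000 and
 * pPatchPageEnd = 0x80013000, so page records are added for both pages, provided the best-fit
 * lookup confirms that a patched instruction really falls into the page in question.
 */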
4731
4732/**
4733 * Remove page records for all guest pages that contain instructions that were recompiled for this patch
4734 *
4735 * @returns VBox status code.
4736 * @param pVM The VM to operate on.
4737 * @param pPatch Patch record
4738 */
4739int patmRemovePatchPages(PVM pVM, PPATCHINFO pPatch)
4740{
4741 int rc;
4742 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4743
4744 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4745 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4746 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4747
4748 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4749 {
4750        /* Get the first patched guest instruction at or after the page start. */
4751 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4752 if ( pGuestToPatchRec
4753 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage) /** @todo bird: PAGE_ADDRESS is for the current context really. check out these. */
4754 )
4755 {
4756 /* Code in page really patched -> remove record */
4757 rc = patmRemovePatchFromPage(pVM, pPage, pPatch);
4758 AssertRC(rc);
4759 }
4760 }
4761 pPatch->flags &= ~PATMFL_CODE_MONITORED;
4762 return VINF_SUCCESS;
4763}
4764
4765/**
4766 * Notifies PATM about a (potential) write to code that has been patched.
4767 *
4768 * @returns VBox status code.
4769 * @param pVM The VM to operate on.
4770 * @param GCPtr GC pointer to write address
4771 * @param cbWrite Number of bytes to write
4772 *
4773 */
4774VMMR3DECL(int) PATMR3PatchWrite(PVM pVM, RTRCPTR GCPtr, uint32_t cbWrite)
4775{
4776 RTRCUINTPTR pWritePageStart, pWritePageEnd, pPage;
4777
4778 Log(("PATMR3PatchWrite %RRv %x\n", GCPtr, cbWrite));
4779
4780 Assert(VM_IS_EMT(pVM));
4781
4782 /* Quick boundary check */
4783 if ( GCPtr < pVM->patm.s.pPatchedInstrGCLowest
4784 || GCPtr > pVM->patm.s.pPatchedInstrGCHighest
4785 )
4786 return VINF_SUCCESS;
4787
4788 STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWrite, a);
4789
4790 pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
4791 pWritePageEnd = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;
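
    /* Example (hypothetical values): a 4 byte write to 0x80012ffe yields pWritePageStart =
       0x80012000 and pWritePageEnd = 0x80013000, so both pages are checked below. */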
4792
4793 for (pPage = pWritePageStart; pPage <= pWritePageEnd; pPage += PAGE_SIZE)
4794 {
4795loop_start:
4796 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4797 if (pPatchPage)
4798 {
4799 uint32_t i;
4800 bool fValidPatchWrite = false;
4801
4802 /* Quick check to see if the write is in the patched part of the page */
4803 if ( pPatchPage->pLowestAddrGC > (RTRCPTR)((RTRCUINTPTR)GCPtr + cbWrite - 1)
4804 || pPatchPage->pHighestAddrGC < GCPtr)
4805 {
4806 break;
4807 }
4808
4809 for (i=0;i<pPatchPage->cCount;i++)
4810 {
4811 if (pPatchPage->aPatch[i])
4812 {
4813 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4814 RTRCPTR pPatchInstrGC;
4815 //unused: bool fForceBreak = false;
4816
4817 Assert(pPatchPage->aPatch[i]->flags & PATMFL_CODE_MONITORED);
4818 /** @todo inefficient and includes redundant checks for multiple pages. */
4819 for (uint32_t j=0; j<cbWrite; j++)
4820 {
4821 RTRCPTR pGuestPtrGC = (RTRCPTR)((RTRCUINTPTR)GCPtr + j);
4822
4823 if ( pPatch->cbPatchJump
4824 && pGuestPtrGC >= pPatch->pPrivInstrGC
4825 && pGuestPtrGC < pPatch->pPrivInstrGC + pPatch->cbPatchJump)
4826 {
4827 /* The guest is about to overwrite the 5 byte jump to patch code. Remove the patch. */
4828 Log(("PATMR3PatchWrite: overwriting jump to patch code -> remove patch.\n"));
4829 int rc = PATMR3RemovePatch(pVM, pPatch->pPrivInstrGC);
4830 if (rc == VINF_SUCCESS)
4831 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4832 goto loop_start;
4833
4834 continue;
4835 }
4836
4837 /* Find the closest instruction from below; the above quick check ensured that we are indeed in patched code */
4838 pPatchInstrGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4839 if (!pPatchInstrGC)
4840 {
4841 RTRCPTR pClosestInstrGC;
4842 uint32_t size;
4843
4844 pPatchInstrGC = patmGuestGCPtrToClosestPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4845 if (pPatchInstrGC)
4846 {
4847 pClosestInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchInstrGC);
4848 Assert(pClosestInstrGC <= pGuestPtrGC);
4849 size = patmGetInstrSize(pVM, pPatch, pClosestInstrGC);
4850 /* Check if this is not a write into a gap between two patches */
4851 if (pClosestInstrGC + size - 1 < pGuestPtrGC)
4852 pPatchInstrGC = 0;
4853 }
4854 }
4855 if (pPatchInstrGC)
4856 {
4857 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
4858
4859 fValidPatchWrite = true;
4860
4861 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
4862 Assert(pPatchToGuestRec);
4863 if (pPatchToGuestRec && !pPatchToGuestRec->fDirty)
4864 {
4865 Log(("PATMR3PatchWrite: Found patched instruction %RRv -> %RRv\n", pGuestPtrGC, pPatchInstrGC));
4866
4867 if (++pPatch->cCodeWrites > PATM_MAX_CODE_WRITES)
4868 {
4869 LogRel(("PATM: Disable block at %RRv - write %RRv-%RRv\n", pPatch->pPrivInstrGC, pGuestPtrGC, pGuestPtrGC+cbWrite));
4870
4871 PATMR3MarkDirtyPatch(pVM, pPatch);
4872
4873 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4874 goto loop_start;
4875 }
4876 else
4877 {
4878 /* Replace the patch instruction with a breakpoint; when it's hit, then we'll attempt to recompile the instruction again. */
4879 uint8_t *pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pPatchInstrGC);
4880
4881 pPatchToGuestRec->u8DirtyOpcode = *pInstrHC;
4882 pPatchToGuestRec->fDirty = true;
4883
4884 *pInstrHC = 0xCC;
4885
4886 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirty);
4887 }
4888 }
4889 /* else already marked dirty */
4890 }
4891 }
4892 }
4893 } /* for each patch */
4894
4895 if (fValidPatchWrite == false)
4896 {
4897                /* Write to a part of the page that either:
4898                 * - doesn't contain any code (shared code/data); rather unlikely, or
4899                 * - belongs to an old code page that's no longer in active use.
4900                 */
4901invalid_write_loop_start:
4902 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4903
4904 if (pPatchPage)
4905 {
4906 for (i=0;i<pPatchPage->cCount;i++)
4907 {
4908 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4909
4910 if (pPatch->cInvalidWrites > PATM_MAX_INVALID_WRITES)
4911 {
4912 /* Note: possibly dangerous assumption that all future writes will be harmless. */
4913 if (pPatch->flags & PATMFL_IDTHANDLER)
4914 {
4915 LogRel(("PATM: Stop monitoring IDT handler pages at %RRv - invalid write %RRv-%RRv (this is not a fatal error)\n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
4916
4917 Assert(pPatch->flags & PATMFL_CODE_MONITORED);
4918 int rc = patmRemovePatchPages(pVM, pPatch);
4919 AssertRC(rc);
4920 }
4921 else
4922 {
4923 LogRel(("PATM: Disable block at %RRv - invalid write %RRv-%RRv \n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
4924 PATMR3MarkDirtyPatch(pVM, pPatch);
4925 }
4926 /* Note: jump back to the start as the pPatchPage has been deleted or changed */
4927 goto invalid_write_loop_start;
4928 }
4929 } /* for */
4930 }
4931 }
4932 }
4933 }
4934 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWrite, a);
4935 return VINF_SUCCESS;
4936
4937}
4938
4939/**
4940 * Disable all patches in a flushed page
4941 *
4942 * @returns VBox status code
4943 * @param pVM The VM to operate on.
4944 * @param addr GC address of the page to flush
4945 */
4946/** @note Currently only called by CSAMR3FlushPage; optimization to avoid having to double check if the physical address has changed
4947 */
4948VMMR3DECL(int) PATMR3FlushPage(PVM pVM, RTRCPTR addr)
4949{
4950 addr &= PAGE_BASE_GC_MASK;
4951
4952 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, addr);
4953 if (pPatchPage)
4954 {
4955 int i;
4956
4957 /* From top to bottom as the array is modified by PATMR3MarkDirtyPatch. */
4958 for (i=(int)pPatchPage->cCount-1;i>=0;i--)
4959 {
4960 if (pPatchPage->aPatch[i])
4961 {
4962 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4963
4964 Log(("PATMR3FlushPage %RRv remove patch at %RRv\n", addr, pPatch->pPrivInstrGC));
4965 PATMR3MarkDirtyPatch(pVM, pPatch);
4966 }
4967 }
4968 STAM_COUNTER_INC(&pVM->patm.s.StatFlushed);
4969 }
4970 return VINF_SUCCESS;
4971}
4972
4973/**
4974 * Checks if the instruction at the specified address has already been patched.
4975 *
4976 * @returns boolean, patched or not
4977 * @param pVM The VM to operate on.
4978 * @param pInstrGC Guest context pointer to instruction
4979 */
4980VMMR3DECL(bool) PATMR3HasBeenPatched(PVM pVM, RTRCPTR pInstrGC)
4981{
4982 PPATMPATCHREC pPatchRec;
4983 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4984 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
4985 return true;
4986 return false;
4987}
4988
4989/**
4990 * Query the opcode of the original code that was overwritten by the 5-byte patch jump.
4991 *
4992 * @returns VBox status code.
4993 * @param pVM The VM to operate on.
4994 * @param pInstrGC GC address of instr
4995 * @param pByte opcode byte pointer (OUT)
4996 *
4997 */
4998VMMR3DECL(int) PATMR3QueryOpcode(PVM pVM, RTRCPTR pInstrGC, uint8_t *pByte)
4999{
5000 PPATMPATCHREC pPatchRec;
5001
5002 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
5003
5004 /* Shortcut. */
5005 if ( !PATMIsEnabled(pVM)
5006 || pInstrGC < pVM->patm.s.pPatchedInstrGCLowest
5007 || pInstrGC > pVM->patm.s.pPatchedInstrGCHighest)
5008 {
5009 return VERR_PATCH_NOT_FOUND;
5010 }
5011
5012 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5013 // if the patch is enabled and the pointer lies within 5 bytes of this priv instr ptr, then we've got a hit!
5014 if ( pPatchRec
5015 && pPatchRec->patch.uState == PATCH_ENABLED
5016 && pInstrGC >= pPatchRec->patch.pPrivInstrGC
5017 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5018 {
5019 RTRCPTR offset = pInstrGC - pPatchRec->patch.pPrivInstrGC;
5020 *pByte = pPatchRec->patch.aPrivInstr[offset];
5021
5022 if (pPatchRec->patch.cbPatchJump == 1)
5023 {
5024 Log(("PATMR3QueryOpcode: returning opcode %2X for instruction at %RRv\n", *pByte, pInstrGC));
5025 }
5026 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
5027 return VINF_SUCCESS;
5028 }
5029 return VERR_PATCH_NOT_FOUND;
5030}
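
/* Background for the lookup above (a sketch of the patch jump layout as used elsewhere in this
 * file; SIZEOF_NEARJUMP32 = 5):
 *
 *      byte 0     : 0xE9        jmp rel32 to the patch code
 *      bytes 1..4 : 32-bit displacement
 *
 * pPatch->aPrivInstr[] holds the original guest bytes this jump (or the single 0xCC of an int3
 * replacement patch, cbPatchJump == 1) overwrote, so for any pInstrGC inside
 * [pPrivInstrGC, pPrivInstrGC + cbPatchJump) the original opcode byte is simply
 * aPrivInstr[pInstrGC - pPrivInstrGC], which is what PATMR3QueryOpcode returns.
 */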
5031
5032/**
5033 * Disable patch for privileged instruction at specified location
5034 *
5035 * @returns VBox status code.
5036 * @param pVM The VM to operate on.
5037 * @param pInstrGC Guest context pointer to the privileged instruction
5038 *
5039 * @note returns failure if patching is not allowed or possible
5040 *
5041 */
5042VMMR3DECL(int) PATMR3DisablePatch(PVM pVM, RTRCPTR pInstrGC)
5043{
5044 PPATMPATCHREC pPatchRec;
5045 PPATCHINFO pPatch;
5046
5047 Log(("PATMR3DisablePatch: %RRv\n", pInstrGC));
5048 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5049 if (pPatchRec)
5050 {
5051 int rc = VINF_SUCCESS;
5052
5053 pPatch = &pPatchRec->patch;
5054
5055 /* Already disabled? */
5056 if (pPatch->uState == PATCH_DISABLED)
5057 return VINF_SUCCESS;
5058
5059 /* Clear the IDT entries for the patch we're disabling. */
5060 /* Note: very important as we clear IF in the patch itself */
5061 /** @todo this needs to be changed */
5062 if (pPatch->flags & PATMFL_IDTHANDLER)
5063 {
5064 uint32_t iGate;
5065
5066 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
5067 if (iGate != (uint32_t)~0)
5068 {
5069 TRPMR3SetGuestTrapHandler(pVM, iGate, TRPM_INVALID_HANDLER);
5070 if (++cIDTHandlersDisabled < 256)
5071 LogRel(("PATM: Disabling IDT %x patch handler %RRv\n", iGate, pInstrGC));
5072 }
5073 }
5074
5075        /* Mark the entry with a breakpoint in case somebody else calls it later on (cli patch used as a function; function, trampoline or IDT patches). */
5076 if ( pPatch->pPatchBlockOffset
5077 && pPatch->uState == PATCH_ENABLED)
5078 {
5079 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5080 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5081 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5082 }
5083
5084 /* IDT or function patches haven't changed any guest code. */
5085 if (pPatch->flags & PATMFL_PATCHED_GUEST_CODE)
5086 {
5087 Assert(pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP);
5088 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK)));
5089
5090 if (pPatch->uState != PATCH_REFUSED)
5091 {
5092 uint8_t temp[16];
5093
5094 Assert(pPatch->cbPatchJump < sizeof(temp));
5095
5096 /* Let's first check if the guest code is still the same. */
5097 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5098 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5099 if (rc == VINF_SUCCESS)
5100 {
5101 RTRCINTPTR displ = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);
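                    /* The rel32 operand of the 5 byte jmp is relative to the address of the
                       *next* instruction, hence the + SIZEOF_NEARJUMP32 in the line above. */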
5102
5103 if ( temp[0] != 0xE9 /* jmp opcode */
5104 || *(RTRCINTPTR *)(&temp[1]) != displ
5105 )
5106 {
5107                        Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5108 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5109 /* Remove it completely */
5110 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5111 rc = PATMR3RemovePatch(pVM, pInstrGC);
5112 AssertRC(rc);
5113 return VWRN_PATCH_REMOVED;
5114 }
5115 patmRemoveJumpToPatch(pVM, pPatch);
5116 }
5117 else
5118 {
5119 Log(("PATMR3DisablePatch: unable to disable patch -> mark PATCH_DISABLE_PENDING\n"));
5120 pPatch->uState = PATCH_DISABLE_PENDING;
5121 }
5122 }
5123 else
5124 {
5125 AssertMsgFailed(("Patch was refused!\n"));
5126 return VERR_PATCH_ALREADY_DISABLED;
5127 }
5128 }
5129 else
5130 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5131 {
5132 uint8_t temp[16];
5133
5134 Assert(pPatch->cbPatchJump < sizeof(temp));
5135
5136 /* Let's first check if the guest code is still the same. */
5137 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5138 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5139 if (rc == VINF_SUCCESS)
5140 {
5141 if (temp[0] != 0xCC)
5142 {
5143                    Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5144 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5145 /* Remove it completely */
5146 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5147 rc = PATMR3RemovePatch(pVM, pInstrGC);
5148 AssertRC(rc);
5149 return VWRN_PATCH_REMOVED;
5150 }
5151 patmDeactivateInt3Patch(pVM, pPatch);
5152 }
5153 }
5154
5155 if (rc == VINF_SUCCESS)
5156 {
5157 /* Save old state and mark this one as disabled (so it can be enabled later on). */
5158 if (pPatch->uState == PATCH_DISABLE_PENDING)
5159 {
5160 /* Just to be safe, let's make sure this one can never be reused; the patch might be marked dirty already (int3 at start) */
5161 pPatch->uState = PATCH_UNUSABLE;
5162 }
5163 else
5164 if (pPatch->uState != PATCH_DIRTY)
5165 {
5166 pPatch->uOldState = pPatch->uState;
5167 pPatch->uState = PATCH_DISABLED;
5168 }
5169 STAM_COUNTER_ADD(&pVM->patm.s.StatDisabled, 1);
5170 }
5171
5172 Log(("PATMR3DisablePatch: disabled patch at %RRv\n", pInstrGC));
5173 return VINF_SUCCESS;
5174 }
5175 Log(("Patch not found!\n"));
5176 return VERR_PATCH_NOT_FOUND;
5177}
5178
5179/**
5180 * Permanently disable patch for privileged instruction at specified location
5181 *
5182 * @returns VBox status code.
5183 * @param pVM The VM to operate on.
5184 * @param pInstrGC Guest context instruction pointer
5185 * @param pConflictAddr Guest context pointer which conflicts with specified patch
5186 * @param pConflictPatch Conflicting patch
5187 *
5188 */
5189static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pConflictPatch)
5190{
5191 NOREF(pConflictAddr);
5192#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
5193 PATCHINFO patch;
5194 DISCPUSTATE cpu;
5195 R3PTRTYPE(uint8_t *) pInstrHC;
5196 uint32_t opsize;
5197 bool disret;
5198 int rc;
5199
5200 RT_ZERO(patch);
5201 pInstrHC = PATMGCVirtToHCVirt(pVM, &patch, pInstrGC);
5202 cpu.mode = (pConflictPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
5203 disret = PATMR3DISInstr(pVM, &patch, &cpu, pInstrGC, pInstrHC, &opsize, NULL);
5204 /*
5205 * If it's a 5 byte relative jump, then we can work around the problem by replacing the 32 bits relative offset
5206 * with one that jumps right into the conflict patch.
5207 * Otherwise we must disable the conflicting patch to avoid serious problems.
5208 */
5209 if ( disret == true
5210 && (pConflictPatch->flags & PATMFL_CODE32)
5211 && (cpu.pCurInstr->opcode == OP_JMP || (cpu.pCurInstr->optype & OPTYPE_COND_CONTROLFLOW))
5212 && (cpu.param1.flags & USE_IMMEDIATE32_REL))
5213 {
5214 /* Hint patches must be enabled first. */
5215 if (pConflictPatch->flags & PATMFL_INSTR_HINT)
5216 {
5217 Log(("Enabling HINTED patch %RRv\n", pConflictPatch->pPrivInstrGC));
5218 pConflictPatch->flags &= ~PATMFL_INSTR_HINT;
5219 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5220 Assert(rc == VINF_SUCCESS || rc == VERR_PATCH_NOT_FOUND);
5221 /* Enabling might fail if the patched code has changed in the meantime. */
5222 if (rc != VINF_SUCCESS)
5223 return rc;
5224 }
5225
5226 rc = PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_JUMP_CONFLICT);
5227 if (RT_SUCCESS(rc))
5228 {
5229 Log(("PATM -> CONFLICT: Installed JMP patch for patch conflict at %RRv\n", pInstrGC));
5230 STAM_COUNTER_INC(&pVM->patm.s.StatFixedConflicts);
5231 return VINF_SUCCESS;
5232 }
5233 }
5234#endif
5235
5236 if (pConflictPatch->opcode == OP_CLI)
5237 {
5238 /* Turn it into an int3 patch; our GC trap handler will call the generated code manually. */
5239 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> turn into int 3 patch!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5240 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5241 if (rc == VWRN_PATCH_REMOVED)
5242 return VINF_SUCCESS;
5243 if (RT_SUCCESS(rc))
5244 {
5245 pConflictPatch->flags &= ~(PATMFL_MUST_INSTALL_PATCHJMP|PATMFL_INSTR_HINT);
5246 pConflictPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
5247 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5248 if (rc == VERR_PATCH_NOT_FOUND)
5249 return VINF_SUCCESS; /* removed already */
5250
5251 AssertRC(rc);
5252 if (RT_SUCCESS(rc))
5253 {
5254 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
5255 return VINF_SUCCESS;
5256 }
5257 }
5258 /* else turned into unusable patch (see below) */
5259 }
5260 else
5261 {
5262 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> DISABLING it!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5263 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5264 if (rc == VWRN_PATCH_REMOVED)
5265 return VINF_SUCCESS;
5266 }
5267
5268 /* No need to monitor the code anymore. */
5269 if (pConflictPatch->flags & PATMFL_CODE_MONITORED)
5270 {
5271 int rc = patmRemovePatchPages(pVM, pConflictPatch);
5272 AssertRC(rc);
5273 }
5274 pConflictPatch->uState = PATCH_UNUSABLE;
5275 STAM_COUNTER_INC(&pVM->patm.s.StatUnusable);
5276 return VERR_PATCH_DISABLED;
5277}
5278
5279/**
5280 * Enable patch for privileged instruction at specified location
5281 *
5282 * @returns VBox status code.
5283 * @param pVM The VM to operate on.
5284 * @param pInstrGC Guest context pointer to the privileged instruction
5285 *
5286 * @note returns failure if patching is not allowed or possible
5287 *
5288 */
5289VMMR3DECL(int) PATMR3EnablePatch(PVM pVM, RTRCPTR pInstrGC)
5290{
5291 PPATMPATCHREC pPatchRec;
5292 PPATCHINFO pPatch;
5293
5294 Log(("PATMR3EnablePatch %RRv\n", pInstrGC));
5295 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5296 if (pPatchRec)
5297 {
5298 int rc = VINF_SUCCESS;
5299
5300 pPatch = &pPatchRec->patch;
5301
5302 if (pPatch->uState == PATCH_DISABLED)
5303 {
5304 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
5305 {
5306 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
5307 uint8_t temp[16];
5308
5309 Assert(pPatch->cbPatchJump < sizeof(temp));
5310
5311 /* Let's first check if the guest code is still the same. */
5312 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5313 AssertRC(rc2);
5314 if (rc2 == VINF_SUCCESS)
5315 {
5316 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5317 {
5318                    Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5319 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5320 /* Remove it completely */
5321 rc = PATMR3RemovePatch(pVM, pInstrGC);
5322 AssertRC(rc);
5323 return VERR_PATCH_NOT_FOUND;
5324 }
5325
5326 PATMP2GLOOKUPREC cacheRec;
5327 RT_ZERO(cacheRec);
5328 cacheRec.pPatch = pPatch;
5329
5330 rc2 = patmGenJumpToPatch(pVM, pPatch, &cacheRec, false);
5331 /* Free leftover lock if any. */
5332 if (cacheRec.Lock.pvMap)
5333 {
5334 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
5335 cacheRec.Lock.pvMap = NULL;
5336 }
5337 AssertRC(rc2);
5338 if (RT_FAILURE(rc2))
5339 return rc2;
5340
5341#ifdef DEBUG
5342 {
5343 DISCPUSTATE cpu;
5344 char szOutput[256];
5345 uint32_t opsize, i = 0;
5346 bool disret;
5347 i = 0;
5348 while(i < pPatch->cbPatchJump)
5349 {
5350 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
5351 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, NULL, &opsize, szOutput);
5352 Log(("Renewed patch instr: %s", szOutput));
5353 i += opsize;
5354 }
5355 }
5356#endif
5357 }
5358 }
5359 else
5360 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5361 {
5362 uint8_t temp[16];
5363
5364 Assert(pPatch->cbPatchJump < sizeof(temp));
5365
5366 /* Let's first check if the guest code is still the same. */
5367 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5368 AssertRC(rc2);
5369
5370 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5371 {
5372                Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5373 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5374 rc = PATMR3RemovePatch(pVM, pInstrGC);
5375 AssertRC(rc);
5376 return VERR_PATCH_NOT_FOUND;
5377 }
5378
5379 rc2 = patmActivateInt3Patch(pVM, pPatch);
5380 if (RT_FAILURE(rc2))
5381 return rc2;
5382 }
5383
5384 pPatch->uState = pPatch->uOldState; //restore state
5385
5386 /* Restore the entry breakpoint with the original opcode (see PATMR3DisablePatch). */
5387 if (pPatch->pPatchBlockOffset)
5388 *PATCHCODE_PTR_HC(pPatch) = pPatch->bDirtyOpcode;
5389
5390 STAM_COUNTER_ADD(&pVM->patm.s.StatEnabled, 1);
5391 }
5392 else
5393 Log(("PATMR3EnablePatch: Unable to enable patch %RRv with state %d\n", pInstrGC, pPatch->uState));
5394
5395 return rc;
5396 }
5397 return VERR_PATCH_NOT_FOUND;
5398}
5399
5400/**
5401 * Remove patch for privileged instruction at specified location
5402 *
5403 * @returns VBox status code.
5404 * @param pVM The VM to operate on.
5405 * @param pPatchRec Patch record
5406 * @param fForceRemove Force removal even of referenced or duplicated-function patches
5407 */
5408int PATMRemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove)
5409{
5410 PPATCHINFO pPatch;
5411
5412 pPatch = &pPatchRec->patch;
5413
5414 /* Strictly forbidden to remove such patches. There can be dependencies!! */
5415 if (!fForceRemove && (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED)))
5416 {
5417 Log(("PATMRemovePatch %RRv REFUSED!\n", pPatch->pPrivInstrGC));
5418 return VERR_ACCESS_DENIED;
5419 }
5420 Log(("PATMRemovePatch %RRv\n", pPatch->pPrivInstrGC));
5421
5422 /* Note: NEVER EVER REUSE PATCH MEMORY */
5423 /* Note: PATMR3DisablePatch puts a breakpoint (0xCC) at the entry of this patch */
5424
5425 if (pPatchRec->patch.pPatchBlockOffset)
5426 {
5427 PAVLOU32NODECORE pNode;
5428
5429 pNode = RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->patch.pPatchBlockOffset);
5430 Assert(pNode);
5431 }
5432
5433 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
5434 {
5435 int rc = patmRemovePatchPages(pVM, &pPatchRec->patch);
5436 AssertRC(rc);
5437 }
5438
5439#ifdef VBOX_WITH_STATISTICS
5440 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5441 {
5442 STAMR3Deregister(pVM, &pPatchRec->patch);
5443#ifndef DEBUG_sandervl
5444 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5445 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5446 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5447 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5448 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5449 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5450 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5451 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5452 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5453 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5454 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5455 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5456 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5457 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5458#endif
5459 }
5460#endif
5461
5462 /* Note: no need to free Guest2PatchAddrTree as those records share memory with Patch2GuestAddrTree records. */
5463 patmEmptyTreeU32(pVM, &pPatch->Patch2GuestAddrTree);
5464 pPatch->nrPatch2GuestRecs = 0;
5465 Assert(pPatch->Patch2GuestAddrTree == 0);
5466
5467 patmEmptyTree(pVM, &pPatch->FixupTree);
5468 pPatch->nrFixups = 0;
5469 Assert(pPatch->FixupTree == 0);
5470
5471 if (pPatchRec->patch.pTempInfo)
5472 MMR3HeapFree(pPatchRec->patch.pTempInfo);
5473
5474 /* Note: might fail, because it has already been removed (e.g. during reset). */
5475 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5476
5477 /* Free the patch record */
5478 MMHyperFree(pVM, pPatchRec);
5479 return VINF_SUCCESS;
5480}
5481
5482/**
5483 * RTAvlU32DoWithAll() worker.
5484 * Checks whether the current trampoline instruction is the jump to the target patch
5485 * and updates the displacement to jump to the new target.
5486 *
5487 * @returns VBox status code.
5488 * @retval VERR_ALREADY_EXISTS if the jump was found.
5489 * @param pNode The current patch to guest record to check.
5490 * @param pvUser The refresh state.
5491 */
5492static int patmR3PatchRefreshFindTrampolinePatch(PAVLU32NODECORE pNode, void *pvUser)
5493{
5494 PRECPATCHTOGUEST pPatch2GuestRec = (PRECPATCHTOGUEST)pNode;
5495 PPATMREFRESHPATCH pRefreshPatchState = (PPATMREFRESHPATCH)pvUser;
5496 PVM pVM = pRefreshPatchState->pVM;
5497
5498 uint8_t *pPatchInstr = (uint8_t *)(pVM->patm.s.pPatchMemHC + pPatch2GuestRec->Core.Key);
5499
5500 /*
5501 * Check if the patch instruction starts with a jump.
5502 * ASSUMES that there is no other patch to guest record that starts
5503 * with a jump.
5504 */
5505 if (*pPatchInstr == 0xE9)
5506 {
5507 /* Jump found, update the displacement. */
5508 RTRCPTR pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pRefreshPatchState->pPatchRec,
5509 pRefreshPatchState->pPatchTrampoline->pPrivInstrGC);
5510 int32_t displ = pPatchTargetGC - (pVM->patm.s.pPatchMemGC + pPatch2GuestRec->Core.Key + SIZEOF_NEARJUMP32);
5511
5512        LogFlow(("Updating trampoline patch: new patch target %RRv, new displacement %d (old was %d)\n",
5513 pPatchTargetGC, displ, *(uint32_t *)&pPatchInstr[1]));
5514
5515 *(uint32_t *)&pPatchInstr[1] = displ;
5516 return VERR_ALREADY_EXISTS; /** @todo better return code */
5517 }
5518
5519 return VINF_SUCCESS;
5520}
5521
5522/**
5523 * Attempt to refresh the patch by recompiling its entire code block
5524 *
5525 * @returns VBox status code.
5526 * @param pVM The VM to operate on.
5527 * @param pPatchRec Patch record
5528 */
5529int patmR3RefreshPatch(PVM pVM, PPATMPATCHREC pPatchRec)
5530{
5531 PPATCHINFO pPatch;
5532 int rc;
5533 RTRCPTR pInstrGC = pPatchRec->patch.pPrivInstrGC;
5534 PTRAMPREC pTrampolinePatchesHead = NULL;
5535
5536 Log(("patmR3RefreshPatch: attempt to refresh patch at %RRv\n", pInstrGC));
5537
5538 pPatch = &pPatchRec->patch;
5539 AssertReturn(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER), VERR_PATCHING_REFUSED);
5540 if (pPatch->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
5541 {
5542 if (!pPatch->pTrampolinePatchesHead)
5543 {
5544            /*
5545             * It is sometimes possible that there are trampoline patches jumping to this
5546             * patch without being recorded (after a saved state load, for example).
5547             * Refuse to refresh such patches.
5548             * In theory this can hurt performance if the guest modifies the patched code
5549             * and executes it often. However, states are usually saved after the guest
5550             * code was modified and the code is not changed again afterwards, so this
5551             * shouldn't be a big problem.
5552             */
5553 Log(("patmR3RefreshPatch: refused because external jumps to this patch exist but the jumps are not recorded\n"));
5554 return VERR_PATCHING_REFUSED;
5555 }
5556 Log(("patmR3RefreshPatch: external jumps to this patch exist, updating\n"));
5557 pTrampolinePatchesHead = pPatch->pTrampolinePatchesHead;
5558 }
5559
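
    /*
     * Rough sequence of the refresh below:
     *   1. Disable the old patch and temporarily kick its record out of the lookup tree so
     *      PATMR3InstallPatch can recompile the same guest address.
     *   2. Install a fresh patch for pInstrGC and generate a jump from the beginning of the old
     *      patch block into the new one (old patch memory is never reused).
     *   3. Remove the old patch record, re-insert the new one and, if needed, retarget any
     *      trampoline patches so they jump into the new block.
     * On failure the new patch is removed again and the old one is re-inserted and re-enabled.
     */
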
5560 /** Note: quite ugly to enable/disable/remove/insert old and new patches, but there's no easy way around it. */
5561
5562 rc = PATMR3DisablePatch(pVM, pInstrGC);
5563 AssertRC(rc);
5564
5565 /** Kick it out of the lookup tree to make sure PATMR3InstallPatch doesn't fail (hack alert) */
5566 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5567#ifdef VBOX_WITH_STATISTICS
5568 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5569 {
5570 STAMR3Deregister(pVM, &pPatchRec->patch);
5571#ifndef DEBUG_sandervl
5572 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5573 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5574 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5575 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5576 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5577 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5578 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5579 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5580 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5581 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5582 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5583 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5584 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5585 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5586#endif
5587 }
5588#endif
5589
5590 /** Note: We don't attempt to reuse patch memory here as it's quite common that the new code block requires more memory. */
5591
5592 /* Attempt to install a new patch. */
5593 rc = PATMR3InstallPatch(pVM, pInstrGC, pPatch->flags & (PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT));
5594 if (RT_SUCCESS(rc))
5595 {
5596 RTRCPTR pPatchTargetGC;
5597 PPATMPATCHREC pNewPatchRec;
5598
5599 /* Determine target address in new patch */
5600 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pInstrGC);
5601 Assert(pPatchTargetGC);
5602 if (!pPatchTargetGC)
5603 {
5604 rc = VERR_PATCHING_REFUSED;
5605 goto failure;
5606 }
5607
5608 /* Reset offset into patch memory to put the next code blocks right at the beginning. */
5609 pPatch->uCurPatchOffset = 0;
5610
5611 /* insert jump to new patch in old patch block */
5612 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC, false /* no lookup record */);
5613 if (RT_FAILURE(rc))
5614 goto failure;
5615
5616 pNewPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5617 Assert(pNewPatchRec); /* can't fail */
5618
5619 /* Remove old patch (only do that when everything is finished) */
5620 int rc2 = PATMRemovePatch(pVM, pPatchRec, true /* force removal */);
5621 AssertRC(rc2);
5622
5623 /* Put the new patch back into the tree, because removing the old one kicked this one out. (hack alert) */
5624 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pNewPatchRec->Core);
5625 Assert(fInserted); NOREF(fInserted);
5626
5627        Log(("PATM: patmR3RefreshPatch: successfully refreshed patch at %RRv\n", pInstrGC));
5628 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshSuccess);
5629
5630 /* Used by another patch, so don't remove it! */
5631 pNewPatchRec->patch.flags |= PATMFL_CODE_REFERENCED;
5632
5633 if (pTrampolinePatchesHead)
5634 {
5635 /* Update all trampoline patches to jump to the new patch. */
5636 PTRAMPREC pTrampRec = NULL;
5637 PATMREFRESHPATCH RefreshPatch;
5638
5639 RefreshPatch.pVM = pVM;
5640 RefreshPatch.pPatchRec = &pNewPatchRec->patch;
5641
5642 pTrampRec = pTrampolinePatchesHead;
5643
5644 while (pTrampRec)
5645 {
5646 PPATCHINFO pPatchTrampoline = &pTrampRec->pPatchTrampoline->patch;
5647
5648 RefreshPatch.pPatchTrampoline = pPatchTrampoline;
5649 /*
5650 * We have to find the right patch2guest record because there might be others
5651 * for statistics.
5652 */
5653 rc = RTAvlU32DoWithAll(&pPatchTrampoline->Patch2GuestAddrTree, true,
5654 patmR3PatchRefreshFindTrampolinePatch, &RefreshPatch);
5655 Assert(rc == VERR_ALREADY_EXISTS);
5656 rc = VINF_SUCCESS;
5657 pTrampRec = pTrampRec->pNext;
5658 }
5659 pNewPatchRec->patch.pTrampolinePatchesHead = pTrampolinePatchesHead;
5660 pNewPatchRec->patch.flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
5661 /* Clear the list of trampoline patches for the old patch (safety precaution). */
5662 pPatchRec->patch.pTrampolinePatchesHead = NULL;
5663 }
5664 }
5665
5666failure:
5667 if (RT_FAILURE(rc))
5668 {
5669        LogRel(("PATM: patmR3RefreshPatch: failed to refresh patch at %RRv. Reactivating the old one.\n", pInstrGC));
5670
5671 /* Remove the new inactive patch */
5672 rc = PATMR3RemovePatch(pVM, pInstrGC);
5673 AssertRC(rc);
5674
5675 /* Put the old patch back into the tree (or else it won't be saved) (hack alert) */
5676 bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
5677 Assert(fInserted); NOREF(fInserted);
5678
5679 /* Enable again in case the dirty instruction is near the end and there are safe code paths. */
5680 int rc2 = PATMR3EnablePatch(pVM, pInstrGC);
5681 AssertRC(rc2);
5682
5683 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshFailed);
5684 }
5685 return rc;
5686}
5687
5688/**
5689 * Find patch for privileged instruction at specified location
5690 *
5691 * @returns Patch structure pointer if found; else NULL
5692 * @param pVM The VM to operate on.
5693 * @param pInstrGC Guest context pointer to an instruction that might lie within 5 bytes of an existing patch jump
5694 * @param fIncludeHints Include hinted patches or not
5695 *
5696 */
5697PPATCHINFO PATMFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncludeHints)
5698{
5699 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5700 /* if the patch is enabled, the pointer is not identical to the privileged patch ptr and it lies within 5 bytes of this priv instr ptr, then we've got a hit! */
5701 if (pPatchRec)
5702 {
5703 if ( pPatchRec->patch.uState == PATCH_ENABLED
5704 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE)
5705 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5706 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5707 {
5708 Log(("Found active patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5709 return &pPatchRec->patch;
5710 }
5711 else
5712 if ( fIncludeHints
5713 && pPatchRec->patch.uState == PATCH_DISABLED
5714 && (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
5715 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5716 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5717 {
5718 Log(("Found HINT patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5719 return &pPatchRec->patch;
5720 }
5721 }
5722 return NULL;
5723}
5724
5725/**
5726 * Checks whether the GC address is inside a generated patch jump
5727 *
5728 * @returns true -> yes, false -> no
5729 * @param pVM The VM to operate on.
5730 * @param pAddr Guest context address
5731 * @param pPatchAddr Guest context patch address (if true)
5732 */
5733VMMR3DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatchAddr)
5734{
5735 RTRCPTR addr;
5736 PPATCHINFO pPatch;
5737
5738 if (PATMIsEnabled(pVM) == false)
5739 return false;
5740
5741 if (pPatchAddr == NULL)
5742 pPatchAddr = &addr;
5743
5744 *pPatchAddr = 0;
5745
5746 pPatch = PATMFindActivePatchByEntrypoint(pVM, pAddr);
5747 if (pPatch)
5748 *pPatchAddr = pPatch->pPrivInstrGC;
5749
5750 return *pPatchAddr == 0 ? false : true;
5751}
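
/* Usage sketch (hypothetical caller, not part of this file): code that wants to know whether a
 * faulting EIP points into the middle of one of the 5 byte jumps PATM wrote into guest code
 * could do something like:
 *
 *      RTGCPTR32 GCPtrPrivInstr = 0;
 *      if (PATMR3IsInsidePatchJump(pVM, (RTRCPTR)pCtx->eip, &GCPtrPrivInstr))
 *          Log(("eip %RRv lies inside the patch jump of %RRv\n",
 *               (RTRCPTR)pCtx->eip, (RTRCPTR)GCPtrPrivInstr));
 */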
5752
5753/**
5754 * Remove patch for privileged instruction at specified location
5755 *
5756 * @returns VBox status code.
5757 * @param pVM The VM to operate on.
5758 * @param pInstrGC Guest context pointer to the privileged instruction
5759 *
5760 * @note returns failure if patching is not allowed or possible
5761 *
5762 */
5763VMMR3DECL(int) PATMR3RemovePatch(PVM pVM, RTRCPTR pInstrGC)
5764{
5765 PPATMPATCHREC pPatchRec;
5766
5767 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5768 if (pPatchRec)
5769 {
5770 int rc = PATMR3DisablePatch(pVM, pInstrGC);
5771 if (rc == VWRN_PATCH_REMOVED)
5772 return VINF_SUCCESS;
5773
5774 return PATMRemovePatch(pVM, pPatchRec, false);
5775 }
5776 AssertFailed();
5777 return VERR_PATCH_NOT_FOUND;
5778}
5779
5780/**
5781 * Mark patch as dirty
5782 *
5783 * @returns VBox status code.
5784 * @param pVM The VM to operate on.
5785 * @param pPatch Patch record
5786 *
5787 * @note returns failure if patching is not allowed or possible
5788 *
5789 */
5790VMMR3DECL(int) PATMR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch)
5791{
5792 if (pPatch->pPatchBlockOffset)
5793 {
5794 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5795 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5796 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5797 }
5798
5799 STAM_COUNTER_INC(&pVM->patm.s.StatDirty);
5800 /* Put back the replaced instruction. */
5801 int rc = PATMR3DisablePatch(pVM, pPatch->pPrivInstrGC);
5802 if (rc == VWRN_PATCH_REMOVED)
5803 return VINF_SUCCESS;
5804
5805 /* Note: we don't restore patch pages for patches that are not enabled! */
5806 /* Note: be careful when changing this behaviour!! */
5807
5808 /* The patch pages are no longer marked for self-modifying code detection */
5809 if (pPatch->flags & PATMFL_CODE_MONITORED)
5810 {
5811 rc = patmRemovePatchPages(pVM, pPatch);
5812 AssertRCReturn(rc, rc);
5813 }
5814 pPatch->uState = PATCH_DIRTY;
5815
5816    /* Paranoia; make sure this patch is no longer anywhere in the call chain, so that pending ret instructions into it cannot succeed. */
5817 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
5818
5819 return VINF_SUCCESS;
5820}
5821
5822/**
5823 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5824 *
5825 * @returns Corresponding guest context instruction pointer, or 0 if not found.
5826 * @param pVM The VM to operate on.
5827 * @param pPatch Patch block structure pointer
5828 * @param pPatchGC GC address in patch block
5829 */
5830RTRCPTR patmPatchGCPtr2GuestGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pPatchGC)
5831{
5832 Assert(pPatch->Patch2GuestAddrTree);
5833 /* Get the closest record from below. */
5834 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->Patch2GuestAddrTree, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5835 if (pPatchToGuestRec)
5836 return pPatchToGuestRec->pOrgInstrGC;
5837
5838 return 0;
5839}
5840
5841/** Converts Guest code GC ptr to Patch code GC ptr (if found)
5842 *
5843 * @returns corresponding GC pointer in patch block
5844 * @param pVM The VM to operate on.
5845 * @param pPatch Current patch block pointer
5846 * @param pInstrGC Guest context pointer to privileged instruction
5847 *
5848 */
5849RTRCPTR patmGuestGCPtrToPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5850{
5851 if (pPatch->Guest2PatchAddrTree)
5852 {
5853 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
5854 if (pGuestToPatchRec)
5855 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
5856 }
5857
5858 return 0;
5859}
5860
5861/** Converts Guest code GC ptr to Patch code GC ptr (or nearest from below if no identical match)
5862 *
5863 * @returns corresponding GC pointer in patch block
5864 * @param pVM The VM to operate on.
5865 * @param pPatch Current patch block pointer
5866 * @param pInstrGC Guest context pointer to privileged instruction
5867 *
5868 */
5869RTRCPTR patmGuestGCPtrToClosestPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5870{
5871 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pInstrGC, false);
5872 if (pGuestToPatchRec)
5873 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
5874
5875 return 0;
5876}
5877
5878/** Converts Guest code GC ptr to Patch code GC ptr (if found)
5879 *
5880 * @returns corresponding GC pointer in patch block
5881 * @param pVM The VM to operate on.
5882 * @param pInstrGC Guest context pointer to privileged instruction
5883 *
5884 */
5885VMMR3DECL(RTRCPTR) PATMR3GuestGCPtrToPatchGCPtr(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC)
5886{
5887 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5888 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && pInstrGC >= pPatchRec->patch.pPrivInstrGC)
5889 return patmGuestGCPtrToPatchGCPtr(pVM, &pPatchRec->patch, pInstrGC);
5890 else
5891 return 0;
5892}
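
/* How the translation helpers above relate to each other (hypothetical numbers): a patch block
 * starting at pPatchMemGC + 0x1000 that duplicates the guest instruction at 0xc0101234 has a
 * RECGUESTTOPATCH record with Core.Key = 0xc0101234 and PatchOffset = 0x1000, plus the mirrored
 * RECPATCHTOGUEST record keyed by that offset. Consequently:
 *      patmGuestGCPtrToPatchGCPtr(pVM, pPatch, 0xc0101234)            -> pPatchMemGC + 0x1000
 *      patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchMemGC + 0x1000)   -> 0xc0101234
 * patmGuestGCPtrToClosestPatchGCPtr falls back to the nearest record at a lower address when
 * there is no exact match; all of the helpers return 0 when nothing is found.
 */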
5893
5894/**
5895 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5896 *
5897 * @returns original GC instruction pointer or 0 if not found
5898 * @param pVM The VM to operate on.
5899 * @param pPatchGC GC address in patch block
5900 * @param pEnmState State of the translated address (out)
5901 *
5902 */
5903VMMR3DECL(RTRCPTR) PATMR3PatchToGCPtr(PVM pVM, RTRCPTR pPatchGC, PATMTRANSSTATE *pEnmState)
5904{
5905 PPATMPATCHREC pPatchRec;
5906 void *pvPatchCoreOffset;
5907 RTRCPTR pPrivInstrGC;
5908
5909 Assert(PATMIsPatchGCAddr(pVM, pPatchGC));
5910 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5911 if (pvPatchCoreOffset == 0)
5912 {
5913 Log(("PATMR3PatchToGCPtr failed for %RRv offset %x\n", pPatchGC, pPatchGC - pVM->patm.s.pPatchMemGC));
5914 return 0;
5915 }
5916 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
5917 pPrivInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, &pPatchRec->patch, pPatchGC);
5918 if (pEnmState)
5919 {
5920 AssertMsg(pPrivInstrGC && ( pPatchRec->patch.uState == PATCH_ENABLED
5921 || pPatchRec->patch.uState == PATCH_DIRTY
5922 || pPatchRec->patch.uState == PATCH_DISABLE_PENDING
5923 || pPatchRec->patch.uState == PATCH_UNUSABLE),
5924 ("pPrivInstrGC=%RRv uState=%d\n", pPrivInstrGC, pPatchRec->patch.uState));
5925
5926 if ( !pPrivInstrGC
5927 || pPatchRec->patch.uState == PATCH_UNUSABLE
5928 || pPatchRec->patch.uState == PATCH_REFUSED)
5929 {
5930 pPrivInstrGC = 0;
5931 *pEnmState = PATMTRANS_FAILED;
5932 }
5933 else
5934 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pPrivInstrGC)
5935 {
5936 *pEnmState = PATMTRANS_INHIBITIRQ;
5937 }
5938 else
5939 if ( pPatchRec->patch.uState == PATCH_ENABLED
5940 && !(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE))
5941 && pPrivInstrGC > pPatchRec->patch.pPrivInstrGC
5942 && pPrivInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5943 {
5944 *pEnmState = PATMTRANS_OVERWRITTEN;
5945 }
5946 else
5947 if (PATMFindActivePatchByEntrypoint(pVM, pPrivInstrGC))
5948 {
5949 *pEnmState = PATMTRANS_OVERWRITTEN;
5950 }
5951 else
5952 if (pPrivInstrGC == pPatchRec->patch.pPrivInstrGC)
5953 {
5954 *pEnmState = PATMTRANS_PATCHSTART;
5955 }
5956 else
5957 *pEnmState = PATMTRANS_SAFE;
5958 }
5959 return pPrivInstrGC;
5960}
5961
5962/**
5963 * Returns the GC pointer of the patch for the specified GC address
5964 *
5965 * @returns GC pointer of the patch code, or 0 if no enabled or dirty patch exists for the address.
5966 * @param pVM The VM to operate on.
5967 * @param pAddrGC Guest context address
5968 */
5969VMMR3DECL(RTRCPTR) PATMR3QueryPatchGCPtr(PVM pVM, RTRCPTR pAddrGC)
5970{
5971 PPATMPATCHREC pPatchRec;
5972
5973 /* Find the patch record. */
5974 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pAddrGC);
5975 /** @todo we should only use patches that are enabled! always did this, but it's incorrect! */
5976 if (pPatchRec && (pPatchRec->patch.uState == PATCH_ENABLED || pPatchRec->patch.uState == PATCH_DIRTY))
5977 return PATCHCODE_PTR_GC(&pPatchRec->patch);
5978 else
5979 return 0;
5980}
5981
5982/**
5983 * Attempt to recover dirty instructions
5984 *
5985 * @returns VBox status code.
5986 * @param pVM The VM to operate on.
5987 * @param pCtx CPU context
5988 * @param pPatch Patch record
5989 * @param pPatchToGuestRec Patch to guest address record
5990 * @param pEip GC pointer of trapping instruction
5991 */
5992static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch, PRECPATCHTOGUEST pPatchToGuestRec, RTRCPTR pEip)
5993{
5994 DISCPUSTATE CpuOld, CpuNew;
5995 uint8_t *pPatchInstrHC, *pCurPatchInstrHC;
5996 int rc;
5997 RTRCPTR pCurInstrGC, pCurPatchInstrGC;
5998 uint32_t cbDirty;
5999 PRECPATCHTOGUEST pRec;
6000 RTRCPTR const pOrgInstrGC = pPatchToGuestRec->pOrgInstrGC;
6001 PVMCPU pVCpu = VMMGetCpu0(pVM);
6002 Log(("patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv)\n", pEip, pOrgInstrGC));
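
    /*
     * Outline of the recovery attempted below:
     *   1. Walk forward from the faulting patch instruction and collect all adjacent dirty
     *      instructions, restoring the saved opcode byte of each so it can be disassembled;
     *      only harmless instructions qualify.
     *   2. Re-disassemble the current guest code and copy it 1:1 over the dirty patch
     *      instructions, rebuilding the patch-to-guest lookup records (a relative jump back
     *      into the dirty block is also accepted).
     *   3. If the new code is shorter than the dirty area, pad the remainder with a near jump,
     *      or with NOPs when fewer than 5 bytes are left.
     *   4. If anything is unsafe or doesn't fit, fill the dirty area with int3 breakpoints and,
     *      for certain patch types, attempt a full patch refresh instead.
     */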
6003
6004 pRec = pPatchToGuestRec;
6005 pCurInstrGC = pOrgInstrGC;
6006 pCurPatchInstrGC = pEip;
6007 cbDirty = 0;
6008 pPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6009
6010 /* Find all adjacent dirty instructions */
6011 while (true)
6012 {
6013 if (pRec->fJumpTarget)
6014 {
6015            LogRel(("PATM: patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv) ignored, because an instruction in this function was reused as a jump target\n", pEip, pOrgInstrGC));
6016 pRec->fDirty = false;
6017 return VERR_PATCHING_REFUSED;
6018 }
6019
6020 /* Restore original instruction opcode byte so we can check if the write was indeed safe. */
6021 pCurPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6022 *pCurPatchInstrHC = pRec->u8DirtyOpcode;
6023
6024 /* Only harmless instructions are acceptable. */
6025 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurPatchInstrGC, &CpuOld, 0);
6026 if ( RT_FAILURE(rc)
6027 || !(CpuOld.pCurInstr->optype & OPTYPE_HARMLESS))
6028 {
6029 if (RT_SUCCESS(rc))
6030 cbDirty += CpuOld.opsize;
6031 else
6032 if (!cbDirty)
6033 cbDirty = 1;
6034 break;
6035 }
6036
6037#ifdef DEBUG
6038 char szBuf[256];
6039 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6040 szBuf, sizeof(szBuf), NULL);
6041 Log(("DIRTY: %s\n", szBuf));
6042#endif
6043 /* Mark as clean; if we fail we'll let it always fault. */
6044 pRec->fDirty = false;
6045
6046 /* Remove old lookup record. */
6047 patmr3RemoveP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrGC);
6048 pPatchToGuestRec = NULL;
6049
6050 pCurPatchInstrGC += CpuOld.opsize;
6051 cbDirty += CpuOld.opsize;
6052
6053 /* Let's see if there's another dirty instruction right after. */
6054 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6055 if (!pRec || !pRec->fDirty)
6056 break; /* no more dirty instructions */
6057
6058 /* In case of complex instructions the next guest instruction could be quite far off. */
6059 pCurPatchInstrGC = pRec->Core.Key + pVM->patm.s.pPatchMemGC;
6060 }
6061
6062 if ( RT_SUCCESS(rc)
6063 && (CpuOld.pCurInstr->optype & OPTYPE_HARMLESS)
6064 )
6065 {
6066 uint32_t cbLeft;
6067
6068 pCurPatchInstrHC = pPatchInstrHC;
6069 pCurPatchInstrGC = pEip;
6070 cbLeft = cbDirty;
6071
6072 while (cbLeft && RT_SUCCESS(rc))
6073 {
6074 bool fValidInstr;
6075
6076 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurInstrGC, &CpuNew, 0);
6077
6078 fValidInstr = !!(CpuNew.pCurInstr->optype & OPTYPE_HARMLESS);
6079 if ( !fValidInstr
6080 && (CpuNew.pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW)
6081 )
6082 {
6083 RTRCPTR pTargetGC = PATMResolveBranch(&CpuNew, pCurInstrGC);
6084
6085 if ( pTargetGC >= pOrgInstrGC
6086 && pTargetGC <= pOrgInstrGC + cbDirty
6087 )
6088 {
6089 /* A relative jump to an instruction inside or to the end of the dirty block is acceptable. */
6090 fValidInstr = true;
6091 }
6092 }
6093
6094 /* If the instruction is completely harmless (which implies a 1:1 patch copy). */
6095 if ( rc == VINF_SUCCESS
6096 && CpuNew.opsize <= cbLeft /* must still fit */
6097 && fValidInstr
6098 )
6099 {
6100#ifdef DEBUG
6101 char szBuf[256];
6102 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6103 szBuf, sizeof(szBuf), NULL);
6104 Log(("NEW: %s\n", szBuf));
6105#endif
6106
6107 /* Copy the new instruction. */
6108 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pCurPatchInstrHC, pCurInstrGC, CpuNew.opsize);
6109 AssertRC(rc);
6110
6111 /* Add a new lookup record for the duplicated instruction. */
6112 patmr3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6113 }
6114 else
6115 {
6116#ifdef DEBUG
6117 char szBuf[256];
6118 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6119 szBuf, sizeof(szBuf), NULL);
6120 Log(("NEW: %s (FAILED)\n", szBuf));
6121#endif
6122 /* Restore the old lookup record for the duplicated instruction. */
6123 patmr3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
6124
6125 /** @todo in theory we need to restore the lookup records for the remaining dirty instructions too! */
6126 rc = VERR_PATCHING_REFUSED;
6127 break;
6128 }
6129 pCurInstrGC += CpuNew.opsize;
6130 pCurPatchInstrHC += CpuNew.opsize;
6131 pCurPatchInstrGC += CpuNew.opsize;
6132 cbLeft -= CpuNew.opsize;
6133
6134 /* Check if we expanded a complex guest instruction into a patch stream (e.g. call) */
6135 if (!cbLeft)
6136 {
6137 /* If the next patch instruction doesn't correspond to the next guest instruction, then we have some extra room to fill. */
6138 if (RTAvlU32Get(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC) == NULL)
6139 {
6140 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
6141 if (pRec)
6142 {
6143 unsigned cbFiller = pRec->Core.Key + pVM->patm.s.pPatchMemGC - pCurPatchInstrGC;
6144 uint8_t *pPatchFillHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
6145
6146 Assert(!pRec->fDirty);
6147
6148 Log(("Room left in patched instruction stream (%d bytes)\n", cbFiller));
6149 if (cbFiller >= SIZEOF_NEARJUMP32)
6150 {
6151 pPatchFillHC[0] = 0xE9;
6152 *(uint32_t *)&pPatchFillHC[1] = cbFiller - SIZEOF_NEARJUMP32;
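                            /* The rel32 operand is cbFiller - SIZEOF_NEARJUMP32: it is taken
                               relative to the instruction following the 5 byte jmp, so execution
                               skips the unused gap and resumes at the next patch instruction. */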
6153#ifdef DEBUG
6154 char szBuf[256];
6155 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
6156 szBuf, sizeof(szBuf), NULL);
6157 Log(("FILL: %s\n", szBuf));
6158#endif
6159 }
6160 else
6161 {
6162 for (unsigned i = 0; i < cbFiller; i++)
6163 {
6164 pPatchFillHC[i] = 0x90; /* NOP */
6165#ifdef DEBUG
6166 char szBuf[256];
6167 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC + i,
6168 DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6169 Log(("FILL: %s\n", szBuf));
6170#endif
6171 }
6172 }
6173 }
6174 }
6175 }
6176 }
6177 }
6178 else
6179 rc = VERR_PATCHING_REFUSED;
6180
6181 if (RT_SUCCESS(rc))
6182 {
6183 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyGood);
6184 }
6185 else
6186 {
6187 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyBad);
6188 Assert(cbDirty);
6189
6190 /* Mark the whole instruction stream with breakpoints. */
6191 if (cbDirty)
6192 memset(pPatchInstrHC, 0xCC, cbDirty);
6193
6194 if ( pVM->patm.s.fOutOfMemory == false
6195 && (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER)))
6196 {
6197 rc = patmR3RefreshPatch(pVM, pPatch);
6198 if (RT_FAILURE(rc))
6199 {
6200 LogRel(("PATM: Failed to refresh dirty patch at %RRv. Disabling it.\n", pPatch->patch.pPrivInstrGC));
6201 }
6202 /* Even if we succeed, we must go back to the original instruction as the patched one could be invalid. */
6203 rc = VERR_PATCHING_REFUSED;
6204 }
6205 }
6206 return rc;
6207}
6208
6209/**
6210 * Handle trap inside patch code
6211 *
6212 * @returns VBox status code.
6213 * @param pVM The VM to operate on.
6214 * @param pCtx CPU context
6215 * @param pEip GC pointer of trapping instruction
6216 * @param ppNewEip GC pointer to new instruction
6217 */
6218VMMR3DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *ppNewEip)
6219{
6220 PPATMPATCHREC pPatch = 0;
6221 void *pvPatchCoreOffset;
6222 RTRCUINTPTR offset;
6223 RTRCPTR pNewEip;
6224    int rc;
6225 PRECPATCHTOGUEST pPatchToGuestRec = 0;
6226 PVMCPU pVCpu = VMMGetCpu0(pVM);
6227
6228 Assert(pVM->cCpus == 1);
6229
6230 pNewEip = 0;
6231 *ppNewEip = 0;
6232
6233 STAM_PROFILE_ADV_START(&pVM->patm.s.StatHandleTrap, a);
6234
6235 /* Find the patch record. */
6236 /* Note: there might not be a patch to guest translation record (global function) */
6237 offset = pEip - pVM->patm.s.pPatchMemGC;
6238 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
6239 if (pvPatchCoreOffset)
6240 {
6241 pPatch = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6242
6243 Assert(offset >= pPatch->patch.pPatchBlockOffset && offset < pPatch->patch.pPatchBlockOffset + pPatch->patch.cbPatchBlockSize);
6244
6245 if (pPatch->patch.uState == PATCH_DIRTY)
6246 {
6247 Log(("PATMR3HandleTrap: trap in dirty patch at %RRv\n", pEip));
6248 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6249 {
6250 /* Function duplication patches set fPIF to 1 on entry */
6251 pVM->patm.s.pGCStateHC->fPIF = 1;
6252 }
6253 }
6254 else
6255 if (pPatch->patch.uState == PATCH_DISABLED)
6256 {
6257 Log(("PATMR3HandleTrap: trap in disabled patch at %RRv\n", pEip));
6258 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6259 {
6260 /* Function duplication patches set fPIF to 1 on entry */
6261 pVM->patm.s.pGCStateHC->fPIF = 1;
6262 }
6263 }
6264 else
6265 if (pPatch->patch.uState == PATCH_DISABLE_PENDING)
6266 {
6267 RTRCPTR pPrivInstrGC = pPatch->patch.pPrivInstrGC;
6268
6269 Log(("PATMR3HandleTrap: disable operation is pending for patch at %RRv\n", pPatch->patch.pPrivInstrGC));
6270 rc = PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6271 AssertReleaseMsg(rc != VWRN_PATCH_REMOVED, ("PATMR3DisablePatch removed patch at %RRv\n", pPrivInstrGC));
6272 AssertMsg(pPatch->patch.uState == PATCH_DISABLED || pPatch->patch.uState == PATCH_UNUSABLE, ("Unexpected failure to disable patch state=%d rc=%Rrc\n", pPatch->patch.uState, rc));
6273 }
6274
6275 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, offset, false);
6276 AssertReleaseMsg(pPatchToGuestRec, ("PATMR3HandleTrap: Unable to find corresponding guest address for %RRv (offset %x)\n", pEip, offset));
6277
6278 pNewEip = pPatchToGuestRec->pOrgInstrGC;
6279 pPatch->patch.cTraps++;
6280 PATM_STAT_FAULT_INC(&pPatch->patch);
6281 }
6282 else
6283 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 0, ("PATMR3HandleTrap: Unable to find translation record for %RRv (PIF=0)\n", pEip));
6284
6285 /* Check if we were interrupted in PATM generated instruction code. */
6286 if (pVM->patm.s.pGCStateHC->fPIF == 0)
6287 {
6288 DISCPUSTATE Cpu;
6289 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pEip, &Cpu, "PIF Trap: ");
6290 AssertRC(rc);
6291
6292 if ( rc == VINF_SUCCESS
6293 && ( Cpu.pCurInstr->opcode == OP_PUSHF
6294 || Cpu.pCurInstr->opcode == OP_PUSH
6295 || Cpu.pCurInstr->opcode == OP_CALL)
6296 )
6297 {
6298 uint64_t fFlags;
6299
6300 STAM_COUNTER_INC(&pVM->patm.s.StatPushTrap);
6301
6302 if (Cpu.pCurInstr->opcode == OP_PUSH)
6303 {
6304 rc = PGMShwGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6305 if ( rc == VINF_SUCCESS
6306 && ((fFlags & (X86_PTE_P|X86_PTE_RW)) == (X86_PTE_P|X86_PTE_RW)) )
6307 {
6308 /* The stack address is fine, so the push argument is a pointer -> emulate this instruction */
6309
6310 /* Reset the PATM stack. */
6311 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6312
6313 pVM->patm.s.pGCStateHC->fPIF = 1;
6314
6315 Log(("Faulting push -> go back to the original instruction\n"));
6316
6317 /* continue at the original instruction */
6318 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6319 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6320 return VINF_SUCCESS;
6321 }
6322 }
6323
6324 /* Typical pushf (most patches)/push (call patch) trap because of a monitored page. */
6325 rc = PGMShwMakePageWritable(pVCpu, pCtx->esp, 0 /*fFlags*/);
6326 AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
6327 if (rc == VINF_SUCCESS)
6328 {
6329 /* The guest page *must* be present. */
6330 rc = PGMGstGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6331 if ( rc == VINF_SUCCESS
6332 && (fFlags & X86_PTE_P))
6333 {
6334 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6335 return VINF_PATCH_CONTINUE;
6336 }
6337 }
6338 }
6339 else
6340        if (pPatch && pPatch->patch.pPrivInstrGC == pNewEip)
6341 {
6342 /* Invalidated patch or first instruction overwritten.
6343 * We can ignore the fPIF state in this case.
6344 */
6345 /* Reset the PATM stack. */
6346 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6347
6348 Log(("Call to invalidated patch -> go back to the original instruction\n"));
6349
6350 pVM->patm.s.pGCStateHC->fPIF = 1;
6351
6352 /* continue at the original instruction */
6353 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6354 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6355 return VINF_SUCCESS;
6356 }
6357
6358 char szBuf[256];
6359 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pEip, DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6360
6361 /* Very bad. We crashed in emitted code. Probably stack? */
6362 if (pPatch)
6363 {
6364 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6365 ("Crash in patch code %RRv (%RRv) esp=%RX32\nPatch state=%x flags=%RX64 fDirty=%d\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), pPatch->patch.uState, pPatch->patch.flags, pPatchToGuestRec->fDirty, szBuf));
6366 }
6367 else
6368 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6369 ("Crash in patch code %RRv (%RRv) esp=%RX32\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), szBuf));
6370 EMR3FatalError(pVCpu, VERR_INTERNAL_ERROR);
6371 }
6372
6373 /* From here on, we must have a valid patch to guest translation. */
6374 if (pvPatchCoreOffset == 0)
6375 {
6376 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6377 AssertMsgFailed(("PATMR3HandleTrap: patch not found at address %RRv!!\n", pEip));
6378 return VERR_PATCH_NOT_FOUND;
6379 }
6380
6381 /* Take care of dirty/changed instructions. */
6382 if (pPatchToGuestRec->fDirty)
6383 {
6384 Assert(pPatchToGuestRec->Core.Key == offset);
6385 Assert(pVM->patm.s.pGCStateHC->fPIF == 1);
6386
6387 rc = patmR3HandleDirtyInstr(pVM, pCtx, pPatch, pPatchToGuestRec, pEip);
6388 if (RT_SUCCESS(rc))
6389 {
6390 /* Retry the current instruction. */
6391 pNewEip = pEip;
6392 rc = VINF_PATCH_CONTINUE; /* Continue at current patch instruction. */
6393 }
6394 else
6395 {
6396 /* Reset the PATM stack. */
6397 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6398
6399 rc = VINF_SUCCESS; /* Continue at original instruction. */
6400 }
6401
6402 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6403 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6404 return rc;
6405 }
6406
6407#ifdef VBOX_STRICT
6408 if (pPatch->patch.flags & PATMFL_DUPLICATE_FUNCTION)
6409 {
6410 DISCPUSTATE cpu;
6411 bool disret;
6412 uint32_t opsize;
6413 PATMP2GLOOKUPREC cacheRec;
6414 RT_ZERO(cacheRec);
6415 cacheRec.pPatch = &pPatch->patch;
6416
6417 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6418 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), &opsize, NULL, PATMREAD_RAWCODE);
6419 if (cacheRec.Lock.pvMap)
6420 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6421
6422 if (disret && cpu.pCurInstr->opcode == OP_RETN)
6423 {
6424 RTRCPTR retaddr;
6425 PCPUMCTX pCtx2;
6426
6427 pCtx2 = CPUMQueryGuestCtxPtr(pVCpu);
6428
6429 rc = PGMPhysSimpleReadGCPtr(pVCpu, &retaddr, pCtx2->esp, sizeof(retaddr));
6430 AssertRC(rc);
6431
6432 Log(("Return failed at %RRv (%RRv)\n", pEip, pNewEip));
6433 Log(("Expected return address %RRv found address %RRv Psp=%x\n", pVM->patm.s.pGCStackHC[(pVM->patm.s.pGCStateHC->Psp+PATM_STACK_SIZE)/sizeof(RTRCPTR)], retaddr, pVM->patm.s.pGCStateHC->Psp));
6434 }
6435 }
6436#endif
6437
6438    /* Return the original address, corrected by subtracting the CS base address. */
6439 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6440
6441 /* Reset the PATM stack. */
6442 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6443
6444 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pNewEip)
6445 {
6446 /* Must be a faulting instruction after sti; currently only sysexit, hlt or iret */
6447 Log(("PATMR3HandleTrap %RRv -> inhibit irqs set!\n", pEip));
6448#ifdef VBOX_STRICT
6449 DISCPUSTATE cpu;
6450 bool disret;
6451 uint32_t opsize;
6452 PATMP2GLOOKUPREC cacheRec;
6453 RT_ZERO(cacheRec);
6454 cacheRec.pPatch = &pPatch->patch;
6455
6456 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6457 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), &opsize, NULL, PATMREAD_ORGCODE);
6458 if (cacheRec.Lock.pvMap)
6459 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6460
6461 if (disret && (cpu.pCurInstr->opcode == OP_SYSEXIT || cpu.pCurInstr->opcode == OP_HLT || cpu.pCurInstr->opcode == OP_INT3))
6462 {
6463 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6464 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), &opsize, NULL, PATMREAD_RAWCODE);
6465 if (cacheRec.Lock.pvMap)
6466 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
6467
6468 Assert(cpu.pCurInstr->opcode == OP_SYSEXIT || cpu.pCurInstr->opcode == OP_HLT || cpu.pCurInstr->opcode == OP_IRET);
6469 }
6470#endif
6471 EMSetInhibitInterruptsPC(pVCpu, pNewEip);
6472 pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts = 0;
6473 }
6474
6475 Log2(("pPatchBlockGC %RRv - pEip %RRv corresponding GC address %RRv\n", PATCHCODE_PTR_GC(&pPatch->patch), pEip, pNewEip));
6476#ifdef LOG_ENABLED
6477 CPUMR3DisasmInstr(pVM, pVCpu, pCtx, pNewEip, "PATCHRET: ");
6478#endif
6479 if (pNewEip >= pPatch->patch.pPrivInstrGC && pNewEip < pPatch->patch.pPrivInstrGC + pPatch->patch.cbPatchJump)
6480 {
6481        /* We can't jump back to code that we've overwritten with a 5-byte jump! */
6482        Log(("Disabling patch at location %RRv due to trap too close to the privileged instruction\n", pPatch->patch.pPrivInstrGC));
6483 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6484 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6485 return VERR_PATCH_DISABLED;
6486 }
6487
6488#ifdef PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
6489    /** @todo Compare to the number of successful runs; add an aging algorithm and determine the best time to disable the patch. */
6490 if (pPatch->patch.cTraps > MAX_PATCH_TRAPS)
6491 {
6492 Log(("Disabling patch at location %RRv due to too many traps inside patch code\n", pPatch->patch.pPrivInstrGC));
6493        // We are only wasting time; back out the patch.
6494 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6495 pTrapRec->pNextPatchInstr = 0;
6496 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6497 return VERR_PATCH_DISABLED;
6498 }
6499#endif
6500
6501 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6502 return VINF_SUCCESS;
6503}
6504
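/*
 * Illustrative sketch, not part of the original source: how a ring-3 trap
 * path might hand a fault inside patch memory to PATMR3HandleTrap and resume
 * at the address it returns.  The wrapper name patmSampleForwardTrap is
 * hypothetical and a flat CS with base 0 is assumed, so pCtx->eip can be used
 * directly as the faulting GC address; error statuses (e.g. VERR_PATCH_DISABLED,
 * VERR_PATCH_NOT_FOUND) are left to the caller's normal trap handling.
 */
#if 0
static int patmSampleForwardTrap(PVM pVM, PCPUMCTX pCtx)
{
    RTGCPTR pNewEip = 0;
    int rc = PATMR3HandleTrap(pVM, pCtx, (RTRCPTR)pCtx->eip, &pNewEip);
    if (rc == VINF_SUCCESS || rc == VINF_PATCH_CONTINUE)
        pCtx->eip = (uint32_t)pNewEip;  /* resume at the original or patch instruction */
    return rc;
}
#endif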
6505
6506/**
6507 * Handle a page fault in a monitored page.
6508 *
6509 * @returns VBox status code.
6510 * @param pVM The VM to operate on.
6511 */
6512VMMR3DECL(int) PATMR3HandleMonitoredPage(PVM pVM)
6513{
6514 RTRCPTR addr = pVM->patm.s.pvFaultMonitor;
6515
6516 addr &= PAGE_BASE_GC_MASK;
6517
6518 int rc = PGMHandlerVirtualDeregister(pVM, addr);
6519 AssertRC(rc); NOREF(rc);
6520
6521 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, false);
6522 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) == PAGE_ADDRESS(addr))
6523 {
6524 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6525 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6526 rc = PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6527 if (rc == VWRN_PATCH_REMOVED)
6528 return VINF_SUCCESS;
6529
6530 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6531
6532 if (addr == pPatchRec->patch.pPrivInstrGC)
6533 addr++;
6534 }
6535
6536    for (;;)
6537 {
6538 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, true);
6539
6540 if (!pPatchRec || PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) != PAGE_ADDRESS(addr))
6541 break;
6542
6543 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
6544 {
6545 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6546 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6547 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6548 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6549 }
6550 addr = pPatchRec->patch.pPrivInstrGC + 1;
6551 }
6552
6553 pVM->patm.s.pvFaultMonitor = 0;
6554 return VINF_SUCCESS;
6555}
6556
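/*
 * Illustrative sketch, not part of the original source: the calling pattern
 * assumed by PATMR3HandleMonitoredPage.  An earlier fault path is expected to
 * have stored the faulting guest address in pVM->patm.s.pvFaultMonitor before
 * ring-3 renews the patches on that page.  The wrapper name is hypothetical.
 */
#if 0
static int patmSampleRenewMonitoredPage(PVM pVM, RTRCPTR GCPtrFault)
{
    pVM->patm.s.pvFaultMonitor = GCPtrFault;    /* normally set by the fault handler */
    return PATMR3HandleMonitoredPage(pVM);      /* deregister handler, renew patches on the page */
}
#endif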
6557
6558#ifdef VBOX_WITH_STATISTICS
6559
6560static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch)
6561{
6562 if (pPatch->flags & PATMFL_SYSENTER)
6563 {
6564 return "SYSENT";
6565 }
6566 else
6567 if (pPatch->flags & (PATMFL_TRAPHANDLER|PATMFL_INTHANDLER))
6568 {
6569 static char szTrap[16];
6570 uint32_t iGate;
6571
6572 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
6573 if (iGate < 256)
6574 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-%2X" : "TRAP-%2X", iGate);
6575 else
6576 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-??" : "TRAP-??");
6577 return szTrap;
6578 }
6579 else
6580 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
6581 return "DUPFUNC";
6582 else
6583 if (pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL)
6584 return "FUNCCALL";
6585 else
6586 if (pPatch->flags & PATMFL_TRAMPOLINE)
6587 return "TRAMP";
6588 else
6589 return patmGetInstructionString(pPatch->opcode, pPatch->flags);
6590}
6591
6592static const char *PATMPatchState(PVM pVM, PPATCHINFO pPatch)
6593{
6594 NOREF(pVM);
6595 switch(pPatch->uState)
6596 {
6597 case PATCH_ENABLED:
6598 return "ENA";
6599 case PATCH_DISABLED:
6600 return "DIS";
6601 case PATCH_DIRTY:
6602 return "DIR";
6603 case PATCH_UNUSABLE:
6604 return "UNU";
6605 case PATCH_REFUSED:
6606 return "REF";
6607 case PATCH_DISABLE_PENDING:
6608 return "DIP";
6609 default:
6610 AssertFailed();
6611 return " ";
6612 }
6613}
6614
6615/**
6616 * Resets the sample.
6617 * @param pVM The VM handle.
6618 * @param pvSample The sample registered using STAMR3RegisterCallback.
6619 */
6620static void patmResetStat(PVM pVM, void *pvSample)
6621{
6622 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6623 Assert(pPatch);
6624
6625 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A = 0;
6626 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B = 0;
6627}
6628
6629/**
6630 * Prints the sample into the buffer.
6631 *
6632 * @param pVM The VM handle.
6633 * @param pvSample The sample registered using STAMR3RegisterCallback.
6634 * @param pszBuf The buffer to print into.
6635 * @param cchBuf The size of the buffer.
6636 */
6637static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf)
6638{
6639 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6640 Assert(pPatch);
6641
6642 Assert(pPatch->uState != PATCH_REFUSED);
6643 Assert(!(pPatch->flags & (PATMFL_REPLACE_FUNCTION_CALL|PATMFL_MMIO_ACCESS)));
6644
6645 RTStrPrintf(pszBuf, cchBuf, "size %04x ->%3s %8s - %08d - %08d",
6646 pPatch->cbPatchBlockSize, PATMPatchState(pVM, pPatch), PATMPatchType(pVM, pPatch),
6647 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A, pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B);
6648}
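
/*
 * Example of the line produced above, with purely hypothetical values:
 *   "size 01a4 ->ENA  DUPFUNC - 00001234 - 00000007"
 * i.e. patch block size, 3-letter state, patch type, then the u32A/u32B
 * counters of the patch's STAMRATIOU32 sample.
 */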
6649
6650/**
6651 * Returns the GC address of the corresponding patch statistics counter.
6652 *
6653 * @returns GC address of the statistics counter.
6654 * @param pVM The VM to operate on.
6655 * @param pPatch Pointer to the patch structure.
6656 */
6657RTRCPTR patmPatchQueryStatAddress(PVM pVM, PPATCHINFO pPatch)
6658{
6659 Assert(pPatch->uPatchIdx != PATM_STAT_INDEX_NONE);
6660 return pVM->patm.s.pStatsGC + sizeof(STAMRATIOU32) * pPatch->uPatchIdx + RT_OFFSETOF(STAMRATIOU32, u32A);
6661}
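
/*
 * Illustrative sketch, not part of the original source: the layout implied by
 * patmPatchQueryStatAddress.  The per-patch counters form a flat array of
 * STAMRATIOU32 in the GC statistics block, so the matching u32B counter of
 * the same patch lives at the offset of u32B instead of u32A.  The helper
 * name is hypothetical.
 */
#if 0
static RTRCPTR patmSampleQueryStatAddressB(PVM pVM, PPATCHINFO pPatch)
{
    Assert(pPatch->uPatchIdx != PATM_STAT_INDEX_NONE);
    return pVM->patm.s.pStatsGC
         + sizeof(STAMRATIOU32) * pPatch->uPatchIdx
         + RT_OFFSETOF(STAMRATIOU32, u32B);
}
#endif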
6662
6663#endif /* VBOX_WITH_STATISTICS */
6664
6665#ifdef VBOX_WITH_DEBUGGER
6666/**
6667 * The '.patmoff' command.
6668 *
6669 * @returns VBox status.
6670 * @param pCmd Pointer to the command descriptor (as registered).
6671 * @param pCmdHlp Pointer to command helper functions.
6672 * @param pVM Pointer to the current VM (if any).
6673 * @param paArgs Pointer to (readonly) array of arguments.
6674 * @param cArgs Number of arguments in the array.
6675 */
6676static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
6677{
6678 /*
6679 * Validate input.
6680 */
6681 NOREF(pCmd); NOREF(cArgs); NOREF(paArgs);
6682 if (!pVM)
6683        return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
6684
6685 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, DisableAllPatches, pVM);
6686 PATMR3AllowPatching(pVM, false);
6687 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching disabled\n");
6688}
6689
6690/**
6691 * The '.patmon' command.
6692 *
6693 * @returns VBox status.
6694 * @param pCmd Pointer to the command descriptor (as registered).
6695 * @param pCmdHlp Pointer to command helper functions.
6696 * @param pVM Pointer to the current VM (if any).
6697 * @param paArgs Pointer to (readonly) array of arguments.
6698 * @param cArgs Number of arguments in the array.
6699 */
6700static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
6701{
6702 /*
6703 * Validate input.
6704 */
6705 NOREF(pCmd); NOREF(cArgs); NOREF(paArgs);
6706 if (!pVM)
6707        return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
6708
6709 PATMR3AllowPatching(pVM, true);
6710 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, EnableAllPatches, pVM);
6711 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching enabled\n");
6712}
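
/*
 * Illustrative usage, not part of the original source: from the VBox debugger
 * console the two commands above take no arguments, e.g.
 *   .patmoff   disables all existing patches and turns patching off
 *   .patmon    re-enables patching and all existing patches
 */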
6713#endif