VirtualBox

source: vbox/trunk/src/VBox/VMM/PATM/PATM.cpp@30500

Last change on this file since 30500 was 30495, checked in by vboxsync, 15 years ago

Drop PGMPhysGCPtr2R3Ptr usage

1/* $Id: PATM.cpp 30495 2010-06-29 12:44:34Z vboxsync $ */
2/** @file
3 * PATM - Dynamic Guest OS Patching Manager
4 *
5 * NOTE: Never ever reuse patch memory!!
6 */
7
8/*
9 * Copyright (C) 2006-2007 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_PATM
24#include <VBox/patm.h>
25#include <VBox/stam.h>
26#include <VBox/pgm.h>
27#include <VBox/cpum.h>
28#include <VBox/cpumdis.h>
29#include <VBox/iom.h>
30#include <VBox/mm.h>
31#include <VBox/ssm.h>
32#include <VBox/trpm.h>
33#include <VBox/cfgm.h>
34#include <VBox/param.h>
35#include <VBox/selm.h>
36#include <iprt/avl.h>
37#include "PATMInternal.h"
38#include "PATMPatch.h"
39#include <VBox/vm.h>
40#include <VBox/csam.h>
41#include <VBox/dbg.h>
42#include <VBox/err.h>
43#include <VBox/log.h>
44#include <iprt/assert.h>
45#include <iprt/asm.h>
46#include <VBox/dis.h>
47#include <VBox/disopcode.h>
48#include <include/internal/pgm.h>
49
50#include <iprt/string.h>
51#include "PATMA.h"
52
53//#define PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
54//#define PATM_DISABLE_ALL
55
56/*******************************************************************************
57* Internal Functions *
58*******************************************************************************/
59
60static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pPatch);
61static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
62static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
63
64#ifdef LOG_ENABLED // keep gcc quiet
65static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC);
66#endif
67#ifdef VBOX_WITH_STATISTICS
68static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch);
69static void patmResetStat(PVM pVM, void *pvSample);
70static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf);
71#endif
72
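/* Translate between the host (HC) and guest (GC) views of the patch memory block;
   both map the same memory, so converting is plain offset arithmetic against the two base pointers. */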
73#define patmPatchHCPtr2PatchGCPtr(pVM, pHC) (pVM->patm.s.pPatchMemGC + (pHC - pVM->patm.s.pPatchMemHC))
74#define patmPatchGCPtr2PatchHCPtr(pVM, pGC) (pVM->patm.s.pPatchMemHC + (pGC - pVM->patm.s.pPatchMemGC))
75
76static int patmReinit(PVM pVM);
77static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam);
78
79#ifdef VBOX_WITH_DEBUGGER
80static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM);
81static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
82static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
83
84/** Command descriptors. */
85static const DBGCCMD g_aCmds[] =
86{
87 /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, pResultDesc, fFlags, pfnHandler, pszSyntax, pszDescription */
88 { "patmon", 0, 0, NULL, 0, NULL, 0, patmr3CmdOn, "", "Enable patching." },
89 { "patmoff", 0, 0, NULL, 0, NULL, 0, patmr3CmdOff, "", "Disable patching." },
90};
91#endif
92
93/* Don't want to break saved states, so put it here as a global variable. */
94static unsigned int cIDTHandlersDisabled = 0;
95
96/**
97 * Initializes the PATM.
98 *
99 * @returns VBox status code.
100 * @param pVM The VM to operate on.
101 */
102VMMR3DECL(int) PATMR3Init(PVM pVM)
103{
104 int rc;
105
106 Log(("PATMR3Init: Patch record size %d\n", sizeof(PATCHINFO)));
107
108 /* These values can't change as they are hardcoded in patch code (old saved states!) */
109 AssertCompile(VMCPU_FF_TIMER == RT_BIT_32(2));
110 AssertCompile(VM_FF_REQUEST == VMCPU_FF_REQUEST);
111 AssertCompile(VMCPU_FF_INTERRUPT_APIC == RT_BIT_32(0));
112 AssertCompile(VMCPU_FF_INTERRUPT_PIC == RT_BIT_32(1));
113
114 AssertReleaseMsg(PATMInterruptFlag == (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST),
115 ("Interrupt flags out of sync!! PATMInterruptFlag=%#x expected %#x. broken assembler?\n", PATMInterruptFlag, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST));
116
117 /* Allocate patch memory and GC patch state memory. */
118 pVM->patm.s.cbPatchMem = PATCH_MEMORY_SIZE;
119 /* Add another page in case the generated code is much larger than expected. */
120 /** @todo bad safety precaution */
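    /* One contiguous hypervisor allocation, carved up right below:
       patch code (PATCH_MEMORY_SIZE) + spare page, PATM stack (PATM_STACK_TOTAL_SIZE),
       GC state page, and patch statistics (PATM_STAT_MEMSIZE). */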
121 rc = MMR3HyperAllocOnceNoRel(pVM, PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE + PATM_STAT_MEMSIZE, PAGE_SIZE, MM_TAG_PATM, (void **)&pVM->patm.s.pPatchMemHC);
122 if (RT_FAILURE(rc))
123 {
124 Log(("MMHyperAlloc failed with %Rrc\n", rc));
125 return rc;
126 }
127 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
128
129 /* PATM stack page for call instruction execution. (2 parts: one for our private stack and one to store the original return address) */
130 pVM->patm.s.pGCStackHC = (RTRCPTR *)(pVM->patm.s.pPatchMemHC + PATCH_MEMORY_SIZE + PAGE_SIZE);
131 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
132
133 /*
134 * Hypervisor memory for GC status data (read/write)
135 *
136 * Note1: This is non-critical data; if trashed by the guest, then it will only cause problems for itself
137 * Note2: This doesn't really belong here, but we need access to it for relocation purposes
138 *
139 */
140 Assert(sizeof(PATMGCSTATE) < PAGE_SIZE); /** @note hardcoded dependencies on this exist. */
141 pVM->patm.s.pGCStateHC = (PPATMGCSTATE)((uint8_t *)pVM->patm.s.pGCStackHC + PATM_STACK_TOTAL_SIZE);
142 pVM->patm.s.pGCStateGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
143
144 /* Hypervisor memory for patch statistics */
145 pVM->patm.s.pStatsHC = (PSTAMRATIOU32)((uint8_t *)pVM->patm.s.pGCStateHC + PAGE_SIZE);
146 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
147
148 /* Memory for patch lookup trees. */
149 rc = MMHyperAlloc(pVM, sizeof(*pVM->patm.s.PatchLookupTreeHC), 0, MM_TAG_PATM, (void **)&pVM->patm.s.PatchLookupTreeHC);
150 AssertRCReturn(rc, rc);
151 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
152
153#ifdef RT_ARCH_AMD64 /* see patmReinit(). */
154 /* Check CFGM option. */
155 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "PATMEnabled", &pVM->fPATMEnabled);
156 if (RT_FAILURE(rc))
157# ifdef PATM_DISABLE_ALL
158 pVM->fPATMEnabled = false;
159# else
160 pVM->fPATMEnabled = true;
161# endif
162#endif
163
164 rc = patmReinit(pVM);
165 AssertRC(rc);
166 if (RT_FAILURE(rc))
167 return rc;
168
169 /*
170 * Register save and load state notificators.
171 */
172 rc = SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SSM_VERSION, sizeof(pVM->patm.s) + PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE,
173 NULL, NULL, NULL,
174 NULL, patmR3Save, NULL,
175 NULL, patmR3Load, NULL);
176 AssertRCReturn(rc, rc);
177
178#ifdef VBOX_WITH_DEBUGGER
179 /*
180 * Debugger commands.
181 */
182 static bool s_fRegisteredCmds = false;
183 if (!s_fRegisteredCmds)
184 {
185 int rc2 = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
186 if (RT_SUCCESS(rc2))
187 s_fRegisteredCmds = true;
188 }
189#endif
190
191#ifdef VBOX_WITH_STATISTICS
192 STAM_REG(pVM, &pVM->patm.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/PATM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
193 STAM_REG(pVM, &pVM->patm.s.StatPATMMemoryUsed,STAMTYPE_COUNTER, "/PATM/MemoryUsed", STAMUNIT_OCCURENCES, "The amount of hypervisor heap used for patches.");
194 STAM_REG(pVM, &pVM->patm.s.StatDisabled, STAMTYPE_COUNTER, "/PATM/Patch/Disabled", STAMUNIT_OCCURENCES, "Number of times patches were disabled.");
195 STAM_REG(pVM, &pVM->patm.s.StatEnabled, STAMTYPE_COUNTER, "/PATM/Patch/Enabled", STAMUNIT_OCCURENCES, "Number of times patches were enabled.");
196 STAM_REG(pVM, &pVM->patm.s.StatDirty, STAMTYPE_COUNTER, "/PATM/Patch/Dirty", STAMUNIT_OCCURENCES, "Number of times patches were marked dirty.");
197 STAM_REG(pVM, &pVM->patm.s.StatUnusable, STAMTYPE_COUNTER, "/PATM/Patch/Unusable", STAMUNIT_OCCURENCES, "Number of unusable patches (conflicts).");
198 STAM_REG(pVM, &pVM->patm.s.StatInstalled, STAMTYPE_COUNTER, "/PATM/Patch/Installed", STAMUNIT_OCCURENCES, "Number of installed patches.");
199 STAM_REG(pVM, &pVM->patm.s.StatInt3Callable, STAMTYPE_COUNTER, "/PATM/Patch/Int3Callable", STAMUNIT_OCCURENCES, "Number of cli patches turned into int3 patches.");
200
201 STAM_REG(pVM, &pVM->patm.s.StatInt3BlockRun, STAMTYPE_COUNTER, "/PATM/Patch/Run/Int3", STAMUNIT_OCCURENCES, "Number of times an int3 block patch was executed.");
202 STAMR3RegisterF(pVM, &pVM->patm.s.pGCStateHC->uPatchCalls, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Patch/Run/Normal");
203
204 STAM_REG(pVM, &pVM->patm.s.StatInstalledFunctionPatches, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Function", STAMUNIT_OCCURENCES, "Number of installed function duplication patches.");
205 STAM_REG(pVM, &pVM->patm.s.StatInstalledTrampoline, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Trampoline", STAMUNIT_OCCURENCES, "Number of installed trampoline patches.");
206 STAM_REG(pVM, &pVM->patm.s.StatInstalledJump, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Jump", STAMUNIT_OCCURENCES, "Number of installed jump patches.");
207
208 STAM_REG(pVM, &pVM->patm.s.StatOverwritten, STAMTYPE_COUNTER, "/PATM/Patch/Overwritten", STAMUNIT_OCCURENCES, "Number of overwritten patches.");
209 STAM_REG(pVM, &pVM->patm.s.StatFixedConflicts,STAMTYPE_COUNTER, "/PATM/Patch/ConflictFixed", STAMUNIT_OCCURENCES, "Number of fixed conflicts.");
210 STAM_REG(pVM, &pVM->patm.s.StatFlushed, STAMTYPE_COUNTER, "/PATM/Patch/Flushed", STAMUNIT_OCCURENCES, "Number of flushes of pages with patch jumps.");
211 STAM_REG(pVM, &pVM->patm.s.StatMonitored, STAMTYPE_COUNTER, "/PATM/Patch/Monitored", STAMUNIT_OCCURENCES, "Number of patches in monitored patch pages.");
212 STAM_REG(pVM, &pVM->patm.s.StatPageBoundaryCrossed, STAMTYPE_COUNTER, "/PATM/Patch/BoundaryCross", STAMUNIT_OCCURENCES, "Number of refused patches due to patch jump crossing page boundary.");
213
214 STAM_REG(pVM, &pVM->patm.s.StatHandleTrap, STAMTYPE_PROFILE, "/PATM/HandleTrap", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3HandleTrap");
215 STAM_REG(pVM, &pVM->patm.s.StatPushTrap, STAMTYPE_COUNTER, "/PATM/HandleTrap/PushWP", STAMUNIT_OCCURENCES, "Number of traps due to monitored stack pages.");
216
217 STAM_REG(pVM, &pVM->patm.s.StatSwitchBack, STAMTYPE_COUNTER, "/PATM/SwitchBack", STAMUNIT_OCCURENCES, "Switch back to original guest code when IF=1 & executing PATM instructions");
218 STAM_REG(pVM, &pVM->patm.s.StatSwitchBackFail,STAMTYPE_COUNTER, "/PATM/SwitchBackFail", STAMUNIT_OCCURENCES, "Failed switch back to original guest code when IF=1 & executing PATM instructions");
219
220 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQFailed, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Failed", STAMUNIT_OCCURENCES, "Nr of failed PATMR3DuplicateFunctionRequest calls");
221 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQSuccess, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Success", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls");
222 STAM_REG(pVM, &pVM->patm.s.StatDuplicateUseExisting,STAMTYPE_COUNTER, "/PATM/Function/DupREQ/UseExist", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls when using an existing patch");
223
224 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupInsert, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Insert", STAMUNIT_OCCURENCES, "Nr of successful function address insertions");
225 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupReplace, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Replace", STAMUNIT_OCCURENCES, "Nr of successful function address replacements");
226 STAM_REG(pVM, &pVM->patm.s.StatU32FunctionMaxSlotsUsed, STAMTYPE_U32_RESET,"/PATM/Function/Lookup/MaxSlots", STAMUNIT_OCCURENCES, "Maximum nr of lookup slots used in all call patches");
227
228 STAM_REG(pVM, &pVM->patm.s.StatFunctionFound, STAMTYPE_COUNTER, "/PATM/Function/Found", STAMUNIT_OCCURENCES, "Nr of successful function patch lookups in GC");
229 STAM_REG(pVM, &pVM->patm.s.StatFunctionNotFound, STAMTYPE_COUNTER, "/PATM/Function/NotFound", STAMUNIT_OCCURENCES, "Nr of failed function patch lookups in GC");
230
231 STAM_REG(pVM, &pVM->patm.s.StatPatchWrite, STAMTYPE_PROFILE, "/PATM/Write/Handle", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3PatchWrite");
232 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteDetect, STAMTYPE_PROFILE, "/PATM/Write/Detect", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMIsWriteToPatchPage");
233 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpreted, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Success", STAMUNIT_OCCURENCES, "Nr of interpreted patch writes.");
234 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpretedFailed, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Failed", STAMUNIT_OCCURENCES, "Nr of failed interpreted patch writes.");
235
236 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshSuccess, STAMTYPE_COUNTER, "/PATM/Refresh/Success", STAMUNIT_OCCURENCES, "Successful patch refreshes");
237 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshFailed, STAMTYPE_COUNTER, "/PATM/Refresh/Failure", STAMUNIT_OCCURENCES, "Failed patch refreshes");
238
239 STAM_REG(pVM, &pVM->patm.s.StatPatchPageInserted, STAMTYPE_COUNTER, "/PATM/Page/Inserted", STAMUNIT_OCCURENCES, "Nr of inserted guest pages that were patched");
240 STAM_REG(pVM, &pVM->patm.s.StatPatchPageRemoved, STAMTYPE_COUNTER, "/PATM/Page/Removed", STAMUNIT_OCCURENCES, "Nr of removed guest pages that were patched");
241
242 STAM_REG(pVM, &pVM->patm.s.StatInstrDirty, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Detected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty.");
243 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyGood, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Corrected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and corrected later on.");
244 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyBad, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Failed", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and we were not able to correct them.");
245
246 STAM_REG(pVM, &pVM->patm.s.StatSysEnter, STAMTYPE_COUNTER, "/PATM/Emul/SysEnter", STAMUNIT_OCCURENCES, "Number of times sysenter was emulated.");
247 STAM_REG(pVM, &pVM->patm.s.StatSysExit, STAMTYPE_COUNTER, "/PATM/Emul/SysExit" , STAMUNIT_OCCURENCES, "Number of times sysexit was emulated.");
248 STAM_REG(pVM, &pVM->patm.s.StatEmulIret, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Success", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
249 STAM_REG(pVM, &pVM->patm.s.StatEmulIretFailed, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Failed", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
250
251 STAM_REG(pVM, &pVM->patm.s.StatGenRet, STAMTYPE_COUNTER, "/PATM/Gen/Ret" , STAMUNIT_OCCURENCES, "Number of generated ret instructions.");
252 STAM_REG(pVM, &pVM->patm.s.StatGenRetReused, STAMTYPE_COUNTER, "/PATM/Gen/RetReused" , STAMUNIT_OCCURENCES, "Number of reused ret instructions.");
253 STAM_REG(pVM, &pVM->patm.s.StatGenCall, STAMTYPE_COUNTER, "/PATM/Gen/Call", STAMUNIT_OCCURENCES, "Number of generated call instructions.");
254 STAM_REG(pVM, &pVM->patm.s.StatGenJump, STAMTYPE_COUNTER, "/PATM/Gen/Jmp" , STAMUNIT_OCCURENCES, "Number of generated indirect jump instructions.");
255 STAM_REG(pVM, &pVM->patm.s.StatGenPopf, STAMTYPE_COUNTER, "/PATM/Gen/Popf" , STAMUNIT_OCCURENCES, "Number of generated popf instructions.");
256
257 STAM_REG(pVM, &pVM->patm.s.StatCheckPendingIRQ, STAMTYPE_COUNTER, "/PATM/GC/CheckIRQ" , STAMUNIT_OCCURENCES, "Number of traps that ask to check for pending irqs.");
258#endif /* VBOX_WITH_STATISTICS */
259
260 Log(("PATMCallRecord.size %d\n", PATMCallRecord.size));
261 Log(("PATMCallIndirectRecord.size %d\n", PATMCallIndirectRecord.size));
262 Log(("PATMRetRecord.size %d\n", PATMRetRecord.size));
263 Log(("PATMJumpIndirectRecord.size %d\n", PATMJumpIndirectRecord.size));
264 Log(("PATMPopf32Record.size %d\n", PATMPopf32Record.size));
265 Log(("PATMIretRecord.size %d\n", PATMIretRecord.size));
266 Log(("PATMStiRecord.size %d\n", PATMStiRecord.size));
267 Log(("PATMCheckIFRecord.size %d\n", PATMCheckIFRecord.size));
268
269 return rc;
270}
271
272/**
273 * Finalizes HMA page attributes.
274 *
275 * @returns VBox status code.
276 * @param pVM The VM handle.
277 */
278VMMR3DECL(int) PATMR3InitFinalize(PVM pVM)
279{
280 /* The GC state, stack and statistics must be read/write for the guest (supervisor only of course). */
281 int rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStateGC, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
282 if (RT_FAILURE(rc))
283 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
284
285 rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStackGC, PATM_STACK_TOTAL_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
286 if (RT_FAILURE(rc))
287 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
288
289 rc = PGMMapSetPage(pVM, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
290 if (RT_FAILURE(rc))
291 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
292
293 return rc;
294}
295
296/**
297 * (Re)initializes PATM
298 *
299 * @param pVM The VM.
300 */
301static int patmReinit(PVM pVM)
302{
303 int rc;
304
305 /*
306 * Assert alignment and sizes.
307 */
308 AssertRelease(!(RT_OFFSETOF(VM, patm.s) & 31));
309 AssertRelease(sizeof(pVM->patm.s) <= sizeof(pVM->patm.padding));
310
311 /*
312 * Setup any fixed pointers and offsets.
313 */
314 pVM->patm.s.offVM = RT_OFFSETOF(VM, patm);
315
316#ifndef RT_ARCH_AMD64 /* would be nice if this was changed everywhere. was driving me crazy on AMD64. */
317#ifndef PATM_DISABLE_ALL
318 pVM->fPATMEnabled = true;
319#endif
320#endif
321
322 Assert(pVM->patm.s.pGCStateHC);
323 memset(pVM->patm.s.pGCStateHC, 0, PAGE_SIZE);
324 AssertReleaseMsg(pVM->patm.s.pGCStateGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStateGC));
325
326 Log(("Patch memory allocated at %p - %RRv\n", pVM->patm.s.pPatchMemHC, pVM->patm.s.pPatchMemGC));
327 pVM->patm.s.pGCStateHC->uVMFlags = X86_EFL_IF;
328
329 Assert(pVM->patm.s.pGCStackHC);
330 memset(pVM->patm.s.pGCStackHC, 0, PAGE_SIZE);
331 AssertReleaseMsg(pVM->patm.s.pGCStackGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStackGC));
332 pVM->patm.s.pGCStateHC->Psp = PATM_STACK_SIZE;
333 pVM->patm.s.pGCStateHC->fPIF = 1; /* PATM Interrupt Flag */
334
335 Assert(pVM->patm.s.pStatsHC);
336 memset(pVM->patm.s.pStatsHC, 0, PATM_STAT_MEMSIZE);
337 AssertReleaseMsg(pVM->patm.s.pStatsGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pStatsGC));
338
339 Assert(pVM->patm.s.pPatchMemHC);
340 Assert(pVM->patm.s.pPatchMemGC == MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC));
341 memset(pVM->patm.s.pPatchMemHC, 0, PATCH_MEMORY_SIZE);
342 AssertReleaseMsg(pVM->patm.s.pPatchMemGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pPatchMemHC));
343
344 /* Needed for future patching of sldt/sgdt/sidt/str etc. */
345 pVM->patm.s.pCPUMCtxGC = VM_RC_ADDR(pVM, CPUMQueryGuestCtxPtr(VMMGetCpu(pVM)));
346
347 Assert(pVM->patm.s.PatchLookupTreeHC);
348 Assert(pVM->patm.s.PatchLookupTreeGC == MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC));
349
350 /*
351 * (Re)Initialize PATM structure
352 */
353 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
354 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr);
355 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
356 pVM->patm.s.offPatchMem = 16; /* don't start with zero here */
357 pVM->patm.s.uCurrentPatchIdx = 1; /* Index zero is a dummy */
358 pVM->patm.s.pvFaultMonitor = 0;
359 pVM->patm.s.deltaReloc = 0;
360
361 /* Lowest and highest patched instruction */
362 pVM->patm.s.pPatchedInstrGCLowest = ~0;
363 pVM->patm.s.pPatchedInstrGCHighest = 0;
364
365 pVM->patm.s.PatchLookupTreeHC->PatchTree = 0;
366 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
367 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
368
369 pVM->patm.s.pfnSysEnterPatchGC = 0;
370 pVM->patm.s.pfnSysEnterGC = 0;
371
372 pVM->patm.s.fOutOfMemory = false;
373
374 pVM->patm.s.pfnHelperCallGC = 0;
375
376 /* Generate all global functions to be used by future patches. */
377 /* We generate a fake patch in order to use the existing code for relocation. */
378 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pVM->patm.s.pGlobalPatchRec);
379 if (RT_FAILURE(rc))
380 {
381 Log(("Out of memory!!!!\n"));
382 return VERR_NO_MEMORY;
383 }
384 pVM->patm.s.pGlobalPatchRec->patch.flags = PATMFL_GLOBAL_FUNCTIONS;
385 pVM->patm.s.pGlobalPatchRec->patch.uState = PATCH_ENABLED;
386 pVM->patm.s.pGlobalPatchRec->patch.pPatchBlockOffset = pVM->patm.s.offPatchMem;
387
388 rc = patmPatchGenGlobalFunctions(pVM, &pVM->patm.s.pGlobalPatchRec->patch);
389 AssertRC(rc);
390
391 /* Update free pointer in patch memory. */
392 pVM->patm.s.offPatchMem += pVM->patm.s.pGlobalPatchRec->patch.uCurPatchOffset;
393 /* Round to next 8 byte boundary. */
394 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
395 return rc;
396}
397
398
399/**
400 * Applies relocations to data and code managed by this
401 * component. This function will be called at init and
402 * whenever the VMM needs to relocate itself inside the GC.
403 *
404 * The PATM will update the addresses used by the switcher.
405 *
406 * @param pVM The VM.
407 */
408VMMR3DECL(void) PATMR3Relocate(PVM pVM)
409{
410 RTRCPTR GCPtrNew = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
411 RTRCINTPTR delta = GCPtrNew - pVM->patm.s.pGCStateGC;
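    /* Derive the relocation delta by comparing the freshly mapped RC address of the GC state
       with the one stored at the previous (re)location; the same delta applies to everything
       else PATM keeps in hypervisor memory. */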
412
413 Log(("PATMR3Relocate from %RRv to %RRv - delta %08X\n", pVM->patm.s.pGCStateGC, GCPtrNew, delta));
414 if (delta)
415 {
416 PCPUMCTX pCtx;
417
418 /* Update CPUMCTX guest context pointer. */
419 pVM->patm.s.pCPUMCtxGC += delta;
420
421 pVM->patm.s.deltaReloc = delta;
422
423 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, RelocatePatches, (void *)pVM);
424
425 pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));
426
427 /* If we are running patch code right now, then also adjust EIP. */
428 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
429 pCtx->eip += delta;
430
431 pVM->patm.s.pGCStateGC = GCPtrNew;
432 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
433
434 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
435
436 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
437
438 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
439
440 if (pVM->patm.s.pfnSysEnterPatchGC)
441 pVM->patm.s.pfnSysEnterPatchGC += delta;
442
443 /* Deal with the global patch functions. */
444 pVM->patm.s.pfnHelperCallGC += delta;
445 pVM->patm.s.pfnHelperRetGC += delta;
446 pVM->patm.s.pfnHelperIretGC += delta;
447 pVM->patm.s.pfnHelperJumpGC += delta;
448
449 RelocatePatches(&pVM->patm.s.pGlobalPatchRec->Core, (void *)pVM);
450 }
451}
452
453
454/**
455 * Terminates the PATM.
456 *
457 * Termination means cleaning up and freeing all resources;
458 * the VM itself is at this point powered off or suspended.
459 *
460 * @returns VBox status code.
461 * @param pVM The VM to operate on.
462 */
463VMMR3DECL(int) PATMR3Term(PVM pVM)
464{
465 /* Memory was all allocated from the two MM heaps and requires no freeing. */
466 return VINF_SUCCESS;
467}
468
469
470/**
471 * PATM reset callback.
472 *
473 * @returns VBox status code.
474 * @param pVM The VM which is reset.
475 */
476VMMR3DECL(int) PATMR3Reset(PVM pVM)
477{
478 Log(("PATMR3Reset\n"));
479
480 /* Free all patches. */
481 while (true)
482 {
483 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32RemoveBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, 0, true);
484 if (pPatchRec)
485 {
486 PATMRemovePatch(pVM, pPatchRec, true);
487 }
488 else
489 break;
490 }
491 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
492 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
493 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
494 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
495
496 int rc = patmReinit(pVM);
497 if (RT_SUCCESS(rc))
498 rc = PATMR3InitFinalize(pVM); /* paranoia */
499
500 return rc;
501}
502
503/**
504 * Read callback for disassembly function; supports reading bytes that cross a page boundary
505 *
506 * @returns VBox status code.
507 * @param pSrc GC source pointer
508 * @param pDest HC destination pointer
509 * @param size Number of bytes to read
510 * @param pvUserdata Callback specific user data (pCpu)
511 *
512 */
513int patmReadBytes(RTUINTPTR pSrc, uint8_t *pDest, unsigned size, void *pvUserdata)
514{
515 DISCPUSTATE *pCpu = (DISCPUSTATE *)pvUserdata;
516 PATMDISASM *pDisInfo = (PATMDISASM *)pCpu->apvUserData[0];
517 int orgsize = size;
518
519 Assert(size);
520 if (size == 0)
521 return VERR_INVALID_PARAMETER;
522
523 /*
524 * Trap/interrupt handlers typically call common code on entry, which might already have patches inserted.
525 * As we currently don't support calling patch code from patch code, we let the disassembler read the original opcode bytes instead.
526 */
527 /** @todo could change in the future! */
528 if (pDisInfo->fReadFlags & PATMREAD_ORGCODE)
529 {
530 for (int i=0;i<orgsize;i++)
531 {
532 int rc = PATMR3QueryOpcode(pDisInfo->pVM, (RTRCPTR)pSrc, pDest);
533 if (RT_SUCCESS(rc))
534 {
535 pSrc++;
536 pDest++;
537 size--;
538 }
539 else break;
540 }
541 if (size == 0)
542 return VINF_SUCCESS;
543#ifdef VBOX_STRICT
544 if ( !(pDisInfo->pPatchInfo->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER))
545 && !(pDisInfo->fReadFlags & PATMREAD_NOCHECK))
546 {
547 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pSrc, NULL) == false);
548 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pSrc+size-1, NULL) == false);
549 }
550#endif
551 }
552
553
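    /* If the read ends on a different page than the one the instruction started in (and we're
       not reading patch memory), go through PGM so non-present/crossing pages are handled safely;
       otherwise copy straight from the cached HC mapping of the instruction page. */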
554 if (PAGE_ADDRESS(pDisInfo->pInstrGC) != PAGE_ADDRESS(pSrc + size - 1) && !PATMIsPatchGCAddr(pDisInfo->pVM, pSrc))
555 {
556 return PGMPhysSimpleReadGCPtr(&pDisInfo->pVM->aCpus[0], pDest, pSrc, size);
557 }
558 else
559 {
560 uint8_t *pInstrHC = pDisInfo->pInstrHC;
561
562 Assert(pInstrHC);
563
564 /* pInstrHC is the base address; adjust according to the GC pointer. */
565 pInstrHC = pInstrHC + (pSrc - pDisInfo->pInstrGC);
566
567 memcpy(pDest, (void *)pInstrHC, size);
568 }
569
570 return VINF_SUCCESS;
571}
572
573/**
574 * Callback function for RTAvloU32DoWithAll
575 *
576 * Updates all fixups in the patches
577 *
578 * @returns VBox status code.
579 * @param pNode Current node
580 * @param pParam The VM to operate on.
581 */
582static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam)
583{
584 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
585 PVM pVM = (PVM)pParam;
586 RTRCINTPTR delta;
587#ifdef LOG_ENABLED
588 DISCPUSTATE cpu;
589 char szOutput[256];
590 uint32_t opsize;
591 bool disret;
592#endif
593 int rc;
594
595 /* Nothing to do if the patch is not active. */
596 if (pPatch->patch.uState == PATCH_REFUSED)
597 return 0;
598
599#ifdef LOG_ENABLED
600 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
601 {
602 PGMPAGEMAPLOCK PageLock;
603
604 /** @note pPrivInstrHC is probably not valid anymore */
605 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, pPatch->patch.pPrivInstrGC, (const void **)&pPatch->patch.pPrivInstrHC, &PageLock);
606 if (rc == VINF_SUCCESS)
607 {
608 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
609 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pPatch->patch.pPrivInstrGC, pPatch->patch.pPrivInstrHC, &opsize, szOutput, PATMREAD_RAWCODE);
610 Log(("Org patch jump: %s", szOutput));
611
612 PGMPhysReleasePageMappingLock(pVM, &PageLock);
613 }
614 }
615#endif
616
617 Log(("Nr of fixups %d\n", pPatch->patch.nrFixups));
618 delta = (RTRCINTPTR)pVM->patm.s.deltaReloc;
619
620 /*
621 * Apply fixups
622 */
623 PRELOCREC pRec = 0;
624 AVLPVKEY key = 0;
625
626 while (true)
627 {
628 /* Get the record that's closest from above */
629 pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true);
630 if (pRec == 0)
631 break;
632
633 key = (AVLPVKEY)(pRec->pRelocPos + 1); /* search for the next record during the next round. */
634
635 switch (pRec->uType)
636 {
637 case FIXUP_ABSOLUTE:
638 Log(("Absolute fixup at %RRv %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
639 if (!pRec->pSource || PATMIsPatchGCAddr(pVM, pRec->pSource))
640 {
641 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
642 }
643 else
644 {
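                /* Absolute fixup inside guest code: rebuild the bytes we expect to find there,
                   re-read the guest instruction and only write the relocated target back if it
                   was not overwritten; if the page is not present, monitor it and check later. */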
645 uint8_t curInstr[15];
646 uint8_t oldInstr[15];
647 Assert(pRec->pSource && pPatch->patch.cbPrivInstr <= 15);
648
649 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
650
651 memcpy(oldInstr, pPatch->patch.aPrivInstr, pPatch->patch.cbPrivInstr);
652 *(RTRCPTR *)&oldInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
653
654 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), curInstr, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPrivInstr);
655 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
656
657 pRec->pDest = (RTRCPTR)((RTRCUINTPTR)pRec->pDest + delta);
658
659 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
660 {
661 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
662
663 Log(("PATM: Patch page not present -> check later!\n"));
664 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
665 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
666 }
667 else
668 if (memcmp(curInstr, oldInstr, pPatch->patch.cbPrivInstr))
669 {
670 Log(("PATM: Patch was overwritten -> disabling patch!!\n"));
671 /*
672 * Disable patch; this is not a good solution
673 */
674 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
675 pPatch->patch.uState = PATCH_DISABLED;
676 }
677 else
678 if (RT_SUCCESS(rc))
679 {
680 *(RTRCPTR *)&curInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
681 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pRec->pSource, curInstr, pPatch->patch.cbPrivInstr);
682 AssertRC(rc);
683 }
684 }
685 break;
686
687 case FIXUP_REL_JMPTOPATCH:
688 {
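                /* The patch block moved, so the jump written into the guest code to enter it must
                   be re-targeted; this only applies while the patch is enabled and actually has a
                   patch jump in place (PATMFL_PATCHED_GUEST_CODE). */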
689 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
690
691 if ( pPatch->patch.uState == PATCH_ENABLED
692 && (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE))
693 {
694 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
695 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
696 RTRCPTR pJumpOffGC;
697 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
698 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
699
700 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
701
702 Assert(pRec->pSource - pPatch->patch.cbPatchJump == pPatch->patch.pPrivInstrGC);
703#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
704 if (pPatch->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
705 {
706 Assert(pPatch->patch.flags & PATMFL_JUMP_CONFLICT);
707
708 pJumpOffGC = pPatch->patch.pPrivInstrGC + 2; //two byte opcode
709 oldJump[0] = pPatch->patch.aPrivInstr[0];
710 oldJump[1] = pPatch->patch.aPrivInstr[1];
711 *(RTRCUINTPTR *)&oldJump[2] = displOld;
712 }
713 else
714#endif
715 if (pPatch->patch.cbPatchJump == SIZEOF_NEARJUMP32)
716 {
717 pJumpOffGC = pPatch->patch.pPrivInstrGC + 1; //one byte opcode
718 oldJump[0] = 0xE9;
719 *(RTRCUINTPTR *)&oldJump[1] = displOld;
720 }
721 else
722 {
723 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->patch.cbPatchJump));
724 continue; //this should never happen!!
725 }
726 Assert(pPatch->patch.cbPatchJump <= sizeof(temp));
727
728 /*
729 * Read old patch jump and compare it to the one we previously installed
730 */
731 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPatchJump);
732 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
733
734 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
735 {
736 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
737
738 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
739 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
740 }
741 else
742 if (memcmp(temp, oldJump, pPatch->patch.cbPatchJump))
743 {
744 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
745 /*
746 * Disable patch; this is not a good solution
747 */
748 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
749 pPatch->patch.uState = PATCH_DISABLED;
750 }
751 else
752 if (RT_SUCCESS(rc))
753 {
754 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
755 AssertRC(rc);
756 }
757 else
758 {
759 AssertMsgFailed(("Unexpected error %d from MMR3PhysReadGCVirt\n", rc));
760 }
761 }
762 else
763 {
764 Log(("Skip the guest jump to patch code for this disabled patch %08X - %08X\n", pPatch->patch.pPrivInstrHC, pRec->pRelocPos));
765 }
766
767 pRec->pDest = pTarget;
768 break;
769 }
770
771 case FIXUP_REL_JMPTOGUEST:
772 {
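                /* The patch code containing this jump moved while the guest target stayed put;
                   recompute the relative displacement from the relocated source address. */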
773 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
774 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
775
776 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
777 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
778 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
779 pRec->pSource = pSource;
780 break;
781 }
782
783 default:
784 AssertMsg(0, ("Invalid fixup type!!\n"));
785 return VERR_INVALID_PARAMETER;
786 }
787 }
788
789#ifdef LOG_ENABLED
790 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
791 {
792 PGMPAGEMAPLOCK PageLock;
793
794 /** @note pPrivInstrHC is probably not valid anymore */
795 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, pPatch->patch.pPrivInstrGC, (const void **)&pPatch->patch.pPrivInstrHC, &PageLock);
796 if (rc == VINF_SUCCESS)
797 {
798 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
799 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pPatch->patch.pPrivInstrGC, pPatch->patch.pPrivInstrHC, &opsize, szOutput, PATMREAD_RAWCODE);
800 Log(("Rel patch jump: %s", szOutput));
801
802 PGMPhysReleasePageMappingLock(pVM, &PageLock);
803 }
804 }
805#endif
806 return 0;
807}
808
809/**
810 * \#PF Handler callback for virtual access handler ranges.
811 *
812 * Important to realize that a physical page in a range can have aliases, and
813 * for ALL and WRITE handlers these will also trigger.
814 *
815 * @returns VINF_SUCCESS if the handler has carried out the operation.
816 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
817 * @param pVM VM Handle.
818 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
819 * @param pvPtr The HC mapping of that address.
820 * @param pvBuf What the guest is reading/writing.
821 * @param cbBuf How much it's reading/writing.
822 * @param enmAccessType The access type.
823 * @param pvUser User argument.
824 */
825DECLCALLBACK(int) patmVirtPageHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
826{
827 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
828 /** @todo could be the wrong virtual address (alias) */
829 pVM->patm.s.pvFaultMonitor = GCPtr;
830 PATMR3HandleMonitoredPage(pVM);
831 return VINF_PGM_HANDLER_DO_DEFAULT;
832}
833
834
835#ifdef VBOX_WITH_DEBUGGER
836/**
837 * Callback function for RTAvloU32DoWithAll
838 *
839 * Enables the patch that's being enumerated
840 *
841 * @returns 0 (continue enumeration).
842 * @param pNode Current node
843 * @param pVM The VM to operate on.
844 */
845static DECLCALLBACK(int) EnableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
846{
847 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
848
849 PATMR3EnablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
850 return 0;
851}
852#endif /* VBOX_WITH_DEBUGGER */
853
854
855#ifdef VBOX_WITH_DEBUGGER
856/**
857 * Callback function for RTAvloU32DoWithAll
858 *
859 * Disables the patch that's being enumerated
860 *
861 * @returns 0 (continue enumeration).
862 * @param pNode Current node
863 * @param pVM The VM to operate on.
864 */
865static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
866{
867 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
868
869 PATMR3DisablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
870 return 0;
871}
872#endif
873
874/**
875 * Returns the host context pointer and size of the patch memory block
876 *
877 * @returns Host context pointer to the patch memory block.
878 * @param pVM The VM to operate on.
879 * @param pcb Size of the patch memory block
880 */
881VMMR3DECL(void *) PATMR3QueryPatchMemHC(PVM pVM, uint32_t *pcb)
882{
883 if (pcb)
884 {
885 *pcb = pVM->patm.s.cbPatchMem;
886 }
887 return pVM->patm.s.pPatchMemHC;
888}
889
890
891/**
892 * Returns the guest context pointer and size of the patch memory block
893 *
894 * @returns Guest context pointer to the patch memory block.
895 * @param pVM The VM to operate on.
896 * @param pcb Size of the patch memory block
897 */
898VMMR3DECL(RTRCPTR) PATMR3QueryPatchMemGC(PVM pVM, uint32_t *pcb)
899{
900 if (pcb)
901 {
902 *pcb = pVM->patm.s.cbPatchMem;
903 }
904 return pVM->patm.s.pPatchMemGC;
905}
906
907
908/**
909 * Returns the host context pointer of the GC context structure
910 *
911 * @returns Host context pointer to the GC state structure.
912 * @param pVM The VM to operate on.
913 */
914VMMR3DECL(PPATMGCSTATE) PATMR3QueryGCStateHC(PVM pVM)
915{
916 return pVM->patm.s.pGCStateHC;
917}
918
919
920/**
921 * Checks whether the HC address is part of our patch region
922 *
923 * @returns true if the address lies within the patch memory block, false otherwise.
924 * @param pVM The VM to operate on.
925 * @param pAddrHC Host context address
926 */
927VMMR3DECL(bool) PATMR3IsPatchHCAddr(PVM pVM, R3PTRTYPE(uint8_t *) pAddrHC)
928{
929 return (pAddrHC >= pVM->patm.s.pPatchMemHC && pAddrHC < pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem) ? true : false;
930}
931
932
933/**
934 * Allows or disallows patching of privileged instructions executed by the guest OS
935 *
936 * @returns VBox status code.
937 * @param pVM The VM to operate on.
938 * @param fAllowPatching Allow/disallow patching
939 */
940VMMR3DECL(int) PATMR3AllowPatching(PVM pVM, uint32_t fAllowPatching)
941{
942 pVM->fPATMEnabled = (fAllowPatching) ? true : false;
943 return VINF_SUCCESS;
944}
945
946/**
947 * Convert a GC patch block pointer to a HC patch pointer
948 *
949 * @returns HC pointer or NULL if it's not a GC patch pointer
950 * @param pVM The VM to operate on.
951 * @param pAddrGC GC pointer
952 */
953VMMR3DECL(R3PTRTYPE(void *)) PATMR3GCPtrToHCPtr(PVM pVM, RTRCPTR pAddrGC)
954{
955 if (pVM->patm.s.pPatchMemGC <= pAddrGC && pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem > pAddrGC)
956 {
957 return pVM->patm.s.pPatchMemHC + (pAddrGC - pVM->patm.s.pPatchMemGC);
958 }
959 return NULL;
960}
961
962/**
963 * Query PATM state (enabled/disabled)
964 *
965 * @returns 0 - disabled, 1 - enabled
966 * @param pVM The VM to operate on.
967 */
968VMMR3DECL(int) PATMR3IsEnabled(PVM pVM)
969{
970 return pVM->fPATMEnabled;
971}
972
973
974/**
975 * Convert guest context address to host context pointer
976 *
977 * @returns Host context pointer or NULL in case of an error.
978 * @param pVM The VM to operate on.
979 * @param pPatch Patch block structure pointer
980 * @param pGCPtr Guest context pointer
981 *
984 */
985R3PTRTYPE(uint8_t *) PATMGCVirtToHCVirt(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pGCPtr)
986{
987 int rc;
988 R3PTRTYPE(uint8_t *) pHCPtr;
989 uint32_t offset;
990
991 if (PATMIsPatchGCAddr(pVM, pGCPtr))
992 {
993 return PATCHCODE_PTR_HC(pPatch) + (pGCPtr - PATCHCODE_PTR_GC(pPatch));
994 }
995
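    /* Fast path: if the address falls into the same guest page as the previous lookup,
       reuse the cached HC mapping of that page; otherwise translate via PGM and refresh the cache. */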
996 offset = pGCPtr & PAGE_OFFSET_MASK;
997 if (pPatch->cacheRec.pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
998 {
999 return pPatch->cacheRec.pPatchLocStartHC + offset;
1000 }
1001
1002 rc = PGMPhysGCPtr2R3Ptr(VMMGetCpu0(pVM), pGCPtr, (void **)&pHCPtr);
1003 if (rc != VINF_SUCCESS)
1004 {
1005 AssertMsg(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("MMR3PhysGCVirt2HCVirtEx failed for %08X\n", pGCPtr));
1006 return NULL;
1007 }
1008////invalid? Assert(sizeof(R3PTRTYPE(uint8_t*)) == sizeof(uint32_t));
1009
1010 pPatch->cacheRec.pPatchLocStartHC = (R3PTRTYPE(uint8_t*))((RTHCUINTPTR)pHCPtr & PAGE_BASE_HC_MASK);
1011 pPatch->cacheRec.pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
1012 return pHCPtr;
1013}
1014
1015
1016/* Calculates and fills in all branch targets
1017 *
1018 * @returns VBox status code.
1019 * @param pVM The VM to operate on.
1020 * @param pPatch Current patch block pointer
1021 *
1022 */
1023static int patmr3SetBranchTargets(PVM pVM, PPATCHINFO pPatch)
1024{
1025 int32_t displ;
1026
1027 PJUMPREC pRec = 0;
1028 int nrJumpRecs = 0;
1029
1030 /*
1031 * Set all branch targets inside the patch block.
1032 * We remove all jump records as they are no longer needed afterwards.
1033 */
1034 while (true)
1035 {
1036 RCPTRTYPE(uint8_t *) pInstrGC;
1037 RCPTRTYPE(uint8_t *) pBranchTargetGC = 0;
1038
1039 pRec = (PJUMPREC)RTAvlPVRemoveBestFit(&pPatch->JumpTree, 0, true);
1040 if (pRec == 0)
1041 break;
1042
1043 nrJumpRecs++;
1044
1045 /* HC in patch block to GC in patch block. */
1046 pInstrGC = patmPatchHCPtr2PatchGCPtr(pVM, pRec->pJumpHC);
1047
1048 if (pRec->opcode == OP_CALL)
1049 {
1050 /* Special case: call function replacement patch from this patch block.
1051 */
1052 PPATMPATCHREC pFunctionRec = PATMQueryFunctionPatch(pVM, pRec->pTargetGC);
1053 if (!pFunctionRec)
1054 {
1055 int rc;
1056
1057 if (PATMR3HasBeenPatched(pVM, pRec->pTargetGC) == false)
1058 rc = PATMR3InstallPatch(pVM, pRec->pTargetGC, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
1059 else
1060 rc = VERR_PATCHING_REFUSED; /* exists as a normal patch; can't use it */
1061
1062 if (RT_FAILURE(rc))
1063 {
1064 uint8_t *pPatchHC;
1065 RTRCPTR pPatchGC;
1066 RTRCPTR pOrgInstrGC;
1067
1068 pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pInstrGC, 0);
1069 Assert(pOrgInstrGC);
1070
1071 /* Failure for some reason -> mark exit point with int 3. */
1072 Log(("Failed to install function replacement patch (at %x) for reason %Rrc\n", pOrgInstrGC, rc));
1073
1074 pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pOrgInstrGC);
1075 Assert(pPatchGC);
1076
1077 pPatchHC = pVM->patm.s.pPatchMemHC + (pPatchGC - pVM->patm.s.pPatchMemGC);
1078
1079 /* Set a breakpoint at the very beginning of the recompiled instruction */
1080 *pPatchHC = 0xCC;
1081
1082 continue;
1083 }
1084 }
1085 else
1086 {
1087 Log(("Patch block %RRv called as function\n", pFunctionRec->patch.pPrivInstrGC));
1088 pFunctionRec->patch.flags |= PATMFL_CODE_REFERENCED;
1089 }
1090
1091 pBranchTargetGC = PATMR3QueryPatchGCPtr(pVM, pRec->pTargetGC);
1092 }
1093 else
1094 {
1095 pBranchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pRec->pTargetGC);
1096 }
1097
1098 if (pBranchTargetGC == 0)
1099 {
1100 AssertMsgFailed(("patmr3SetBranchTargets: patmGuestGCPtrToPatchGCPtr failed for %08X\n", pRec->pTargetGC));
1101 return VERR_PATCHING_REFUSED;
1102 }
1103 /* Our jumps *always* have a dword displacement (to make things easier). */
1104 Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
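        /* The displacement is relative to the first byte after the 4-byte displacement field. */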
1105 displ = pBranchTargetGC - (pInstrGC + pRec->offDispl + sizeof(RTRCPTR));
1106 *(RTRCPTR *)(pRec->pJumpHC + pRec->offDispl) = displ;
1107 Log(("Set branch target %d to %08X : %08x - (%08x + %d + %d)\n", nrJumpRecs, displ, pBranchTargetGC, pInstrGC, pRec->offDispl, sizeof(RTRCPTR)));
1108 }
1109 Assert(nrJumpRecs == pPatch->nrJumpRecs);
1110 Assert(pPatch->JumpTree == 0);
1111 return VINF_SUCCESS;
1112}
1113
1114/* Add an illegal instruction record
1115 *
1116 * @param pVM The VM to operate on.
1117 * @param pPatch Patch structure ptr
1118 * @param pInstrGC Guest context pointer to privileged instruction
1119 *
1120 */
1121static void patmAddIllegalInstrRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1122{
1123 PAVLPVNODECORE pRec;
1124
1125 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1126 Assert(pRec);
1127 pRec->Key = (AVLPVKEY)pInstrGC;
1128
1129 bool ret = RTAvlPVInsert(&pPatch->pTempInfo->IllegalInstrTree, pRec);
1130 Assert(ret); NOREF(ret);
1131 pPatch->pTempInfo->nrIllegalInstr++;
1132}
1133
1134static bool patmIsIllegalInstr(PPATCHINFO pPatch, RTRCPTR pInstrGC)
1135{
1136 PAVLPVNODECORE pRec;
1137
1138 pRec = RTAvlPVGet(&pPatch->pTempInfo->IllegalInstrTree, (AVLPVKEY)pInstrGC);
1139 if (pRec)
1140 return true;
1141 return false;
1142}
1143
1144/**
1145 * Add a patch to guest lookup record
1146 *
1147 * @param pVM The VM to operate on.
1148 * @param pPatch Patch structure ptr
1149 * @param pPatchInstrHC Guest context pointer to patch block
1150 * @param pInstrGC Guest context pointer to privileged instruction
1151 * @param enmType Lookup type
1152 * @param fDirty Dirty flag
1153 *
1154 */
1155 /** @note Be extremely careful with this function. Make absolutely sure the guest address is correct! (to avoid executing instructions twice!) */
1156void patmr3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC, PATM_LOOKUP_TYPE enmType, bool fDirty)
1157{
1158 bool ret;
1159 PRECPATCHTOGUEST pPatchToGuestRec;
1160 PRECGUESTTOPATCH pGuestToPatchRec;
1161 uint32_t PatchOffset = pPatchInstrHC - pVM->patm.s.pPatchMemHC; /* Offset in memory reserved for PATM. */
1162
1163 if (enmType == PATM_LOOKUP_PATCH2GUEST)
1164 {
1165 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1166 if (pPatchToGuestRec && pPatchToGuestRec->Core.Key == PatchOffset)
1167 return; /* already there */
1168
1169 Assert(!pPatchToGuestRec);
1170 }
1171#ifdef VBOX_STRICT
1172 else
1173 {
1174 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1175 Assert(!pPatchToGuestRec);
1176 }
1177#endif
1178
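    /* Both lookup records are allocated as one block; the guest-to-patch record lives
       directly behind the patch-to-guest record (see the pPatchToGuestRec + 1 cast below). */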
1179 pPatchToGuestRec = (PRECPATCHTOGUEST)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(RECPATCHTOGUEST) + sizeof(RECGUESTTOPATCH));
1180 Assert(pPatchToGuestRec);
1181 pPatchToGuestRec->Core.Key = PatchOffset;
1182 pPatchToGuestRec->pOrgInstrGC = pInstrGC;
1183 pPatchToGuestRec->enmType = enmType;
1184 pPatchToGuestRec->fDirty = fDirty;
1185
1186 ret = RTAvlU32Insert(&pPatch->Patch2GuestAddrTree, &pPatchToGuestRec->Core);
1187 Assert(ret);
1188
1189 /* GC to patch address */
1190 if (enmType == PATM_LOOKUP_BOTHDIR)
1191 {
1192 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
1193 if (!pGuestToPatchRec)
1194 {
1195 pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1196 pGuestToPatchRec->Core.Key = pInstrGC;
1197 pGuestToPatchRec->PatchOffset = PatchOffset;
1198
1199 ret = RTAvlU32Insert(&pPatch->Guest2PatchAddrTree, &pGuestToPatchRec->Core);
1200 Assert(ret);
1201 }
1202 }
1203
1204 pPatch->nrPatch2GuestRecs++;
1205}
1206
1207
1208/**
1209 * Removes a patch to guest lookup record
1210 *
1211 * @param pVM The VM to operate on.
1212 * @param pPatch Patch structure ptr
1213 * @param pPatchInstrGC Guest context pointer to patch block
1214 */
1215void patmr3RemoveP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pPatchInstrGC)
1216{
1217 PAVLU32NODECORE pNode;
1218 PAVLU32NODECORE pNode2;
1219 PRECPATCHTOGUEST pPatchToGuestRec;
1220 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
1221
1222 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1223 Assert(pPatchToGuestRec);
1224 if (pPatchToGuestRec)
1225 {
1226 if (pPatchToGuestRec->enmType == PATM_LOOKUP_BOTHDIR)
1227 {
1228 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1229
1230 Assert(pGuestToPatchRec->Core.Key);
1231 pNode2 = RTAvlU32Remove(&pPatch->Guest2PatchAddrTree, pGuestToPatchRec->Core.Key);
1232 Assert(pNode2);
1233 }
1234 pNode = RTAvlU32Remove(&pPatch->Patch2GuestAddrTree, pPatchToGuestRec->Core.Key);
1235 Assert(pNode);
1236
1237 MMR3HeapFree(pPatchToGuestRec);
1238 pPatch->nrPatch2GuestRecs--;
1239 }
1240}
1241
1242
1243/**
1244 * RTAvlPVDestroy callback.
1245 */
1246static DECLCALLBACK(int) patmEmptyTreePVCallback(PAVLPVNODECORE pNode, void *)
1247{
1248 MMR3HeapFree(pNode);
1249 return 0;
1250}
1251
1252/**
1253 * Empty the specified tree (PV tree, MMR3 heap)
1254 *
1255 * @param pVM The VM to operate on.
1256 * @param ppTree Tree to empty
1257 */
1258void patmEmptyTree(PVM pVM, PAVLPVNODECORE *ppTree)
1259{
1260 RTAvlPVDestroy(ppTree, patmEmptyTreePVCallback, NULL);
1261}
1262
1263
1264/**
1265 * RTAvlU32Destroy callback.
1266 */
1267static DECLCALLBACK(int) patmEmptyTreeU32Callback(PAVLU32NODECORE pNode, void *)
1268{
1269 MMR3HeapFree(pNode);
1270 return 0;
1271}
1272
1273/**
1274 * Empty the specified tree (U32 tree, MMR3 heap)
1275 *
1276 * @param pVM The VM to operate on.
1277 * @param ppTree Tree to empty
1278 */
1279void patmEmptyTreeU32(PVM pVM, PPAVLU32NODECORE ppTree)
1280{
1281 RTAvlU32Destroy(ppTree, patmEmptyTreeU32Callback, NULL);
1282}
1283
1284
1285/**
1286 * Analyses the instructions following the cli for compliance with our heuristics for cli & pushf
1287 *
1288 * @returns VBox status code.
1289 * @param pVM The VM to operate on.
1290 * @param pCpu CPU disassembly state
1291 * @param pInstrGC Guest context pointer to privileged instruction
1292 * @param pCurInstrGC Guest context pointer to the current instruction
1293 * @param pUserData User pointer (callback specific)
1294 *
1295 */
1296static int patmAnalyseBlockCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, void *pUserData)
1297{
1298 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
1299 bool fIllegalInstr = false;
1300
1301 //Preliminary heuristics:
1302 //- no call instructions without a fixed displacement between cli and sti/popf
1303 //- no jumps in the instructions following cli (4+ bytes; enough for the replacement jump (5 bytes))
1304 //- no nested pushf/cli
1305 //- sti/popf should be the (eventual) target of all branches
1306 //- no near or far returns; no int xx, no into
1307 //
1308 // Note: Later on we can impose less strict guidelines if the need arises
1309
1310 /* Bail out if the patch gets too big. */
1311 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1312 {
1313 Log(("Code block too big (%x) for patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1314 fIllegalInstr = true;
1315 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1316 }
1317 else
1318 {
1319 /* No unconditional jumps or calls without fixed displacements. */
1320 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1321 && (pCpu->pCurInstr->opcode == OP_JMP || pCpu->pCurInstr->opcode == OP_CALL)
1322 )
1323 {
1324 Assert(pCpu->param1.size <= 4 || pCpu->param1.size == 6);
1325 if ( pCpu->param1.size == 6 /* far call/jmp */
1326 || (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1327 || (OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1328 )
1329 {
1330 fIllegalInstr = true;
1331 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1332 }
1333 }
1334
1335 /* An unconditional (short) jump right after a cli is a potential problem; we will overwrite whichever function comes afterwards */
1336 if (pPatch->opcode == OP_CLI && pCpu->pCurInstr->opcode == OP_JMP)
1337 {
1338 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC + pCpu->opsize < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1339 {
1340 Log(("Dangerous unconditional jump ends in our generated patch jump!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1341 /* We turn this one into a int 3 callable patch. */
1342 pPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
1343 }
1344 }
1345 else
1346 /* no nested pushfs just yet; nested cli is allowed for cli patches though. */
1347 if (pPatch->opcode == OP_PUSHF)
1348 {
1349 if (pCurInstrGC != pInstrGC && pCpu->pCurInstr->opcode == OP_PUSHF)
1350 {
1351 fIllegalInstr = true;
1352 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1353 }
1354 }
1355
1356 // no far returns
1357 if (pCpu->pCurInstr->opcode == OP_RETF)
1358 {
1359 pPatch->pTempInfo->nrRetInstr++;
1360 fIllegalInstr = true;
1361 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1362 }
1363 else
1364 // no int xx or into either
1365 if (pCpu->pCurInstr->opcode == OP_INT3 || pCpu->pCurInstr->opcode == OP_INT || pCpu->pCurInstr->opcode == OP_INTO)
1366 {
1367 fIllegalInstr = true;
1368 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1369 }
1370 }
1371
1372 pPatch->cbPatchBlockSize += pCpu->opsize;
1373
1374 /* Illegal instruction -> end of analysis phase for this code block */
1375 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1376 return VINF_SUCCESS;
1377
1378 /* Check for exit points. */
1379 switch (pCpu->pCurInstr->opcode)
1380 {
1381 case OP_SYSEXIT:
1382 return VINF_SUCCESS; /* duplicate it; will fault or emulated in GC. */
1383
1384 case OP_SYSENTER:
1385 case OP_ILLUD2:
1386 //This appears to be some kind of kernel panic in Linux 2.4; no point to analyse more
1387 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1388 return VINF_SUCCESS;
1389
1390 case OP_STI:
1391 case OP_POPF:
1392 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION)));
1393 /* If our exit point lies within the generated patch jump, then we have to refuse!! */
1394 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1395 {
1396 Log(("Exit point within patch jump itself!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1397 return VERR_PATCHING_REFUSED;
1398 }
1399 if (pPatch->opcode == OP_PUSHF)
1400 {
1401 if (pCpu->pCurInstr->opcode == OP_POPF)
1402 {
1403 if (pPatch->cbPatchBlockSize >= SIZEOF_NEARJUMP32)
1404 return VINF_SUCCESS;
1405
1406 /* Or else we need to duplicate more instructions, because we can't jump back yet! */
1407 Log(("WARNING: End of block reached, but we need to duplicate some extra instruction to avoid a conflict with the patch jump\n"));
1408 pPatch->flags |= PATMFL_CHECK_SIZE;
1409 }
1410 break; //sti doesn't mark the end of a pushf block; only popf does
1411 }
1412 //else no break
1413 case OP_RETN: /* exit point for function replacement */
1414 return VINF_SUCCESS;
1415
1416 case OP_IRET:
1417 return VINF_SUCCESS; /* exitpoint */
1418
1419 case OP_CPUID:
1420 case OP_CALL:
1421 case OP_JMP:
1422 break;
1423
1424 default:
1425 if (pCpu->pCurInstr->optype & (OPTYPE_PRIVILEGED_NOTRAP))
1426 {
1427 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1428 return VINF_SUCCESS; /* exit point */
1429 }
1430 break;
1431 }
1432
1433 // If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
1434 if ((pPatch->flags & PATMFL_CHECK_SIZE) && pPatch->cbPatchBlockSize > SIZEOF_NEARJUMP32 && !(pCpu->pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW))
1435 {
1436 // The end marker for this kind of patch is any instruction at a location outside our patch jump
1437 Log(("End of block at %RRv size %d\n", pCurInstrGC, pCpu->opsize));
1438 return VINF_SUCCESS;
1439 }
1440
1441 return VWRN_CONTINUE_ANALYSIS;
1442}
1443
1444/**
1445 * Analyses the instructions inside a function for compliance
1446 *
1447 * @returns VBox status code.
1448 * @param pVM The VM to operate on.
1449 * @param pCpu CPU disassembly state
1450 * @param pInstrGC Guest context pointer to privileged instruction
1451 * @param pCurInstrGC Guest context pointer to the current instruction
1452 * @param pUserData User pointer (callback specific)
1453 *
1454 */
1455static int patmAnalyseFunctionCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, void *pUserData)
1456{
1457 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
1458 bool fIllegalInstr = false;
1459
1460 //Preliminary heuristics:
1461 //- no call instructions
1462 //- ret ends a block
1463
1464 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
1465
1466 // bail out if the patch gets too big
1467 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1468 {
1469 Log(("Code block too big (%x) for function patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1470 fIllegalInstr = true;
1471 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1472 }
1473 else
1474 {
1475        // no unconditional jumps or calls without fixed displacements
1476 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1477 && (pCpu->pCurInstr->opcode == OP_JMP || pCpu->pCurInstr->opcode == OP_CALL)
1478 )
1479 {
1480 Assert(pCpu->param1.size <= 4 || pCpu->param1.size == 6);
1481 if ( pCpu->param1.size == 6 /* far call/jmp */
1482 || (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1483 || (OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1484 )
1485 {
1486 fIllegalInstr = true;
1487 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1488 }
1489 }
1490 else /* no far returns */
1491 if (pCpu->pCurInstr->opcode == OP_RETF)
1492 {
1493 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1494 fIllegalInstr = true;
1495 }
1496 else /* no int xx or into either */
1497 if (pCpu->pCurInstr->opcode == OP_INT3 || pCpu->pCurInstr->opcode == OP_INT || pCpu->pCurInstr->opcode == OP_INTO)
1498 {
1499 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1500 fIllegalInstr = true;
1501 }
1502
1503 #if 0
1504 ///@todo we can handle certain in/out and privileged instructions in the guest context
1505 if (pCpu->pCurInstr->optype & OPTYPE_PRIVILEGED && pCpu->pCurInstr->opcode != OP_STI)
1506 {
1507 Log(("Illegal instructions for function patch!!\n"));
1508 return VERR_PATCHING_REFUSED;
1509 }
1510 #endif
1511 }
1512
1513 pPatch->cbPatchBlockSize += pCpu->opsize;
1514
1515 /* Illegal instruction -> end of analysis phase for this code block */
1516 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1517 {
1518 return VINF_SUCCESS;
1519 }
1520
1521 // Check for exit points
1522 switch (pCpu->pCurInstr->opcode)
1523 {
1524 case OP_ILLUD2:
1525            // This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further
1526 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1527 return VINF_SUCCESS;
1528
1529 case OP_IRET:
1530 case OP_SYSEXIT: /* will fault or emulated in GC */
1531 case OP_RETN:
1532 return VINF_SUCCESS;
1533
1534 case OP_POPF:
1535 case OP_STI:
1536 return VWRN_CONTINUE_ANALYSIS;
1537 default:
1538 if (pCpu->pCurInstr->optype & (OPTYPE_PRIVILEGED_NOTRAP))
1539 {
1540 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1541 return VINF_SUCCESS; /* exit point */
1542 }
1543 return VWRN_CONTINUE_ANALYSIS;
1544 }
1545
1546 return VWRN_CONTINUE_ANALYSIS;
1547}
1548
1549/**
1550 * Recompiles the instructions in a code block
1551 *
1552 * @returns VBox status code.
1553 * @param pVM The VM to operate on.
1554 * @param pCpu CPU disassembly state
1555 * @param pInstrGC Guest context pointer to privileged instruction
1556 * @param pCurInstrGC Guest context pointer to the current instruction
1557 * @param pUserData User pointer (callback specific)
1558 *
1559 */
1560static int patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, void *pUserData)
1561{
1562 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
1563 int rc = VINF_SUCCESS;
1564    bool fInhibitIRQInstr = false; /* did the instruction cause PATMFL_INHIBIT_IRQS to be set? */
1565
1566 LogFlow(("patmRecompileCallback %RRv %RRv\n", pInstrGC, pCurInstrGC));
1567
1568 if ( patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pCurInstrGC) != 0
1569 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT)) /* do not do this when the next instruction *must* be executed! */
1570 {
1571 /*
1572 * Been there, done that; so insert a jump (we don't want to duplicate code)
1573 * no need to record this instruction as it's glue code that never crashes (it had better not!)
1574 */
1575 Log(("patmRecompileCallback: jump to code we've recompiled before %RRv!\n", pCurInstrGC));
1576 return patmPatchGenRelJump(pVM, pPatch, pCurInstrGC, OP_JMP, !!(pCpu->prefix & PREFIX_OPSIZE));
1577 }
1578
1579 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1580 {
1581 rc = patmAnalyseFunctionCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pUserData);
1582 }
1583 else
1584 rc = patmAnalyseBlockCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pUserData);
1585
1586 if (RT_FAILURE(rc))
1587 return rc;
1588
1589 /** @note Never do a direct return unless a failure is encountered! */
1590
1591 /* Clear recompilation of next instruction flag; we are doing that right here. */
1592 if (pPatch->flags & PATMFL_RECOMPILE_NEXT)
1593 pPatch->flags &= ~PATMFL_RECOMPILE_NEXT;
1594
1595 /* Add lookup record for patch to guest address translation */
1596 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
1597
1598 /* Update lowest and highest instruction address for this patch */
1599 if (pCurInstrGC < pPatch->pInstrGCLowest)
1600 pPatch->pInstrGCLowest = pCurInstrGC;
1601 else
1602 if (pCurInstrGC > pPatch->pInstrGCHighest)
1603 pPatch->pInstrGCHighest = pCurInstrGC + pCpu->opsize;
1604
1605 /* Illegal instruction -> end of recompile phase for this code block. */
1606 if (patmIsIllegalInstr(pPatch, pCurInstrGC))
1607 {
1608 Log(("Illegal instruction at %RRv -> mark with int 3\n", pCurInstrGC));
1609 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1610 goto end;
1611 }
1612
1613 /* For our first attempt, we'll handle only simple relative jumps (immediate offset coded in instruction).
1614 * Indirect calls are handled below.
1615 */
1616 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1617 && (pCpu->pCurInstr->opcode != OP_CALL || (pPatch->flags & PATMFL_SUPPORT_CALLS))
1618 && (OP_PARM_VTYPE(pCpu->pCurInstr->param1) == OP_PARM_J))
1619 {
1620 RCPTRTYPE(uint8_t *) pTargetGC = PATMResolveBranch(pCpu, pCurInstrGC);
1621 if (pTargetGC == 0)
1622 {
1623 Log(("We don't support far jumps here!! (%08X)\n", pCpu->param1.flags));
1624 return VERR_PATCHING_REFUSED;
1625 }
1626
1627 if (pCpu->pCurInstr->opcode == OP_CALL)
1628 {
1629 Assert(!PATMIsPatchGCAddr(pVM, pTargetGC));
1630 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, pTargetGC, false);
1631 if (RT_FAILURE(rc))
1632 goto end;
1633 }
1634 else
1635 rc = patmPatchGenRelJump(pVM, pPatch, pTargetGC, pCpu->pCurInstr->opcode, !!(pCpu->prefix & PREFIX_OPSIZE));
1636
1637 if (RT_SUCCESS(rc))
1638 rc = VWRN_CONTINUE_RECOMPILE;
1639
1640 goto end;
1641 }
1642
1643 switch (pCpu->pCurInstr->opcode)
1644 {
1645 case OP_CLI:
1646 {
1647 /* If a cli is found while duplicating instructions for another patch, then it's of vital importance to continue
1648 * until we've found the proper exit point(s).
1649 */
1650 if ( pCurInstrGC != pInstrGC
1651 && !(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1652 )
1653 {
1654 Log(("cli instruction found in other instruction patch block; force it to continue & find an exit point\n"));
1655 pPatch->flags &= ~(PATMFL_CHECK_SIZE | PATMFL_SINGLE_INSTRUCTION);
1656 }
1657 /* Set by irq inhibition; no longer valid now. */
1658 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1659
1660 rc = patmPatchGenCli(pVM, pPatch);
1661 if (RT_SUCCESS(rc))
1662 rc = VWRN_CONTINUE_RECOMPILE;
1663 break;
1664 }
1665
1666 case OP_MOV:
1667 if (pCpu->pCurInstr->optype & OPTYPE_POTENTIALLY_DANGEROUS)
1668 {
1669 /* mov ss, src? */
1670 if ( (pCpu->param1.flags & USE_REG_SEG)
1671 && (pCpu->param1.base.reg_seg == DIS_SELREG_SS))
1672 {
1673 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1674 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1675 /** @todo this could cause a fault (ring 0 selector being loaded in ring 1) */
1676 }
1677#if 0 /* necessary for Haiku */
1678 else
1679 if ( (pCpu->param2.flags & USE_REG_SEG)
1680 && (pCpu->param2.base.reg_seg == USE_REG_SS)
1681 && (pCpu->param1.flags & (USE_REG_GEN32|USE_REG_GEN16))) /** @todo memory operand must in theory be handled too */
1682 {
1683 /* mov GPR, ss */
1684 rc = patmPatchGenMovFromSS(pVM, pPatch, pCpu, pCurInstrGC);
1685 if (RT_SUCCESS(rc))
1686 rc = VWRN_CONTINUE_RECOMPILE;
1687 break;
1688 }
1689#endif
1690 }
1691 goto duplicate_instr;
1692
1693 case OP_POP:
1694 if (pCpu->pCurInstr->param1 == OP_PARM_REG_SS)
1695 {
1696 Assert(pCpu->pCurInstr->optype & OPTYPE_INHIBIT_IRQS);
1697
1698            Log(("Force recompilation of next instruction for OP_POP SS at %RRv\n", pCurInstrGC));
1699 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1700 }
1701 goto duplicate_instr;
1702
1703 case OP_STI:
1704 {
1705 RTRCPTR pNextInstrGC = 0; /* by default no inhibit irq */
1706
1707 /* In a sequence of instructions that inhibit irqs, only the first one actually inhibits irqs. */
1708 if (!(pPatch->flags & PATMFL_INHIBIT_IRQS))
1709 {
1710 pPatch->flags |= PATMFL_INHIBIT_IRQS | PATMFL_GENERATE_JUMPTOGUEST;
1711 fInhibitIRQInstr = true;
1712 pNextInstrGC = pCurInstrGC + pCpu->opsize;
1713 Log(("Inhibit irqs for instruction OP_STI at %RRv\n", pCurInstrGC));
1714 }
1715 rc = patmPatchGenSti(pVM, pPatch, pCurInstrGC, pNextInstrGC);
1716
1717 if (RT_SUCCESS(rc))
1718 {
1719 DISCPUSTATE cpu = *pCpu;
1720 unsigned opsize;
1721 int disret;
1722 RCPTRTYPE(uint8_t *) pReturnInstrGC;
1723 R3PTRTYPE(uint8_t *) pNextInstrHC;
1724
1725 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1726
1727 pNextInstrGC = pCurInstrGC + pCpu->opsize;
1728 pNextInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pNextInstrGC);
1729 if (pNextInstrHC == NULL)
1730 {
1731 AssertFailed();
1732 return VERR_PATCHING_REFUSED;
1733 }
1734
1735 // Disassemble the next instruction
1736 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pNextInstrGC, pNextInstrHC, &opsize, NULL);
1737 if (disret == false)
1738 {
1739 AssertMsgFailed(("STI: Disassembly failed (probably page not present) -> return to caller\n"));
1740 return VERR_PATCHING_REFUSED;
1741 }
1742 pReturnInstrGC = pNextInstrGC + opsize;
1743
1744 if ( (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1745 || pReturnInstrGC <= pInstrGC
1746 || pReturnInstrGC - pInstrGC >= SIZEOF_NEARJUMP32
1747 )
1748 {
1749 /* Not an exit point for function duplication patches */
1750 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
1751 && RT_SUCCESS(rc))
1752 {
1753 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST; /* Don't generate a jump back */
1754 rc = VWRN_CONTINUE_RECOMPILE;
1755 }
1756 else
1757 rc = VINF_SUCCESS; //exit point
1758 }
1759 else {
1760 Log(("PATM: sti occurred too soon; refusing patch!\n"));
1761 rc = VERR_PATCHING_REFUSED; //not allowed!!
1762 }
1763 }
1764 break;
1765 }
1766
1767 case OP_POPF:
1768 {
1769 bool fGenerateJmpBack = (pCurInstrGC + pCpu->opsize - pInstrGC >= SIZEOF_NEARJUMP32);
1770
1771 /* Not an exit point for IDT handler or function replacement patches */
1772 /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
1773 if (pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_DUPLICATE_FUNCTION))
1774 fGenerateJmpBack = false;
1775
1776 rc = patmPatchGenPopf(pVM, pPatch, pCurInstrGC + pCpu->opsize, !!(pCpu->prefix & PREFIX_OPSIZE), fGenerateJmpBack);
1777 if (RT_SUCCESS(rc))
1778 {
1779 if (fGenerateJmpBack == false)
1780 {
1781 /* Not an exit point for IDT handler or function replacement patches */
1782 rc = VWRN_CONTINUE_RECOMPILE;
1783 }
1784 else
1785 {
1786 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1787 rc = VINF_SUCCESS; /* exit point! */
1788 }
1789 }
1790 break;
1791 }
1792
1793 case OP_PUSHF:
1794 rc = patmPatchGenPushf(pVM, pPatch, !!(pCpu->prefix & PREFIX_OPSIZE));
1795 if (RT_SUCCESS(rc))
1796 rc = VWRN_CONTINUE_RECOMPILE;
1797 break;
1798
1799 case OP_PUSH:
1800 if (pCpu->pCurInstr->param1 == OP_PARM_REG_CS)
1801 {
1802 rc = patmPatchGenPushCS(pVM, pPatch);
1803 if (RT_SUCCESS(rc))
1804 rc = VWRN_CONTINUE_RECOMPILE;
1805 break;
1806 }
1807 goto duplicate_instr;
1808
1809 case OP_IRET:
1810 Log(("IRET at %RRv\n", pCurInstrGC));
1811 rc = patmPatchGenIret(pVM, pPatch, pCurInstrGC, !!(pCpu->prefix & PREFIX_OPSIZE));
1812 if (RT_SUCCESS(rc))
1813 {
1814 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1815 rc = VINF_SUCCESS; /* exit point by definition */
1816 }
1817 break;
1818
1819 case OP_ILLUD2:
1820        /* This appears to be some kind of kernel panic in Linux 2.4; no point in continuing */
1821 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1822 if (RT_SUCCESS(rc))
1823 rc = VINF_SUCCESS; /* exit point by definition */
1824 Log(("Illegal opcode (0xf 0xb)\n"));
1825 break;
1826
1827 case OP_CPUID:
1828 rc = patmPatchGenCpuid(pVM, pPatch, pCurInstrGC);
1829 if (RT_SUCCESS(rc))
1830 rc = VWRN_CONTINUE_RECOMPILE;
1831 break;
1832
1833 case OP_STR:
1834 case OP_SLDT:
1835 rc = patmPatchGenSldtStr(pVM, pPatch, pCpu, pCurInstrGC);
1836 if (RT_SUCCESS(rc))
1837 rc = VWRN_CONTINUE_RECOMPILE;
1838 break;
1839
1840 case OP_SGDT:
1841 case OP_SIDT:
1842 rc = patmPatchGenSxDT(pVM, pPatch, pCpu, pCurInstrGC);
1843 if (RT_SUCCESS(rc))
1844 rc = VWRN_CONTINUE_RECOMPILE;
1845 break;
1846
1847 case OP_RETN:
1848 /* retn is an exit point for function patches */
1849 rc = patmPatchGenRet(pVM, pPatch, pCpu, pCurInstrGC);
1850 if (RT_SUCCESS(rc))
1851 rc = VINF_SUCCESS; /* exit point by definition */
1852 break;
1853
1854 case OP_SYSEXIT:
1855 /* Duplicate it, so it can be emulated in GC (or fault). */
1856 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
1857 if (RT_SUCCESS(rc))
1858 rc = VINF_SUCCESS; /* exit point by definition */
1859 break;
1860
1861 case OP_CALL:
1862 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
1863 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
1864 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
1865 */
1866 Assert(pCpu->param1.size == 4 || pCpu->param1.size == 6);
1867 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->param1.size == 4 /* no far calls! */)
1868 {
1869 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, (RTRCPTR)0xDEADBEEF, true);
1870 if (RT_SUCCESS(rc))
1871 {
1872 rc = VWRN_CONTINUE_RECOMPILE;
1873 }
1874 break;
1875 }
1876 goto gen_illegal_instr;
1877
1878 case OP_JMP:
1879 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
1880 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
1881 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
1882 */
1883 Assert(pCpu->param1.size == 4 || pCpu->param1.size == 6);
1884 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->param1.size == 4 /* no far jumps! */)
1885 {
1886 rc = patmPatchGenJump(pVM, pPatch, pCpu, pCurInstrGC);
1887 if (RT_SUCCESS(rc))
1888 rc = VINF_SUCCESS; /* end of branch */
1889 break;
1890 }
1891 goto gen_illegal_instr;
1892
1893 case OP_INT3:
1894 case OP_INT:
1895 case OP_INTO:
1896 goto gen_illegal_instr;
1897
1898 case OP_MOV_DR:
1899        /** @note Currently we let DRx writes cause a trap 0d (#GP); our trap handler will decide whether to interpret it or not. */
1900 if (pCpu->pCurInstr->param2 == OP_PARM_Dd)
1901 {
1902 rc = patmPatchGenMovDebug(pVM, pPatch, pCpu);
1903 if (RT_SUCCESS(rc))
1904 rc = VWRN_CONTINUE_RECOMPILE;
1905 break;
1906 }
1907 goto duplicate_instr;
1908
1909 case OP_MOV_CR:
1910        /** @note Currently we let CRx writes cause a trap 0d (#GP); our trap handler will decide whether to interpret it or not. */
1911 if (pCpu->pCurInstr->param2 == OP_PARM_Cd)
1912 {
1913 rc = patmPatchGenMovControl(pVM, pPatch, pCpu);
1914 if (RT_SUCCESS(rc))
1915 rc = VWRN_CONTINUE_RECOMPILE;
1916 break;
1917 }
1918 goto duplicate_instr;
1919
1920 default:
1921 if (pCpu->pCurInstr->optype & (OPTYPE_CONTROLFLOW | OPTYPE_PRIVILEGED_NOTRAP))
1922 {
1923gen_illegal_instr:
1924 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1925 if (RT_SUCCESS(rc))
1926 rc = VINF_SUCCESS; /* exit point by definition */
1927 }
1928 else
1929 {
1930duplicate_instr:
1931 Log(("patmPatchGenDuplicate\n"));
1932 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
1933 if (RT_SUCCESS(rc))
1934 rc = VWRN_CONTINUE_RECOMPILE;
1935 }
1936 break;
1937 }
1938
1939end:
1940
1941 if ( !fInhibitIRQInstr
1942 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
1943 {
1944 int rc2;
1945 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->opsize;
1946
1947 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
1948 Log(("Clear inhibit IRQ flag at %RRv\n", pCurInstrGC));
1949 if (pPatch->flags & PATMFL_GENERATE_JUMPTOGUEST)
1950 {
1951 Log(("patmRecompileCallback: generate jump back to guest (%RRv) after fused instruction\n", pNextInstrGC));
1952
1953 rc2 = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
1954 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1955 rc = VINF_SUCCESS; /* end of the line */
1956 }
1957 else
1958 {
1959 rc2 = patmPatchGenClearInhibitIRQ(pVM, pPatch, pNextInstrGC);
1960 }
1961 if (RT_FAILURE(rc2))
1962 rc = rc2;
1963 }
1964
1965 if (RT_SUCCESS(rc))
1966 {
1967        // Single instruction patch: we've copied enough instructions *and* the current instruction is not a relative jump.
1968 if ( (pPatch->flags & PATMFL_CHECK_SIZE)
1969 && pCurInstrGC + pCpu->opsize - pInstrGC >= SIZEOF_NEARJUMP32
1970 && !(pCpu->pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW)
1971 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT) /* do not do this when the next instruction *must* be executed! */
1972 )
1973 {
1974 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->opsize;
1975
1976 // The end marker for this kind of patch is any instruction at a location outside our patch jump
1977 Log(("patmRecompileCallback: end found for single instruction patch at %RRv opsize %d\n", pNextInstrGC, pCpu->opsize));
1978
1979 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC);
1980 AssertRC(rc);
1981 }
1982 }
1983 return rc;
1984}
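
/*
 * Illustrative sketch (not part of the original code): the refusals above all
 * enforce the same constraint -- the 5 byte near jump that will later be
 * written over the guest's privileged instruction (SIZEOF_NEARJUMP32 bytes).
 * Any exit point or branch target inside that window would land in the middle
 * of the overwritten bytes.  The helper name and standalone form below are
 * hypothetical; the real code performs the comparison inline (see the
 * OP_STI/OP_POPF handling in patmAnalyseBlockCallback and patmRecompileCallback).
 */
#if 0
/** Returns true if pAddrGC lies inside the jump that replaces the privileged
 *  instruction at pPrivInstrGC. */
static bool patmSketchAddrInPatchJumpWindow(RTRCPTR pPrivInstrGC, RTRCPTR pAddrGC)
{
    return pAddrGC > pPrivInstrGC
        && pAddrGC < pPrivInstrGC + SIZEOF_NEARJUMP32;
}
#endif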
1985
1986
1987#ifdef LOG_ENABLED
1988
1989/** Adds a disasm jump record (temporary, to prevent duplicate analysis of jump targets).
1990 *
1991 * @param pVM The VM to operate on.
1992 * @param pPatch Patch structure ptr
1993 * @param pInstrGC Guest context pointer to privileged instruction
1994 *
1995 */
1996static void patmPatchAddDisasmJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1997{
1998 PAVLPVNODECORE pRec;
1999
2000 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
2001 Assert(pRec);
2002 pRec->Key = (AVLPVKEY)pInstrGC;
2003
2004 int ret = RTAvlPVInsert(&pPatch->pTempInfo->DisasmJumpTree, pRec);
2005 Assert(ret);
2006}
2007
2008/**
2009 * Checks if jump target has been analysed before.
2010 *
2011 * @returns true if the jump target has been analysed before, false otherwise.
2012 * @param pPatch Patch struct
2013 * @param pInstrGC Jump target
2014 *
2015 */
2016static bool patmIsKnownDisasmJump(PPATCHINFO pPatch, RTRCPTR pInstrGC)
2017{
2018 PAVLPVNODECORE pRec;
2019
2020 pRec = RTAvlPVGet(&pPatch->pTempInfo->DisasmJumpTree, (AVLPVKEY)pInstrGC);
2021 if (pRec)
2022 return true;
2023 return false;
2024}
2025
2026/**
2027 * For proper disassembly of the final patch block
2028 *
2029 * @returns VBox status code.
2030 * @param pVM The VM to operate on.
2031 * @param pCpu CPU disassembly state
2032 * @param pInstrGC Guest context pointer to privileged instruction
2033 * @param pCurInstrGC Guest context pointer to the current instruction
2034 * @param pUserData User pointer (callback specific)
2035 *
2036 */
2037int patmr3DisasmCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, void *pUserData)
2038{
2039 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
2040
2041 if (pCpu->pCurInstr->opcode == OP_INT3)
2042 {
2043 /* Could be an int3 inserted in a call patch. Check to be sure */
2044 DISCPUSTATE cpu;
2045 uint8_t *pOrgJumpHC;
2046 RTRCPTR pOrgJumpGC;
2047 uint32_t dummy;
2048
2049 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2050 pOrgJumpGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2051 pOrgJumpHC = PATMGCVirtToHCVirt(pVM, pPatch, pOrgJumpGC);
2052
2053 bool disret = PATMR3DISInstr(pVM, pPatch, &cpu, pOrgJumpGC, pOrgJumpHC, &dummy, NULL);
2054 if (!disret || cpu.pCurInstr->opcode != OP_CALL || cpu.param1.size != 4 /* only near calls */)
2055 return VINF_SUCCESS;
2056
2057 return VWRN_CONTINUE_ANALYSIS;
2058 }
2059
2060 if ( pCpu->pCurInstr->opcode == OP_ILLUD2
2061 && PATMIsPatchGCAddr(pVM, pCurInstrGC))
2062 {
2063        /* The indirect call patch contains a 0xF 0xB illegal instruction (ud2) to call for assistance; check for this and continue. */
2064 return VWRN_CONTINUE_ANALYSIS;
2065 }
2066
2067 if ( (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
2068 || pCpu->pCurInstr->opcode == OP_INT
2069 || pCpu->pCurInstr->opcode == OP_IRET
2070 || pCpu->pCurInstr->opcode == OP_RETN
2071 || pCpu->pCurInstr->opcode == OP_RETF
2072 )
2073 {
2074 return VINF_SUCCESS;
2075 }
2076
2077 if (pCpu->pCurInstr->opcode == OP_ILLUD2)
2078 return VINF_SUCCESS;
2079
2080 return VWRN_CONTINUE_ANALYSIS;
2081}
2082
2083
2084/**
2085 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2086 *
2087 * @returns VBox status code.
2088 * @param pVM The VM to operate on.
2089 * @param pInstrGC Guest context pointer to the initial privileged instruction
2090 * @param pCurInstrGC Guest context pointer to the current instruction
2091 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2092 * @param pUserData User pointer (callback specific)
2093 *
2094 */
2095int patmr3DisasmCode(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, void *pUserData)
2096{
2097 DISCPUSTATE cpu;
2098 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
2099 int rc = VWRN_CONTINUE_ANALYSIS;
2100 uint32_t opsize, delta;
2101 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2102 bool disret;
2103 char szOutput[256];
2104
2105 Assert(pCurInstrHC != PATCHCODE_PTR_HC(pPatch) || pPatch->pTempInfo->DisasmJumpTree == 0);
2106
2107 /* We need this to determine branch targets (and for disassembling). */
2108 delta = pVM->patm.s.pPatchMemGC - (uintptr_t)pVM->patm.s.pPatchMemHC;
2109
2110 while(rc == VWRN_CONTINUE_ANALYSIS)
2111 {
2112 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2113
2114 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pCurInstrGC);
2115 if (pCurInstrHC == NULL)
2116 {
2117 rc = VERR_PATCHING_REFUSED;
2118 goto end;
2119 }
2120
2121 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, szOutput, PATMREAD_RAWCODE);
2122 if (PATMIsPatchGCAddr(pVM, pCurInstrGC))
2123 {
2124 RTRCPTR pOrgInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2125
2126 if (pOrgInstrGC != pPatch->pTempInfo->pLastDisasmInstrGC)
2127 Log(("DIS %RRv<-%s", pOrgInstrGC, szOutput));
2128 else
2129 Log(("DIS %s", szOutput));
2130
2131 pPatch->pTempInfo->pLastDisasmInstrGC = pOrgInstrGC;
2132 if (patmIsIllegalInstr(pPatch, pOrgInstrGC))
2133 {
2134 rc = VINF_SUCCESS;
2135 goto end;
2136 }
2137 }
2138 else
2139 Log(("DIS: %s", szOutput));
2140
2141 if (disret == false)
2142 {
2143 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2144 rc = VINF_SUCCESS;
2145 goto end;
2146 }
2147
2148 rc = pfnPATMR3Disasm(pVM, &cpu, pInstrGC, pCurInstrGC, pUserData);
2149 if (rc != VWRN_CONTINUE_ANALYSIS) {
2150 break; //done!
2151 }
2152
2153 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction) */
2154 if ( (cpu.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2155 && (OP_PARM_VTYPE(cpu.pCurInstr->param1) == OP_PARM_J)
2156 && cpu.pCurInstr->opcode != OP_CALL /* complete functions are replaced; don't bother here. */
2157 )
2158 {
2159 RTRCPTR pTargetGC = PATMResolveBranch(&cpu, pCurInstrGC);
2160 RTRCPTR pOrgTargetGC;
2161
2162 if (pTargetGC == 0)
2163 {
2164 Log(("We don't support far jumps here!! (%08X)\n", cpu.param1.flags));
2165 rc = VERR_PATCHING_REFUSED;
2166 break;
2167 }
2168
2169 if (!PATMIsPatchGCAddr(pVM, pTargetGC))
2170 {
2171 //jump back to guest code
2172 rc = VINF_SUCCESS;
2173 goto end;
2174 }
2175 pOrgTargetGC = PATMR3PatchToGCPtr(pVM, pTargetGC, 0);
2176
2177 if (patmIsCommonIDTHandlerPatch(pVM, pOrgTargetGC))
2178 {
2179 rc = VINF_SUCCESS;
2180 goto end;
2181 }
2182
2183 if (patmIsKnownDisasmJump(pPatch, pTargetGC) == false)
2184 {
2185 /* New jump, let's check it. */
2186 patmPatchAddDisasmJump(pVM, pPatch, pTargetGC);
2187
2188 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2189 rc = patmr3DisasmCode(pVM, pInstrGC, pTargetGC, pfnPATMR3Disasm, pUserData);
2190 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2191
2192 if (rc != VINF_SUCCESS) {
2193 break; //done!
2194 }
2195 }
2196 if (cpu.pCurInstr->opcode == OP_JMP)
2197 {
2198 /* Unconditional jump; return to caller. */
2199 rc = VINF_SUCCESS;
2200 goto end;
2201 }
2202
2203 rc = VWRN_CONTINUE_ANALYSIS;
2204 }
2205 pCurInstrGC += opsize;
2206 }
2207end:
2208 return rc;
2209}
2210
2211/**
2212 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2213 *
2214 * @returns VBox status code.
2215 * @param pVM The VM to operate on.
2216 * @param pInstrGC Guest context pointer to the initial privileged instruction
2217 * @param pCurInstrGC Guest context pointer to the current instruction
2218 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2219 * @param pUserData User pointer (callback specific)
2220 *
2221 */
2222int patmr3DisasmCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, void *pUserData)
2223{
2224 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
2225
2226 int rc = patmr3DisasmCode(pVM, pInstrGC, pCurInstrGC, pfnPATMR3Disasm, pUserData);
2227 /* Free all disasm jump records. */
2228 patmEmptyTree(pVM, &pPatch->pTempInfo->DisasmJumpTree);
2229 return rc;
2230}
2231
2232#endif /* LOG_ENABLED */
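
/*
 * Illustrative sketch (hypothetical helper, not the actual PATMResolveBranch
 * implementation): the analysis, disassembly and recompile passes only follow
 * branches whose target is encoded as an immediate displacement relative to
 * the end of the instruction.  Given the instruction address, its size and
 * the signed displacement, the target is derived as shown below.
 */
#if 0
static RTRCPTR patmSketchRelBranchTarget(RTRCPTR pInstrGC, uint32_t cbInstr, int32_t iDisp)
{
    /* The displacement is relative to the first byte after the branch instruction. */
    return pInstrGC + cbInstr + iDisp;
}
#endif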
2233
2234/**
2235 * Detects if the specified address falls within a 5-byte jump generated for an active patch.
2236 * If so, this patch is permanently disabled.
2237 *
2238 * @param pVM The VM to operate on.
2239 * @param pInstrGC Guest context pointer to instruction
2240 * @param pConflictGC Guest context pointer to check
2241 *
2242 * @note also checks for patch hints to make sure they can never be enabled if a conflict is present.
2243 *
2244 */
2245VMMR3DECL(int) PATMR3DetectConflict(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictGC)
2246{
2247 PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, pConflictGC, true /* include patch hints */);
2248 if (pTargetPatch)
2249 {
2250 return patmDisableUnusablePatch(pVM, pInstrGC, pConflictGC, pTargetPatch);
2251 }
2252 return VERR_PATCH_NO_CONFLICT;
2253}
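
/*
 * Illustrative sketch (not part of the original code): condensed form of the
 * test described in the comment above.  The exact semantics, including patch
 * hints, live in PATMFindActivePatchByEntrypoint; this only shows the basic
 * overlap check between a guest address and the 5 byte jump written over an
 * active patch's privileged instruction.
 */
#if 0
static bool patmSketchConflictsWithPatchJump(RTRCPTR pPatchPrivInstrGC, RTRCPTR pConflictGC)
{
    return pConflictGC >= pPatchPrivInstrGC
        && pConflictGC <  pPatchPrivInstrGC + SIZEOF_NEARJUMP32;
}
#endif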
2254
2255/**
2256 * Recompiles the code stream until the callback function detects a failure or decides everything is acceptable.
2257 *
2258 * @returns VBox status code.
2259 * @param pVM The VM to operate on.
2260 * @param pInstrGC Guest context pointer to privileged instruction
2261 * @param pCurInstrGC Guest context pointer to the current instruction
2262 * @param pfnPATMR3Recompile Callback for testing the disassembled instruction
2263 * @param pUserData User pointer (callback specific)
2264 *
2265 */
2266static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Recompile, void *pUserData)
2267{
2268 DISCPUSTATE cpu;
2269 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
2270 int rc = VWRN_CONTINUE_ANALYSIS;
2271 uint32_t opsize;
2272 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2273 bool disret;
2274#ifdef LOG_ENABLED
2275 char szOutput[256];
2276#endif
2277
2278 while (rc == VWRN_CONTINUE_RECOMPILE)
2279 {
2280 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2281
2282 ////Log(("patmRecompileCodeStream %RRv %RRv\n", pInstrGC, pCurInstrGC));
2283
2284 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pCurInstrGC);
2285 if (pCurInstrHC == NULL)
2286 {
2287 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2288 goto end;
2289 }
2290#ifdef LOG_ENABLED
2291 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, szOutput);
2292 Log(("Recompile: %s", szOutput));
2293#else
2294 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2295#endif
2296 if (disret == false)
2297 {
2298 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2299
2300 /* Add lookup record for patch to guest address translation */
2301 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
2302 patmPatchGenIllegalInstr(pVM, pPatch);
2303 rc = VINF_SUCCESS; /* Note: don't fail here; we might refuse an important patch!! */
2304 goto end;
2305 }
2306
2307 rc = pfnPATMR3Recompile(pVM, &cpu, pInstrGC, pCurInstrGC, pUserData);
2308 if (rc != VWRN_CONTINUE_RECOMPILE)
2309 {
2310 /* If irqs are inhibited because of the current instruction, then we must make sure the next one is executed! */
2311 if ( rc == VINF_SUCCESS
2312 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2313 {
2314 DISCPUSTATE cpunext;
2315 uint32_t opsizenext;
2316 uint8_t *pNextInstrHC;
2317 RTRCPTR pNextInstrGC = pCurInstrGC + opsize;
2318
2319 Log(("patmRecompileCodeStream: irqs inhibited by instruction %RRv\n", pNextInstrGC));
2320
2321 /* Certain instructions (e.g. sti) force the next instruction to be executed before any interrupts can occur.
2322 * Recompile the next instruction as well
2323 */
2324 pNextInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pNextInstrGC);
2325 if (pNextInstrHC == NULL)
2326 {
2327 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2328 goto end;
2329 }
2330 cpunext.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2331 disret = PATMR3DISInstr(pVM, pPatch, &cpunext, pNextInstrGC, pNextInstrHC, &opsizenext, NULL);
2332 if (disret == false)
2333 {
2334 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2335 goto end;
2336 }
2337 switch(cpunext.pCurInstr->opcode)
2338 {
2339 case OP_IRET: /* inhibit cleared in generated code */
2340 case OP_SYSEXIT: /* faults; inhibit should be cleared in HC handling */
2341 case OP_HLT:
2342 break; /* recompile these */
2343
2344 default:
2345 if (cpunext.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2346 {
2347 Log(("Unexpected control flow instruction after inhibit irq instruction\n"));
2348
2349 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2350 AssertRC(rc);
2351 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2352 goto end; /** @todo should be ok to ignore instruction fusing in this case */
2353 }
2354 break;
2355 }
2356
2357 /** @note after a cli we must continue to a proper exit point */
2358 if (cpunext.pCurInstr->opcode != OP_CLI)
2359 {
2360 rc = pfnPATMR3Recompile(pVM, &cpunext, pInstrGC, pNextInstrGC, pUserData);
2361 if (RT_SUCCESS(rc))
2362 {
2363 rc = VINF_SUCCESS;
2364 goto end;
2365 }
2366 break;
2367 }
2368 else
2369 rc = VWRN_CONTINUE_RECOMPILE;
2370 }
2371 else
2372 break; /* done! */
2373 }
2374
2375 /** @todo continue with the instructions following the jump and then recompile the jump target code */
2376
2377
2378 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction). */
2379 if ( (cpu.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2380 && (OP_PARM_VTYPE(cpu.pCurInstr->param1) == OP_PARM_J)
2381 && cpu.pCurInstr->opcode != OP_CALL /* complete functions are replaced; don't bother here. */
2382 )
2383 {
2384 RCPTRTYPE(uint8_t *) addr = PATMResolveBranch(&cpu, pCurInstrGC);
2385 if (addr == 0)
2386 {
2387 Log(("We don't support far jumps here!! (%08X)\n", cpu.param1.flags));
2388 rc = VERR_PATCHING_REFUSED;
2389 break;
2390 }
2391
2392 Log(("Jump encountered target %RRv\n", addr));
2393
2394 /* We don't check if the branch target lies in a valid page as we've already done that in the analysis phase. */
2395 if (!(cpu.pCurInstr->optype & OPTYPE_UNCOND_CONTROLFLOW))
2396 {
2397 Log(("patmRecompileCodeStream continue passed conditional jump\n"));
2398 /* First we need to finish this linear code stream until the next exit point. */
2399 rc = patmRecompileCodeStream(pVM, pInstrGC, pCurInstrGC+opsize, pfnPATMR3Recompile, pUserData);
2400 if (RT_FAILURE(rc))
2401 {
2402 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2403 break; //fatal error
2404 }
2405 }
2406
2407 if (patmGuestGCPtrToPatchGCPtr(pVM, pPatch, addr) == 0)
2408 {
2409 /* New code; let's recompile it. */
2410 Log(("patmRecompileCodeStream continue with jump\n"));
2411
2412 /*
2413 * If we are jumping to an existing patch (or within 5 bytes of the entrypoint), then we must temporarily disable
2414 * this patch so we can continue our analysis
2415 *
2416 * We rely on CSAM to detect and resolve conflicts
2417 */
2418 PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, addr);
2419 if(pTargetPatch)
2420 {
2421 Log(("Found active patch at target %RRv (%RRv) -> temporarily disabling it!!\n", addr, pTargetPatch->pPrivInstrGC));
2422 PATMR3DisablePatch(pVM, pTargetPatch->pPrivInstrGC);
2423 }
2424
2425 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2426 rc = patmRecompileCodeStream(pVM, pInstrGC, addr, pfnPATMR3Recompile, pUserData);
2427 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2428
2429 if(pTargetPatch)
2430 {
2431 PATMR3EnablePatch(pVM, pTargetPatch->pPrivInstrGC);
2432 }
2433
2434 if (RT_FAILURE(rc))
2435 {
2436 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2437 break; //done!
2438 }
2439 }
2440 /* Always return to caller here; we're done! */
2441 rc = VINF_SUCCESS;
2442 goto end;
2443 }
2444 else
2445 if (cpu.pCurInstr->optype & OPTYPE_UNCOND_CONTROLFLOW)
2446 {
2447 rc = VINF_SUCCESS;
2448 goto end;
2449 }
2450 pCurInstrGC += opsize;
2451 }
2452end:
2453 Assert(!(pPatch->flags & PATMFL_RECOMPILE_NEXT));
2454 return rc;
2455}
2456
2457
2458/**
2459 * Generate the jump from guest to patch code
2460 *
2461 * @returns VBox status code.
2462 * @param pVM The VM to operate on.
2463 * @param pPatch Patch record
2464 */
2465static int patmGenJumpToPatch(PVM pVM, PPATCHINFO pPatch, bool fAddFixup = true)
2466{
2467 uint8_t temp[8];
2468 uint8_t *pPB;
2469 int rc;
2470
2471 Assert(pPatch->cbPatchJump <= sizeof(temp));
2472 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
2473
2474 pPB = pPatch->pPrivInstrHC;
2475
2476#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
2477 if (pPatch->flags & PATMFL_JUMP_CONFLICT)
2478 {
2479 Assert(pPatch->pPatchJumpDestGC);
2480
2481 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
2482 {
2483 // jmp [PatchCode]
2484 if (fAddFixup)
2485 {
2486 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2487 {
2488 Log(("Relocation failed for the jump in the guest code!!\n"));
2489 return VERR_PATCHING_REFUSED;
2490 }
2491 }
2492
2493 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2494            *(uint32_t *)&temp[1] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump);    // relative displacement
2495 }
2496 else
2497 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
2498 {
2499 // jmp [PatchCode]
2500 if (fAddFixup)
2501 {
2502 if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2503 {
2504 Log(("Relocation failed for the jump in the guest code!!\n"));
2505 return VERR_PATCHING_REFUSED;
2506 }
2507 }
2508
2509 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2510 temp[1] = pPatch->aPrivInstr[1]; //jump opcode copied from original instruction
2511            *(uint32_t *)&temp[2] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump);    // relative displacement
2512 }
2513 else
2514 {
2515 Assert(0);
2516 return VERR_PATCHING_REFUSED;
2517 }
2518 }
2519 else
2520#endif
2521 {
2522 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2523
2524 // jmp [PatchCode]
2525 if (fAddFixup)
2526 {
2527 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
2528 {
2529 Log(("Relocation failed for the jump in the guest code!!\n"));
2530 return VERR_PATCHING_REFUSED;
2531 }
2532 }
2533 temp[0] = 0xE9; //jmp
2534        *(uint32_t *)&temp[1] = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);    // relative displacement to the patch code
2535 }
2536 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2537 AssertRC(rc);
2538
2539 if (rc == VINF_SUCCESS)
2540 pPatch->flags |= PATMFL_PATCHED_GUEST_CODE;
2541
2542 return rc;
2543}
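
/*
 * Illustrative sketch (hypothetical helper): patmGenJumpToPatch above builds
 * a 5 byte "jmp rel32" (opcode 0xE9 plus a 32-bit displacement) that is
 * written over the guest's privileged instruction.  The displacement is
 * relative to the byte following the jump, i.e. target - (source + 5).
 */
#if 0
static void patmSketchAssembleNearJmp32(uint8_t pbBuf[SIZEOF_NEARJUMP32], RTRCPTR pSrcGC, RTRCPTR pTargetGC)
{
    pbBuf[0] = 0xE9;    /* jmp rel32 */
    *(uint32_t *)&pbBuf[1] = (uint32_t)pTargetGC - ((uint32_t)pSrcGC + SIZEOF_NEARJUMP32);
}
#endif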
2544
2545/**
2546 * Remove the jump from guest to patch code
2547 *
2548 * @returns VBox status code.
2549 * @param pVM The VM to operate on.
2550 * @param pPatch Patch record
2551 */
2552static int patmRemoveJumpToPatch(PVM pVM, PPATCHINFO pPatch)
2553{
2554#ifdef DEBUG
2555 DISCPUSTATE cpu;
2556 char szOutput[256];
2557 uint32_t opsize, i = 0;
2558 bool disret;
2559
2560 while(i < pPatch->cbPrivInstr)
2561 {
2562 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2563 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, &pPatch->pPrivInstrHC[i], &opsize, szOutput);
2564 if (disret == false)
2565 break;
2566
2567 Log(("Org patch jump: %s", szOutput));
2568 Assert(opsize);
2569 i += opsize;
2570 }
2571#endif
2572
2573 /* Restore original code (privileged instruction + following instructions that were overwritten because of the 5/6 byte jmp). */
2574 int rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, pPatch->cbPatchJump);
2575#ifdef DEBUG
2576 if (rc == VINF_SUCCESS)
2577 {
2578 i = 0;
2579 while(i < pPatch->cbPrivInstr)
2580 {
2581 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2582 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, &pPatch->pPrivInstrHC[i], &opsize, szOutput);
2583 if (disret == false)
2584 break;
2585
2586 Log(("Org instr: %s", szOutput));
2587 Assert(opsize);
2588 i += opsize;
2589 }
2590 }
2591#endif
2592 pPatch->flags &= ~PATMFL_PATCHED_GUEST_CODE;
2593 return rc;
2594}
2595
2596/**
2597 * Generate the call from guest to patch code
2598 *
2599 * @returns VBox status code.
2600 * @param pVM The VM to operate on.
2601 * @param pPatch Patch record
2602 */
2603static int patmGenCallToPatch(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTargetGC, bool fAddFixup = true)
2604{
2605 uint8_t temp[8];
2606 uint8_t *pPB;
2607 int rc;
2608
2609 Assert(pPatch->cbPatchJump <= sizeof(temp));
2610
2611 pPB = pPatch->pPrivInstrHC;
2612
2613 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2614
2615    // call/jmp [PatchCode]
2616 if (fAddFixup)
2617 {
2618 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
2619 {
2620 Log(("Relocation failed for the jump in the guest code!!\n"));
2621 return VERR_PATCHING_REFUSED;
2622 }
2623 }
2624
2625 Assert(pPatch->aPrivInstr[0] == 0xE8 || pPatch->aPrivInstr[0] == 0xE9); /* call or jmp */
2626 temp[0] = pPatch->aPrivInstr[0];
2627    *(uint32_t *)&temp[1] = (uint32_t)pTargetGC - ((uint32_t)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);    // relative displacement to the target
2628
2629 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2630 AssertRC(rc);
2631
2632 return rc;
2633}
2634
2635
2636/**
2637 * Patch cli/sti pushf/popf instruction block at specified location
2638 *
2639 * @returns VBox status code.
2640 * @param pVM The VM to operate on.
2641 * @param pInstrGC Guest context pointer to privileged instruction
2642 * @param pInstrHC Host context pointer to privileged instruction
2643 * @param uOpcode Instruction opcode
2644 * @param uOpSize Size of starting instruction
2645 * @param pPatchRec Patch record
2646 *
2647 * @note Returns failure if patching is not allowed or not possible.
2648 *
2649 */
2650VMMR3DECL(int) PATMR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2651 uint32_t uOpcode, uint32_t uOpSize, PPATMPATCHREC pPatchRec)
2652{
2653 PPATCHINFO pPatch = &pPatchRec->patch;
2654 int rc = VERR_PATCHING_REFUSED;
2655 DISCPUSTATE cpu;
2656 uint32_t orgOffsetPatchMem = ~0;
2657 RTRCPTR pInstrStart;
2658#ifdef LOG_ENABLED
2659 uint32_t opsize;
2660 char szOutput[256];
2661 bool disret;
2662#endif
2663
2664 /* Save original offset (in case of failures later on) */
2665 /** @todo use the hypervisor heap (that has quite a few consequences for save/restore though) */
2666 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2667
2668 Assert(!(pPatch->flags & (PATMFL_GUEST_SPECIFIC|PATMFL_USER_MODE|PATMFL_TRAPHANDLER)));
2669 switch (uOpcode)
2670 {
2671 case OP_MOV:
2672 break;
2673
2674 case OP_CLI:
2675 case OP_PUSHF:
2676 /* We can 'call' a cli or pushf patch. It will either return to the original guest code when IF is set again, or fault. */
2677 /** @note special precautions are taken when disabling and enabling such patches. */
2678 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
2679 break;
2680
2681 default:
2682 if (!(pPatch->flags & PATMFL_IDTHANDLER))
2683 {
2684 AssertMsg(0, ("PATMR3PatchBlock: Invalid opcode %x\n", uOpcode));
2685 return VERR_INVALID_PARAMETER;
2686 }
2687 }
2688
2689 if (!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)))
2690 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
2691
2692 /* If we're going to insert a patch jump, then the jump itself is not allowed to cross a page boundary. */
2693 if ( (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2694 && PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + SIZEOF_NEARJUMP32)
2695 )
2696 {
2697 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
2698#ifdef DEBUG_sandervl
2699//// AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
2700#endif
2701 rc = VERR_PATCHING_REFUSED;
2702 goto failure;
2703 }
2704
2705 pPatch->nrPatch2GuestRecs = 0;
2706 pInstrStart = pInstrGC;
2707
2708#ifdef PATM_ENABLE_CALL
2709 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
2710#endif
2711
2712 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2713 pPatch->uCurPatchOffset = 0;
2714
2715 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2716
2717 if ((pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER)) == PATMFL_IDTHANDLER)
2718 {
2719 Assert(pPatch->flags & PATMFL_INTHANDLER);
2720
2721 /* Install fake cli patch (to clear the virtual IF and check int xx parameters) */
2722 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2723 if (RT_FAILURE(rc))
2724 goto failure;
2725 }
2726
2727 /***************************************************************************************************************************/
2728 /** @note We can't insert *any* code before a sysenter handler; some linux guests have an invalid stack at this point!!!!! */
2729 /***************************************************************************************************************************/
2730#ifdef VBOX_WITH_STATISTICS
2731 if (!(pPatch->flags & PATMFL_SYSENTER))
2732 {
2733 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2734 if (RT_FAILURE(rc))
2735 goto failure;
2736 }
2737#endif
2738
2739 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pPatch);
2740 if (rc != VINF_SUCCESS)
2741 {
2742        Log(("PATMR3PatchBlock: patmRecompileCodeStream failed with %d\n", rc));
2743 goto failure;
2744 }
2745
2746 /* Calculated during analysis. */
2747 if (pPatch->cbPatchBlockSize < SIZEOF_NEARJUMP32)
2748 {
2749 /* Most likely cause: we encountered an illegal instruction very early on. */
2750 /** @todo could turn it into an int3 callable patch. */
2751 Log(("PATMR3PatchBlock: patch block too small -> refuse\n"));
2752 rc = VERR_PATCHING_REFUSED;
2753 goto failure;
2754 }
2755
2756 /* size of patch block */
2757 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2758
2759
2760 /* Update free pointer in patch memory. */
2761 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2762 /* Round to next 8 byte boundary. */
2763 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2764
2765 /*
2766 * Insert into patch to guest lookup tree
2767 */
2768 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2769 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2770 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2771    AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
2772 if (!rc)
2773 {
2774 rc = VERR_PATCHING_REFUSED;
2775 goto failure;
2776 }
2777
2778 /* Note that patmr3SetBranchTargets can install additional patches!! */
2779 rc = patmr3SetBranchTargets(pVM, pPatch);
2780 if (rc != VINF_SUCCESS)
2781 {
2782        Log(("PATMR3PatchBlock: patmr3SetBranchTargets failed with %d\n", rc));
2783 goto failure;
2784 }
2785
2786#ifdef LOG_ENABLED
2787 Log(("Patch code ----------------------------------------------------------\n"));
2788 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pPatch);
2789 Log(("Patch code ends -----------------------------------------------------\n"));
2790#endif
2791
2792 /* make a copy of the guest code bytes that will be overwritten */
2793 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
2794
2795 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
2796 AssertRC(rc);
2797
2798 if (pPatch->flags & PATMFL_INT3_REPLACEMENT_BLOCK)
2799 {
2800 /*uint8_t ASMInt3 = 0xCC; - unused */
2801
2802 Log(("PATMR3PatchBlock %RRv -> int 3 callable patch.\n", pPatch->pPrivInstrGC));
2803 /* Replace first opcode byte with 'int 3'. */
2804 rc = patmActivateInt3Patch(pVM, pPatch);
2805 if (RT_FAILURE(rc))
2806 goto failure;
2807
2808 /* normal patch can be turned into an int3 patch -> clear patch jump installation flag. */
2809 pPatch->flags &= ~PATMFL_MUST_INSTALL_PATCHJMP;
2810
2811 pPatch->flags &= ~PATMFL_INSTR_HINT;
2812 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
2813 }
2814 else
2815 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2816 {
2817 Assert(!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)));
2818 /* now insert a jump in the guest code */
2819 rc = patmGenJumpToPatch(pVM, pPatch, true);
2820 AssertRC(rc);
2821 if (RT_FAILURE(rc))
2822 goto failure;
2823
2824 }
2825
2826#ifdef LOG_ENABLED
2827 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2828 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput, PATMREAD_RAWCODE);
2829 Log(("%s patch: %s", patmGetInstructionString(pPatch->opcode, pPatch->flags), szOutput));
2830#endif
2831
2832 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2833 pPatch->pTempInfo->nrIllegalInstr = 0;
2834
2835 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
2836
2837 pPatch->uState = PATCH_ENABLED;
2838 return VINF_SUCCESS;
2839
2840failure:
2841 if (pPatchRec->CoreOffset.Key)
2842 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
2843
2844 patmEmptyTree(pVM, &pPatch->FixupTree);
2845 pPatch->nrFixups = 0;
2846
2847 patmEmptyTree(pVM, &pPatch->JumpTree);
2848 pPatch->nrJumpRecs = 0;
2849
2850 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2851 pPatch->pTempInfo->nrIllegalInstr = 0;
2852
2853 /* Turn this cli patch into a dummy. */
2854 pPatch->uState = PATCH_REFUSED;
2855 pPatch->pPatchBlockOffset = 0;
2856
2857 // Give back the patch memory we no longer need
2858 Assert(orgOffsetPatchMem != (uint32_t)~0);
2859 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
2860
2861 return rc;
2862}
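
/*
 * Illustrative sketch (not part of the original code): every patch installer
 * in this file uses the same patch memory bookkeeping -- remember
 * pVM->patm.s.offPatchMem, emit code while advancing uCurPatchOffset, then
 * either commit the block (advance offPatchMem and round it up to the next
 * 8 byte boundary) or restore the saved offset on failure.  The helper name
 * below is hypothetical; the real code does this inline with RT_ALIGN_32.
 */
#if 0
static uint32_t patmSketchCommitPatchBlock(uint32_t offPatchMem, uint32_t cbPatchBlock)
{
    /* E.g. offset 0x123 plus a 0x21 byte block gives 0x144, aligned up to 0x148. */
    return RT_ALIGN_32(offPatchMem + cbPatchBlock, 8);
}
#endif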
2863
2864/**
2865 * Patch IDT handler
2866 *
2867 * @returns VBox status code.
2868 * @param pVM The VM to operate on.
2869 * @param pInstrGC Guest context pointer to privileged instruction
2870 * @param pInstrHC Host context pointer to privileged instruction
2871 * @param uOpSize Size of starting instruction
2872 * @param pPatchRec Patch record
2873 *
2874 * @note Returns failure if patching is not allowed or not possible.
2875 *
2876 */
2877static int patmIdtHandler(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2878 uint32_t uOpSize, PPATMPATCHREC pPatchRec)
2879{
2880 PPATCHINFO pPatch = &pPatchRec->patch;
2881 bool disret;
2882 DISCPUSTATE cpuPush, cpuJmp;
2883 uint32_t opsize;
2884 RTRCPTR pCurInstrGC = pInstrGC;
2885 uint8_t *pCurInstrHC = pInstrHC;
2886 uint32_t orgOffsetPatchMem = ~0;
2887
2888 /*
2889 * In Linux it's often the case that many interrupt handlers push a predefined value onto the stack
2890 * and then jump to a common entrypoint. In order not to waste a lot of memory, we will check for this
2891 * condition here and only patch the common entrypoint once.
2892 */
2893 cpuPush.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2894 disret = PATMR3DISInstr(pVM, pPatch, &cpuPush, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2895 Assert(disret);
2896 if (disret && cpuPush.pCurInstr->opcode == OP_PUSH)
2897 {
2898 RTRCPTR pJmpInstrGC;
2899 int rc;
2900
2901 pCurInstrGC += opsize;
2902 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pCurInstrGC);
2903
2904 cpuJmp.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2905 disret = PATMR3DISInstr(pVM, pPatch, &cpuJmp, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2906 if ( disret
2907 && cpuJmp.pCurInstr->opcode == OP_JMP
2908 && (pJmpInstrGC = PATMResolveBranch(&cpuJmp, pCurInstrGC))
2909 )
2910 {
2911 PPATMPATCHREC pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
2912 if (pJmpPatch == 0)
2913 {
2914 /* Patch it first! */
2915 rc = PATMR3InstallPatch(pVM, pJmpInstrGC, pPatch->flags | PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT);
2916 if (rc != VINF_SUCCESS)
2917 goto failure;
2918 pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
2919 Assert(pJmpPatch);
2920 }
2921 if (pJmpPatch->patch.uState != PATCH_ENABLED)
2922 goto failure;
2923
2924 /* save original offset (in case of failures later on) */
2925 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2926
2927 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2928 pPatch->uCurPatchOffset = 0;
2929 pPatch->nrPatch2GuestRecs = 0;
2930
2931#ifdef VBOX_WITH_STATISTICS
2932 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2933 if (RT_FAILURE(rc))
2934 goto failure;
2935#endif
2936
2937 /* Install fake cli patch (to clear the virtual IF) */
2938 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2939 if (RT_FAILURE(rc))
2940 goto failure;
2941
2942 /* Add lookup record for patch to guest address translation (for the push) */
2943 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pInstrGC, PATM_LOOKUP_BOTHDIR);
2944
2945 /* Duplicate push. */
2946 rc = patmPatchGenDuplicate(pVM, pPatch, &cpuPush, pInstrGC);
2947 if (RT_FAILURE(rc))
2948 goto failure;
2949
2950 /* Generate jump to common entrypoint. */
2951 rc = patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, PATCHCODE_PTR_GC(&pJmpPatch->patch));
2952 if (RT_FAILURE(rc))
2953 goto failure;
2954
2955 /* size of patch block */
2956 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2957
2958 /* Update free pointer in patch memory. */
2959 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2960 /* Round to next 8 byte boundary */
2961 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2962
2963 /* There's no jump from guest to patch code. */
2964 pPatch->cbPatchJump = 0;
2965
2966
2967#ifdef LOG_ENABLED
2968 Log(("Patch code ----------------------------------------------------------\n"));
2969 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pPatch);
2970 Log(("Patch code ends -----------------------------------------------------\n"));
2971#endif
2972 Log(("Successfully installed IDT handler patch at %RRv\n", pInstrGC));
2973
2974 /*
2975 * Insert into patch to guest lookup tree
2976 */
2977 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2978 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2979 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2980            AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
2981
2982 pPatch->uState = PATCH_ENABLED;
2983
2984 return VINF_SUCCESS;
2985 }
2986 }
2987failure:
2988 /* Give back the patch memory we no longer need */
2989 if (orgOffsetPatchMem != (uint32_t)~0)
2990 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
2991
2992 return PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, OP_CLI, uOpSize, pPatchRec);
2993}
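
/*
 * Illustrative sketch (not part of the original code): the guest-side pattern
 * that patmIdtHandler above looks for is the classic Linux interrupt stub
 *
 *      push  <vector or error code>
 *      jmp   common_interrupt_handler
 *
 * Only the push and the jump are duplicated per vector; the common handler is
 * patched once and shared.  The predicate below is a hypothetical condensed
 * form of the checks performed above (the opcode constants are the real ones
 * from disopcode.h, the helper itself is not part of PATM).
 */
#if 0
static bool patmSketchIsPushJmpIdtStub(uint32_t uOpcodeFirst, uint32_t uOpcodeSecond, RTRCPTR pJmpTargetGC)
{
    return uOpcodeFirst  == OP_PUSH
        && uOpcodeSecond == OP_JMP
        && pJmpTargetGC  != 0;      /* PATMResolveBranch found an immediate target */
}
#endif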
2994
2995/**
2996 * Install a trampoline to call a guest trap handler directly
2997 *
2998 * @returns VBox status code.
2999 * @param pVM The VM to operate on.
3000 * @param pInstrGC Guest context pointer to privileged instruction
3001 * @param pPatchRec Patch record
3002 *
3003 */
3004static int patmInstallTrapTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3005{
3006 PPATCHINFO pPatch = &pPatchRec->patch;
3007 int rc = VERR_PATCHING_REFUSED;
3008 uint32_t orgOffsetPatchMem = ~0;
3009#ifdef LOG_ENABLED
3010 bool disret;
3011 DISCPUSTATE cpu;
3012 uint32_t opsize;
3013 char szOutput[256];
3014#endif
3015
3016 // save original offset (in case of failures later on)
3017 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3018
3019 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3020 pPatch->uCurPatchOffset = 0;
3021 pPatch->nrPatch2GuestRecs = 0;
3022
3023#ifdef VBOX_WITH_STATISTICS
3024 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3025 if (RT_FAILURE(rc))
3026 goto failure;
3027#endif
3028
3029 rc = patmPatchGenTrapEntry(pVM, pPatch, pInstrGC);
3030 if (RT_FAILURE(rc))
3031 goto failure;
3032
3033 /* size of patch block */
3034 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3035
3036 /* Update free pointer in patch memory. */
3037 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3038 /* Round to next 8 byte boundary */
3039 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3040
3041 /* There's no jump from guest to patch code. */
3042 pPatch->cbPatchJump = 0;
3043
3044#ifdef LOG_ENABLED
3045 Log(("Patch code ----------------------------------------------------------\n"));
3046 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pPatch);
3047 Log(("Patch code ends -----------------------------------------------------\n"));
3048#endif
3049
3050#ifdef LOG_ENABLED
3051 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3052 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput);
3053 Log(("TRAP handler patch: %s", szOutput));
3054#endif
3055 Log(("Successfully installed Trap Trampoline patch at %RRv\n", pInstrGC));
3056
3057 /*
3058 * Insert into patch to guest lookup tree
3059 */
3060 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3061 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3062 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3063    AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3064
3065 pPatch->uState = PATCH_ENABLED;
3066 return VINF_SUCCESS;
3067
3068failure:
3069 AssertMsgFailed(("Failed to install trap handler trampoline!!\n"));
3070
3071 /* Turn this cli patch into a dummy. */
3072 pPatch->uState = PATCH_REFUSED;
3073 pPatch->pPatchBlockOffset = 0;
3074
3075 /* Give back the patch memory we no longer need */
3076 Assert(orgOffsetPatchMem != (uint32_t)~0);
3077 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3078
3079 return rc;
3080}
3081
3082
3083#ifdef LOG_ENABLED
3084/**
3085 * Checks if the instruction is patched as a common IDT handler
3086 *
3087 * @returns true or false
3088 * @param pVM The VM to operate on.
3089 * @param pInstrGC Guest context pointer to the instruction
3090 *
3091 */
3092static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC)
3093{
3094 PPATMPATCHREC pRec;
3095
3096 pRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
3097 if (pRec && pRec->patch.flags & PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT)
3098 return true;
3099 return false;
3100}
3101#endif //LOG_ENABLED
3102
3103
3104/**
3105 * Duplicates a complete function
3106 *
3107 * @returns VBox status code.
3108 * @param pVM The VM to operate on.
3109 * @param pInstrGC Guest context pointer to the privileged instruction
3110 * @param pPatchRec Patch record
3111 *
3112 */
3113static int patmDuplicateFunction(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3114{
3115 PPATCHINFO pPatch = &pPatchRec->patch;
3116 int rc = VERR_PATCHING_REFUSED;
3117 DISCPUSTATE cpu;
3118 uint32_t orgOffsetPatchMem = ~0;
3119
3120 Log(("patmDuplicateFunction %RRv\n", pInstrGC));
3121 /* Save original offset (in case of failures later on). */
3122 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3123
3124 /* We will not go on indefinitely with call instruction handling. */
3125 if (pVM->patm.s.ulCallDepth > PATM_MAX_CALL_DEPTH)
3126 {
3127 Log(("patmDuplicateFunction: maximum call depth reached!!\n"));
3128 return VERR_PATCHING_REFUSED;
3129 }
3130
3131 pVM->patm.s.ulCallDepth++;
3132
3133#ifdef PATM_ENABLE_CALL
3134 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
3135#endif
3136
3137 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
3138
3139 pPatch->nrPatch2GuestRecs = 0;
3140 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3141 pPatch->uCurPatchOffset = 0;
3142
3143 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3144
3145 /** @note Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3146 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3147 if (RT_FAILURE(rc))
3148 goto failure;
3149
3150#ifdef VBOX_WITH_STATISTICS
3151 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3152 if (RT_FAILURE(rc))
3153 goto failure;
3154#endif
3155 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pPatch);
3156 if (rc != VINF_SUCCESS)
3157 {
3158 Log(("patmDuplicateFunction: patmRecompileCodeStream failed with %d\n", rc));
3159 goto failure;
3160 }
3161
3162 //size of patch block
3163 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3164
3165 //update free pointer in patch memory
3166 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3167 /* Round to next 8 byte boundary. */
3168 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3169
3170 pPatch->uState = PATCH_ENABLED;
3171
3172 /*
3173 * Insert into patch to guest lookup tree
3174 */
3175 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3176 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3177 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3178 AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3179 if (!rc)
3180 {
3181 rc = VERR_PATCHING_REFUSED;
3182 goto failure;
3183 }
3184
3185 /* Note that patmr3SetBranchTargets can install additional patches!! */
3186 rc = patmr3SetBranchTargets(pVM, pPatch);
3187 if (rc != VINF_SUCCESS)
3188 {
3189 Log(("patmDuplicateFunction: patmr3SetBranchTargets failed with %d\n", rc));
3190 goto failure;
3191 }
3192
3193#ifdef LOG_ENABLED
3194 Log(("Patch code ----------------------------------------------------------\n"));
3195 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pPatch);
3196 Log(("Patch code ends -----------------------------------------------------\n"));
3197#endif
3198
3199 Log(("Successfully installed function duplication patch at %RRv\n", pInstrGC));
3200
3201 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3202 pPatch->pTempInfo->nrIllegalInstr = 0;
3203
3204 pVM->patm.s.ulCallDepth--;
3205 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledFunctionPatches);
3206 return VINF_SUCCESS;
3207
3208failure:
3209 if (pPatchRec->CoreOffset.Key)
3210 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3211
3212 patmEmptyTree(pVM, &pPatch->FixupTree);
3213 pPatch->nrFixups = 0;
3214
3215 patmEmptyTree(pVM, &pPatch->JumpTree);
3216 pPatch->nrJumpRecs = 0;
3217
3218 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3219 pPatch->pTempInfo->nrIllegalInstr = 0;
3220
3221 /* Turn this patch into a dummy. */
3222 pPatch->uState = PATCH_REFUSED;
3223 pPatch->pPatchBlockOffset = 0;
3224
3225 // Give back the patch memory we no longer need
3226 Assert(orgOffsetPatchMem != (uint32_t)~0);
3227 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3228
3229 pVM->patm.s.ulCallDepth--;
3230 Log(("patmDuplicateFunction %RRv failed!!\n", pInstrGC));
3231 return rc;
3232}
3233
3234/**
3235 * Creates trampoline code to jump inside an existing patch
3236 *
3237 * @returns VBox status code.
3238 * @param pVM The VM to operate on.
3239 * @param pInstrGC Guest context pointer to the privileged instruction
3240 * @param pPatchRec Patch record
3241 *
3242 */
3243static int patmCreateTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3244{
3245 PPATCHINFO pPatch = &pPatchRec->patch;
3246 RTRCPTR pPage, pPatchTargetGC = 0;
3247 uint32_t orgOffsetPatchMem = ~0;
3248 int rc = VERR_PATCHING_REFUSED;
3249
3250 Log(("patmCreateTrampoline %RRv\n", pInstrGC));
3251 /* Save original offset (in case of failures later on). */
3252 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3253
3254 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3255 /** @todo we already checked this before */
3256 pPage = pInstrGC & PAGE_BASE_GC_MASK;
3257
3258 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3259 if (pPatchPage)
3260 {
3261 uint32_t i;
3262
3263 for (i=0;i<pPatchPage->cCount;i++)
3264 {
3265 if (pPatchPage->aPatch[i])
3266 {
3267 PPATCHINFO pPatch2 = pPatchPage->aPatch[i];
3268
3269 if ( (pPatch2->flags & PATMFL_DUPLICATE_FUNCTION)
3270 && pPatch2->uState == PATCH_ENABLED)
3271 {
3272 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch2, pInstrGC);
3273 if (pPatchTargetGC)
3274 {
3275 uint32_t offsetPatch = pPatchTargetGC - pVM->patm.s.pPatchMemGC;
3276 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch2->Patch2GuestAddrTree, offsetPatch, false);
3277 Assert(pPatchToGuestRec);
3278
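     /* Mark this location inside the existing patch as the target of an external jump and flag the owning patch accordingly. */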
3279 pPatchToGuestRec->fJumpTarget = true;
3280 Assert(pPatchTargetGC != pPatch2->pPrivInstrGC);
3281 Log(("patmCreateTrampoline: generating jump to code inside patch at %RRv\n", pPatch2->pPrivInstrGC));
3282 pPatch2->flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
3283 break;
3284 }
3285 }
3286 }
3287 }
3288 }
3289 AssertReturn(pPatchPage && pPatchTargetGC, VERR_PATCHING_REFUSED);
3290
3291 pPatch->nrPatch2GuestRecs = 0;
3292 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3293 pPatch->uCurPatchOffset = 0;
3294
3295 /** @note Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3296 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3297 if (RT_FAILURE(rc))
3298 goto failure;
3299
3300#ifdef VBOX_WITH_STATISTICS
3301 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3302 if (RT_FAILURE(rc))
3303 goto failure;
3304#endif
3305
3306 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC);
3307 if (RT_FAILURE(rc))
3308 goto failure;
3309
3310 /*
3311 * Insert into patch to guest lookup tree
3312 */
3313 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3314 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3315 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3316 AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3317 if (!rc)
3318 {
3319 rc = VERR_PATCHING_REFUSED;
3320 goto failure;
3321 }
3322
3323 /* size of patch block */
3324 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3325
3326 /* Update free pointer in patch memory. */
3327 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3328 /* Round to next 8 byte boundary */
3329 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3330
3331 /* There's no jump from guest to patch code. */
3332 pPatch->cbPatchJump = 0;
3333
3334 /* Enable the patch. */
3335 pPatch->uState = PATCH_ENABLED;
3336 /* We allow this patch to be called as a function. */
3337 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
3338 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledTrampoline);
3339 return VINF_SUCCESS;
3340
3341failure:
3342 if (pPatchRec->CoreOffset.Key)
3343 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3344
3345 patmEmptyTree(pVM, &pPatch->FixupTree);
3346 pPatch->nrFixups = 0;
3347
3348 patmEmptyTree(pVM, &pPatch->JumpTree);
3349 pPatch->nrJumpRecs = 0;
3350
3351 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3352 pPatch->pTempInfo->nrIllegalInstr = 0;
3353
3354 /* Turn this patch into a dummy. */
3355 pPatch->uState = PATCH_REFUSED;
3356 pPatch->pPatchBlockOffset = 0;
3357
3358 // Give back the patch memory we no longer need
3359 Assert(orgOffsetPatchMem != (uint32_t)~0);
3360 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3361
3362 return rc;
3363}
3364
3365
3366/**
3367 * Patch branch target function for call/jump at specified location.
3368 * (in response to a VINF_PATM_DUPLICATE_FUNCTION GC exit reason)
3369 *
3370 * @returns VBox status code.
3371 * @param pVM The VM to operate on.
3372 * @param pCtx Guest context
3373 *
3374 */
3375VMMR3DECL(int) PATMR3DuplicateFunctionRequest(PVM pVM, PCPUMCTX pCtx)
3376{
3377 RTRCPTR pBranchTarget, pPage;
3378 int rc;
3379 RTRCPTR pPatchTargetGC = 0;
3380
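     /*
      * Calling convention used by the patch code below: EDX holds the branch target address,
      * EDI the patch address used for the branch lookup cache, and EAX receives the target's
      * offset into patch memory (0 if no patch could be installed).
      */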
3381 pBranchTarget = pCtx->edx;
3382 pBranchTarget = SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pBranchTarget);
3383
3384 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3385 pPage = pBranchTarget & PAGE_BASE_GC_MASK;
3386
3387 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3388 if (pPatchPage)
3389 {
3390 uint32_t i;
3391
3392 for (i=0;i<pPatchPage->cCount;i++)
3393 {
3394 if (pPatchPage->aPatch[i])
3395 {
3396 PPATCHINFO pPatch = pPatchPage->aPatch[i];
3397
3398 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
3399 && pPatch->uState == PATCH_ENABLED)
3400 {
3401 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pBranchTarget);
3402 if (pPatchTargetGC)
3403 {
3404 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateUseExisting);
3405 break;
3406 }
3407 }
3408 }
3409 }
3410 }
3411
3412 if (pPatchTargetGC)
3413 {
3414 /* Create a trampoline that also sets PATM_INTERRUPTFLAG. */
3415 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_TRAMPOLINE);
3416 }
3417 else
3418 {
3419 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
3420 }
3421
3422 if (rc == VINF_SUCCESS)
3423 {
3424 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pBranchTarget);
3425 Assert(pPatchTargetGC);
3426 }
3427
3428 if (pPatchTargetGC)
3429 {
3430 pCtx->eax = pPatchTargetGC;
3431 pCtx->eax = pCtx->eax - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC; /* make it relative */
3432 }
3433 else
3434 {
3435 /* We add a dummy entry into the lookup cache so we won't get bombarded with the same requests over and over again. */
3436 pCtx->eax = 0;
3437 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQFailed);
3438 }
3439 Assert(PATMIsPatchGCAddr(pVM, pCtx->edi));
3440 rc = PATMAddBranchToLookupCache(pVM, pCtx->edi, pBranchTarget, pCtx->eax);
3441 AssertRC(rc);
3442
3443 pCtx->eip += PATM_ILLEGAL_INSTR_SIZE;
3444 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQSuccess);
3445 return VINF_SUCCESS;
3446}
3447
3448/**
3449 * Replaces a function call by a call to an existing function duplicate (or jmp -> jmp)
3450 *
3451 * @returns VBox status code.
3452 * @param pVM The VM to operate on.
3453 * @param pCpu Disassembly CPU structure ptr
3454 * @param pInstrGC Guest context pointer to the privileged instruction
3455 * @param pPatch Patch record
3456 *
3457 */
3458static int patmReplaceFunctionCall(PVM pVM, DISCPUSTATE *pCpu, RTRCPTR pInstrGC, PPATCHINFO pPatch)
3459{
3460 int rc = VERR_PATCHING_REFUSED;
3461 DISCPUSTATE cpu;
3462 RTRCPTR pTargetGC;
3463 PPATMPATCHREC pPatchFunction;
3464 uint32_t opsize;
3465 bool disret;
3466#ifdef LOG_ENABLED
3467 char szOutput[256];
3468#endif
3469
3470 Assert(pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL);
3471 Assert((pCpu->pCurInstr->opcode == OP_CALL || pCpu->pCurInstr->opcode == OP_JMP) && pCpu->opsize == SIZEOF_NEARJUMP32);
3472
3473 if ((pCpu->pCurInstr->opcode != OP_CALL && pCpu->pCurInstr->opcode != OP_JMP) || pCpu->opsize != SIZEOF_NEARJUMP32)
3474 {
3475 rc = VERR_PATCHING_REFUSED;
3476 goto failure;
3477 }
3478
3479 pTargetGC = PATMResolveBranch(pCpu, pInstrGC);
3480 if (pTargetGC == 0)
3481 {
3482 Log(("We don't support far jumps here!! (%08X)\n", pCpu->param1.flags));
3483 rc = VERR_PATCHING_REFUSED;
3484 goto failure;
3485 }
3486
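     /* Look up the patch record for the branch target, i.e. an already duplicated function. */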
3487 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3488 if (pPatchFunction == NULL)
3489 {
3490 for(;;)
3491 {
3492 /* It could be an indirect call (call -> jmp dest).
3493 * Note that it's dangerous to assume the jump will never change...
3494 */
3495 uint8_t *pTmpInstrHC;
3496
3497 pTmpInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pTargetGC);
3498 Assert(pTmpInstrHC);
3499 if (pTmpInstrHC == 0)
3500 break;
3501
3502 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3503 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pTargetGC, pTmpInstrHC, &opsize, NULL);
3504 if (disret == false || cpu.pCurInstr->opcode != OP_JMP)
3505 break;
3506
3507 pTargetGC = PATMResolveBranch(&cpu, pTargetGC);
3508 if (pTargetGC == 0)
3509 {
3510 break;
3511 }
3512
3513 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3514 break;
3515 }
3516 if (pPatchFunction == 0)
3517 {
3518 AssertMsgFailed(("Unable to find duplicate function %RRv\n", pTargetGC));
3519 rc = VERR_PATCHING_REFUSED;
3520 goto failure;
3521 }
3522 }
3523
3524 // make a copy of the guest code bytes that will be overwritten
3525 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
3526
3527 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3528 AssertRC(rc);
3529
3530 /* Now replace the original call in the guest code */
3531 rc = patmGenCallToPatch(pVM, pPatch, PATCHCODE_PTR_GC(&pPatchFunction->patch), true);
3532 AssertRC(rc);
3533 if (RT_FAILURE(rc))
3534 goto failure;
3535
3536 /* Lowest and highest address for write monitoring. */
3537 pPatch->pInstrGCLowest = pInstrGC;
3538 pPatch->pInstrGCHighest = pInstrGC + pCpu->opsize;
3539
3540#ifdef LOG_ENABLED
3541 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3542 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput);
3543 Log(("Call patch: %s", szOutput));
3544#endif
3545
3546 Log(("Successfully installed function replacement patch at %RRv\n", pInstrGC));
3547
3548 pPatch->uState = PATCH_ENABLED;
3549 return VINF_SUCCESS;
3550
3551failure:
3552 /* Turn this patch into a dummy. */
3553 pPatch->uState = PATCH_REFUSED;
3554
3555 return rc;
3556}
3557
3558/**
3559 * Replace the address in an MMIO instruction with the cached version.
3560 *
3561 * @returns VBox status code.
3562 * @param pVM The VM to operate on.
3563 * @param pInstrGC Guest context pointer to the privileged instruction
3564 * @param pCpu Disassembly CPU structure ptr
3565 * @param pPatch Patch record
3566 *
3567 * @note returns failure if patching is not allowed or possible
3568 *
3569 */
3570static int patmPatchMMIOInstr(PVM pVM, RTRCPTR pInstrGC, DISCPUSTATE *pCpu, PPATCHINFO pPatch)
3571{
3572 uint8_t *pPB;
3573 int rc = VERR_PATCHING_REFUSED;
3574#ifdef LOG_ENABLED
3575 DISCPUSTATE cpu;
3576 uint32_t opsize;
3577 bool disret;
3578 char szOutput[256];
3579#endif
3580
3581 Assert(pVM->patm.s.mmio.pCachedData);
3582 if (!pVM->patm.s.mmio.pCachedData)
3583 goto failure;
3584
3585 if (pCpu->param2.flags != USE_DISPLACEMENT32)
3586 goto failure;
3587
3588 pPB = pPatch->pPrivInstrHC;
3589
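     /* The 32-bit displacement occupies the last 4 bytes of the instruction (checked above); that is where the cached data address is patched in. */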
3590 /* Add relocation record for cached data access. */
3591 if (patmPatchAddReloc32(pVM, pPatch, &pPB[pCpu->opsize - sizeof(RTRCPTR)], FIXUP_ABSOLUTE, pPatch->pPrivInstrGC, pVM->patm.s.mmio.pCachedData) != VINF_SUCCESS)
3592 {
3593 Log(("Relocation failed for cached mmio address!!\n"));
3594 return VERR_PATCHING_REFUSED;
3595 }
3596#ifdef LOG_ENABLED
3597 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3598 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput);
3599 Log(("MMIO patch old instruction: %s", szOutput));
3600#endif
3601
3602 /* Save original instruction. */
3603 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3604 AssertRC(rc);
3605
3606 pPatch->cbPatchJump = pPatch->cbPrivInstr; /* bit of a misnomer in this case; size of replacement instruction. */
3607
3608 /* Replace address with that of the cached item. */
3609 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pInstrGC + pCpu->opsize - sizeof(RTRCPTR), &pVM->patm.s.mmio.pCachedData, sizeof(RTRCPTR));
3610 AssertRC(rc);
3611 if (RT_FAILURE(rc))
3612 {
3613 goto failure;
3614 }
3615
3616#ifdef LOG_ENABLED
3617 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3618 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput);
3619 Log(("MMIO patch: %s", szOutput));
3620#endif
3621 pVM->patm.s.mmio.pCachedData = 0;
3622 pVM->patm.s.mmio.GCPhys = 0;
3623 pPatch->uState = PATCH_ENABLED;
3624 return VINF_SUCCESS;
3625
3626failure:
3627 /* Turn this patch into a dummy. */
3628 pPatch->uState = PATCH_REFUSED;
3629
3630 return rc;
3631}
3632
3633
3634/**
3635 * Replace the address in an MMIO instruction with the cached version. (instruction is part of an existing patch)
3636 *
3637 * @returns VBox status code.
3638 * @param pVM The VM to operate on.
3639 * @param pInstrGC Guest context pointer to the privileged instruction
3640 * @param pPatch Patch record
3641 *
3642 * @note returns failure if patching is not allowed or possible
3643 *
3644 */
3645static int patmPatchPATMMMIOInstr(PVM pVM, RTRCPTR pInstrGC, PPATCHINFO pPatch)
3646{
3647 DISCPUSTATE cpu;
3648 uint32_t opsize;
3649 bool disret;
3650 uint8_t *pInstrHC;
3651#ifdef LOG_ENABLED
3652 char szOutput[256];
3653#endif
3654
3655 AssertReturn(pVM->patm.s.mmio.pCachedData, VERR_INVALID_PARAMETER);
3656
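     /* The instruction lives inside patch memory here, so it can be modified directly through its HC mapping. */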
3657 /* Convert GC to HC address. */
3658 pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pInstrGC);
3659 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
3660
3661 /* Disassemble mmio instruction. */
3662 cpu.mode = pPatch->uOpMode;
3663 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, NULL);
3664 if (disret == false)
3665 {
3666 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
3667 return VERR_PATCHING_REFUSED;
3668 }
3669
3670 AssertMsg(opsize <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", opsize));
3671 if (opsize > MAX_INSTR_SIZE)
3672 return VERR_PATCHING_REFUSED;
3673 if (cpu.param2.flags != USE_DISPLACEMENT32)
3674 return VERR_PATCHING_REFUSED;
3675
3676 /* Add relocation record for cached data access. */
3677 if (patmPatchAddReloc32(pVM, pPatch, &pInstrHC[cpu.opsize - sizeof(RTRCPTR)], FIXUP_ABSOLUTE) != VINF_SUCCESS)
3678 {
3679 Log(("Relocation failed for cached mmio address!!\n"));
3680 return VERR_PATCHING_REFUSED;
3681 }
3682 /* Replace address with that of the cached item. */
3683 *(RTRCPTR *)&pInstrHC[cpu.opsize - sizeof(RTRCPTR)] = pVM->patm.s.mmio.pCachedData;
3684
3685 /* Lowest and highest address for write monitoring. */
3686 pPatch->pInstrGCLowest = pInstrGC;
3687 pPatch->pInstrGCHighest = pInstrGC + cpu.opsize;
3688
3689#ifdef LOG_ENABLED
3690 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3691 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, szOutput);
3692 Log(("MMIO patch: %s", szOutput));
3693#endif
3694
3695 pVM->patm.s.mmio.pCachedData = 0;
3696 pVM->patm.s.mmio.GCPhys = 0;
3697 return VINF_SUCCESS;
3698}
3699
3700/**
3701 * Activates an int3 patch
3702 *
3703 * @returns VBox status code.
3704 * @param pVM The VM to operate on.
3705 * @param pPatch Patch record
3706 */
3707static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3708{
3709 uint8_t ASMInt3 = 0xCC;
3710 int rc;
3711
3712 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3713 Assert(pPatch->uState != PATCH_ENABLED);
3714
3715 /* Replace first opcode byte with 'int 3'. */
3716 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, &ASMInt3, sizeof(ASMInt3));
3717 AssertRC(rc);
3718
3719 pPatch->cbPatchJump = sizeof(ASMInt3);
3720
3721 return rc;
3722}
3723
3724/**
3725 * Deactivates an int3 patch
3726 *
3727 * @returns VBox status code.
3728 * @param pVM The VM to operate on.
3729 * @param pPatch Patch record
3730 */
3731static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3732{
3733 uint8_t ASMInt3 = 0xCC;
3734 int rc;
3735
3736 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3737 Assert(pPatch->uState == PATCH_ENABLED || pPatch->uState == PATCH_DIRTY);
3738
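     /* aPrivInstr holds the original byte(s) saved when the patch was installed. */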
3739 /* Restore first opcode byte. */
3740 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, sizeof(ASMInt3));
3741 AssertRC(rc);
3742 return rc;
3743}
3744
3745/**
3746 * Replace an instruction with a breakpoint (0xCC) that is handled dynamically in the guest context.
3747 *
3748 * @returns VBox status code.
3749 * @param pVM The VM to operate on.
3750 * @param pInstrGC Guest context pointer to the privileged instruction
3751 * @param pInstrHC Host context pointer to the privileged instruction
3752 * @param pCpu Disassembly CPU structure ptr
3753 * @param pPatch Patch record
3754 *
3755 * @note returns failure if patching is not allowed or possible
3756 *
3757 */
3758VMMR3DECL(int) PATMR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATCHINFO pPatch)
3759{
3760 uint8_t ASMInt3 = 0xCC;
3761 int rc;
3762
3763 /** @note Do not use patch memory here! It might be called during patch installation too. */
3764
3765#ifdef LOG_ENABLED
3766 DISCPUSTATE cpu;
3767 char szOutput[256];
3768 uint32_t opsize;
3769
3770 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3771 PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, szOutput);
3772 Log(("PATMR3PatchInstrInt3: %s", szOutput));
3773#endif
3774
3775 /* Save the original instruction. */
3776 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3777 AssertRC(rc);
3778 pPatch->cbPatchJump = sizeof(ASMInt3); /* bit of a misnomer in this case; size of replacement instruction. */
3779
3780 pPatch->flags |= PATMFL_INT3_REPLACEMENT;
3781
3782 /* Replace first opcode byte with 'int 3'. */
3783 rc = patmActivateInt3Patch(pVM, pPatch);
3784 if (RT_FAILURE(rc))
3785 goto failure;
3786
3787 /* Lowest and highest address for write monitoring. */
3788 pPatch->pInstrGCLowest = pInstrGC;
3789 pPatch->pInstrGCHighest = pInstrGC + pCpu->opsize;
3790
3791 pPatch->uState = PATCH_ENABLED;
3792 return VINF_SUCCESS;
3793
3794failure:
3795 /* Turn this patch into a dummy. */
3796 return VERR_PATCHING_REFUSED;
3797}
3798
3799#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
3800/**
3801 * Patch a jump instruction at specified location
3802 *
3803 * @returns VBox status code.
3804 * @param pVM The VM to operate on.
3805 * @param pInstrGC Guest context pointer to the privileged instruction
3806 * @param pInstrHC Host context pointer to the privileged instruction
3807 * @param pCpu Disassembly CPU structure ptr
3808 * @param pPatchRec Patch record
3809 *
3810 * @note returns failure if patching is not allowed or possible
3811 *
3812 */
3813int patmPatchJump(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATMPATCHREC pPatchRec)
3814{
3815 PPATCHINFO pPatch = &pPatchRec->patch;
3816 int rc = VERR_PATCHING_REFUSED;
3817#ifdef LOG_ENABLED
3818 bool disret;
3819 DISCPUSTATE cpu;
3820 uint32_t opsize;
3821 char szOutput[256];
3822#endif
3823
3824 pPatch->pPatchBlockOffset = 0; /* doesn't use patch memory */
3825 pPatch->uCurPatchOffset = 0;
3826 pPatch->cbPatchBlockSize = 0;
3827 pPatch->flags |= PATMFL_SINGLE_INSTRUCTION;
3828
3829 /*
3830 * Instruction replacements such as these should never be interrupted. I've added code to EM.cpp to
3831 * make sure this never happens. (unless a trap is triggered (intentionally or not))
3832 */
3833 switch (pCpu->pCurInstr->opcode)
3834 {
3835 case OP_JO:
3836 case OP_JNO:
3837 case OP_JC:
3838 case OP_JNC:
3839 case OP_JE:
3840 case OP_JNE:
3841 case OP_JBE:
3842 case OP_JNBE:
3843 case OP_JS:
3844 case OP_JNS:
3845 case OP_JP:
3846 case OP_JNP:
3847 case OP_JL:
3848 case OP_JNL:
3849 case OP_JLE:
3850 case OP_JNLE:
3851 case OP_JMP:
3852 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
3853 Assert(pCpu->param1.flags & USE_IMMEDIATE32_REL);
3854 if (!(pCpu->param1.flags & USE_IMMEDIATE32_REL))
3855 goto failure;
3856
3857 Assert(pCpu->opsize == SIZEOF_NEARJUMP32 || pCpu->opsize == SIZEOF_NEAR_COND_JUMP32);
3858 if (pCpu->opsize != SIZEOF_NEARJUMP32 && pCpu->opsize != SIZEOF_NEAR_COND_JUMP32)
3859 goto failure;
3860
3861 if (PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + pCpu->opsize))
3862 {
3863 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
3864 AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
3865 rc = VERR_PATCHING_REFUSED;
3866 goto failure;
3867 }
3868
3869 break;
3870
3871 default:
3872 goto failure;
3873 }
3874
3875 // make a copy of the guest code bytes that will be overwritten
3876 Assert(pCpu->opsize <= sizeof(pPatch->aPrivInstr));
3877 Assert(pCpu->opsize >= SIZEOF_NEARJUMP32);
3878 pPatch->cbPatchJump = pCpu->opsize;
3879
3880 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3881 AssertRC(rc);
3882
3883 /* Now insert a jump in the guest code. */
3884 /*
3885 * A conflict jump patch needs to be treated differently; we'll just replace the relative jump address with one that
3886 * references the target instruction in the conflict patch.
3887 */
3888 RTRCPTR pJmpDest = PATMR3GuestGCPtrToPatchGCPtr(pVM, pInstrGC + pCpu->opsize + (int32_t)pCpu->param1.parval);
3889
3890 AssertMsg(pJmpDest, ("PATMR3GuestGCPtrToPatchGCPtr failed for %RRv\n", pInstrGC + pCpu->opsize + (int32_t)pCpu->param1.parval));
3891 pPatch->pPatchJumpDestGC = pJmpDest;
3892
3893 rc = patmGenJumpToPatch(pVM, pPatch, true);
3894 AssertRC(rc);
3895 if (RT_FAILURE(rc))
3896 goto failure;
3897
3898 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
3899
3900#ifdef LOG_ENABLED
3901 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3902 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput);
3903 Log(("%s patch: %s", patmGetInstructionString(pPatch->opcode, pPatch->flags), szOutput));
3904#endif
3905
3906 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
3907
3908 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledJump);
3909
3910 /* Lowest and highest address for write monitoring. */
3911 pPatch->pInstrGCLowest = pInstrGC;
3912 pPatch->pInstrGCHighest = pInstrGC + pPatch->cbPatchJump;
3913
3914 pPatch->uState = PATCH_ENABLED;
3915 return VINF_SUCCESS;
3916
3917failure:
3918 /* Turn this patch into a dummy. */
3919 pPatch->uState = PATCH_REFUSED;
3920
3921 return rc;
3922}
3923#endif /* PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES */
3924
3925
3926/**
3927 * Gives hint to PATM about supervisor guest instructions
3928 *
3929 * @returns VBox status code.
3930 * @param pVM The VM to operate on.
3931 * @param pInstrGC Guest context pointer to the privileged instruction
3932 * @param flags Patch flags
3933 */
3934VMMR3DECL(int) PATMR3AddHint(PVM pVM, RTRCPTR pInstrGC, uint32_t flags)
3935{
3936 Assert(pInstrGC);
3937 Assert(flags == PATMFL_CODE32);
3938
3939 Log(("PATMR3AddHint %RRv\n", pInstrGC));
3940 return PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_INSTR_HINT);
3941}
3942
3943/**
3944 * Patch privileged instruction at specified location
3945 *
3946 * @returns VBox status code.
3947 * @param pVM The VM to operate on.
3948 * @param pInstrGC Guest context pointer to the privileged instruction (0:32 flat address)
3949 * @param flags Patch flags
3950 *
3951 * @note returns failure if patching is not allowed or possible
3952 */
3953VMMR3DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
3954{
3955 DISCPUSTATE cpu;
3956 R3PTRTYPE(uint8_t *) pInstrHC;
3957 uint32_t opsize;
3958 PPATMPATCHREC pPatchRec;
3959 PCPUMCTX pCtx = 0;
3960 bool disret;
3961 int rc;
3962 PVMCPU pVCpu = VMMGetCpu0(pVM);
3963
3964 if (!pVM || pInstrGC == 0 || (flags & ~(PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_SYSENTER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_REPLACE_FUNCTION_CALL|PATMFL_GUEST_SPECIFIC|PATMFL_INT3_REPLACEMENT|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_MMIO_ACCESS|PATMFL_TRAMPOLINE|PATMFL_INSTR_HINT|PATMFL_JUMP_CONFLICT)))
3965 {
3966 AssertFailed();
3967 return VERR_INVALID_PARAMETER;
3968 }
3969
3970 if (PATMIsEnabled(pVM) == false)
3971 return VERR_PATCHING_REFUSED;
3972
3973 /* Test for patch conflict only with patches that actually change guest code. */
3974 if (!(flags & (PATMFL_GUEST_SPECIFIC|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAMPOLINE)))
3975 {
3976 PPATCHINFO pConflictPatch = PATMFindActivePatchByEntrypoint(pVM, pInstrGC);
3977 AssertReleaseMsg(pConflictPatch == 0, ("Unable to patch overwritten instruction at %RRv (%RRv)\n", pInstrGC, pConflictPatch->pPrivInstrGC));
3978 if (pConflictPatch != 0)
3979 return VERR_PATCHING_REFUSED;
3980 }
3981
3982 if (!(flags & PATMFL_CODE32))
3983 {
3984 /** @todo Only 32-bit code right now */
3985 AssertMsgFailed(("PATMR3InstallPatch: We don't support 16-bit code at the moment!!\n"));
3986 return VERR_NOT_IMPLEMENTED;
3987 }
3988
3989 /* We ran out of patch memory; don't bother anymore. */
3990 if (pVM->patm.s.fOutOfMemory == true)
3991 return VERR_PATCHING_REFUSED;
3992
3993 /* Make sure the code selector is wide open; otherwise refuse. */
3994 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
3995 if (CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)) == 0)
3996 {
3997 RTRCPTR pInstrGCFlat = SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pInstrGC);
3998 if (pInstrGCFlat != pInstrGC)
3999 {
4000 Log(("PATMR3InstallPatch: code selector not wide open: %04x:%RRv != %RRv eflags=%08x\n", pCtx->cs, pInstrGCFlat, pInstrGC, pCtx->eflags.u32));
4001 return VERR_PATCHING_REFUSED;
4002 }
4003 }
4004
4005 /** @note the OpenBSD specific check will break if we allow additional patches to be installed (int 3) */
4006 if (!(flags & PATMFL_GUEST_SPECIFIC))
4007 {
4008 /* New code. Make sure CSAM has a go at it first. */
4009 CSAMR3CheckCode(pVM, pInstrGC);
4010 }
4011
4012 /** @note obsolete */
4013 if ( PATMIsPatchGCAddr(pVM, pInstrGC)
4014 && (flags & PATMFL_MMIO_ACCESS))
4015 {
4016 RTRCUINTPTR offset;
4017 void *pvPatchCoreOffset;
4018
4019 /* Find the patch record. */
4020 offset = pInstrGC - pVM->patm.s.pPatchMemGC;
4021 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
4022 if (pvPatchCoreOffset == NULL)
4023 {
4024 AssertMsgFailed(("PATMR3InstallPatch: patch not found at address %RRv!!\n", pInstrGC));
4025 return VERR_PATCH_NOT_FOUND; //fatal error
4026 }
4027 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
4028
4029 return patmPatchPATMMMIOInstr(pVM, pInstrGC, &pPatchRec->patch);
4030 }
4031
4032 AssertReturn(!PATMIsPatchGCAddr(pVM, pInstrGC), VERR_PATCHING_REFUSED);
4033
4034 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4035 if (pPatchRec)
4036 {
4037 Assert(!(flags & PATMFL_TRAMPOLINE));
4038
4039 /* Hints about existing patches are ignored. */
4040 if (flags & PATMFL_INSTR_HINT)
4041 return VERR_PATCHING_REFUSED;
4042
4043 if (pPatchRec->patch.uState == PATCH_DISABLE_PENDING)
4044 {
4045 Log(("PATMR3InstallPatch: disable operation is pending for patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
4046 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
4047 Assert(pPatchRec->patch.uState == PATCH_DISABLED);
4048 }
4049
4050 if (pPatchRec->patch.uState == PATCH_DISABLED)
4051 {
4052 /* A patch, for which we previously received a hint, will be enabled and turned into a normal patch. */
4053 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4054 {
4055 Log(("Enabling HINTED patch %RRv\n", pInstrGC));
4056 pPatchRec->patch.flags &= ~PATMFL_INSTR_HINT;
4057 }
4058 else
4059 Log(("Enabling patch %RRv again\n", pInstrGC));
4060
4061 /** @todo we shouldn't disable and enable patches too often (it's relatively cheap, but pointless if it always happens) */
4062 rc = PATMR3EnablePatch(pVM, pInstrGC);
4063 if (RT_SUCCESS(rc))
4064 return VWRN_PATCH_ENABLED;
4065
4066 return rc;
4067 }
4068 if ( pPatchRec->patch.uState == PATCH_ENABLED
4069 || pPatchRec->patch.uState == PATCH_DIRTY)
4070 {
4071 /*
4072 * The patch might have been overwritten.
4073 */
4074 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
4075 if (pPatchRec->patch.uState != PATCH_REFUSED && pPatchRec->patch.uState != PATCH_UNUSABLE)
4076 {
4077 /* Patch must have been overwritten; remove it and pretend nothing happened. */
4078 Log(("Patch an existing patched instruction?!? (%RRv)\n", pInstrGC));
4079 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_MMIO_ACCESS|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
4080 {
4081 if (flags & PATMFL_IDTHANDLER)
4082 pPatchRec->patch.flags |= (flags & (PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER|PATMFL_INTHANDLER)); /* update the type */
4083
4084 return VERR_PATM_ALREADY_PATCHED; /* already done once */
4085 }
4086 }
4087 rc = PATMR3RemovePatch(pVM, pInstrGC);
4088 if (RT_FAILURE(rc))
4089 return VERR_PATCHING_REFUSED;
4090 }
4091 else
4092 {
4093 AssertMsg(pPatchRec->patch.uState == PATCH_REFUSED || pPatchRec->patch.uState == PATCH_UNUSABLE, ("Patch an existing patched instruction?!? (%RRv, state=%d)\n", pInstrGC, pPatchRec->patch.uState));
4094 /* already tried it once! */
4095 return VERR_PATCHING_REFUSED;
4096 }
4097 }
4098
4099 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
4100 if (RT_FAILURE(rc))
4101 {
4102 Log(("Out of memory!!!!\n"));
4103 return VERR_NO_MEMORY;
4104 }
4105 pPatchRec->Core.Key = pInstrGC;
4106 pPatchRec->patch.uState = PATCH_REFUSED; //default
4107 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
4108 Assert(rc);
4109
4110 RTGCPHYS GCPhys;
4111 rc = PGMGstGetPage(pVCpu, pInstrGC, NULL, &GCPhys);
4112 if (rc != VINF_SUCCESS)
4113 {
4114 Log(("PGMGstGetPage failed with %Rrc\n", rc));
4115 return rc;
4116 }
4117 /* Disallow patching instructions inside ROM code; complete function duplication is allowed though. */
4118 if ( !(flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAMPOLINE))
4119 && !PGMPhysIsGCPhysNormal(pVM, GCPhys))
4120 {
4121 Log(("Code at %RGv (phys %RGp) is in a ROM, MMIO or invalid page - refused\n", pInstrGC, GCPhys));
4122 return VERR_PATCHING_REFUSED;
4123 }
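     /* Add the page offset to get the physical address of the instruction itself. */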
4124 GCPhys = GCPhys + (pInstrGC & PAGE_OFFSET_MASK);
4125 rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys, MAX_INSTR_SIZE, (void **)&pInstrHC);
4126 AssertRCReturn(rc, rc);
4127
4128 pPatchRec->patch.pPrivInstrHC = pInstrHC;
4129 pPatchRec->patch.pPrivInstrGC = pInstrGC;
4130 pPatchRec->patch.flags = flags;
4131 pPatchRec->patch.uOpMode = (flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
4132
4133 pPatchRec->patch.pInstrGCLowest = pInstrGC;
4134 pPatchRec->patch.pInstrGCHighest = pInstrGC;
4135
4136 if (!(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION | PATMFL_IDTHANDLER | PATMFL_SYSENTER | PATMFL_TRAMPOLINE)))
4137 {
4138 /*
4139 * Close proximity to an unusable patch is a possible hint that this patch would turn out to be dangerous too!
4140 */
4141 PPATMPATCHREC pPatchNear = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, (pInstrGC + SIZEOF_NEARJUMP32 - 1), false);
4142 if (pPatchNear)
4143 {
4144 if (pPatchNear->patch.uState == PATCH_UNUSABLE && pInstrGC < pPatchNear->patch.pPrivInstrGC && pInstrGC + SIZEOF_NEARJUMP32 > pPatchNear->patch.pPrivInstrGC)
4145 {
4146 Log(("Dangerous patch; would overwrite the unusable patch at %RRv\n", pPatchNear->patch.pPrivInstrGC));
4147
4148 pPatchRec->patch.uState = PATCH_UNUSABLE;
4149 /*
4150 * Leave the new patch record in place, marked unusable, to prevent us from checking this instruction over and over again.
4151 */
4152 return VERR_PATCHING_REFUSED;
4153 }
4154 }
4155 }
4156
4157 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
4158 if (pPatchRec->patch.pTempInfo == 0)
4159 {
4160 Log(("Out of memory!!!!\n"));
4161 return VERR_NO_MEMORY;
4162 }
4163
4164 cpu.mode = pPatchRec->patch.uOpMode;
4165 disret = PATMR3DISInstr(pVM, &pPatchRec->patch, &cpu, pInstrGC, pInstrHC, &opsize, NULL);
4166 if (disret == false)
4167 {
4168 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
4169 return VERR_PATCHING_REFUSED;
4170 }
4171
4172 AssertMsg(opsize <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", opsize));
4173 if (opsize > MAX_INSTR_SIZE)
4174 {
4175 return VERR_PATCHING_REFUSED;
4176 }
4177
4178 pPatchRec->patch.cbPrivInstr = opsize;
4179 pPatchRec->patch.opcode = cpu.pCurInstr->opcode;
4180
4181 /* Restricted hinting for now. */
4182 Assert(!(flags & PATMFL_INSTR_HINT) || cpu.pCurInstr->opcode == OP_CLI);
4183
4184 /* Allocate statistics slot */
4185 if (pVM->patm.s.uCurrentPatchIdx < PATM_STAT_MAX_COUNTERS)
4186 {
4187 pPatchRec->patch.uPatchIdx = pVM->patm.s.uCurrentPatchIdx++;
4188 }
4189 else
4190 {
4191 Log(("WARNING: Patch index wrap around!!\n"));
4192 pPatchRec->patch.uPatchIdx = PATM_STAT_INDEX_DUMMY;
4193 }
4194
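     /* Dispatch to the appropriate installer based on the requested patch type. */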
4195 if (pPatchRec->patch.flags & PATMFL_TRAPHANDLER)
4196 {
4197 rc = patmInstallTrapTrampoline(pVM, pInstrGC, pPatchRec);
4198 }
4199 else
4200 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION ))
4201 {
4202 rc = patmDuplicateFunction(pVM, pInstrGC, pPatchRec);
4203 }
4204 else
4205 if (pPatchRec->patch.flags & PATMFL_TRAMPOLINE)
4206 {
4207 rc = patmCreateTrampoline(pVM, pInstrGC, pPatchRec);
4208 }
4209 else
4210 if (pPatchRec->patch.flags & PATMFL_REPLACE_FUNCTION_CALL)
4211 {
4212 rc = patmReplaceFunctionCall(pVM, &cpu, pInstrGC, &pPatchRec->patch);
4213 }
4214 else
4215 if (pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT)
4216 {
4217 rc = PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4218 }
4219 else
4220 if (pPatchRec->patch.flags & PATMFL_MMIO_ACCESS)
4221 {
4222 rc = patmPatchMMIOInstr(pVM, pInstrGC, &cpu, &pPatchRec->patch);
4223 }
4224 else
4225 if (pPatchRec->patch.flags & (PATMFL_IDTHANDLER|PATMFL_SYSENTER))
4226 {
4227 if (pPatchRec->patch.flags & PATMFL_SYSENTER)
4228 pPatchRec->patch.flags |= PATMFL_IDTHANDLER; /* we treat a sysenter handler as an IDT handler */
4229
4230 rc = patmIdtHandler(pVM, pInstrGC, pInstrHC, opsize, pPatchRec);
4231#ifdef VBOX_WITH_STATISTICS
4232 if ( rc == VINF_SUCCESS
4233 && (pPatchRec->patch.flags & PATMFL_SYSENTER))
4234 {
4235 pVM->patm.s.uSysEnterPatchIdx = pPatchRec->patch.uPatchIdx;
4236 }
4237#endif
4238 }
4239 else
4240 if (pPatchRec->patch.flags & PATMFL_GUEST_SPECIFIC)
4241 {
4242 switch (cpu.pCurInstr->opcode)
4243 {
4244 case OP_SYSENTER:
4245 case OP_PUSH:
4246 rc = PATMInstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4247 if (rc == VINF_SUCCESS)
4248 {
4249 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4251 return rc;
4252 }
4253 break;
4254
4255 default:
4256 rc = VERR_NOT_IMPLEMENTED;
4257 break;
4258 }
4259 }
4260 else
4261 {
4262 switch (cpu.pCurInstr->opcode)
4263 {
4264 case OP_SYSENTER:
4265 rc = PATMInstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4266 if (rc == VINF_SUCCESS)
4267 {
4268 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4269 return VINF_SUCCESS;
4270 }
4271 break;
4272
4273#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
4274 case OP_JO:
4275 case OP_JNO:
4276 case OP_JC:
4277 case OP_JNC:
4278 case OP_JE:
4279 case OP_JNE:
4280 case OP_JBE:
4281 case OP_JNBE:
4282 case OP_JS:
4283 case OP_JNS:
4284 case OP_JP:
4285 case OP_JNP:
4286 case OP_JL:
4287 case OP_JNL:
4288 case OP_JLE:
4289 case OP_JNLE:
4290 case OP_JECXZ:
4291 case OP_LOOP:
4292 case OP_LOOPNE:
4293 case OP_LOOPE:
4294 case OP_JMP:
4295 if (pPatchRec->patch.flags & PATMFL_JUMP_CONFLICT)
4296 {
4297 rc = patmPatchJump(pVM, pInstrGC, pInstrHC, &cpu, pPatchRec);
4298 break;
4299 }
4300 return VERR_NOT_IMPLEMENTED;
4301#endif
4302
4303 case OP_PUSHF:
4304 case OP_CLI:
4305 Log(("PATMR3InstallPatch %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4306 rc = PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, cpu.pCurInstr->opcode, opsize, pPatchRec);
4307 break;
4308
4309 case OP_STR:
4310 case OP_SGDT:
4311 case OP_SLDT:
4312 case OP_SIDT:
4313 case OP_CPUID:
4314 case OP_LSL:
4315 case OP_LAR:
4316 case OP_SMSW:
4317 case OP_VERW:
4318 case OP_VERR:
4319 case OP_IRET:
4320 rc = PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4321 break;
4322
4323 default:
4324 return VERR_NOT_IMPLEMENTED;
4325 }
4326 }
4327
4328 if (rc != VINF_SUCCESS)
4329 {
4330 if (pPatchRec && pPatchRec->patch.nrPatch2GuestRecs)
4331 {
4332 patmEmptyTreeU32(pVM, &pPatchRec->patch.Patch2GuestAddrTree);
4333 pPatchRec->patch.nrPatch2GuestRecs = 0;
4334 }
4335 pVM->patm.s.uCurrentPatchIdx--;
4336 }
4337 else
4338 {
4339 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
4340 AssertRCReturn(rc, rc);
4341
4342 /* Keep track upper and lower boundaries of patched instructions */
4343 if (pPatchRec->patch.pInstrGCLowest < pVM->patm.s.pPatchedInstrGCLowest)
4344 pVM->patm.s.pPatchedInstrGCLowest = pPatchRec->patch.pInstrGCLowest;
4345 if (pPatchRec->patch.pInstrGCHighest > pVM->patm.s.pPatchedInstrGCHighest)
4346 pVM->patm.s.pPatchedInstrGCHighest = pPatchRec->patch.pInstrGCHighest;
4347
4348 Log(("Patch lowest %RRv highest %RRv\n", pPatchRec->patch.pInstrGCLowest, pPatchRec->patch.pInstrGCHighest));
4349 Log(("Global lowest %RRv highest %RRv\n", pVM->patm.s.pPatchedInstrGCLowest, pVM->patm.s.pPatchedInstrGCHighest));
4350
4351 STAM_COUNTER_ADD(&pVM->patm.s.StatInstalled, 1);
4352 STAM_COUNTER_ADD(&pVM->patm.s.StatPATMMemoryUsed, pPatchRec->patch.cbPatchBlockSize);
4353
4354 rc = VINF_SUCCESS;
4355
4356 /* Patch hints are not enabled by default; only when they are actually encountered. */
4357 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4358 {
4359 rc = PATMR3DisablePatch(pVM, pInstrGC);
4360 AssertRCReturn(rc, rc);
4361 }
4362
4363#ifdef VBOX_WITH_STATISTICS
4364 /* Register statistics counter */
4365 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
4366 {
4367 STAMR3RegisterCallback(pVM, &pPatchRec->patch, STAMVISIBILITY_NOT_GUI, STAMUNIT_GOOD_BAD, patmResetStat, patmPrintStat, "Patch statistics",
4368 "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4369#ifndef DEBUG_sandervl
4370 /* Full breakdown for the GUI. */
4371 STAMR3RegisterF(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx], STAMTYPE_RATIO_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_GOOD_BAD, PATMPatchType(pVM, &pPatchRec->patch),
4372 "/PATM/Stats/PatchBD/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4373 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchBlockSize,STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchBlockSize", pPatchRec->patch.pPrivInstrGC);
4374 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchJump, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchJump", pPatchRec->patch.pPrivInstrGC);
4375 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPrivInstr, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPrivInstr", pPatchRec->patch.pPrivInstrGC);
4376 STAMR3RegisterF(pVM, &pPatchRec->patch.cCodeWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cCodeWrites", pPatchRec->patch.pPrivInstrGC);
4377 STAMR3RegisterF(pVM, &pPatchRec->patch.cInvalidWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cInvalidWrites", pPatchRec->patch.pPrivInstrGC);
4378 STAMR3RegisterF(pVM, &pPatchRec->patch.cTraps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cTraps", pPatchRec->patch.pPrivInstrGC);
4379 STAMR3RegisterF(pVM, &pPatchRec->patch.flags, STAMTYPE_X32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/flags", pPatchRec->patch.pPrivInstrGC);
4380 STAMR3RegisterF(pVM, &pPatchRec->patch.nrJumpRecs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrJumpRecs", pPatchRec->patch.pPrivInstrGC);
4381 STAMR3RegisterF(pVM, &pPatchRec->patch.nrFixups, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrFixups", pPatchRec->patch.pPrivInstrGC);
4382 STAMR3RegisterF(pVM, &pPatchRec->patch.opcode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/opcode", pPatchRec->patch.pPrivInstrGC);
4383 STAMR3RegisterF(pVM, &pPatchRec->patch.uOldState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOldState", pPatchRec->patch.pPrivInstrGC);
4384 STAMR3RegisterF(pVM, &pPatchRec->patch.uOpMode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOpMode", pPatchRec->patch.pPrivInstrGC);
4385 /// @todo change the state to be a callback so we can get a state mnemonic instead.
4386 STAMR3RegisterF(pVM, &pPatchRec->patch.uState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uState", pPatchRec->patch.pPrivInstrGC);
4387#endif
4388 }
4389#endif
4390 }
4391 return rc;
4392}
4393
4394/**
4395 * Query instruction size
4396 *
4397 * @returns VBox status code.
4398 * @param pVM The VM to operate on.
4399 * @param pPatch Patch record
4400 * @param pInstrGC Instruction address
4401 */
4402static uint32_t patmGetInstrSize(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
4403{
4404 uint8_t *pInstrHC;
4405
4406 int rc = PGMPhysGCPtr2R3Ptr(VMMGetCpu0(pVM), pInstrGC, (PRTR3PTR)&pInstrHC);
4407 if (rc == VINF_SUCCESS)
4408 {
4409 DISCPUSTATE cpu;
4410 bool disret;
4411 uint32_t opsize;
4412
4413 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
4414 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, NULL, PATMREAD_ORGCODE | PATMREAD_NOCHECK);
4415 if (disret)
4416 return opsize;
4417 }
4418 return 0;
4419}
4420
4421/**
4422 * Add patch to page record
4423 *
4424 * @returns VBox status code.
4425 * @param pVM The VM to operate on.
4426 * @param pPage Page address
4427 * @param pPatch Patch record
4428 */
4429int patmAddPatchToPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4430{
4431 PPATMPATCHPAGE pPatchPage;
4432 int rc;
4433
4434 Log(("patmAddPatchToPage: insert patch %RHv to page %RRv\n", pPatch, pPage));
4435
4436 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4437 if (pPatchPage)
4438 {
4439 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4440 if (pPatchPage->cCount == pPatchPage->cMaxPatches)
4441 {
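     /* The array is full; grow it by PATMPATCHPAGE_PREALLOC_INCREMENT entries and copy over the old pointers. */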
4442 uint32_t cMaxPatchesOld = pPatchPage->cMaxPatches;
4443 PPATCHINFO *paPatchOld = pPatchPage->aPatch;
4444
4445 pPatchPage->cMaxPatches += PATMPATCHPAGE_PREALLOC_INCREMENT;
4446 rc = MMHyperAlloc(pVM, sizeof(PPATCHINFO)*pPatchPage->cMaxPatches, 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage->aPatch);
4447 if (RT_FAILURE(rc))
4448 {
4449 Log(("Out of memory!!!!\n"));
4450 return VERR_NO_MEMORY;
4451 }
4452 memcpy(pPatchPage->aPatch, paPatchOld, cMaxPatchesOld*sizeof(PPATCHINFO));
4453 MMHyperFree(pVM, paPatchOld);
4454 }
4455 pPatchPage->aPatch[pPatchPage->cCount] = pPatch;
4456 pPatchPage->cCount++;
4457 }
4458 else
4459 {
4460 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHPAGE), 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage);
4461 if (RT_FAILURE(rc))
4462 {
4463 Log(("Out of memory!!!!\n"));
4464 return VERR_NO_MEMORY;
4465 }
4466 pPatchPage->Core.Key = pPage;
4467 pPatchPage->cCount = 1;
4468 pPatchPage->cMaxPatches = PATMPATCHPAGE_PREALLOC_INCREMENT;
4469
4470 rc = MMHyperAlloc(pVM, sizeof(PPATCHINFO)*PATMPATCHPAGE_PREALLOC_INCREMENT, 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage->aPatch);
4471 if (RT_FAILURE(rc))
4472 {
4473 Log(("Out of memory!!!!\n"));
4474 MMHyperFree(pVM, pPatchPage);
4475 return VERR_NO_MEMORY;
4476 }
4477 pPatchPage->aPatch[0] = pPatch;
4478
4479 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, &pPatchPage->Core);
4480 Assert(rc);
4481 pVM->patm.s.cPageRecords++;
4482
4483 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageInserted);
4484 }
4485 CSAMR3MonitorPage(pVM, pPage, CSAM_TAG_PATM);
4486
4487 /* Get the closest guest instruction (from below) */
4488 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4489 Assert(pGuestToPatchRec);
4490 if (pGuestToPatchRec)
4491 {
4492 LogFlow(("patmAddPatchToPage: lowest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pLowestAddrGC));
4493 if ( pPatchPage->pLowestAddrGC == 0
4494 || pPatchPage->pLowestAddrGC > (RTRCPTR)pGuestToPatchRec->Core.Key)
4495 {
4496 RTRCUINTPTR offset;
4497
4498 pPatchPage->pLowestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4499
4500 offset = pPatchPage->pLowestAddrGC & PAGE_OFFSET_MASK;
4501 /* If we're too close to the page boundary, then make sure an instruction from the previous page doesn't cross the boundary itself. */
4502 if (offset && offset < MAX_INSTR_SIZE)
4503 {
4504 /* Get the closest guest instruction (from above) */
4505 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage-1, false);
4506
4507 if (pGuestToPatchRec)
4508 {
4509 uint32_t size = patmGetInstrSize(pVM, pPatch, (RTRCPTR)pGuestToPatchRec->Core.Key);
4510 if ((RTRCUINTPTR)pGuestToPatchRec->Core.Key + size > pPage)
4511 {
4512 pPatchPage->pLowestAddrGC = pPage;
4513 LogFlow(("patmAddPatchToPage: new lowest %RRv\n", pPatchPage->pLowestAddrGC));
4514 }
4515 }
4516 }
4517 }
4518 }
4519
4520 /* Get the closest guest instruction (from above) */
4521 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage+PAGE_SIZE-1, false);
4522 Assert(pGuestToPatchRec);
4523 if (pGuestToPatchRec)
4524 {
4525 LogFlow(("patmAddPatchToPage: highest patch page address %RRv current highest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pHighestAddrGC));
4526 if ( pPatchPage->pHighestAddrGC == 0
4527 || pPatchPage->pHighestAddrGC <= (RTRCPTR)pGuestToPatchRec->Core.Key)
4528 {
4529 pPatchPage->pHighestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4530 /* Increase by instruction size. */
4531 uint32_t size = patmGetInstrSize(pVM, pPatch, pPatchPage->pHighestAddrGC);
4532//// Assert(size);
4533 pPatchPage->pHighestAddrGC += size;
4534 LogFlow(("patmAddPatchToPage: new highest %RRv\n", pPatchPage->pHighestAddrGC));
4535 }
4536 }
4537
4538 return VINF_SUCCESS;
4539}
4540
4541/**
4542 * Remove patch from page record
4543 *
4544 * @returns VBox status code.
4545 * @param pVM The VM to operate on.
4546 * @param pPage Page address
4547 * @param pPatch Patch record
4548 */
4549int patmRemovePatchFromPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4550{
4551 PPATMPATCHPAGE pPatchPage;
4552 int rc;
4553
4554 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4555 Assert(pPatchPage);
4556
4557 if (!pPatchPage)
4558 return VERR_INVALID_PARAMETER;
4559
4560 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4561
4562 Log(("patmRemovePatchFromPage: remove patch %RHv from page %RRv\n", pPatch, pPage));
4563 if (pPatchPage->cCount > 1)
4564 {
4565 uint32_t i;
4566
4567 /* Used by multiple patches */
4568 for (i=0;i<pPatchPage->cCount;i++)
4569 {
4570 if (pPatchPage->aPatch[i] == pPatch)
4571 {
4572 pPatchPage->aPatch[i] = 0;
4573 break;
4574 }
4575 }
4576 /* close the gap between the remaining pointers. */
4577 if (i < pPatchPage->cCount - 1)
4578 {
4579 memcpy(&pPatchPage->aPatch[i], &pPatchPage->aPatch[i+1], sizeof(PPATCHINFO)*(pPatchPage->cCount - (i+1)));
4580 }
4581 AssertMsg(i < pPatchPage->cCount, ("Unable to find patch %RHv in page %RRv\n", pPatch, pPage));
4582
4583 pPatchPage->cCount--;
4584 }
4585 else
4586 {
4587 PPATMPATCHPAGE pPatchNode;
4588
4589 Log(("patmRemovePatchFromPage %RRv\n", pPage));
4590
4591 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageRemoved);
4592 pPatchNode = (PPATMPATCHPAGE)RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4593 Assert(pPatchNode && pPatchNode == pPatchPage);
4594
4595 Assert(pPatchPage->aPatch);
4596 rc = MMHyperFree(pVM, pPatchPage->aPatch);
4597 AssertRC(rc);
4598 rc = MMHyperFree(pVM, pPatchPage);
4599 AssertRC(rc);
4600 pVM->patm.s.cPageRecords--;
4601 }
4602 return VINF_SUCCESS;
4603}
4604
4605/**
4606 * Insert page records for all guest pages that contain instructions that were recompiled for this patch
4607 *
4608 * @returns VBox status code.
4609 * @param pVM The VM to operate on.
4610 * @param pPatch Patch record
4611 */
4612int patmInsertPatchPages(PVM pVM, PPATCHINFO pPatch)
4613{
4614 int rc;
4615 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4616
4617 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4618 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4619 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4620
4621 /** @todo optimize better (large gaps between current and next used page) */
4622 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4623 {
4624 /* Get the closest guest instruction (from above) */
4625 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4626 if ( pGuestToPatchRec
4627 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage)
4628 )
4629 {
4630 /* Code in page really patched -> add record */
4631 rc = patmAddPatchToPage(pVM, pPage, pPatch);
4632 AssertRC(rc);
4633 }
4634 }
4635 pPatch->flags |= PATMFL_CODE_MONITORED;
4636 return VINF_SUCCESS;
4637}
4638
4639/**
4640 * Remove page records for all guest pages that contain instructions that were recompiled for this patch
4641 *
4642 * @returns VBox status code.
4643 * @param pVM The VM to operate on.
4644 * @param pPatch Patch record
4645 */
4646int patmRemovePatchPages(PVM pVM, PPATCHINFO pPatch)
4647{
4648 int rc;
4649 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4650
4651 /* Remove the pages that contain patched instructions from the lookup tree used for detecting self-modifying code. */
4652 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4653 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4654
4655 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4656 {
4657 /* Get the closest guest instruction (from above) */
4658 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4659 if ( pGuestToPatchRec
4660 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage) /** @todo bird: PAGE_ADDRESS is for the current context really. check out these. */
4661 )
4662 {
4663 /* Code in page really patched -> remove record */
4664 rc = patmRemovePatchFromPage(pVM, pPage, pPatch);
4665 AssertRC(rc);
4666 }
4667 }
4668 pPatch->flags &= ~PATMFL_CODE_MONITORED;
4669 return VINF_SUCCESS;
4670}
4671
4672/**
4673 * Notifies PATM about a (potential) write to code that has been patched.
4674 *
4675 * @returns VBox status code.
4676 * @param pVM The VM to operate on.
4677 * @param GCPtr GC pointer to write address
4678 * @param cbWrite Nr of bytes to write
4679 *
4680 */
4681VMMR3DECL(int) PATMR3PatchWrite(PVM pVM, RTRCPTR GCPtr, uint32_t cbWrite)
4682{
4683 RTRCUINTPTR pWritePageStart, pWritePageEnd, pPage;
4684
4685 Log(("PATMR3PatchWrite %RRv %x\n", GCPtr, cbWrite));
4686
4687 Assert(VM_IS_EMT(pVM));
4688
4689 /* Quick boundary check */
4690 if ( GCPtr < pVM->patm.s.pPatchedInstrGCLowest
4691 || GCPtr > pVM->patm.s.pPatchedInstrGCHighest
4692 )
4693 return VINF_SUCCESS;
4694
4695 STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWrite, a);
4696
4697 pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
4698 pWritePageEnd = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;
4699
4700 for (pPage = pWritePageStart; pPage <= pWritePageEnd; pPage += PAGE_SIZE)
4701 {
4702loop_start:
4703 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4704 if (pPatchPage)
4705 {
4706 uint32_t i;
4707 bool fValidPatchWrite = false;
4708
4709 /* Quick check to see if the write is in the patched part of the page */
4710 if ( pPatchPage->pLowestAddrGC > (RTRCPTR)((RTRCUINTPTR)GCPtr + cbWrite - 1)
4711 || pPatchPage->pHighestAddrGC < GCPtr)
4712 {
4713 break;
4714 }
4715
4716 for (i=0;i<pPatchPage->cCount;i++)
4717 {
4718 if (pPatchPage->aPatch[i])
4719 {
4720 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4721 RTRCPTR pPatchInstrGC;
4722 //unused: bool fForceBreak = false;
4723
4724 Assert(pPatchPage->aPatch[i]->flags & PATMFL_CODE_MONITORED);
4725 /** @todo inefficient and includes redundant checks for multiple pages. */
4726 for (uint32_t j=0; j<cbWrite; j++)
4727 {
4728 RTRCPTR pGuestPtrGC = (RTRCPTR)((RTRCUINTPTR)GCPtr + j);
4729
4730 if ( pPatch->cbPatchJump
4731 && pGuestPtrGC >= pPatch->pPrivInstrGC
4732 && pGuestPtrGC < pPatch->pPrivInstrGC + pPatch->cbPatchJump)
4733 {
4734 /* The guest is about to overwrite the 5 byte jump to patch code. Remove the patch. */
4735 Log(("PATMR3PatchWrite: overwriting jump to patch code -> remove patch.\n"));
4736 int rc = PATMR3RemovePatch(pVM, pPatch->pPrivInstrGC);
4737 if (rc == VINF_SUCCESS)
4738 /** @note jump back to the start as the pPatchPage has been deleted or changed */
4739 goto loop_start;
4740
4741 continue;
4742 }
4743
4744 /* Find the closest instruction from below; the above quick check ensured that we are indeed in patched code */
4745 pPatchInstrGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4746 if (!pPatchInstrGC)
4747 {
4748 RTRCPTR pClosestInstrGC;
4749 uint32_t size;
4750
4751 pPatchInstrGC = patmGuestGCPtrToClosestPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4752 if (pPatchInstrGC)
4753 {
4754 pClosestInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchInstrGC);
4755 Assert(pClosestInstrGC <= pGuestPtrGC);
4756 size = patmGetInstrSize(pVM, pPatch, pClosestInstrGC);
4757 /* Check if this is not a write into a gap between two patches */
4758 if (pClosestInstrGC + size - 1 < pGuestPtrGC)
4759 pPatchInstrGC = 0;
4760 }
4761 }
4762 if (pPatchInstrGC)
4763 {
4764 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
4765
4766 fValidPatchWrite = true;
4767
4768 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
4769 Assert(pPatchToGuestRec);
4770 if (pPatchToGuestRec && !pPatchToGuestRec->fDirty)
4771 {
4772 Log(("PATMR3PatchWrite: Found patched instruction %RRv -> %RRv\n", pGuestPtrGC, pPatchInstrGC));
4773
4774 if (++pPatch->cCodeWrites > PATM_MAX_CODE_WRITES)
4775 {
4776 LogRel(("PATM: Disable block at %RRv - write %RRv-%RRv\n", pPatch->pPrivInstrGC, pGuestPtrGC, pGuestPtrGC+cbWrite));
4777
4778 PATMR3MarkDirtyPatch(pVM, pPatch);
4779
4780 /** @note jump back to the start as the pPatchPage has been deleted or changed */
4781 goto loop_start;
4782 }
4783 else
4784 {
4785 /* Replace the patch instruction with a breakpoint; when it's hit, then we'll attempt to recompile the instruction again. */
4786 uint8_t *pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pPatchInstrGC);
4787
4788 pPatchToGuestRec->u8DirtyOpcode = *pInstrHC;
4789 pPatchToGuestRec->fDirty = true;
4790
4791 *pInstrHC = 0xCC;
4792
4793 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirty);
4794 }
4795 }
4796 /* else already marked dirty */
4797 }
4798 }
4799 }
4800 } /* for each patch */
4801
4802 if (fValidPatchWrite == false)
4803 {
4804 /* Write to a part of the page that either:
4805 * - doesn't contain any code (shared code/data); rather unlikely
4806 * - old code page that's no longer in active use.
4807 */
4808invalid_write_loop_start:
4809 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4810
4811 if (pPatchPage)
4812 {
4813 for (i=0;i<pPatchPage->cCount;i++)
4814 {
4815 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4816
4817 if (pPatch->cInvalidWrites > PATM_MAX_INVALID_WRITES)
4818 {
4819 /** @note possibly dangerous assumption that all future writes will be harmless. */
4820 if (pPatch->flags & PATMFL_IDTHANDLER)
4821 {
4822 LogRel(("PATM: Stop monitoring IDT handler pages at %RRv - invalid write %RRv-%RRv (this is not a fatal error)\n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
4823
4824 Assert(pPatch->flags & PATMFL_CODE_MONITORED);
4825 int rc = patmRemovePatchPages(pVM, pPatch);
4826 AssertRC(rc);
4827 }
4828 else
4829 {
4830 LogRel(("PATM: Disable block at %RRv - invalid write %RRv-%RRv \n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
4831 PATMR3MarkDirtyPatch(pVM, pPatch);
4832 }
4833 /** @note jump back to the start as the pPatchPage has been deleted or changed */
4834 goto invalid_write_loop_start;
4835 }
4836 } /* for */
4837 }
4838 }
4839 }
4840 }
4841 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWrite, a);
4842 return VINF_SUCCESS;
4843
4844}
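/* Minimal usage sketch (illustrative; the caller context and the GCPtrWrite and
 * cbWrite values are assumed to come from an intercepted guest write):
 *
 *     int rcWrite = PATMR3PatchWrite(pVM, GCPtrWrite, cbWrite);
 *     AssertRC(rcWrite);  // reports VINF_SUCCESS; dirty marking and patch
 *                         // removal are handled internally
 */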
4845
4846/**
4847 * Disable all patches in a flushed page
4848 *
4849 * @returns VBox status code
4850 * @param pVM The VM to operate on.
4851 * @param addr GC address of the page to flush
4852 */
4853/** @note Currently only called by CSAMR3FlushPage; optimization to avoid having to double check if the physical address has changed
4854 */
4855VMMR3DECL(int) PATMR3FlushPage(PVM pVM, RTRCPTR addr)
4856{
4857 addr &= PAGE_BASE_GC_MASK;
4858
4859 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, addr);
4860 if (pPatchPage)
4861 {
4862 int i;
4863
4864 /* From top to bottom as the array is modified by PATMR3MarkDirtyPatch. */
4865 for (i=(int)pPatchPage->cCount-1;i>=0;i--)
4866 {
4867 if (pPatchPage->aPatch[i])
4868 {
4869 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4870
4871 Log(("PATMR3FlushPage %RRv remove patch at %RRv\n", addr, pPatch->pPrivInstrGC));
4872 PATMR3MarkDirtyPatch(pVM, pPatch);
4873 }
4874 }
4875 STAM_COUNTER_INC(&pVM->patm.s.StatFlushed);
4876 }
4877 return VINF_SUCCESS;
4878}
4879
4880/**
4881 * Checks whether the instruction at the specified address has already been patched.
4882 *
4883 * @returns boolean, patched or not
4884 * @param pVM The VM to operate on.
4885 * @param pInstrGC Guest context pointer to instruction
4886 */
4887VMMR3DECL(bool) PATMR3HasBeenPatched(PVM pVM, RTRCPTR pInstrGC)
4888{
4889 PPATMPATCHREC pPatchRec;
4890 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4891 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
4892 return true;
4893 return false;
4894}
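/* Usage sketch (illustrative; pInstrGC is an assumed flat guest address):
 *
 *     if (PATMR3HasBeenPatched(pVM, pInstrGC))
 *         Log(("Instruction at %RRv is covered by an enabled patch\n", pInstrGC));
 */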
4895
4896/**
4897 * Query the opcode of the original code that was overwritten by the 5-byte patch jump.
4898 *
4899 * @returns VBox status code.
4900 * @param pVM The VM to operate on.
4901 * @param pInstrGC GC address of instr
4902 * @param pByte opcode byte pointer (OUT)
4903 *
4904 */
4905VMMR3DECL(int) PATMR3QueryOpcode(PVM pVM, RTRCPTR pInstrGC, uint8_t *pByte)
4906{
4907 PPATMPATCHREC pPatchRec;
4908
4909 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
4910
4911 /* Shortcut. */
4912 if ( !PATMIsEnabled(pVM)
4913 || pInstrGC < pVM->patm.s.pPatchedInstrGCLowest
4914 || pInstrGC > pVM->patm.s.pPatchedInstrGCHighest)
4915 {
4916 return VERR_PATCH_NOT_FOUND;
4917 }
4918
4919 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
4920 // if the patch is enabled and the pointer lies within 5 bytes of this priv instr ptr, then we've got a hit!
4921 if ( pPatchRec
4922 && pPatchRec->patch.uState == PATCH_ENABLED
4923 && pInstrGC >= pPatchRec->patch.pPrivInstrGC
4924 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
4925 {
4926 RTRCPTR offset = pInstrGC - pPatchRec->patch.pPrivInstrGC;
4927 *pByte = pPatchRec->patch.aPrivInstr[offset];
4928
4929 if (pPatchRec->patch.cbPatchJump == 1)
4930 {
4931 Log(("PATMR3QueryOpcode: returning opcode %2X for instruction at %RRv\n", *pByte, pInstrGC));
4932 }
4933 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
4934 return VINF_SUCCESS;
4935 }
4936 return VERR_PATCH_NOT_FOUND;
4937}
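/* Usage sketch (illustrative): recover the original byte that the 5-byte patch
 * jump overwrote, e.g. when reading back patched guest code.
 *
 *     uint8_t bOrg;
 *     if (RT_SUCCESS(PATMR3QueryOpcode(pVM, pInstrGC, &bOrg)))
 *         Log(("Original byte at %RRv is %02X\n", pInstrGC, bOrg));
 */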
4938
4939/**
4940 * Disable patch for privileged instruction at specified location
4941 *
4942 * @returns VBox status code.
4943 * @param pVM The VM to operate on.
4944 * @param   pInstrGC    Guest context pointer to the privileged instruction
4945 *
4946 * @note    Returns failure if patching is not allowed or not possible.
4947 *
4948 */
4949VMMR3DECL(int) PATMR3DisablePatch(PVM pVM, RTRCPTR pInstrGC)
4950{
4951 PPATMPATCHREC pPatchRec;
4952 PPATCHINFO pPatch;
4953
4954 Log(("PATMR3DisablePatch: %RRv\n", pInstrGC));
4955 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4956 if (pPatchRec)
4957 {
4958 int rc = VINF_SUCCESS;
4959
4960 pPatch = &pPatchRec->patch;
4961
4962 /* Already disabled? */
4963 if (pPatch->uState == PATCH_DISABLED)
4964 return VINF_SUCCESS;
4965
4966 /* Clear the IDT entries for the patch we're disabling. */
4967 /** @note very important as we clear IF in the patch itself */
4968 /** @todo this needs to be changed */
4969 if (pPatch->flags & PATMFL_IDTHANDLER)
4970 {
4971 uint32_t iGate;
4972
4973 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
4974 if (iGate != (uint32_t)~0)
4975 {
4976 TRPMR3SetGuestTrapHandler(pVM, iGate, TRPM_INVALID_HANDLER);
4977 if (++cIDTHandlersDisabled < 256)
4978 LogRel(("PATM: Disabling IDT %x patch handler %RRv\n", iGate, pInstrGC));
4979 }
4980 }
4981
4982        /* Mark the entry with a breakpoint in case somebody else calls it later on (cli patch used as a function, duplicate function, trampoline or IDT patches). */
4983 if ( pPatch->pPatchBlockOffset
4984 && pPatch->uState == PATCH_ENABLED)
4985 {
4986 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
4987 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
4988 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
4989 }
4990
4991 /* IDT or function patches haven't changed any guest code. */
4992 if (pPatch->flags & PATMFL_PATCHED_GUEST_CODE)
4993 {
4994 Assert(pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP);
4995 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK)));
4996
4997 if (pPatch->uState != PATCH_REFUSED)
4998 {
4999 AssertMsg(pPatch->pPrivInstrHC, ("Invalid HC pointer?!? (%RRv)\n", pInstrGC));
5000 Assert(pPatch->cbPatchJump);
5001
5002                /* pPrivInstrHC is probably not valid anymore. */
5003 rc = PGMPhysGCPtr2R3Ptr(VMMGetCpu0(pVM), pPatchRec->patch.pPrivInstrGC, (PRTR3PTR)&pPatchRec->patch.pPrivInstrHC);
5004 if (rc == VINF_SUCCESS)
5005 {
5006 uint8_t temp[16];
5007
5008 Assert(pPatch->cbPatchJump < sizeof(temp));
5009
5010 /* Let's first check if the guest code is still the same. */
5011 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5012 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5013 if (rc == VINF_SUCCESS)
5014 {
5015 RTRCINTPTR displ = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);
5016
5017 if ( temp[0] != 0xE9 /* jmp opcode */
5018 || *(RTRCINTPTR *)(&temp[1]) != displ
5019 )
5020 {
5021                            Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5022 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5023 /* Remove it completely */
5024 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5025 rc = PATMR3RemovePatch(pVM, pInstrGC);
5026 AssertRC(rc);
5027 return VWRN_PATCH_REMOVED;
5028 }
5029 }
5030 patmRemoveJumpToPatch(pVM, pPatch);
5031
5032 }
5033 else
5034 {
5035 Log(("PATMR3DisablePatch: unable to disable patch -> mark PATCH_DISABLE_PENDING\n"));
5036 pPatch->uState = PATCH_DISABLE_PENDING;
5037 }
5038 }
5039 else
5040 {
5041 AssertMsgFailed(("Patch was refused!\n"));
5042 return VERR_PATCH_ALREADY_DISABLED;
5043 }
5044 }
5045 else
5046 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5047 {
5048 uint8_t temp[16];
5049
5050 Assert(pPatch->cbPatchJump < sizeof(temp));
5051
5052 /* Let's first check if the guest code is still the same. */
5053 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5054 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5055 if (rc == VINF_SUCCESS)
5056 {
5057 if (temp[0] != 0xCC)
5058 {
5059                    Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5060 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5061 /* Remove it completely */
5062 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5063 rc = PATMR3RemovePatch(pVM, pInstrGC);
5064 AssertRC(rc);
5065 return VWRN_PATCH_REMOVED;
5066 }
5067 patmDeactivateInt3Patch(pVM, pPatch);
5068 }
5069 }
5070
5071 if (rc == VINF_SUCCESS)
5072 {
5073 /* Save old state and mark this one as disabled (so it can be enabled later on). */
5074 if (pPatch->uState == PATCH_DISABLE_PENDING)
5075 {
5076 /* Just to be safe, let's make sure this one can never be reused; the patch might be marked dirty already (int3 at start) */
5077 pPatch->uState = PATCH_UNUSABLE;
5078 }
5079 else
5080 if (pPatch->uState != PATCH_DIRTY)
5081 {
5082 pPatch->uOldState = pPatch->uState;
5083 pPatch->uState = PATCH_DISABLED;
5084 }
5085 STAM_COUNTER_ADD(&pVM->patm.s.StatDisabled, 1);
5086 }
5087
5088 Log(("PATMR3DisablePatch: disabled patch at %RRv\n", pInstrGC));
5089 return VINF_SUCCESS;
5090 }
5091 Log(("Patch not found!\n"));
5092 return VERR_PATCH_NOT_FOUND;
5093}
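/* Usage sketch (illustrative): callers must treat VWRN_PATCH_REMOVED as success,
 * as the patch may be removed completely when the guest code has changed (see
 * the conflict handling below for an in-tree example).
 *
 *     int rcDisable = PATMR3DisablePatch(pVM, pPrivInstrGC);
 *     if (rcDisable == VWRN_PATCH_REMOVED)
 *         rcDisable = VINF_SUCCESS;   // nothing left to disable
 */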
5094
5095/**
5096 * Permanently disable patch for privileged instruction at specified location
5097 *
5098 * @returns VBox status code.
5099 * @param pVM The VM to operate on.
5100 * @param   pInstrGC        Guest context instruction pointer
5101 * @param pConflictAddr Guest context pointer which conflicts with specified patch
5102 * @param pConflictPatch Conflicting patch
5103 *
5104 */
5105static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pConflictPatch)
5106{
5107#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
5108 PATCHINFO patch;
5109 DISCPUSTATE cpu;
5110 R3PTRTYPE(uint8_t *) pInstrHC;
5111 uint32_t opsize;
5112 bool disret;
5113 int rc;
5114
5115 RT_ZERO(patch);
5116 pInstrHC = PATMGCVirtToHCVirt(pVM, &patch, pInstrGC);
5117 cpu.mode = (pConflictPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
5118 disret = PATMR3DISInstr(pVM, &patch, &cpu, pInstrGC, pInstrHC, &opsize, NULL);
5119 /*
5120     * If it's a 5-byte relative jump, then we can work around the problem by replacing the 32-bit relative offset
5121     * with one that jumps right into the conflicting patch.
5122 * Otherwise we must disable the conflicting patch to avoid serious problems.
5123 */
5124 if ( disret == true
5125 && (pConflictPatch->flags & PATMFL_CODE32)
5126 && (cpu.pCurInstr->opcode == OP_JMP || (cpu.pCurInstr->optype & OPTYPE_COND_CONTROLFLOW))
5127 && (cpu.param1.flags & USE_IMMEDIATE32_REL))
5128 {
5129 /* Hint patches must be enabled first. */
5130 if (pConflictPatch->flags & PATMFL_INSTR_HINT)
5131 {
5132 Log(("Enabling HINTED patch %RRv\n", pConflictPatch->pPrivInstrGC));
5133 pConflictPatch->flags &= ~PATMFL_INSTR_HINT;
5134 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5135 Assert(rc == VINF_SUCCESS || rc == VERR_PATCH_NOT_FOUND);
5136 /* Enabling might fail if the patched code has changed in the meantime. */
5137 if (rc != VINF_SUCCESS)
5138 return rc;
5139 }
5140
5141 rc = PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_JUMP_CONFLICT);
5142 if (RT_SUCCESS(rc))
5143 {
5144 Log(("PATM -> CONFLICT: Installed JMP patch for patch conflict at %RRv\n", pInstrGC));
5145 STAM_COUNTER_INC(&pVM->patm.s.StatFixedConflicts);
5146 return VINF_SUCCESS;
5147 }
5148 }
5149#endif
5150
5151 if (pConflictPatch->opcode == OP_CLI)
5152 {
5153 /* Turn it into an int3 patch; our GC trap handler will call the generated code manually. */
5154 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> turn into int 3 patch!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5155 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5156 if (rc == VWRN_PATCH_REMOVED)
5157 return VINF_SUCCESS;
5158 if (RT_SUCCESS(rc))
5159 {
5160 pConflictPatch->flags &= ~(PATMFL_MUST_INSTALL_PATCHJMP|PATMFL_INSTR_HINT);
5161 pConflictPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
5162 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5163 if (rc == VERR_PATCH_NOT_FOUND)
5164 return VINF_SUCCESS; /* removed already */
5165
5166 AssertRC(rc);
5167 if (RT_SUCCESS(rc))
5168 {
5169 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
5170 return VINF_SUCCESS;
5171 }
5172 }
5173 /* else turned into unusable patch (see below) */
5174 }
5175 else
5176 {
5177 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> DISABLING it!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5178 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5179 if (rc == VWRN_PATCH_REMOVED)
5180 return VINF_SUCCESS;
5181 }
5182
5183 /* No need to monitor the code anymore. */
5184 if (pConflictPatch->flags & PATMFL_CODE_MONITORED)
5185 {
5186 int rc = patmRemovePatchPages(pVM, pConflictPatch);
5187 AssertRC(rc);
5188 }
5189 pConflictPatch->uState = PATCH_UNUSABLE;
5190 STAM_COUNTER_INC(&pVM->patm.s.StatUnusable);
5191 return VERR_PATCH_DISABLED;
5192}
5193
5194/**
5195 * Enable patch for privileged instruction at specified location
5196 *
5197 * @returns VBox status code.
5198 * @param pVM The VM to operate on.
5199 * @param   pInstrGC    Guest context pointer to the privileged instruction
5200 *
5201 * @note    Returns failure if patching is not allowed or not possible.
5202 *
5203 */
5204VMMR3DECL(int) PATMR3EnablePatch(PVM pVM, RTRCPTR pInstrGC)
5205{
5206 PPATMPATCHREC pPatchRec;
5207 PPATCHINFO pPatch;
5208
5209 Log(("PATMR3EnablePatch %RRv\n", pInstrGC));
5210 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5211 if (pPatchRec)
5212 {
5213 int rc = VINF_SUCCESS;
5214
5215 pPatch = &pPatchRec->patch;
5216
5217 if (pPatch->uState == PATCH_DISABLED)
5218 {
5219 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
5220 {
5221 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
5222 /** @todo -> pPrivInstrHC is probably not valid anymore */
5223 rc = PGMPhysGCPtr2R3Ptr(VMMGetCpu0(pVM), pPatchRec->patch.pPrivInstrGC, (PRTR3PTR)&pPatchRec->patch.pPrivInstrHC);
5224 if (rc == VINF_SUCCESS)
5225 {
5226#ifdef DEBUG
5227 DISCPUSTATE cpu;
5228 char szOutput[256];
5229 uint32_t opsize, i = 0;
5230#endif
5231 uint8_t temp[16];
5232
5233 Assert(pPatch->cbPatchJump < sizeof(temp));
5234
5235 // let's first check if the guest code is still the same
5236 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5237 AssertRC(rc2);
5238
5239 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5240 {
5241                        Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5242 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5243 /* Remove it completely */
5244 rc = PATMR3RemovePatch(pVM, pInstrGC);
5245 AssertRC(rc);
5246 return VERR_PATCH_NOT_FOUND;
5247 }
5248
5249 rc2 = patmGenJumpToPatch(pVM, pPatch, false);
5250 AssertRC(rc2);
5251 if (RT_FAILURE(rc2))
5252 return rc2;
5253
5254#ifdef DEBUG
5255 bool disret;
5256 i = 0;
5257 while(i < pPatch->cbPatchJump)
5258 {
5259 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
5260 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, &pPatch->pPrivInstrHC[i], &opsize, szOutput);
5261 Log(("Renewed patch instr: %s", szOutput));
5262 i += opsize;
5263 }
5264#endif
5265 }
5266 }
5267 else
5268 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5269 {
5270 uint8_t temp[16];
5271
5272 Assert(pPatch->cbPatchJump < sizeof(temp));
5273
5274 /* Let's first check if the guest code is still the same. */
5275 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5276 AssertRC(rc2);
5277
5278 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5279 {
5280                    Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5281 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5282 rc = PATMR3RemovePatch(pVM, pInstrGC);
5283 AssertRC(rc);
5284 return VERR_PATCH_NOT_FOUND;
5285 }
5286
5287 rc2 = patmActivateInt3Patch(pVM, pPatch);
5288 if (RT_FAILURE(rc2))
5289 return rc2;
5290 }
5291
5292 pPatch->uState = pPatch->uOldState; //restore state
5293
5294 /* Restore the entry breakpoint with the original opcode (see PATMR3DisablePatch). */
5295 if (pPatch->pPatchBlockOffset)
5296 {
5297 *PATCHCODE_PTR_HC(pPatch) = pPatch->bDirtyOpcode;
5298 }
5299
5300 STAM_COUNTER_ADD(&pVM->patm.s.StatEnabled, 1);
5301 }
5302 else
5303 Log(("PATMR3EnablePatch: Unable to enable patch %RRv with state %d\n", pInstrGC, pPatch->uState));
5304
5305 return rc;
5306 }
5307 return VERR_PATCH_NOT_FOUND;
5308}
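/* Usage sketch (illustrative): disable and enable are meant to be paired; the
 * enable step fails with VERR_PATCH_NOT_FOUND when the guest code changed in
 * between and the patch had to be removed.
 *
 *     PATMR3DisablePatch(pVM, pPrivInstrGC);
 *     // ... guest code may be inspected or modified here ...
 *     int rcEnable = PATMR3EnablePatch(pVM, pPrivInstrGC);
 *     if (rcEnable == VERR_PATCH_NOT_FOUND)
 *         Log(("Patch at %RRv is gone; a new PATMR3InstallPatch call would be required\n", pPrivInstrGC));
 */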
5309
5310/**
5311 * Remove patch for privileged instruction at specified location
5312 *
5313 * @returns VBox status code.
5314 * @param pVM The VM to operate on.
5315 * @param pPatchRec Patch record
5316 * @param   fForceRemove    Force removal even of duplicate function or referenced code patches (otherwise refused)
5317 */
5318int PATMRemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove)
5319{
5320 PPATCHINFO pPatch;
5321
5322 pPatch = &pPatchRec->patch;
5323
5324 /* Strictly forbidden to remove such patches. There can be dependencies!! */
5325 if (!fForceRemove && (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED)))
5326 {
5327 Log(("PATMRemovePatch %RRv REFUSED!\n", pPatch->pPrivInstrGC));
5328 return VERR_ACCESS_DENIED;
5329 }
5330 Log(("PATMRemovePatch %RRv\n", pPatch->pPrivInstrGC));
5331
5332 /** @note NEVER EVER REUSE PATCH MEMORY */
5333 /** @note PATMR3DisablePatch put a breakpoint (0xCC) at the entry of this patch */
5334
5335 if (pPatchRec->patch.pPatchBlockOffset)
5336 {
5337 PAVLOU32NODECORE pNode;
5338
5339 pNode = RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->patch.pPatchBlockOffset);
5340 Assert(pNode);
5341 }
5342
5343 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
5344 {
5345 int rc = patmRemovePatchPages(pVM, &pPatchRec->patch);
5346 AssertRC(rc);
5347 }
5348
5349#ifdef VBOX_WITH_STATISTICS
5350 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5351 {
5352 STAMR3Deregister(pVM, &pPatchRec->patch);
5353#ifndef DEBUG_sandervl
5354 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5355 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5356 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5357 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5358 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5359 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5360 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5361 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5362 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5363 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5364 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5365 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5366 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5367 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5368#endif
5369 }
5370#endif
5371
5372 /** @note no need to free Guest2PatchAddrTree as those records share memory with Patch2GuestAddrTree records. */
5373 patmEmptyTreeU32(pVM, &pPatch->Patch2GuestAddrTree);
5374 pPatch->nrPatch2GuestRecs = 0;
5375 Assert(pPatch->Patch2GuestAddrTree == 0);
5376
5377 patmEmptyTree(pVM, &pPatch->FixupTree);
5378 pPatch->nrFixups = 0;
5379 Assert(pPatch->FixupTree == 0);
5380
5381 if (pPatchRec->patch.pTempInfo)
5382 MMR3HeapFree(pPatchRec->patch.pTempInfo);
5383
5384 /** @note might fail, because it has already been removed (e.g. during reset). */
5385 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5386
5387 /* Free the patch record */
5388 MMHyperFree(pVM, pPatchRec);
5389 return VINF_SUCCESS;
5390}
5391
5392/**
5393 * Attempt to refresh the patch by recompiling its entire code block
5394 *
5395 * @returns VBox status code.
5396 * @param pVM The VM to operate on.
5397 * @param pPatchRec Patch record
5398 */
5399int patmR3RefreshPatch(PVM pVM, PPATMPATCHREC pPatchRec)
5400{
5401 PPATCHINFO pPatch;
5402 int rc;
5403 RTRCPTR pInstrGC = pPatchRec->patch.pPrivInstrGC;
5404
5405 Log(("patmR3RefreshPatch: attempt to refresh patch at %RRv\n", pInstrGC));
5406
5407 pPatch = &pPatchRec->patch;
5408 AssertReturn(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER), VERR_PATCHING_REFUSED);
5409 if (pPatch->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
5410 {
5411 Log(("patmR3RefreshPatch: refused because external jumps to this patch exist\n"));
5412 return VERR_PATCHING_REFUSED;
5413 }
5414
5415 /** Note: quite ugly to enable/disable/remove/insert old and new patches, but there's no easy way around it. */
5416
5417 rc = PATMR3DisablePatch(pVM, pInstrGC);
5418 AssertRC(rc);
5419
5420 /** Kick it out of the lookup tree to make sure PATMR3InstallPatch doesn't fail (hack alert) */
5421 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5422#ifdef VBOX_WITH_STATISTICS
5423 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5424 {
5425 STAMR3Deregister(pVM, &pPatchRec->patch);
5426#ifndef DEBUG_sandervl
5427 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5428 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5429 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5430 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5431 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5432 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5433 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5434 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5435 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5436 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5437 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5438 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5439 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5440 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5441#endif
5442 }
5443#endif
5444
5445 /** Note: We don't attempt to reuse patch memory here as it's quite common that the new code block requires more memory. */
5446
5447 /* Attempt to install a new patch. */
5448 rc = PATMR3InstallPatch(pVM, pInstrGC, pPatch->flags & (PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT));
5449 if (RT_SUCCESS(rc))
5450 {
5451 RTRCPTR pPatchTargetGC;
5452 PPATMPATCHREC pNewPatchRec;
5453
5454 /* Determine target address in new patch */
5455 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pInstrGC);
5456 Assert(pPatchTargetGC);
5457 if (!pPatchTargetGC)
5458 {
5459 rc = VERR_PATCHING_REFUSED;
5460 goto failure;
5461 }
5462
5463 /* Reset offset into patch memory to put the next code blocks right at the beginning. */
5464 pPatch->uCurPatchOffset = 0;
5465
5466 /* insert jump to new patch in old patch block */
5467 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC, false /* no lookup record */);
5468 if (RT_FAILURE(rc))
5469 goto failure;
5470
5471 pNewPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5472 Assert(pNewPatchRec); /* can't fail */
5473
5474 /* Remove old patch (only do that when everything is finished) */
5475 int rc2 = PATMRemovePatch(pVM, pPatchRec, true /* force removal */);
5476 AssertRC(rc2);
5477
5478 /* Put the new patch back into the tree, because removing the old one kicked this one out. (hack alert) */
5479 RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pNewPatchRec->Core);
5480
5481        LogRel(("PATM: patmR3RefreshPatch: successfully refreshed patch at %RRv\n", pInstrGC));
5482 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshSuccess);
5483
5484 /* Used by another patch, so don't remove it! */
5485 pNewPatchRec->patch.flags |= PATMFL_CODE_REFERENCED;
5486 }
5487
5488failure:
5489 if (RT_FAILURE(rc))
5490 {
5491        LogRel(("PATM: patmR3RefreshPatch: failed to refresh patch at %RRv. Reactivating the old one.\n", pInstrGC));
5492
5493 /* Remove the new inactive patch */
5494 rc = PATMR3RemovePatch(pVM, pInstrGC);
5495 AssertRC(rc);
5496
5497 /* Put the old patch back into the tree (or else it won't be saved) (hack alert) */
5498 RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
5499
5500 /* Enable again in case the dirty instruction is near the end and there are safe code paths. */
5501 int rc2 = PATMR3EnablePatch(pVM, pInstrGC);
5502 AssertRC(rc2);
5503
5504 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshFailed);
5505 }
5506 return rc;
5507}
5508
5509/**
5510 * Find patch for privileged instruction at specified location
5511 *
5512 * @returns Patch structure pointer if found; else NULL
5513 * @param pVM The VM to operate on.
5514 * @param   pInstrGC        Guest context pointer to an instruction that might lie within 5 bytes of an existing patch jump
5515 * @param fIncludeHints Include hinted patches or not
5516 *
5517 */
5518PPATCHINFO PATMFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncludeHints)
5519{
5520 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5521    /* If the patch is enabled, the pointer is not identical to the privileged instruction ptr, and it lies within 5 bytes of it, then we've got a hit! */
5522 if (pPatchRec)
5523 {
5524 if ( pPatchRec->patch.uState == PATCH_ENABLED
5525 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE)
5526 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5527 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5528 {
5529 Log(("Found active patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5530 return &pPatchRec->patch;
5531 }
5532 else
5533 if ( fIncludeHints
5534 && pPatchRec->patch.uState == PATCH_DISABLED
5535 && (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
5536 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5537 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5538 {
5539 Log(("Found HINT patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5540 return &pPatchRec->patch;
5541 }
5542 }
5543 return NULL;
5544}
5545
5546/**
5547 * Checks whether the GC address is inside a generated patch jump
5548 *
5549 * @returns true -> yes, false -> no
5550 * @param pVM The VM to operate on.
5551 * @param pAddr Guest context address
5552 * @param   pPatchAddr      Where to return the guest context address of the patched instruction (valid only when true is returned)
5553 */
5554VMMR3DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatchAddr)
5555{
5556 RTRCPTR addr;
5557 PPATCHINFO pPatch;
5558
5559 if (PATMIsEnabled(pVM) == false)
5560 return false;
5561
5562 if (pPatchAddr == NULL)
5563 pPatchAddr = &addr;
5564
5565 *pPatchAddr = 0;
5566
5567 pPatch = PATMFindActivePatchByEntrypoint(pVM, pAddr);
5568 if (pPatch)
5569 {
5570 *pPatchAddr = pPatch->pPrivInstrGC;
5571 }
5572 return *pPatchAddr == 0 ? false : true;
5573}
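/* Usage sketch (illustrative; pFaultAddrGC is an assumed guest fault address):
 *
 *     RTGCPTR32 GCPtrPatchedInstr;
 *     if (PATMR3IsInsidePatchJump(pVM, pFaultAddrGC, &GCPtrPatchedInstr))
 *         Log(("%RRv hits the patch jump installed at %RRv\n", pFaultAddrGC, GCPtrPatchedInstr));
 */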
5574
5575/**
5576 * Remove patch for privileged instruction at specified location
5577 *
5578 * @returns VBox status code.
5579 * @param pVM The VM to operate on.
5580 * @param   pInstrGC    Guest context pointer to the privileged instruction
5581 *
5582 * @note    Returns failure if patching is not allowed or not possible.
5583 *
5584 */
5585VMMR3DECL(int) PATMR3RemovePatch(PVM pVM, RTRCPTR pInstrGC)
5586{
5587 PPATMPATCHREC pPatchRec;
5588
5589 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5590 if (pPatchRec)
5591 {
5592 int rc = PATMR3DisablePatch(pVM, pInstrGC);
5593 if (rc == VWRN_PATCH_REMOVED)
5594 return VINF_SUCCESS;
5595 return PATMRemovePatch(pVM, pPatchRec, false);
5596 }
5597 AssertFailed();
5598 return VERR_PATCH_NOT_FOUND;
5599}
5600
5601/**
5602 * Mark patch as dirty
5603 *
5604 * @returns VBox status code.
5605 * @param pVM The VM to operate on.
5606 * @param pPatch Patch record
5607 *
5608 * @note returns failure if patching is not allowed or possible
5609 *
5610 */
5611VMMR3DECL(int) PATMR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch)
5612{
5613 if (pPatch->pPatchBlockOffset)
5614 {
5615 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5616 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5617 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5618 }
5619
5620 STAM_COUNTER_INC(&pVM->patm.s.StatDirty);
5621 /* Put back the replaced instruction. */
5622 int rc = PATMR3DisablePatch(pVM, pPatch->pPrivInstrGC);
5623 if (rc == VWRN_PATCH_REMOVED)
5624 return VINF_SUCCESS;
5625
5626 /** @note we don't restore patch pages for patches that are not enabled! */
5627 /** @note be careful when changing this behaviour!! */
5628
5629 /* The patch pages are no longer marked for self-modifying code detection */
5630 if (pPatch->flags & PATMFL_CODE_MONITORED)
5631 {
5632 rc = patmRemovePatchPages(pVM, pPatch);
5633 AssertRCReturn(rc, rc);
5634 }
5635 pPatch->uState = PATCH_DIRTY;
5636
5637 /* Paranoia; make sure this patch is not somewhere in the callchain, so prevent ret instructions from succeeding. */
5638 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
5639
5640 return VINF_SUCCESS;
5641}
5642
5643/**
5644 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5645 *
5646 * @returns Corresponding guest context instruction pointer, or 0 if not found.
5647 * @param pVM The VM to operate on.
5648 * @param pPatch Patch block structure pointer
5649 * @param pPatchGC GC address in patch block
5650 */
5651RTRCPTR patmPatchGCPtr2GuestGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pPatchGC)
5652{
5653 Assert(pPatch->Patch2GuestAddrTree);
5654 /* Get the closest record from below. */
5655 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->Patch2GuestAddrTree, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5656 if (pPatchToGuestRec)
5657 return pPatchToGuestRec->pOrgInstrGC;
5658
5659 return 0;
5660}
5661
5662/** Converts Guest code GC ptr to Patch code GC ptr (if found)
5663 *
5664 * @returns Corresponding GC pointer in the patch block, or 0 if not found
5665 * @param pVM The VM to operate on.
5666 * @param pPatch Current patch block pointer
5667 * @param pInstrGC Guest context pointer to privileged instruction
5668 *
5669 */
5670RTRCPTR patmGuestGCPtrToPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5671{
5672 if (pPatch->Guest2PatchAddrTree)
5673 {
5674 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
5675 if (pGuestToPatchRec)
5676 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
5677 }
5678
5679 return 0;
5680}
5681
5682/** Converts Guest code GC ptr to Patch code GC ptr (or nearest from below if no identical match)
5683 *
5684 * @returns Corresponding GC pointer in the patch block, or 0 if not found
5685 * @param pVM The VM to operate on.
5686 * @param pPatch Current patch block pointer
5687 * @param pInstrGC Guest context pointer to privileged instruction
5688 *
5689 */
5690RTRCPTR patmGuestGCPtrToClosestPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5691{
5692 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pInstrGC, false);
5693 if (pGuestToPatchRec)
5694 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
5695
5696 return 0;
5697}
5698
5699/** Converts Guest code GC ptr to Patch code GC ptr (if found)
5700 *
5701 * @returns Corresponding GC pointer in the patch block, or 0 if not found
5702 * @param pVM The VM to operate on.
5703 * @param pInstrGC Guest context pointer to privileged instruction
5704 *
5705 */
5706VMMR3DECL(RTRCPTR) PATMR3GuestGCPtrToPatchGCPtr(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC)
5707{
5708 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5709 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && pInstrGC >= pPatchRec->patch.pPrivInstrGC)
5710 {
5711 return patmGuestGCPtrToPatchGCPtr(pVM, &pPatchRec->patch, pInstrGC);
5712 }
5713 return 0;
5714}
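/* Round-trip sketch (illustrative; pPatchRec is assumed to be the enabled patch
 * record covering pInstrGC): the Guest2Patch and Patch2Guest trees are inverses,
 * so for a recompiled instruction the translation and its reverse agree.
 *
 *     RTRCPTR pPatchGC = PATMR3GuestGCPtrToPatchGCPtr(pVM, pInstrGC);
 *     if (pPatchGC)
 *         Assert(patmPatchGCPtr2GuestGCPtr(pVM, &pPatchRec->patch, pPatchGC) == pInstrGC);
 */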
5715
5716/**
5717 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5718 *
5719 * @returns original GC instruction pointer or 0 if not found
5720 * @param pVM The VM to operate on.
5721 * @param pPatchGC GC address in patch block
5722 * @param pEnmState State of the translated address (out)
5723 *
5724 */
5725VMMR3DECL(RTRCPTR) PATMR3PatchToGCPtr(PVM pVM, RTRCPTR pPatchGC, PATMTRANSSTATE *pEnmState)
5726{
5727 PPATMPATCHREC pPatchRec;
5728 void *pvPatchCoreOffset;
5729 RTRCPTR pPrivInstrGC;
5730
5731 Assert(PATMIsPatchGCAddr(pVM, pPatchGC));
5732 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5733 if (pvPatchCoreOffset == 0)
5734 {
5735 Log(("PATMR3PatchToGCPtr failed for %RRv offset %x\n", pPatchGC, pPatchGC - pVM->patm.s.pPatchMemGC));
5736 return 0;
5737 }
5738 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
5739 pPrivInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, &pPatchRec->patch, pPatchGC);
5740 if (pEnmState)
5741 {
5742 AssertMsg(pPrivInstrGC && ( pPatchRec->patch.uState == PATCH_ENABLED
5743 || pPatchRec->patch.uState == PATCH_DIRTY
5744 || pPatchRec->patch.uState == PATCH_DISABLE_PENDING
5745 || pPatchRec->patch.uState == PATCH_UNUSABLE),
5746 ("pPrivInstrGC=%RRv uState=%d\n", pPrivInstrGC, pPatchRec->patch.uState));
5747
5748 if ( !pPrivInstrGC
5749 || pPatchRec->patch.uState == PATCH_UNUSABLE
5750 || pPatchRec->patch.uState == PATCH_REFUSED)
5751 {
5752 pPrivInstrGC = 0;
5753 *pEnmState = PATMTRANS_FAILED;
5754 }
5755 else
5756 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pPrivInstrGC)
5757 {
5758 *pEnmState = PATMTRANS_INHIBITIRQ;
5759 }
5760 else
5761 if ( pPatchRec->patch.uState == PATCH_ENABLED
5762 && !(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE))
5763 && pPrivInstrGC > pPatchRec->patch.pPrivInstrGC
5764 && pPrivInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5765 {
5766 *pEnmState = PATMTRANS_OVERWRITTEN;
5767 }
5768 else
5769 if (PATMFindActivePatchByEntrypoint(pVM, pPrivInstrGC))
5770 {
5771 *pEnmState = PATMTRANS_OVERWRITTEN;
5772 }
5773 else
5774 if (pPrivInstrGC == pPatchRec->patch.pPrivInstrGC)
5775 {
5776 *pEnmState = PATMTRANS_PATCHSTART;
5777 }
5778 else
5779 *pEnmState = PATMTRANS_SAFE;
5780 }
5781 return pPrivInstrGC;
5782}
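/* Usage sketch (illustrative): translate an address inside the patch block back
 * to guest code and act on the reported translation state.
 *
 *     PATMTRANSSTATE enmState;
 *     RTRCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pPatchGC, &enmState);
 *     if (pOrgInstrGC && enmState == PATMTRANS_SAFE)
 *         Log(("Safe to resume the guest at %RRv\n", pOrgInstrGC));
 */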
5783
5784/**
5785 * Returns the GC pointer of the patch for the specified GC address
5786 *
5787 * @returns GC pointer of the patch code, or 0 if no enabled or dirty patch exists for the address.
5788 * @param pVM The VM to operate on.
5789 * @param pAddrGC Guest context address
5790 */
5791VMMR3DECL(RTRCPTR) PATMR3QueryPatchGCPtr(PVM pVM, RTRCPTR pAddrGC)
5792{
5793 PPATMPATCHREC pPatchRec;
5794
5795 // Find the patch record
5796 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pAddrGC);
5797 /** @todo we should only use patches that are enabled! always did this, but it's incorrect! */
5798 if (pPatchRec && (pPatchRec->patch.uState == PATCH_ENABLED || pPatchRec->patch.uState == PATCH_DIRTY))
5799 return PATCHCODE_PTR_GC(&pPatchRec->patch);
5800
5801 return 0;
5802}
5803
5804/**
5805 * Attempt to recover dirty instructions
5806 *
5807 * @returns VBox status code.
5808 * @param pVM The VM to operate on.
5809 * @param pCtx CPU context
5810 * @param pPatch Patch record
5811 * @param pPatchToGuestRec Patch to guest address record
5812 * @param pEip GC pointer of trapping instruction
5813 */
5814static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch, PRECPATCHTOGUEST pPatchToGuestRec, RTRCPTR pEip)
5815{
5816 DISCPUSTATE CpuOld, CpuNew;
5817 uint8_t *pPatchInstrHC, *pCurPatchInstrHC;
5818 int rc;
5819 RTRCPTR pCurInstrGC, pCurPatchInstrGC;
5820 uint32_t cbDirty;
5821 PRECPATCHTOGUEST pRec;
5822 PVMCPU pVCpu = VMMGetCpu0(pVM);
5823
5824 Log(("patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv)\n", pEip, pPatchToGuestRec->pOrgInstrGC));
5825
5826 pRec = pPatchToGuestRec;
5827 pCurInstrGC = pPatchToGuestRec->pOrgInstrGC;
5828 pCurPatchInstrGC = pEip;
5829 cbDirty = 0;
5830 pPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
5831
5832 /* Find all adjacent dirty instructions */
5833 while (true)
5834 {
5835 if (pRec->fJumpTarget)
5836 {
5837 LogRel(("PATM: patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv) ignored, because instruction in function was reused as target of jump\n", pEip, pPatchToGuestRec->pOrgInstrGC));
5838 pRec->fDirty = false;
5839 return VERR_PATCHING_REFUSED;
5840 }
5841
5842 /* Restore original instruction opcode byte so we can check if the write was indeed safe. */
5843 pCurPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
5844 *pCurPatchInstrHC = pRec->u8DirtyOpcode;
5845
5846 /* Only harmless instructions are acceptable. */
5847 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurPatchInstrGC, &CpuOld, 0);
5848 if ( RT_FAILURE(rc)
5849 || !(CpuOld.pCurInstr->optype & OPTYPE_HARMLESS))
5850 {
5851 if (RT_SUCCESS(rc))
5852 cbDirty += CpuOld.opsize;
5853 else
5854 if (!cbDirty)
5855 cbDirty = 1;
5856 break;
5857 }
5858
5859#ifdef DEBUG
5860 char szBuf[256];
5861 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
5862 szBuf, sizeof(szBuf), NULL);
5863 Log(("DIRTY: %s\n", szBuf));
5864#endif
5865 /* Mark as clean; if we fail we'll let it always fault. */
5866 pRec->fDirty = false;
5867
5868 /** Remove old lookup record. */
5869 patmr3RemoveP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrGC);
5870
5871 pCurPatchInstrGC += CpuOld.opsize;
5872 cbDirty += CpuOld.opsize;
5873
5874 /* Let's see if there's another dirty instruction right after. */
5875 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
5876 if (!pRec || !pRec->fDirty)
5877 break; /* no more dirty instructions */
5878
5879 /* In case of complex instructions the next guest instruction could be quite far off. */
5880 pCurPatchInstrGC = pRec->Core.Key + pVM->patm.s.pPatchMemGC;
5881 }
5882
5883 if ( RT_SUCCESS(rc)
5884 && (CpuOld.pCurInstr->optype & OPTYPE_HARMLESS)
5885 )
5886 {
5887 uint32_t cbLeft;
5888
5889 pCurPatchInstrHC = pPatchInstrHC;
5890 pCurPatchInstrGC = pEip;
5891 cbLeft = cbDirty;
5892
5893 while (cbLeft && RT_SUCCESS(rc))
5894 {
5895 bool fValidInstr;
5896
5897 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurInstrGC, &CpuNew, 0);
5898
5899 fValidInstr = !!(CpuNew.pCurInstr->optype & OPTYPE_HARMLESS);
5900 if ( !fValidInstr
5901 && (CpuNew.pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW)
5902 )
5903 {
5904 RTRCPTR pTargetGC = PATMResolveBranch(&CpuNew, pCurInstrGC);
5905
5906 if ( pTargetGC >= pPatchToGuestRec->pOrgInstrGC
5907 && pTargetGC <= pPatchToGuestRec->pOrgInstrGC + cbDirty
5908 )
5909 {
5910 /* A relative jump to an instruction inside or to the end of the dirty block is acceptable. */
5911 fValidInstr = true;
5912 }
5913 }
5914
5915 /* If the instruction is completely harmless (which implies a 1:1 patch copy). */
5916 if ( rc == VINF_SUCCESS
5917 && CpuNew.opsize <= cbLeft /* must still fit */
5918 && fValidInstr
5919 )
5920 {
5921#ifdef DEBUG
5922 char szBuf[256];
5923 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
5924 szBuf, sizeof(szBuf), NULL);
5925 Log(("NEW: %s\n", szBuf));
5926#endif
5927
5928 /* Copy the new instruction. */
5929 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pCurPatchInstrHC, pCurInstrGC, CpuNew.opsize);
5930 AssertRC(rc);
5931
5932 /* Add a new lookup record for the duplicated instruction. */
5933 patmr3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
5934 }
5935 else
5936 {
5937#ifdef DEBUG
5938 char szBuf[256];
5939 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
5940 szBuf, sizeof(szBuf), NULL);
5941 Log(("NEW: %s (FAILED)\n", szBuf));
5942#endif
5943 /* Restore the old lookup record for the duplicated instruction. */
5944 patmr3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
5945
5946 /** @todo in theory we need to restore the lookup records for the remaining dirty instructions too! */
5947 rc = VERR_PATCHING_REFUSED;
5948 break;
5949 }
5950 pCurInstrGC += CpuNew.opsize;
5951 pCurPatchInstrHC += CpuNew.opsize;
5952 pCurPatchInstrGC += CpuNew.opsize;
5953 cbLeft -= CpuNew.opsize;
5954
5955 /* Check if we expanded a complex guest instruction into a patch stream (e.g. call) */
5956 if (!cbLeft)
5957 {
5958 /* If the next patch instruction doesn't correspond to the next guest instruction, then we have some extra room to fill. */
5959 if (RTAvlU32Get(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC) == NULL)
5960 {
5961 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
5962 if (pRec)
5963 {
5964 unsigned cbFiller = pRec->Core.Key + pVM->patm.s.pPatchMemGC - pCurPatchInstrGC;
5965 uint8_t *pPatchFillHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
5966
5967 Assert(!pRec->fDirty);
5968
5969 Log(("Room left in patched instruction stream (%d bytes)\n", cbFiller));
5970 if (cbFiller >= SIZEOF_NEARJUMP32)
5971 {
5972 pPatchFillHC[0] = 0xE9;
5973 *(uint32_t *)&pPatchFillHC[1] = cbFiller - SIZEOF_NEARJUMP32;
5974#ifdef DEBUG
5975 char szBuf[256];
5976 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
5977 szBuf, sizeof(szBuf), NULL);
5978 Log(("FILL: %s\n", szBuf));
5979#endif
5980 }
5981 else
5982 {
5983 for (unsigned i = 0; i < cbFiller; i++)
5984 {
5985 pPatchFillHC[i] = 0x90; /* NOP */
5986#ifdef DEBUG
5987 char szBuf[256];
5988 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC + i,
5989 DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
5990 Log(("FILL: %s\n", szBuf));
5991#endif
5992 }
5993 }
5994 }
5995 }
5996 }
5997 }
5998 }
5999 else
6000 rc = VERR_PATCHING_REFUSED;
6001
6002 if (RT_SUCCESS(rc))
6003 {
6004 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyGood);
6005 }
6006 else
6007 {
6008 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyBad);
6009 Assert(cbDirty);
6010
6011 /* Mark the whole instruction stream with breakpoints. */
6012 if (cbDirty)
6013 memset(pPatchInstrHC, 0xCC, cbDirty);
6014
6015 if ( pVM->patm.s.fOutOfMemory == false
6016 && (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER)))
6017 {
6018 rc = patmR3RefreshPatch(pVM, pPatch);
6019 if (RT_FAILURE(rc))
6020 {
6021 LogRel(("PATM: Failed to refresh dirty patch at %RRv. Disabling it.\n", pPatch->patch.pPrivInstrGC));
6022 }
6023 /* Even if we succeed, we must go back to the original instruction as the patched one could be invalid. */
6024 rc = VERR_PATCHING_REFUSED;
6025 }
6026 }
6027 return rc;
6028}
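/* Filler example (illustrative numbers): when the dirty region originally held
 * an expanded sequence and the refreshed raw instructions are shorter, the gap
 * up to the next mapped patch instruction (cbFiller bytes) is padded. A gap of
 * less than SIZEOF_NEARJUMP32 bytes is filled with NOPs (0x90); a gap of
 * SIZEOF_NEARJUMP32 bytes or more gets a near jmp (0xE9) whose rel32 displacement
 * of cbFiller - SIZEOF_NEARJUMP32 lands exactly on the next valid patch instruction.
 */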
6029
6030/**
6031 * Handle trap inside patch code
6032 *
6033 * @returns VBox status code.
6034 * @param pVM The VM to operate on.
6035 * @param pCtx CPU context
6036 * @param pEip GC pointer of trapping instruction
6037 * @param ppNewEip GC pointer to new instruction
6038 */
6039VMMR3DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *ppNewEip)
6040{
6041 PPATMPATCHREC pPatch = 0;
6042 void *pvPatchCoreOffset;
6043 RTRCUINTPTR offset;
6044 RTRCPTR pNewEip;
6045    int rc;
6046 PRECPATCHTOGUEST pPatchToGuestRec = 0;
6047 PVMCPU pVCpu = VMMGetCpu0(pVM);
6048
6049 Assert(pVM->cCpus == 1);
6050
6051 pNewEip = 0;
6052 *ppNewEip = 0;
6053
6054 STAM_PROFILE_ADV_START(&pVM->patm.s.StatHandleTrap, a);
6055
6056 /* Find the patch record. */
6057 /** @note there might not be a patch to guest translation record (global function) */
6058 offset = pEip - pVM->patm.s.pPatchMemGC;
6059 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
6060 if (pvPatchCoreOffset)
6061 {
6062 pPatch = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6063
6064 Assert(offset >= pPatch->patch.pPatchBlockOffset && offset < pPatch->patch.pPatchBlockOffset + pPatch->patch.cbPatchBlockSize);
6065
6066 if (pPatch->patch.uState == PATCH_DIRTY)
6067 {
6068 Log(("PATMR3HandleTrap: trap in dirty patch at %RRv\n", pEip));
6069 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6070 {
6071 /* Function duplication patches set fPIF to 1 on entry */
6072 pVM->patm.s.pGCStateHC->fPIF = 1;
6073 }
6074 }
6075 else
6076 if (pPatch->patch.uState == PATCH_DISABLED)
6077 {
6078 Log(("PATMR3HandleTrap: trap in disabled patch at %RRv\n", pEip));
6079 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6080 {
6081 /* Function duplication patches set fPIF to 1 on entry */
6082 pVM->patm.s.pGCStateHC->fPIF = 1;
6083 }
6084 }
6085 else
6086 if (pPatch->patch.uState == PATCH_DISABLE_PENDING)
6087 {
6088 RTRCPTR pPrivInstrGC = pPatch->patch.pPrivInstrGC;
6089
6090 Log(("PATMR3HandleTrap: disable operation is pending for patch at %RRv\n", pPatch->patch.pPrivInstrGC));
6091 rc = PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6092 AssertReleaseMsg(rc != VWRN_PATCH_REMOVED, ("PATMR3DisablePatch removed patch at %RRv\n", pPrivInstrGC));
6093 AssertMsg(pPatch->patch.uState == PATCH_DISABLED || pPatch->patch.uState == PATCH_UNUSABLE, ("Unexpected failure to disable patch state=%d rc=%Rrc\n", pPatch->patch.uState, rc));
6094 }
6095
6096 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, offset, false);
6097 AssertReleaseMsg(pPatchToGuestRec, ("PATMR3HandleTrap: Unable to find corresponding guest address for %RRv (offset %x)\n", pEip, offset));
6098
6099 pNewEip = pPatchToGuestRec->pOrgInstrGC;
6100 pPatch->patch.cTraps++;
6101 PATM_STAT_FAULT_INC(&pPatch->patch);
6102 }
6103 else
6104 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 0, ("PATMR3HandleTrap: Unable to find translation record for %RRv (PIF=0)\n", pEip));
6105
6106 /* Check if we were interrupted in PATM generated instruction code. */
6107 if (pVM->patm.s.pGCStateHC->fPIF == 0)
6108 {
6109 DISCPUSTATE Cpu;
6110 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pEip, &Cpu, "PIF Trap: ");
6111 AssertRC(rc);
6112
6113 if ( rc == VINF_SUCCESS
6114 && ( Cpu.pCurInstr->opcode == OP_PUSHF
6115 || Cpu.pCurInstr->opcode == OP_PUSH
6116 || Cpu.pCurInstr->opcode == OP_CALL)
6117 )
6118 {
6119 uint64_t fFlags;
6120
6121 STAM_COUNTER_INC(&pVM->patm.s.StatPushTrap);
6122
6123 if (Cpu.pCurInstr->opcode == OP_PUSH)
6124 {
6125 rc = PGMShwGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6126 if ( rc == VINF_SUCCESS
6127 && ((fFlags & (X86_PTE_P|X86_PTE_RW)) == (X86_PTE_P|X86_PTE_RW)) )
6128 {
6129 /* The stack address is fine, so the push argument is a pointer -> emulate this instruction */
6130
6131 /* Reset the PATM stack. */
6132 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6133
6134 pVM->patm.s.pGCStateHC->fPIF = 1;
6135
6136 Log(("Faulting push -> go back to the original instruction\n"));
6137
6138 /* continue at the original instruction */
6139 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6140 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6141 return VINF_SUCCESS;
6142 }
6143 }
6144
6145 /* Typical pushf (most patches)/push (call patch) trap because of a monitored page. */
6146 rc = PGMShwMakePageWritable(pVCpu, pCtx->esp, 0 /*fFlags*/);
6147 AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
6148 if (rc == VINF_SUCCESS)
6149 {
6150
6151 /* The guest page *must* be present. */
6152 rc = PGMGstGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6153 if (rc == VINF_SUCCESS && (fFlags & X86_PTE_P))
6154 {
6155 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6156 return VINF_PATCH_CONTINUE;
6157 }
6158 }
6159 }
6160 else
6161 if (pPatch->patch.pPrivInstrGC == pNewEip)
6162 {
6163 /* Invalidated patch or first instruction overwritten.
6164 * We can ignore the fPIF state in this case.
6165 */
6166 /* Reset the PATM stack. */
6167 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6168
6169 Log(("Call to invalidated patch -> go back to the original instruction\n"));
6170
6171 pVM->patm.s.pGCStateHC->fPIF = 1;
6172
6173 /* continue at the original instruction */
6174 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6175 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6176 return VINF_SUCCESS;
6177 }
6178
6179 char szBuf[256];
6180 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pEip, DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
6181
6182 /* Very bad. We crashed in emitted code. Probably stack? */
6183 if (pPatch)
6184 {
6185 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6186 ("Crash in patch code %RRv (%RRv) esp=%RX32\nPatch state=%x flags=%RX64 fDirty=%d\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), pPatch->patch.uState, pPatch->patch.flags, pPatchToGuestRec->fDirty, szBuf));
6187 }
6188 else
6189 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6190 ("Crash in patch code %RRv (%RRv) esp=%RX32\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), szBuf));
6191 EMR3FatalError(pVCpu, VERR_INTERNAL_ERROR);
6192 }
6193
6194 /* From here on, we must have a valid patch to guest translation. */
6195 if (pvPatchCoreOffset == 0)
6196 {
6197 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6198 AssertMsgFailed(("PATMR3HandleTrap: patch not found at address %RRv!!\n", pEip));
6199 return VERR_PATCH_NOT_FOUND; //fatal error
6200 }
6201
6202 /* Take care of dirty/changed instructions. */
6203 if (pPatchToGuestRec->fDirty)
6204 {
6205 Assert(pPatchToGuestRec->Core.Key == offset);
6206 Assert(pVM->patm.s.pGCStateHC->fPIF == 1);
6207
6208 rc = patmR3HandleDirtyInstr(pVM, pCtx, pPatch, pPatchToGuestRec, pEip);
6209 if (RT_SUCCESS(rc))
6210 {
6211 /* Retry the current instruction. */
6212 pNewEip = pEip;
6213 rc = VINF_PATCH_CONTINUE; /* Continue at current patch instruction. */
6214 }
6215 else
6216 {
6217 /* Reset the PATM stack. */
6218 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6219
6220 rc = VINF_SUCCESS; /* Continue at original instruction. */
6221 }
6222
6223 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6224 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6225 return rc;
6226 }
6227
6228#ifdef VBOX_STRICT
6229 if (pPatch->patch.flags & PATMFL_DUPLICATE_FUNCTION)
6230 {
6231 DISCPUSTATE cpu;
6232 bool disret;
6233 uint32_t opsize;
6234
6235 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6236 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &pPatch->patch, pNewEip), &opsize, NULL, PATMREAD_RAWCODE);
6237 if (disret && cpu.pCurInstr->opcode == OP_RETN)
6238 {
6239 RTRCPTR retaddr;
6240 PCPUMCTX pCtx2;
6241
6242 pCtx2 = CPUMQueryGuestCtxPtr(pVCpu);
6243
6244 rc = PGMPhysSimpleReadGCPtr(pVCpu, &retaddr, pCtx2->esp, sizeof(retaddr));
6245 AssertRC(rc);
6246
6247 Log(("Return failed at %RRv (%RRv)\n", pEip, pNewEip));
6248 Log(("Expected return address %RRv found address %RRv Psp=%x\n", pVM->patm.s.pGCStackHC[(pVM->patm.s.pGCStateHC->Psp+PATM_STACK_SIZE)/sizeof(RTRCPTR)], retaddr, pVM->patm.s.pGCStateHC->Psp));
6249 }
6250 }
6251#endif
6252
6253 /* Return the original address, corrected by subtracting the CS base address. */
6254 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6255
6256 /* Reset the PATM stack. */
6257 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6258
6259 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pNewEip)
6260 {
6261 /* Must be a faulting instruction after sti; currently only sysexit, hlt or iret */
6262 Log(("PATMR3HandleTrap %RRv -> inhibit irqs set!\n", pEip));
6263#ifdef VBOX_STRICT
6264 DISCPUSTATE cpu;
6265 bool disret;
6266 uint32_t opsize;
6267
6268 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6269 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &pPatch->patch, pNewEip), &opsize, NULL, PATMREAD_ORGCODE);
6270
6271 if (disret && (cpu.pCurInstr->opcode == OP_SYSEXIT || cpu.pCurInstr->opcode == OP_HLT || cpu.pCurInstr->opcode == OP_INT3))
6272 {
6273 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6274 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &pPatch->patch, pNewEip), &opsize, NULL, PATMREAD_RAWCODE);
6275
6276 Assert(cpu.pCurInstr->opcode == OP_SYSEXIT || cpu.pCurInstr->opcode == OP_HLT || cpu.pCurInstr->opcode == OP_IRET);
6277 }
6278#endif
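/* Re-arm the interrupt shadow at the original EIP so that the instruction following the patched STI still runs with interrupts inhibited. */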
6279 EMSetInhibitInterruptsPC(pVCpu, pNewEip);
6280 pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts = 0;
6281 }
6282
6283 Log2(("pPatchBlockGC %RRv - pEip %RRv corresponding GC address %RRv\n", PATCHCODE_PTR_GC(&pPatch->patch), pEip, pNewEip));
6284#ifdef LOG_ENABLED
6285 CPUMR3DisasmInstr(pVM, pVCpu, pCtx, pNewEip, "PATCHRET: ");
6286#endif
6287 if (pNewEip >= pPatch->patch.pPrivInstrGC && pNewEip < pPatch->patch.pPrivInstrGC + pPatch->patch.cbPatchJump)
6288 {
6289 /* We can't jump back to code that we've overwritten with a 5-byte jump! */
6290 Log(("Disabling patch at location %RRv due to trap too close to the privileged instruction\n", pPatch->patch.pPrivInstrGC));
6291 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6292 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6293 return VERR_PATCH_DISABLED;
6294 }
6295
6296#ifdef PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
6297 /** @todo Compare against the number of successful runs; add an aging algorithm and determine the best time to disable the patch. */
6298 if (pPatch->patch.cTraps > MAX_PATCH_TRAPS)
6299 {
6300 Log(("Disabling patch at location %RRv due to too many traps inside patch code\n", pPatch->patch.pPrivInstrGC));
6301 // We are only wasting time; back out the patch.
6302 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6303 pTrapRec->pNextPatchInstr = 0;
6304 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6305 return VERR_PATCH_DISABLED;
6306 }
6307#endif
6308
6309 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6310 return VINF_SUCCESS;
6311}
6312
6313
6314/**
6315 * Handle a page fault in a monitored page.
6316 *
6317 * @returns VBox status code.
6318 * @param pVM The VM to operate on.
6319 */
6320VMMR3DECL(int) PATMR3HandleMonitoredPage(PVM pVM)
6321{
6322 RTRCPTR addr = pVM->patm.s.pvFaultMonitor;
6323
6324 addr &= PAGE_BASE_GC_MASK;
6325
6326 int rc = PGMHandlerVirtualDeregister(pVM, addr);
6327 AssertRC(rc); NOREF(rc);
6328
6329 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, false);
6330 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) == PAGE_ADDRESS(addr))
6331 {
6332 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6333 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6334 rc = PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6335 if (rc == VWRN_PATCH_REMOVED)
6336 return VINF_SUCCESS;
6337
6338 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6339
6340 if (addr == pPatchRec->patch.pPrivInstrGC)
6341 addr++;
6342 }
6343
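/* Walk the remaining patch records whose privileged instruction lies on this page and renew each enabled one in the same way. */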
6344 for (;;)
6345 {
6346 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, true);
6347
6348 if (!pPatchRec || PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) != PAGE_ADDRESS(addr))
6349 break;
6350
6351 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
6352 {
6353 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6354 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6355 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6356 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6357 }
6358 addr = pPatchRec->patch.pPrivInstrGC + 1;
6359 }
6360
6361 pVM->patm.s.pvFaultMonitor = 0;
6362 return VINF_SUCCESS;
6363}
6364
6365
6366#ifdef VBOX_WITH_STATISTICS
6367
6368static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch)
6369{
6370 if (pPatch->flags & PATMFL_SYSENTER)
6371 {
6372 return "SYSENT";
6373 }
6374 else
6375 if (pPatch->flags & (PATMFL_TRAPHANDLER|PATMFL_INTHANDLER))
6376 {
6377 static char szTrap[16];
6378 uint32_t iGate;
6379
6380 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
6381 if (iGate < 256)
6382 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-%2X" : "TRAP-%2X", iGate);
6383 else
6384 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-??" : "TRAP-??");
6385 return szTrap;
6386 }
6387 else
6388 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
6389 return "DUPFUNC";
6390 else
6391 if (pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL)
6392 return "FUNCCALL";
6393 else
6394 if (pPatch->flags & PATMFL_TRAMPOLINE)
6395 return "TRAMP";
6396 else
6397 return patmGetInstructionString(pPatch->opcode, pPatch->flags);
6398}
6399
6400static const char *PATMPatchState(PVM pVM, PPATCHINFO pPatch)
6401{
6402 switch(pPatch->uState)
6403 {
6404 case PATCH_ENABLED:
6405 return "ENA";
6406 case PATCH_DISABLED:
6407 return "DIS";
6408 case PATCH_DIRTY:
6409 return "DIR";
6410 case PATCH_UNUSABLE:
6411 return "UNU";
6412 case PATCH_REFUSED:
6413 return "REF";
6414 case PATCH_DISABLE_PENDING:
6415 return "DIP";
6416 default:
6417 AssertFailed();
6418 return " ";
6419 }
6420}
6421
6422/**
6423 * Resets the sample.
6424 * @param pVM The VM handle.
6425 * @param pvSample The sample registered using STAMR3RegisterCallback.
6426 */
6427static void patmResetStat(PVM pVM, void *pvSample)
6428{
6429 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6430 Assert(pPatch);
6431
6432 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A = 0;
6433 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B = 0;
6434}
6435
6436/**
6437 * Prints the sample into the buffer.
6438 *
6439 * @param pVM The VM handle.
6440 * @param pvSample The sample registered using STAMR3RegisterCallback.
6441 * @param pszBuf The buffer to print into.
6442 * @param cchBuf The size of the buffer.
6443 */
6444static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf)
6445{
6446 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6447 Assert(pPatch);
6448
6449 Assert(pPatch->uState != PATCH_REFUSED);
6450 Assert(!(pPatch->flags & (PATMFL_REPLACE_FUNCTION_CALL|PATMFL_MMIO_ACCESS)));
6451
6452 RTStrPrintf(pszBuf, cchBuf, "size %04x ->%3s %8s - %08d - %08d",
6453 pPatch->cbPatchBlockSize, PATMPatchState(pVM, pPatch), PATMPatchType(pVM, pPatch),
6454 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A, pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B);
6455}
6456
6457/**
6458 * Returns the GC address of the corresponding patch statistics counter
6459 *
6460 * @returns Stat address
6461 * @param pVM The VM to operate on.
6462 * @param pPatch Patch structure
6463 */
6464RTRCPTR patmPatchQueryStatAddress(PVM pVM, PPATCHINFO pPatch)
6465{
6466 Assert(pPatch->uPatchIdx != PATM_STAT_INDEX_NONE);
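/* Each patch owns one STAMRATIOU32 slot in the GC statistics block (e.g. for uPatchIdx == 2 the slot starts at pStatsGC + 2 * sizeof(STAMRATIOU32)); return the address of its u32A member. */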
6467 return pVM->patm.s.pStatsGC + sizeof(STAMRATIOU32) * pPatch->uPatchIdx + RT_OFFSETOF(STAMRATIOU32, u32A);
6468}
6469
6470#endif /* VBOX_WITH_STATISTICS */
6471
6472#ifdef VBOX_WITH_DEBUGGER
6473/**
6474 * The '.patmoff' command.
6475 *
6476 * @returns VBox status.
6477 * @param pCmd Pointer to the command descriptor (as registered).
6478 * @param pCmdHlp Pointer to command helper functions.
6479 * @param pVM Pointer to the current VM (if any).
6480 * @param paArgs Pointer to (readonly) array of arguments.
6481 * @param cArgs Number of arguments in the array.
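 * @param   pResult     Pointer to the result variable (not used by this command).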
6482 */
6483static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
6484{
6485 /*
6486 * Validate input.
6487 */
6488 if (!pVM)
6489 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
6490
6491 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, DisableAllPatches, pVM);
6492 PATMR3AllowPatching(pVM, false);
6493 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching disabled\n");
6494}
6495
6496/**
6497 * The '.patmon' command.
6498 *
6499 * @returns VBox status.
6500 * @param pCmd Pointer to the command descriptor (as registered).
6501 * @param pCmdHlp Pointer to command helper functions.
6502 * @param pVM Pointer to the current VM (if any).
6503 * @param paArgs Pointer to (readonly) array of arguments.
6504 * @param cArgs Number of arguments in the array.
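 * @param   pResult     Pointer to the result variable (not used by this command).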
6505 */
6506static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
6507{
6508 /*
6509 * Validate input.
6510 */
6511 if (!pVM)
6512 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
6513
6514 PATMR3AllowPatching(pVM, true);
6515 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, EnableAllPatches, pVM);
6516 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching enabled\n");
6517}
6518#endif