VirtualBox

source: vbox/trunk/src/VBox/VMM/PATM/PATM.cpp@28879

Last change on this file since 28879 was 28800, checked in by vboxsync, 15 years ago

Automated rebranding to Oracle copyright/license strings via filemuncher

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 245.2 KB
1/* $Id: PATM.cpp 28800 2010-04-27 08:22:32Z vboxsync $ */
2/** @file
3 * PATM - Dynamic Guest OS Patching Manager
4 *
5 * NOTE: Never ever reuse patch memory!!
6 */
7
8/*
9 * Copyright (C) 2006-2007 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_PATM
24#include <VBox/patm.h>
25#include <VBox/stam.h>
26#include <VBox/pgm.h>
27#include <VBox/cpum.h>
28#include <VBox/cpumdis.h>
29#include <VBox/iom.h>
30#include <VBox/mm.h>
31#include <VBox/ssm.h>
32#include <VBox/trpm.h>
33#include <VBox/cfgm.h>
34#include <VBox/param.h>
35#include <VBox/selm.h>
36#include <iprt/avl.h>
37#include "PATMInternal.h"
38#include "PATMPatch.h"
39#include <VBox/vm.h>
40#include <VBox/csam.h>
41
42#include <VBox/dbg.h>
43#include <VBox/err.h>
44#include <VBox/log.h>
45#include <iprt/assert.h>
46#include <iprt/asm.h>
47#include <VBox/dis.h>
48#include <VBox/disopcode.h>
49
50#include <iprt/string.h>
51#include "PATMA.h"
52
53//#define PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
54//#define PATM_DISABLE_ALL
55
56/*******************************************************************************
57* Internal Functions *
58*******************************************************************************/
59
60static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pPatch);
61static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
62static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
63
64#ifdef LOG_ENABLED // keep gcc quiet
65static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC);
66#endif
67#ifdef VBOX_WITH_STATISTICS
68static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch);
69static void patmResetStat(PVM pVM, void *pvSample);
70static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf);
71#endif
72
73#define patmPatchHCPtr2PatchGCPtr(pVM, pHC) (pVM->patm.s.pPatchMemGC + (pHC - pVM->patm.s.pPatchMemHC))
74#define patmPatchGCPtr2PatchHCPtr(pVM, pGC) (pVM->patm.s.pPatchMemHC + (pGC - pVM->patm.s.pPatchMemGC))
75
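/* Annotation (not part of the original source): a minimal sketch of how the two
 * conversion macros above relate. Assuming pPatchMemHC and pPatchMemGC map the same
 * patch memory block, the translations are plain offset arithmetic and round-trip:
 *
 *     uint8_t *pInstrHC = pVM->patm.s.pPatchMemHC + 0x100;              // hypothetical offset
 *     RCPTRTYPE(uint8_t *) pInstrGC = patmPatchHCPtr2PatchGCPtr(pVM, pInstrHC);
 *     Assert(patmPatchGCPtr2PatchHCPtr(pVM, pInstrGC) == pInstrHC);     // identity round trip
 */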
76static int patmReinit(PVM pVM);
77static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam);
78
79#ifdef VBOX_WITH_DEBUGGER
80static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM);
81static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
82static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
83
84/** Command descriptors. */
85static const DBGCCMD g_aCmds[] =
86{
87 /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, pResultDesc, fFlags, pfnHandler, pszSyntax, pszDescription */
88 { "patmon", 0, 0, NULL, 0, NULL, 0, patmr3CmdOn, "", "Enable patching." },
89 { "patmoff", 0, 0, NULL, 0, NULL, 0, patmr3CmdOff, "", "Disable patching." },
90};
91#endif
92
93/* Don't want to break saved states, so put it here as a global variable. */
94static unsigned int cIDTHandlersDisabled = 0;
95
96/**
97 * Initializes the PATM.
98 *
99 * @returns VBox status code.
100 * @param pVM The VM to operate on.
101 */
102VMMR3DECL(int) PATMR3Init(PVM pVM)
103{
104 int rc;
105
106 Log(("PATMR3Init: Patch record size %d\n", sizeof(PATCHINFO)));
107
108 /* These values can't change as they are hardcoded in patch code (old saved states!) */
109 AssertCompile(VMCPU_FF_TIMER == RT_BIT_32(2));
110 AssertCompile(VM_FF_REQUEST == VMCPU_FF_REQUEST);
111 AssertCompile(VMCPU_FF_INTERRUPT_APIC == RT_BIT_32(0));
112 AssertCompile(VMCPU_FF_INTERRUPT_PIC == RT_BIT_32(1));
113
114 AssertReleaseMsg(PATMInterruptFlag == (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST),
115 ("Interrupt flags out of sync!! PATMInterruptFlag=%#x expected %#x. broken assembler?\n", PATMInterruptFlag, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST));
116
117 /* Allocate patch memory and GC patch state memory. */
118 pVM->patm.s.cbPatchMem = PATCH_MEMORY_SIZE;
119 /* Add another page in case the generated code is much larger than expected. */
120 /** @todo bad safety precaution */
121 rc = MMR3HyperAllocOnceNoRel(pVM, PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE + PATM_STAT_MEMSIZE, PAGE_SIZE, MM_TAG_PATM, (void **)&pVM->patm.s.pPatchMemHC);
122 if (RT_FAILURE(rc))
123 {
124 Log(("MMHyperAlloc failed with %Rrc\n", rc));
125 return rc;
126 }
127 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
128
129 /* PATM stack page for call instruction execution. (2 parts: one for our private stack and one to store the original return address) */
130 pVM->patm.s.pGCStackHC = (RTRCPTR *)(pVM->patm.s.pPatchMemHC + PATCH_MEMORY_SIZE + PAGE_SIZE);
131 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
132
133 /*
134 * Hypervisor memory for GC status data (read/write)
135 *
136 * Note1: This is non-critical data; if trashed by the guest, then it will only cause problems for itself
137 * Note2: This doesn't really belong here, but we need access to it for relocation purposes
138 *
139 */
140 Assert(sizeof(PATMGCSTATE) < PAGE_SIZE); /** @note hardcoded dependencies on this exist. */
141 pVM->patm.s.pGCStateHC = (PPATMGCSTATE)((uint8_t *)pVM->patm.s.pGCStackHC + PATM_STACK_TOTAL_SIZE);
142 pVM->patm.s.pGCStateGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
143
144 /* Hypervisor memory for patch statistics */
145 pVM->patm.s.pStatsHC = (PSTAMRATIOU32)((uint8_t *)pVM->patm.s.pGCStateHC + PAGE_SIZE);
146 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
147
148 /* Memory for patch lookup trees. */
149 rc = MMHyperAlloc(pVM, sizeof(*pVM->patm.s.PatchLookupTreeHC), 0, MM_TAG_PATM, (void **)&pVM->patm.s.PatchLookupTreeHC);
150 AssertRCReturn(rc, rc);
151 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
152
153#ifdef RT_ARCH_AMD64 /* see patmReinit(). */
154 /* Check CFGM option. */
155 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "PATMEnabled", &pVM->fPATMEnabled);
156 if (RT_FAILURE(rc))
157# ifdef PATM_DISABLE_ALL
158 pVM->fPATMEnabled = false;
159# else
160 pVM->fPATMEnabled = true;
161# endif
162#endif
163
164 rc = patmReinit(pVM);
165 AssertRC(rc);
166 if (RT_FAILURE(rc))
167 return rc;
168
169 /*
170 * Register save and load state notificators.
171 */
172 rc = SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SSM_VERSION, sizeof(pVM->patm.s) + PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE,
173 NULL, NULL, NULL,
174 NULL, patmR3Save, NULL,
175 NULL, patmR3Load, NULL);
176 AssertRCReturn(rc, rc);
177
178#ifdef VBOX_WITH_DEBUGGER
179 /*
180 * Debugger commands.
181 */
182 static bool s_fRegisteredCmds = false;
183 if (!s_fRegisteredCmds)
184 {
185 int rc2 = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
186 if (RT_SUCCESS(rc2))
187 s_fRegisteredCmds = true;
188 }
189#endif
190
191#ifdef VBOX_WITH_STATISTICS
192 STAM_REG(pVM, &pVM->patm.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/PATM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
193 STAM_REG(pVM, &pVM->patm.s.StatPATMMemoryUsed,STAMTYPE_COUNTER, "/PATM/MemoryUsed", STAMUNIT_OCCURENCES, "The amount of hypervisor heap used for patches.");
194 STAM_REG(pVM, &pVM->patm.s.StatDisabled, STAMTYPE_COUNTER, "/PATM/Patch/Disabled", STAMUNIT_OCCURENCES, "Number of times patches were disabled.");
195 STAM_REG(pVM, &pVM->patm.s.StatEnabled, STAMTYPE_COUNTER, "/PATM/Patch/Enabled", STAMUNIT_OCCURENCES, "Number of times patches were enabled.");
196 STAM_REG(pVM, &pVM->patm.s.StatDirty, STAMTYPE_COUNTER, "/PATM/Patch/Dirty", STAMUNIT_OCCURENCES, "Number of times patches were marked dirty.");
197 STAM_REG(pVM, &pVM->patm.s.StatUnusable, STAMTYPE_COUNTER, "/PATM/Patch/Unusable", STAMUNIT_OCCURENCES, "Number of unusable patches (conflicts).");
198 STAM_REG(pVM, &pVM->patm.s.StatInstalled, STAMTYPE_COUNTER, "/PATM/Patch/Installed", STAMUNIT_OCCURENCES, "Number of installed patches.");
199 STAM_REG(pVM, &pVM->patm.s.StatInt3Callable, STAMTYPE_COUNTER, "/PATM/Patch/Int3Callable", STAMUNIT_OCCURENCES, "Number of cli patches turned into int3 patches.");
200
201 STAM_REG(pVM, &pVM->patm.s.StatInt3BlockRun, STAMTYPE_COUNTER, "/PATM/Patch/Run/Int3", STAMUNIT_OCCURENCES, "Number of times an int3 block patch was executed.");
202 STAMR3RegisterF(pVM, &pVM->patm.s.pGCStateHC->uPatchCalls, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Patch/Run/Normal");
203
204 STAM_REG(pVM, &pVM->patm.s.StatInstalledFunctionPatches, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Function", STAMUNIT_OCCURENCES, "Number of installed function duplication patches.");
205 STAM_REG(pVM, &pVM->patm.s.StatInstalledTrampoline, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Trampoline", STAMUNIT_OCCURENCES, "Number of installed trampoline patches.");
206 STAM_REG(pVM, &pVM->patm.s.StatInstalledJump, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Jump", STAMUNIT_OCCURENCES, "Number of installed jump patches.");
207
208 STAM_REG(pVM, &pVM->patm.s.StatOverwritten, STAMTYPE_COUNTER, "/PATM/Patch/Overwritten", STAMUNIT_OCCURENCES, "Number of overwritten patches.");
209 STAM_REG(pVM, &pVM->patm.s.StatFixedConflicts,STAMTYPE_COUNTER, "/PATM/Patch/ConflictFixed", STAMUNIT_OCCURENCES, "Number of fixed conflicts.");
210 STAM_REG(pVM, &pVM->patm.s.StatFlushed, STAMTYPE_COUNTER, "/PATM/Patch/Flushed", STAMUNIT_OCCURENCES, "Number of flushes of pages with patch jumps.");
211 STAM_REG(pVM, &pVM->patm.s.StatMonitored, STAMTYPE_COUNTER, "/PATM/Patch/Monitored", STAMUNIT_OCCURENCES, "Number of patches in monitored patch pages.");
212 STAM_REG(pVM, &pVM->patm.s.StatPageBoundaryCrossed, STAMTYPE_COUNTER, "/PATM/Patch/BoundaryCross", STAMUNIT_OCCURENCES, "Number of refused patches due to patch jump crossing page boundary.");
213
214 STAM_REG(pVM, &pVM->patm.s.StatHandleTrap, STAMTYPE_PROFILE, "/PATM/HandleTrap", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3HandleTrap");
215 STAM_REG(pVM, &pVM->patm.s.StatPushTrap, STAMTYPE_COUNTER, "/PATM/HandleTrap/PushWP", STAMUNIT_OCCURENCES, "Number of traps due to monitored stack pages.");
216
217 STAM_REG(pVM, &pVM->patm.s.StatSwitchBack, STAMTYPE_COUNTER, "/PATM/SwitchBack", STAMUNIT_OCCURENCES, "Switch back to original guest code when IF=1 & executing PATM instructions");
218 STAM_REG(pVM, &pVM->patm.s.StatSwitchBackFail,STAMTYPE_COUNTER, "/PATM/SwitchBackFail", STAMUNIT_OCCURENCES, "Failed switch back to original guest code when IF=1 & executing PATM instructions");
219
220 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQFailed, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Failed", STAMUNIT_OCCURENCES, "Nr of failed PATMR3DuplicateFunctionRequest calls");
221 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQSuccess, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Success", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls");
222 STAM_REG(pVM, &pVM->patm.s.StatDuplicateUseExisting,STAMTYPE_COUNTER, "/PATM/Function/DupREQ/UseExist", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls when using an existing patch");
223
224 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupInsert, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Insert", STAMUNIT_OCCURENCES, "Nr of successful function address insertions");
225 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupReplace, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Replace", STAMUNIT_OCCURENCES, "Nr of successful function address replacements");
226 STAM_REG(pVM, &pVM->patm.s.StatU32FunctionMaxSlotsUsed, STAMTYPE_U32_RESET,"/PATM/Function/Lookup/MaxSlots", STAMUNIT_OCCURENCES, "Maximum nr of lookup slots used in all call patches");
227
228 STAM_REG(pVM, &pVM->patm.s.StatFunctionFound, STAMTYPE_COUNTER, "/PATM/Function/Found", STAMUNIT_OCCURENCES, "Nr of successful function patch lookups in GC");
229 STAM_REG(pVM, &pVM->patm.s.StatFunctionNotFound, STAMTYPE_COUNTER, "/PATM/Function/NotFound", STAMUNIT_OCCURENCES, "Nr of failed function patch lookups in GC");
230
231 STAM_REG(pVM, &pVM->patm.s.StatPatchWrite, STAMTYPE_PROFILE, "/PATM/Write/Handle", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3PatchWrite");
232 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteDetect, STAMTYPE_PROFILE, "/PATM/Write/Detect", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMIsWriteToPatchPage");
233 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpreted, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Success", STAMUNIT_OCCURENCES, "Nr of interpreted patch writes.");
234 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpretedFailed, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Failed", STAMUNIT_OCCURENCES, "Nr of failed interpreted patch writes.");
235
236 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshSuccess, STAMTYPE_COUNTER, "/PATM/Refresh/Success", STAMUNIT_OCCURENCES, "Successful patch refreshes");
237 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshFailed, STAMTYPE_COUNTER, "/PATM/Refresh/Failure", STAMUNIT_OCCURENCES, "Failed patch refreshes");
238
239 STAM_REG(pVM, &pVM->patm.s.StatPatchPageInserted, STAMTYPE_COUNTER, "/PATM/Page/Inserted", STAMUNIT_OCCURENCES, "Nr of inserted guest pages that were patched");
240 STAM_REG(pVM, &pVM->patm.s.StatPatchPageRemoved, STAMTYPE_COUNTER, "/PATM/Page/Removed", STAMUNIT_OCCURENCES, "Nr of removed guest pages that were patched");
241
242 STAM_REG(pVM, &pVM->patm.s.StatInstrDirty, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Detected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty.");
243 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyGood, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Corrected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and corrected later on.");
244 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyBad, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Failed", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and we were not able to correct them.");
245
246 STAM_REG(pVM, &pVM->patm.s.StatSysEnter, STAMTYPE_COUNTER, "/PATM/Emul/SysEnter", STAMUNIT_OCCURENCES, "Number of times sysenter was emulated.");
247 STAM_REG(pVM, &pVM->patm.s.StatSysExit, STAMTYPE_COUNTER, "/PATM/Emul/SysExit" , STAMUNIT_OCCURENCES, "Number of times sysexit was emulated.");
248 STAM_REG(pVM, &pVM->patm.s.StatEmulIret, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Success", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
249 STAM_REG(pVM, &pVM->patm.s.StatEmulIretFailed, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Failed", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
250
251 STAM_REG(pVM, &pVM->patm.s.StatGenRet, STAMTYPE_COUNTER, "/PATM/Gen/Ret" , STAMUNIT_OCCURENCES, "Number of generated ret instructions.");
252 STAM_REG(pVM, &pVM->patm.s.StatGenRetReused, STAMTYPE_COUNTER, "/PATM/Gen/RetReused" , STAMUNIT_OCCURENCES, "Number of reused ret instructions.");
253 STAM_REG(pVM, &pVM->patm.s.StatGenCall, STAMTYPE_COUNTER, "/PATM/Gen/Call", STAMUNIT_OCCURENCES, "Number of generated call instructions.");
254 STAM_REG(pVM, &pVM->patm.s.StatGenJump, STAMTYPE_COUNTER, "/PATM/Gen/Jmp" , STAMUNIT_OCCURENCES, "Number of generated indirect jump instructions.");
255 STAM_REG(pVM, &pVM->patm.s.StatGenPopf, STAMTYPE_COUNTER, "/PATM/Gen/Popf" , STAMUNIT_OCCURENCES, "Number of generated popf instructions.");
256
257 STAM_REG(pVM, &pVM->patm.s.StatCheckPendingIRQ, STAMTYPE_COUNTER, "/PATM/GC/CheckIRQ" , STAMUNIT_OCCURENCES, "Number of traps that ask to check for pending irqs.");
258#endif /* VBOX_WITH_STATISTICS */
259
260 Log(("PATMCallRecord.size %d\n", PATMCallRecord.size));
261 Log(("PATMCallIndirectRecord.size %d\n", PATMCallIndirectRecord.size));
262 Log(("PATMRetRecord.size %d\n", PATMRetRecord.size));
263 Log(("PATMJumpIndirectRecord.size %d\n", PATMJumpIndirectRecord.size));
264 Log(("PATMPopf32Record.size %d\n", PATMPopf32Record.size));
265 Log(("PATMIretRecord.size %d\n", PATMIretRecord.size));
266 Log(("PATMStiRecord.size %d\n", PATMStiRecord.size));
267 Log(("PATMCheckIFRecord.size %d\n", PATMCheckIFRecord.size));
268
269 return rc;
270}
271
272/**
273 * Finalizes HMA page attributes.
274 *
275 * @returns VBox status code.
276 * @param pVM The VM handle.
277 */
278VMMR3DECL(int) PATMR3InitFinalize(PVM pVM)
279{
280 /* The GC state, stack and statistics must be read/write for the guest (supervisor only of course). */
281 int rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStateGC, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
282 if (RT_FAILURE(rc))
283 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
284
285 rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStackGC, PATM_STACK_TOTAL_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
286 if (RT_FAILURE(rc))
287 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
288
289 rc = PGMMapSetPage(pVM, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
290 if (RT_FAILURE(rc))
291 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
292
293 return rc;
294}
295
296/**
297 * (Re)initializes PATM
298 *
299 * @param pVM The VM.
300 */
301static int patmReinit(PVM pVM)
302{
303 int rc;
304
305 /*
306 * Assert alignment and sizes.
307 */
308 AssertRelease(!(RT_OFFSETOF(VM, patm.s) & 31));
309 AssertRelease(sizeof(pVM->patm.s) <= sizeof(pVM->patm.padding));
310
311 /*
312 * Setup any fixed pointers and offsets.
313 */
314 pVM->patm.s.offVM = RT_OFFSETOF(VM, patm);
315
316#ifndef RT_ARCH_AMD64 /* would be nice if this was changed everywhere. was driving me crazy on AMD64. */
317#ifndef PATM_DISABLE_ALL
318 pVM->fPATMEnabled = true;
319#endif
320#endif
321
322 Assert(pVM->patm.s.pGCStateHC);
323 memset(pVM->patm.s.pGCStateHC, 0, PAGE_SIZE);
324 AssertReleaseMsg(pVM->patm.s.pGCStateGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStateGC));
325
326 Log(("Patch memory allocated at %p - %RRv\n", pVM->patm.s.pPatchMemHC, pVM->patm.s.pPatchMemGC));
327 pVM->patm.s.pGCStateHC->uVMFlags = X86_EFL_IF;
328
329 Assert(pVM->patm.s.pGCStackHC);
330 memset(pVM->patm.s.pGCStackHC, 0, PAGE_SIZE);
331 AssertReleaseMsg(pVM->patm.s.pGCStackGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStackGC));
332 pVM->patm.s.pGCStateHC->Psp = PATM_STACK_SIZE;
333 pVM->patm.s.pGCStateHC->fPIF = 1; /* PATM Interrupt Flag */
334
335 Assert(pVM->patm.s.pStatsHC);
336 memset(pVM->patm.s.pStatsHC, 0, PATM_STAT_MEMSIZE);
337 AssertReleaseMsg(pVM->patm.s.pStatsGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pStatsGC));
338
339 Assert(pVM->patm.s.pPatchMemHC);
340 Assert(pVM->patm.s.pPatchMemGC == MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC));
341 memset(pVM->patm.s.pPatchMemHC, 0, PATCH_MEMORY_SIZE);
342 AssertReleaseMsg(pVM->patm.s.pPatchMemGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pPatchMemHC));
343
344 /* Needed for future patching of sldt/sgdt/sidt/str etc. */
345 pVM->patm.s.pCPUMCtxGC = VM_RC_ADDR(pVM, CPUMQueryGuestCtxPtr(VMMGetCpu(pVM)));
346
347 Assert(pVM->patm.s.PatchLookupTreeHC);
348 Assert(pVM->patm.s.PatchLookupTreeGC == MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC));
349
350 /*
351 * (Re)Initialize PATM structure
352 */
353 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
354 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr);
355 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
356 pVM->patm.s.offPatchMem = 16; /* don't start with zero here */
357 pVM->patm.s.uCurrentPatchIdx = 1; /* Index zero is a dummy */
358 pVM->patm.s.pvFaultMonitor = 0;
359 pVM->patm.s.deltaReloc = 0;
360
361 /* Lowest and highest patched instruction */
362 pVM->patm.s.pPatchedInstrGCLowest = ~0;
363 pVM->patm.s.pPatchedInstrGCHighest = 0;
364
365 pVM->patm.s.PatchLookupTreeHC->PatchTree = 0;
366 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
367 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
368
369 pVM->patm.s.pfnSysEnterPatchGC = 0;
370 pVM->patm.s.pfnSysEnterGC = 0;
371
372 pVM->patm.s.fOutOfMemory = false;
373
374 pVM->patm.s.pfnHelperCallGC = 0;
375
376 /* Generate all global functions to be used by future patches. */
377 /* We generate a fake patch in order to use the existing code for relocation. */
378 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pVM->patm.s.pGlobalPatchRec);
379 if (RT_FAILURE(rc))
380 {
381 Log(("Out of memory!!!!\n"));
382 return VERR_NO_MEMORY;
383 }
384 pVM->patm.s.pGlobalPatchRec->patch.flags = PATMFL_GLOBAL_FUNCTIONS;
385 pVM->patm.s.pGlobalPatchRec->patch.uState = PATCH_ENABLED;
386 pVM->patm.s.pGlobalPatchRec->patch.pPatchBlockOffset = pVM->patm.s.offPatchMem;
387
388 rc = patmPatchGenGlobalFunctions(pVM, &pVM->patm.s.pGlobalPatchRec->patch);
389 AssertRC(rc);
390
391 /* Update free pointer in patch memory. */
392 pVM->patm.s.offPatchMem += pVM->patm.s.pGlobalPatchRec->patch.uCurPatchOffset;
393 /* Round to next 8 byte boundary. */
394 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
395 return rc;
396}
397
398
399/**
400 * Applies relocations to data and code managed by this
401 * component. This function will be called at init and
402 * whenever the VMM needs to relocate itself inside the GC.
403 *
404 * The PATM will update the addresses used by the switcher.
405 *
406 * @param pVM The VM.
407 */
408VMMR3DECL(void) PATMR3Relocate(PVM pVM)
409{
410 RTRCPTR GCPtrNew = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
411 RTRCINTPTR delta = GCPtrNew - pVM->patm.s.pGCStateGC;
412
413 Log(("PATMR3Relocate from %RRv to %RRv - delta %08X\n", pVM->patm.s.pGCStateGC, GCPtrNew, delta));
414 if (delta)
415 {
416 PCPUMCTX pCtx;
417
418 /* Update CPUMCTX guest context pointer. */
419 pVM->patm.s.pCPUMCtxGC += delta;
420
421 pVM->patm.s.deltaReloc = delta;
422
423 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, RelocatePatches, (void *)pVM);
424
425 pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));
426
427 /* If we are running patch code right now, then also adjust EIP. */
428 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
429 pCtx->eip += delta;
430
431 pVM->patm.s.pGCStateGC = GCPtrNew;
432 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
433
434 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
435
436 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
437
438 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
439
440 if (pVM->patm.s.pfnSysEnterPatchGC)
441 pVM->patm.s.pfnSysEnterPatchGC += delta;
442
443 /* Deal with the global patch functions. */
444 pVM->patm.s.pfnHelperCallGC += delta;
445 pVM->patm.s.pfnHelperRetGC += delta;
446 pVM->patm.s.pfnHelperIretGC += delta;
447 pVM->patm.s.pfnHelperJumpGC += delta;
448
449 RelocatePatches(&pVM->patm.s.pGlobalPatchRec->Core, (void *)pVM);
450 }
451}
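/* Annotation (not part of the original source): a worked example of the relocation
 * delta applied above, with purely illustrative addresses. If the GC state used to
 * live at 0xa0001000 and is now mapped at 0xa0003000, then
 *
 *     delta = 0xa0003000 - 0xa0001000 = 0x2000
 *
 * and every recorded RC address (patch memory, stack, statistics, the CPUMCTX
 * pointer, EIP while executing patch code, and all patch fixups handled by
 * RelocatePatches) is shifted by that same 0x2000.
 */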
452
453
454/**
455 * Terminates the PATM.
456 *
457 * Termination means cleaning up and freeing all resources;
458 * the VM itself is at this point powered off or suspended.
459 *
460 * @returns VBox status code.
461 * @param pVM The VM to operate on.
462 */
463VMMR3DECL(int) PATMR3Term(PVM pVM)
464{
465 /* Memory was all allocated from the two MM heaps and requires no freeing. */
466 return VINF_SUCCESS;
467}
468
469
470/**
471 * PATM reset callback.
472 *
473 * @returns VBox status code.
474 * @param pVM The VM which is reset.
475 */
476VMMR3DECL(int) PATMR3Reset(PVM pVM)
477{
478 Log(("PATMR3Reset\n"));
479
480 /* Free all patches. */
481 while (true)
482 {
483 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32RemoveBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, 0, true);
484 if (pPatchRec)
485 {
486 PATMRemovePatch(pVM, pPatchRec, true);
487 }
488 else
489 break;
490 }
491 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
492 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
493 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
494 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
495
496 int rc = patmReinit(pVM);
497 if (RT_SUCCESS(rc))
498 rc = PATMR3InitFinalize(pVM); /* paranoia */
499
500 return rc;
501}
502
503/**
504 * Read callback for disassembly function; supports reading bytes that cross a page boundary
505 *
506 * @returns VBox status code.
507 * @param pSrc GC source pointer
508 * @param pDest HC destination pointer
509 * @param size Number of bytes to read
510 * @param pvUserdata Callback specific user data (pCpu)
511 *
512 */
513int patmReadBytes(RTUINTPTR pSrc, uint8_t *pDest, unsigned size, void *pvUserdata)
514{
515 DISCPUSTATE *pCpu = (DISCPUSTATE *)pvUserdata;
516 PATMDISASM *pDisInfo = (PATMDISASM *)pCpu->apvUserData[0];
517 int orgsize = size;
518
519 Assert(size);
520 if (size == 0)
521 return VERR_INVALID_PARAMETER;
522
523 /*
524 * Trap/interrupt handlers typically call common code on entry, which might already have patches inserted.
525 * As we currently don't support calling patch code from patch code, we'll let it read the original opcode bytes instead.
526 */
527 /** @todo could change in the future! */
528 if (pDisInfo->fReadFlags & PATMREAD_ORGCODE)
529 {
530 for (int i=0;i<orgsize;i++)
531 {
532 int rc = PATMR3QueryOpcode(pDisInfo->pVM, (RTRCPTR)pSrc, pDest);
533 if (RT_SUCCESS(rc))
534 {
535 pSrc++;
536 pDest++;
537 size--;
538 }
539 else break;
540 }
541 if (size == 0)
542 return VINF_SUCCESS;
543#ifdef VBOX_STRICT
544 if ( !(pDisInfo->pPatchInfo->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER))
545 && !(pDisInfo->fReadFlags & PATMREAD_NOCHECK))
546 {
547 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pSrc, NULL) == false);
548 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pSrc+size-1, NULL) == false);
549 }
550#endif
551 }
552
553
554 if (PAGE_ADDRESS(pDisInfo->pInstrGC) != PAGE_ADDRESS(pSrc + size - 1) && !PATMIsPatchGCAddr(pDisInfo->pVM, pSrc))
555 {
556 return PGMPhysSimpleReadGCPtr(&pDisInfo->pVM->aCpus[0], pDest, pSrc, size);
557 }
558 else
559 {
560 uint8_t *pInstrHC = pDisInfo->pInstrHC;
561
562 Assert(pInstrHC);
563
564 /* pInstrHC is the base address; adjust according to the GC pointer. */
565 pInstrHC = pInstrHC + (pSrc - pDisInfo->pInstrGC);
566
567 memcpy(pDest, (void *)pInstrHC, size);
568 }
569
570 return VINF_SUCCESS;
571}
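/* Annotation (not part of the original source): a rough sketch of how this read
 * callback is fed. The field names below are the ones referenced inside
 * patmReadBytes; the surrounding setup is hypothetical:
 *
 *     PATMDISASM disinfo;
 *     disinfo.pVM        = pVM;
 *     disinfo.pPatchInfo = pPatch;
 *     disinfo.pInstrGC   = pInstrGC;            // start of the code being disassembled
 *     disinfo.pInstrHC   = pInstrHC;            // HC mapping of that code
 *     disinfo.fReadFlags = PATMREAD_ORGCODE;    // prefer the original (unpatched) opcode bytes
 *     cpu.apvUserData[0] = &disinfo;            // retrieved again at the top of patmReadBytes
 *     // ... the disassembler core then invokes patmReadBytes(pSrc, pDest, size, &cpu)
 */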
572
573/**
574 * Callback function for RTAvloU32DoWithAll
575 *
576 * Updates all fixups in the patches
577 *
578 * @returns VBox status code.
579 * @param pNode Current node
580 * @param pParam The VM to operate on.
581 */
582static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam)
583{
584 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
585 PVM pVM = (PVM)pParam;
586 RTRCINTPTR delta;
587#ifdef LOG_ENABLED
588 DISCPUSTATE cpu;
589 char szOutput[256];
590 uint32_t opsize;
591 bool disret;
592#endif
593 int rc;
594
595 /* Nothing to do if the patch is not active. */
596 if (pPatch->patch.uState == PATCH_REFUSED)
597 return 0;
598
599#ifdef LOG_ENABLED
600 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
601 {
602 /** @note pPrivInstrHC is probably not valid anymore */
603 rc = PGMPhysGCPtr2R3Ptr(VMMGetCpu0(pVM), pPatch->patch.pPrivInstrGC, (PRTR3PTR)&pPatch->patch.pPrivInstrHC);
604 if (rc == VINF_SUCCESS)
605 {
606 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
607 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pPatch->patch.pPrivInstrGC, pPatch->patch.pPrivInstrHC, &opsize, szOutput, PATMREAD_RAWCODE);
608 Log(("Org patch jump: %s", szOutput));
609 }
610 }
611#endif
612
613 Log(("Nr of fixups %d\n", pPatch->patch.nrFixups));
614 delta = (RTRCINTPTR)pVM->patm.s.deltaReloc;
615
616 /*
617 * Apply fixups
618 */
619 PRELOCREC pRec = 0;
620 AVLPVKEY key = 0;
621
622 while (true)
623 {
624 /* Get the record that's closest from above */
625 pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true);
626 if (pRec == 0)
627 break;
628
629 key = (AVLPVKEY)(pRec->pRelocPos + 1); /* search for the next record during the next round. */
630
631 switch (pRec->uType)
632 {
633 case FIXUP_ABSOLUTE:
634 Log(("Absolute fixup at %RRv %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
635 if (!pRec->pSource || PATMIsPatchGCAddr(pVM, pRec->pSource))
636 {
637 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
638 }
639 else
640 {
641 uint8_t curInstr[15];
642 uint8_t oldInstr[15];
643 Assert(pRec->pSource && pPatch->patch.cbPrivInstr <= 15);
644
645 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
646
647 memcpy(oldInstr, pPatch->patch.aPrivInstr, pPatch->patch.cbPrivInstr);
648 *(RTRCPTR *)&oldInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
649
650 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), curInstr, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPrivInstr);
651 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
652
653 pRec->pDest = (RTRCPTR)((RTRCUINTPTR)pRec->pDest + delta);
654
655 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
656 {
657 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
658
659 Log(("PATM: Patch page not present -> check later!\n"));
660 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
661 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
662 }
663 else
664 if (memcmp(curInstr, oldInstr, pPatch->patch.cbPrivInstr))
665 {
666 Log(("PATM: Patch was overwritten -> disabling patch!!\n"));
667 /*
668 * Disable patch; this is not a good solution
669 */
670 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
671 pPatch->patch.uState = PATCH_DISABLED;
672 }
673 else
674 if (RT_SUCCESS(rc))
675 {
676 *(RTRCPTR *)&curInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
677 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pRec->pSource, curInstr, pPatch->patch.cbPrivInstr);
678 AssertRC(rc);
679 }
680 }
681 break;
682
683 case FIXUP_REL_JMPTOPATCH:
684 {
685 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
686
687 if ( pPatch->patch.uState == PATCH_ENABLED
688 && (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE))
689 {
690 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
691 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
692 RTRCPTR pJumpOffGC;
693 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
694 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
695
696 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
697
698 Assert(pRec->pSource - pPatch->patch.cbPatchJump == pPatch->patch.pPrivInstrGC);
699#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
700 if (pPatch->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
701 {
702 Assert(pPatch->patch.flags & PATMFL_JUMP_CONFLICT);
703
704 pJumpOffGC = pPatch->patch.pPrivInstrGC + 2; //two byte opcode
705 oldJump[0] = pPatch->patch.aPrivInstr[0];
706 oldJump[1] = pPatch->patch.aPrivInstr[1];
707 *(RTRCUINTPTR *)&oldJump[2] = displOld;
708 }
709 else
710#endif
711 if (pPatch->patch.cbPatchJump == SIZEOF_NEARJUMP32)
712 {
713 pJumpOffGC = pPatch->patch.pPrivInstrGC + 1; //one byte opcode
714 oldJump[0] = 0xE9;
715 *(RTRCUINTPTR *)&oldJump[1] = displOld;
716 }
717 else
718 {
719 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->patch.cbPatchJump));
720 continue; //this should never happen!!
721 }
722 Assert(pPatch->patch.cbPatchJump <= sizeof(temp));
723
724 /*
725 * Read old patch jump and compare it to the one we previously installed
726 */
727 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPatchJump);
728 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
729
730 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
731 {
732 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
733
734 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
735 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
736 }
737 else
738 if (memcmp(temp, oldJump, pPatch->patch.cbPatchJump))
739 {
740 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
741 /*
742 * Disable patch; this is not a good solution
743 */
744 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
745 pPatch->patch.uState = PATCH_DISABLED;
746 }
747 else
748 if (RT_SUCCESS(rc))
749 {
750 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
751 AssertRC(rc);
752 }
753 else
754 {
755 AssertMsgFailed(("Unexpected error %d from PGMPhysSimpleReadGCPtr\n", rc));
756 }
757 }
758 else
759 {
760 Log(("Skip the guest jump to patch code for this disabled patch %08X - %08X\n", pPatch->patch.pPrivInstrHC, pRec->pRelocPos));
761 }
762
763 pRec->pDest = pTarget;
764 break;
765 }
766
767 case FIXUP_REL_JMPTOGUEST:
768 {
769 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
770 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
771
772 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
773 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
774 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
775 pRec->pSource = pSource;
776 break;
777 }
778
779 default:
780 AssertMsg(0, ("Invalid fixup type!!\n"));
781 return VERR_INVALID_PARAMETER;
782 }
783 }
784
785#ifdef LOG_ENABLED
786 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
787 {
788 /** @note pPrivInstrHC is probably not valid anymore */
789 rc = PGMPhysGCPtr2R3Ptr(VMMGetCpu0(pVM), pPatch->patch.pPrivInstrGC, (PRTR3PTR)&pPatch->patch.pPrivInstrHC);
790 if (rc == VINF_SUCCESS)
791 {
792 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
793 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pPatch->patch.pPrivInstrGC, pPatch->patch.pPrivInstrHC, &opsize, szOutput, PATMREAD_RAWCODE);
794 Log(("Rel patch jump: %s", szOutput));
795 }
796 }
797#endif
798 return 0;
799}
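/* Annotation (not part of the original source): the arithmetic behind the
 * FIXUP_REL_JMPTOGUEST case above, with an illustrative delta of 0x2000. The
 * patch-side source moves with the patch memory while the guest target stays put:
 *
 *     pSource(new) = pSource(old) + 0x2000
 *     displ        = pDest - pSource(new)        // value written back at pRelocPos
 *
 * so the stored displacement shrinks by exactly the amount the patch code moved
 * and the jump still lands on the same guest address.
 */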
800
801/**
802 * \#PF Handler callback for virtual access handler ranges.
803 *
804 * Important to realize that a physical page in a range can have aliases, and
805 * for ALL and WRITE handlers these will also trigger.
806 *
807 * @returns VINF_SUCCESS if the handler has carried out the operation.
808 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
809 * @param pVM VM Handle.
810 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
811 * @param pvPtr The HC mapping of that address.
812 * @param pvBuf What the guest is reading/writing.
813 * @param cbBuf How much it's reading/writing.
814 * @param enmAccessType The access type.
815 * @param pvUser User argument.
816 */
817DECLCALLBACK(int) patmVirtPageHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
818{
819 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
820 /** @todo could be the wrong virtual address (alias) */
821 pVM->patm.s.pvFaultMonitor = GCPtr;
822 PATMR3HandleMonitoredPage(pVM);
823 return VINF_PGM_HANDLER_DO_DEFAULT;
824}
825
826
827#ifdef VBOX_WITH_DEBUGGER
828/**
829 * Callback function for RTAvloU32DoWithAll
830 *
831 * Enables the patch that's being enumerated
832 *
833 * @returns 0 (continue enumeration).
834 * @param pNode Current node
835 * @param pVM The VM to operate on.
836 */
837static DECLCALLBACK(int) EnableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
838{
839 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
840
841 PATMR3EnablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
842 return 0;
843}
844#endif /* VBOX_WITH_DEBUGGER */
845
846
847#ifdef VBOX_WITH_DEBUGGER
848/**
849 * Callback function for RTAvloU32DoWithAll
850 *
851 * Disables the patch that's being enumerated
852 *
853 * @returns 0 (continue enumeration).
854 * @param pNode Current node
855 * @param pVM The VM to operate on.
856 */
857static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
858{
859 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
860
861 PATMR3DisablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
862 return 0;
863}
864#endif
865
866/**
867 * Returns the host context pointer and size of the patch memory block
868 *
869 * @returns Host context pointer to the patch memory block.
870 * @param pVM The VM to operate on.
871 * @param pcb Size of the patch memory block
872 */
873VMMR3DECL(void *) PATMR3QueryPatchMemHC(PVM pVM, uint32_t *pcb)
874{
875 if (pcb)
876 {
877 *pcb = pVM->patm.s.cbPatchMem;
878 }
879 return pVM->patm.s.pPatchMemHC;
880}
881
882
883/**
884 * Returns the guest context pointer and size of the patch memory block
885 *
886 * @returns Guest context pointer to the patch memory block.
887 * @param pVM The VM to operate on.
888 * @param pcb Size of the patch memory block
889 */
890VMMR3DECL(RTRCPTR) PATMR3QueryPatchMemGC(PVM pVM, uint32_t *pcb)
891{
892 if (pcb)
893 {
894 *pcb = pVM->patm.s.cbPatchMem;
895 }
896 return pVM->patm.s.pPatchMemGC;
897}
898
899
900/**
901 * Returns the host context pointer of the GC context structure
902 *
903 * @returns Host context pointer to the GC state structure.
904 * @param pVM The VM to operate on.
905 */
906VMMR3DECL(PPATMGCSTATE) PATMR3QueryGCStateHC(PVM pVM)
907{
908 return pVM->patm.s.pGCStateHC;
909}
910
911
912/**
913 * Checks whether the HC address is part of our patch region
914 *
915 * @returns true if the address lies within the patch memory block, false otherwise.
916 * @param pVM The VM to operate on.
917 * @param pAddrHC Host context address
918 */
919VMMR3DECL(bool) PATMR3IsPatchHCAddr(PVM pVM, R3PTRTYPE(uint8_t *) pAddrHC)
920{
921 return (pAddrHC >= pVM->patm.s.pPatchMemHC && pAddrHC < pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem) ? true : false;
922}
923
924
925/**
926 * Allows or disallows patching of privileged instructions executed by the guest OS
927 *
928 * @returns VBox status code.
929 * @param pVM The VM to operate on.
930 * @param fAllowPatching Allow/disallow patching
931 */
932VMMR3DECL(int) PATMR3AllowPatching(PVM pVM, uint32_t fAllowPatching)
933{
934 pVM->fPATMEnabled = (fAllowPatching) ? true : false;
935 return VINF_SUCCESS;
936}
937
938/**
939 * Convert a GC patch block pointer to a HC patch pointer
940 *
941 * @returns HC pointer or NULL if it's not a GC patch pointer
942 * @param pVM The VM to operate on.
943 * @param pAddrGC GC pointer
944 */
945VMMR3DECL(R3PTRTYPE(void *)) PATMR3GCPtrToHCPtr(PVM pVM, RTRCPTR pAddrGC)
946{
947 if (pVM->patm.s.pPatchMemGC <= pAddrGC && pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem > pAddrGC)
948 {
949 return pVM->patm.s.pPatchMemHC + (pAddrGC - pVM->patm.s.pPatchMemGC);
950 }
951 return NULL;
952}
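/* Annotation (not part of the original source): typical use of the helper above,
 * with the offset purely illustrative:
 *
 *     RTRCPTR  pAddrGC = pVM->patm.s.pPatchMemGC + 0x40;       // somewhere inside patch memory
 *     uint8_t *pAddrHC = (uint8_t *)PATMR3GCPtrToHCPtr(pVM, pAddrGC);
 *     if (pAddrHC)                                             // NULL means not a patch address
 *         Log(("Patch byte at %RRv is %02x\n", pAddrGC, *pAddrHC));
 */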
953
954/**
955 * Query PATM state (enabled/disabled)
956 *
957 * @returns 0 - disabled, 1 - enabled
958 * @param pVM The VM to operate on.
959 */
960VMMR3DECL(int) PATMR3IsEnabled(PVM pVM)
961{
962 return pVM->fPATMEnabled;
963}
964
965
966/**
967 * Convert guest context address to host context pointer
968 *
969 * @returns Host context pointer or NULL in case of an error.
970 * @param pVM The VM to operate on.
971 * @param pPatch Patch block structure pointer
972 * @param pGCPtr Guest context pointer
973 *
974 * @note The translated page is cached in pPatch->cacheRec for later lookups.
975 *
976 */
977R3PTRTYPE(uint8_t *) PATMGCVirtToHCVirt(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pGCPtr)
978{
979 int rc;
980 R3PTRTYPE(uint8_t *) pHCPtr;
981 uint32_t offset;
982
983 if (PATMIsPatchGCAddr(pVM, pGCPtr))
984 {
985 return PATCHCODE_PTR_HC(pPatch) + (pGCPtr - PATCHCODE_PTR_GC(pPatch));
986 }
987
988 offset = pGCPtr & PAGE_OFFSET_MASK;
989 if (pPatch->cacheRec.pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
990 {
991 return pPatch->cacheRec.pPatchLocStartHC + offset;
992 }
993
994 rc = PGMPhysGCPtr2R3Ptr(VMMGetCpu0(pVM), pGCPtr, (void **)&pHCPtr);
995 if (rc != VINF_SUCCESS)
996 {
997 AssertMsg(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("MMR3PhysGCVirt2HCVirtEx failed for %08X\n", pGCPtr));
998 return NULL;
999 }
1000////invalid? Assert(sizeof(R3PTRTYPE(uint8_t*)) == sizeof(uint32_t));
1001
1002 pPatch->cacheRec.pPatchLocStartHC = (R3PTRTYPE(uint8_t*))((RTHCUINTPTR)pHCPtr & PAGE_BASE_HC_MASK);
1003 pPatch->cacheRec.pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
1004 return pHCPtr;
1005}
1006
1007
1008/** Calculates and fills in all branch targets
1009 *
1010 * @returns VBox status code.
1011 * @param pVM The VM to operate on.
1012 * @param pPatch Current patch block pointer
1013 *
1014 */
1015static int patmr3SetBranchTargets(PVM pVM, PPATCHINFO pPatch)
1016{
1017 int32_t displ;
1018
1019 PJUMPREC pRec = 0;
1020 int nrJumpRecs = 0;
1021
1022 /*
1023 * Set all branch targets inside the patch block.
1024 * We remove all jump records as they are no longer needed afterwards.
1025 */
1026 while (true)
1027 {
1028 RCPTRTYPE(uint8_t *) pInstrGC;
1029 RCPTRTYPE(uint8_t *) pBranchTargetGC = 0;
1030
1031 pRec = (PJUMPREC)RTAvlPVRemoveBestFit(&pPatch->JumpTree, 0, true);
1032 if (pRec == 0)
1033 break;
1034
1035 nrJumpRecs++;
1036
1037 /* HC in patch block to GC in patch block. */
1038 pInstrGC = patmPatchHCPtr2PatchGCPtr(pVM, pRec->pJumpHC);
1039
1040 if (pRec->opcode == OP_CALL)
1041 {
1042 /* Special case: call function replacement patch from this patch block.
1043 */
1044 PPATMPATCHREC pFunctionRec = PATMQueryFunctionPatch(pVM, pRec->pTargetGC);
1045 if (!pFunctionRec)
1046 {
1047 int rc;
1048
1049 if (PATMR3HasBeenPatched(pVM, pRec->pTargetGC) == false)
1050 rc = PATMR3InstallPatch(pVM, pRec->pTargetGC, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
1051 else
1052 rc = VERR_PATCHING_REFUSED; /* exists as a normal patch; can't use it */
1053
1054 if (RT_FAILURE(rc))
1055 {
1056 uint8_t *pPatchHC;
1057 RTRCPTR pPatchGC;
1058 RTRCPTR pOrgInstrGC;
1059
1060 pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pInstrGC, 0);
1061 Assert(pOrgInstrGC);
1062
1063 /* Failure for some reason -> mark exit point with int 3. */
1064 Log(("Failed to install function replacement patch (at %x) for reason %Rrc\n", pOrgInstrGC, rc));
1065
1066 pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pOrgInstrGC);
1067 Assert(pPatchGC);
1068
1069 pPatchHC = pVM->patm.s.pPatchMemHC + (pPatchGC - pVM->patm.s.pPatchMemGC);
1070
1071 /* Set a breakpoint at the very beginning of the recompiled instruction */
1072 *pPatchHC = 0xCC;
1073
1074 continue;
1075 }
1076 }
1077 else
1078 {
1079 Log(("Patch block %RRv called as function\n", pFunctionRec->patch.pPrivInstrGC));
1080 pFunctionRec->patch.flags |= PATMFL_CODE_REFERENCED;
1081 }
1082
1083 pBranchTargetGC = PATMR3QueryPatchGCPtr(pVM, pRec->pTargetGC);
1084 }
1085 else
1086 {
1087 pBranchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pRec->pTargetGC);
1088 }
1089
1090 if (pBranchTargetGC == 0)
1091 {
1092 AssertMsgFailed(("patmr3SetBranchTargets: patmGuestGCPtrToPatchGCPtr failed for %08X\n", pRec->pTargetGC));
1093 return VERR_PATCHING_REFUSED;
1094 }
1095 /* Our jumps *always* have a dword displacement (to make things easier). */
1096 Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
1097 displ = pBranchTargetGC - (pInstrGC + pRec->offDispl + sizeof(RTRCPTR));
1098 *(RTRCPTR *)(pRec->pJumpHC + pRec->offDispl) = displ;
1099 Log(("Set branch target %d to %08X : %08x - (%08x + %d + %d)\n", nrJumpRecs, displ, pBranchTargetGC, pInstrGC, pRec->offDispl, sizeof(RTRCPTR)));
1100 }
1101 Assert(nrJumpRecs == pPatch->nrJumpRecs);
1102 Assert(pPatch->JumpTree == 0);
1103 return VINF_SUCCESS;
1104}
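/* Annotation (not part of the original source): the displacement written above
 * follows the usual x86 rule that a near jump/call is relative to the end of the
 * instruction. With purely illustrative addresses, a jump starting at 0xa0002000
 * whose 4-byte displacement field sits at offset 1 (offDispl = 1) and which targets
 * 0xa0002050 gets
 *
 *     displ = 0xa0002050 - (0xa0002000 + 1 + 4) = 0x4b
 *
 * which is the pBranchTargetGC - (pInstrGC + pRec->offDispl + sizeof(RTRCPTR))
 * computation performed in the loop.
 */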
1105
1106/** Adds an illegal instruction record
1107 *
1108 * @param pVM The VM to operate on.
1109 * @param pPatch Patch structure ptr
1110 * @param pInstrGC Guest context pointer to privileged instruction
1111 *
1112 */
1113static void patmAddIllegalInstrRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1114{
1115 PAVLPVNODECORE pRec;
1116
1117 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1118 Assert(pRec);
1119 pRec->Key = (AVLPVKEY)pInstrGC;
1120
1121 bool ret = RTAvlPVInsert(&pPatch->pTempInfo->IllegalInstrTree, pRec);
1122 Assert(ret); NOREF(ret);
1123 pPatch->pTempInfo->nrIllegalInstr++;
1124}
1125
1126static bool patmIsIllegalInstr(PPATCHINFO pPatch, RTRCPTR pInstrGC)
1127{
1128 PAVLPVNODECORE pRec;
1129
1130 pRec = RTAvlPVGet(&pPatch->pTempInfo->IllegalInstrTree, (AVLPVKEY)pInstrGC);
1131 if (pRec)
1132 return true;
1133 return false;
1134}
1135
1136/**
1137 * Add a patch to guest lookup record
1138 *
1139 * @param pVM The VM to operate on.
1140 * @param pPatch Patch structure ptr
1141 * @param pPatchInstrHC Host context pointer into the patch block
1142 * @param pInstrGC Guest context pointer to privileged instruction
1143 * @param enmType Lookup type
1144 * @param fDirty Dirty flag
1145 *
1146 */
1147 /** @note Be extremely careful with this function. Make absolutely sure the guest address is correct! (to avoid executing instructions twice!) */
1148void patmr3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC, PATM_LOOKUP_TYPE enmType, bool fDirty)
1149{
1150 bool ret;
1151 PRECPATCHTOGUEST pPatchToGuestRec;
1152 PRECGUESTTOPATCH pGuestToPatchRec;
1153 uint32_t PatchOffset = pPatchInstrHC - pVM->patm.s.pPatchMemHC; /* Offset in memory reserved for PATM. */
1154
1155 if (enmType == PATM_LOOKUP_PATCH2GUEST)
1156 {
1157 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1158 if (pPatchToGuestRec && pPatchToGuestRec->Core.Key == PatchOffset)
1159 return; /* already there */
1160
1161 Assert(!pPatchToGuestRec);
1162 }
1163#ifdef VBOX_STRICT
1164 else
1165 {
1166 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1167 Assert(!pPatchToGuestRec);
1168 }
1169#endif
1170
1171 pPatchToGuestRec = (PRECPATCHTOGUEST)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(RECPATCHTOGUEST) + sizeof(RECGUESTTOPATCH));
1172 Assert(pPatchToGuestRec);
1173 pPatchToGuestRec->Core.Key = PatchOffset;
1174 pPatchToGuestRec->pOrgInstrGC = pInstrGC;
1175 pPatchToGuestRec->enmType = enmType;
1176 pPatchToGuestRec->fDirty = fDirty;
1177
1178 ret = RTAvlU32Insert(&pPatch->Patch2GuestAddrTree, &pPatchToGuestRec->Core);
1179 Assert(ret);
1180
1181 /* GC to patch address */
1182 if (enmType == PATM_LOOKUP_BOTHDIR)
1183 {
1184 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
1185 if (!pGuestToPatchRec)
1186 {
1187 pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1188 pGuestToPatchRec->Core.Key = pInstrGC;
1189 pGuestToPatchRec->PatchOffset = PatchOffset;
1190
1191 ret = RTAvlU32Insert(&pPatch->Guest2PatchAddrTree, &pGuestToPatchRec->Core);
1192 Assert(ret);
1193 }
1194 }
1195
1196 pPatch->nrPatch2GuestRecs++;
1197}
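/* Annotation (not part of the original source): a small sketch of the lookups the
 * record added above enables, using only the trees and types referenced in this
 * function. The patch offset is the key on the patch-to-guest side and the guest
 * address on the reverse side (for PATM_LOOKUP_BOTHDIR records):
 *
 *     uint32_t PatchOffset = pPatchInstrHC - pVM->patm.s.pPatchMemHC;
 *     PRECPATCHTOGUEST pP2G = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
 *     // pP2G->pOrgInstrGC is the original guest instruction for that patch location
 *
 *     PRECGUESTTOPATCH pG2P = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
 *     // pG2P->PatchOffset + pVM->patm.s.pPatchMemGC yields the patch address again
 */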
1198
1199
1200/**
1201 * Removes a patch-to-guest lookup record
1202 *
1203 * @param pVM The VM to operate on.
1204 * @param pPatch Patch structure ptr
1205 * @param pPatchInstrGC Guest context pointer to patch block
1206 */
1207void patmr3RemoveP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pPatchInstrGC)
1208{
1209 PAVLU32NODECORE pNode;
1210 PAVLU32NODECORE pNode2;
1211 PRECPATCHTOGUEST pPatchToGuestRec;
1212 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
1213
1214 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1215 Assert(pPatchToGuestRec);
1216 if (pPatchToGuestRec)
1217 {
1218 if (pPatchToGuestRec->enmType == PATM_LOOKUP_BOTHDIR)
1219 {
1220 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1221
1222 Assert(pGuestToPatchRec->Core.Key);
1223 pNode2 = RTAvlU32Remove(&pPatch->Guest2PatchAddrTree, pGuestToPatchRec->Core.Key);
1224 Assert(pNode2);
1225 }
1226 pNode = RTAvlU32Remove(&pPatch->Patch2GuestAddrTree, pPatchToGuestRec->Core.Key);
1227 Assert(pNode);
1228
1229 MMR3HeapFree(pPatchToGuestRec);
1230 pPatch->nrPatch2GuestRecs--;
1231 }
1232}
1233
1234
1235/**
1236 * RTAvlPVDestroy callback.
1237 */
1238static DECLCALLBACK(int) patmEmptyTreePVCallback(PAVLPVNODECORE pNode, void *)
1239{
1240 MMR3HeapFree(pNode);
1241 return 0;
1242}
1243
1244/**
1245 * Empty the specified tree (PV tree, MMR3 heap)
1246 *
1247 * @param pVM The VM to operate on.
1248 * @param ppTree Tree to empty
1249 */
1250void patmEmptyTree(PVM pVM, PAVLPVNODECORE *ppTree)
1251{
1252 RTAvlPVDestroy(ppTree, patmEmptyTreePVCallback, NULL);
1253}
1254
1255
1256/**
1257 * RTAvlU32Destroy callback.
1258 */
1259static DECLCALLBACK(int) patmEmptyTreeU32Callback(PAVLU32NODECORE pNode, void *)
1260{
1261 MMR3HeapFree(pNode);
1262 return 0;
1263}
1264
1265/**
1266 * Empty the specified tree (U32 tree, MMR3 heap)
1267 *
1268 * @param pVM The VM to operate on.
1269 * @param ppTree Tree to empty
1270 */
1271void patmEmptyTreeU32(PVM pVM, PPAVLU32NODECORE ppTree)
1272{
1273 RTAvlU32Destroy(ppTree, patmEmptyTreeU32Callback, NULL);
1274}
1275
1276
1277/**
1278 * Analyses the instructions following the cli for compliance with our heuristics for cli & pushf
1279 *
1280 * @returns VBox status code.
1281 * @param pVM The VM to operate on.
1282 * @param pCpu CPU disassembly state
1283 * @param pInstrGC Guest context pointer to privileged instruction
1284 * @param pCurInstrGC Guest context pointer to the current instruction
1285 * @param pUserData User pointer (callback specific)
1286 *
1287 */
1288static int patmAnalyseBlockCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, void *pUserData)
1289{
1290 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
1291 bool fIllegalInstr = false;
1292
1293 //Preliminary heuristics:
1294 //- no call instructions without a fixed displacement between cli and sti/popf
1295 //- no jumps in the instructions following cli (4+ bytes; enough for the replacement jump (5 bytes))
1296 //- no nested pushf/cli
1297 //- sti/popf should be the (eventual) target of all branches
1298 //- no near or far returns; no int xx, no into
1299 //
1300 // Note: Later on we can impose less strict guidelines if the need arises
1301
1302 /* Bail out if the patch gets too big. */
1303 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1304 {
1305 Log(("Code block too big (%x) for patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1306 fIllegalInstr = true;
1307 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1308 }
1309 else
1310 {
1311 /* No unconditional jumps or calls without fixed displacements. */
1312 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1313 && (pCpu->pCurInstr->opcode == OP_JMP || pCpu->pCurInstr->opcode == OP_CALL)
1314 )
1315 {
1316 Assert(pCpu->param1.size <= 4 || pCpu->param1.size == 6);
1317 if ( pCpu->param1.size == 6 /* far call/jmp */
1318 || (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1319 || (OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1320 )
1321 {
1322 fIllegalInstr = true;
1323 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1324 }
1325 }
1326
1327 /* An unconditional (short) jump right after a cli is a potential problem; we will overwrite whichever function comes afterwards */
1328 if (pPatch->opcode == OP_CLI && pCpu->pCurInstr->opcode == OP_JMP)
1329 {
1330 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC + pCpu->opsize < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1331 {
1332 Log(("Dangerous unconditional jump ends in our generated patch jump!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1333 /* We turn this one into an int 3 callable patch. */
1334 pPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
1335 }
1336 }
1337 else
1338 /* no nested pushfs just yet; nested cli is allowed for cli patches though. */
1339 if (pPatch->opcode == OP_PUSHF)
1340 {
1341 if (pCurInstrGC != pInstrGC && pCpu->pCurInstr->opcode == OP_PUSHF)
1342 {
1343 fIllegalInstr = true;
1344 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1345 }
1346 }
1347
1348 // no far returns
1349 if (pCpu->pCurInstr->opcode == OP_RETF)
1350 {
1351 pPatch->pTempInfo->nrRetInstr++;
1352 fIllegalInstr = true;
1353 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1354 }
1355 else
1356 // no int xx or into either
1357 if (pCpu->pCurInstr->opcode == OP_INT3 || pCpu->pCurInstr->opcode == OP_INT || pCpu->pCurInstr->opcode == OP_INTO)
1358 {
1359 fIllegalInstr = true;
1360 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1361 }
1362 }
1363
1364 pPatch->cbPatchBlockSize += pCpu->opsize;
1365
1366 /* Illegal instruction -> end of analysis phase for this code block */
1367 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1368 return VINF_SUCCESS;
1369
1370 /* Check for exit points. */
1371 switch (pCpu->pCurInstr->opcode)
1372 {
1373 case OP_SYSEXIT:
1374 return VINF_SUCCESS; /* duplicate it; will fault or be emulated in GC. */
1375
1376 case OP_SYSENTER:
1377 case OP_ILLUD2:
1378 //This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further
1379 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1380 return VINF_SUCCESS;
1381
1382 case OP_STI:
1383 case OP_POPF:
1384 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION)));
1385 /* If our exit point lies within the generated patch jump, then we have to refuse!! */
1386 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1387 {
1388 Log(("Exit point within patch jump itself!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1389 return VERR_PATCHING_REFUSED;
1390 }
1391 if (pPatch->opcode == OP_PUSHF)
1392 {
1393 if (pCpu->pCurInstr->opcode == OP_POPF)
1394 {
1395 if (pPatch->cbPatchBlockSize >= SIZEOF_NEARJUMP32)
1396 return VINF_SUCCESS;
1397
1398 /* Or else we need to duplicate more instructions, because we can't jump back yet! */
1399 Log(("WARNING: End of block reached, but we need to duplicate some extra instruction to avoid a conflict with the patch jump\n"));
1400 pPatch->flags |= PATMFL_CHECK_SIZE;
1401 }
1402 break; //sti doesn't mark the end of a pushf block; only popf does
1403 }
1404 //else no break
1405 case OP_RETN: /* exit point for function replacement */
1406 return VINF_SUCCESS;
1407
1408 case OP_IRET:
1409 return VINF_SUCCESS; /* exitpoint */
1410
1411 case OP_CPUID:
1412 case OP_CALL:
1413 case OP_JMP:
1414 break;
1415
1416 default:
1417 if (pCpu->pCurInstr->optype & (OPTYPE_PRIVILEGED_NOTRAP))
1418 {
1419 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1420 return VINF_SUCCESS; /* exit point */
1421 }
1422 break;
1423 }
1424
1425 // Single instruction patch: stop once we've copied enough instructions *and* the current instruction is not a relative jump
1426 if ((pPatch->flags & PATMFL_CHECK_SIZE) && pPatch->cbPatchBlockSize > SIZEOF_NEARJUMP32 && !(pCpu->pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW))
1427 {
1428 // The end marker for this kind of patch is any instruction at a location outside our patch jump
1429 Log(("End of block at %RRv size %d\n", pCurInstrGC, pCpu->opsize));
1430 return VINF_SUCCESS;
1431 }
1432
1433 return VWRN_CONTINUE_ANALYSIS;
1434}
1435
1436/**
1437 * Analyses the instructions inside a function for compliance
1438 *
1439 * @returns VBox status code.
1440 * @param pVM The VM to operate on.
1441 * @param pCpu CPU disassembly state
1442 * @param pInstrGC Guest context pointer to privileged instruction
1443 * @param pCurInstrGC Guest context pointer to the current instruction
1444 * @param pUserData User pointer (callback specific)
1445 *
1446 */
1447static int patmAnalyseFunctionCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, void *pUserData)
1448{
1449 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
1450 bool fIllegalInstr = false;
1451
1452 //Preliminary heuristics:
1453 //- no call instructions
1454 //- ret ends a block
1455
1456 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
1457
1458 // bail out if the patch gets too big
1459 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1460 {
1461 Log(("Code block too big (%x) for function patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1462 fIllegalInstr = true;
1463 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1464 }
1465 else
1466 {
1467 // no unconditional jumps or calls without fixed displacements
1468 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1469 && (pCpu->pCurInstr->opcode == OP_JMP || pCpu->pCurInstr->opcode == OP_CALL)
1470 )
1471 {
1472 Assert(pCpu->param1.size <= 4 || pCpu->param1.size == 6);
1473 if ( pCpu->param1.size == 6 /* far call/jmp */
1474 || (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1475 || (OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1476 )
1477 {
1478 fIllegalInstr = true;
1479 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1480 }
1481 }
1482 else /* no far returns */
1483 if (pCpu->pCurInstr->opcode == OP_RETF)
1484 {
1485 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1486 fIllegalInstr = true;
1487 }
1488 else /* no int xx or into either */
1489 if (pCpu->pCurInstr->opcode == OP_INT3 || pCpu->pCurInstr->opcode == OP_INT || pCpu->pCurInstr->opcode == OP_INTO)
1490 {
1491 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1492 fIllegalInstr = true;
1493 }
1494
1495 #if 0
1496 ///@todo we can handle certain in/out and privileged instructions in the guest context
1497 if (pCpu->pCurInstr->optype & OPTYPE_PRIVILEGED && pCpu->pCurInstr->opcode != OP_STI)
1498 {
1499 Log(("Illegal instructions for function patch!!\n"));
1500 return VERR_PATCHING_REFUSED;
1501 }
1502 #endif
1503 }
1504
1505 pPatch->cbPatchBlockSize += pCpu->opsize;
1506
1507 /* Illegal instruction -> end of analysis phase for this code block */
1508 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1509 {
1510 return VINF_SUCCESS;
1511 }
1512
1513 // Check for exit points
1514 switch (pCpu->pCurInstr->opcode)
1515 {
1516 case OP_ILLUD2:
1517 //This appears to be some kind of kernel panic in Linux 2.4; no point in analysing any further
1518 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1519 return VINF_SUCCESS;
1520
1521 case OP_IRET:
1522 case OP_SYSEXIT: /* will fault or emulated in GC */
1523 case OP_RETN:
1524 return VINF_SUCCESS;
1525
1526 case OP_POPF:
1527 case OP_STI:
1528 return VWRN_CONTINUE_ANALYSIS;
1529 default:
1530 if (pCpu->pCurInstr->optype & (OPTYPE_PRIVILEGED_NOTRAP))
1531 {
1532 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1533 return VINF_SUCCESS; /* exit point */
1534 }
1535 return VWRN_CONTINUE_ANALYSIS;
1536 }
1537
1538 return VWRN_CONTINUE_ANALYSIS;
1539}
1540
1541/**
1542 * Recompiles the instructions in a code block
1543 *
1544 * @returns VBox status code.
1545 * @param pVM The VM to operate on.
1546 * @param pCpu CPU disassembly state
1547 * @param pInstrGC Guest context pointer to privileged instruction
1548 * @param pCurInstrGC Guest context pointer to the current instruction
1549 * @param pUserData User pointer (callback specific)
1550 *
1551 */
1552static int patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, void *pUserData)
1553{
1554 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
1555 int rc = VINF_SUCCESS;
1556 bool fInhibitIRQInstr = false; /* did the instruction cause PATMFL_INHIBITIRQS to be set? */
1557
1558 LogFlow(("patmRecompileCallback %RRv %RRv\n", pInstrGC, pCurInstrGC));
1559
1560 if ( patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pCurInstrGC) != 0
1561 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT)) /* do not do this when the next instruction *must* be executed! */
1562 {
1563 /*
1564 * Been there, done that; insert a jump instead (we don't want to duplicate code).
1565 * No need to record this instruction, as it's glue code that never crashes (it had better not!).
1566 */
1567 Log(("patmRecompileCallback: jump to code we've recompiled before %RRv!\n", pCurInstrGC));
1568 return patmPatchGenRelJump(pVM, pPatch, pCurInstrGC, OP_JMP, !!(pCpu->prefix & PREFIX_OPSIZE));
1569 }
1570
1571 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1572 {
1573 rc = patmAnalyseFunctionCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pUserData);
1574 }
1575 else
1576 rc = patmAnalyseBlockCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pUserData);
1577
1578 if (RT_FAILURE(rc))
1579 return rc;
1580
1581 /** @note Never do a direct return unless a failure is encountered! */
1582
1583 /* Clear recompilation of next instruction flag; we are doing that right here. */
1584 if (pPatch->flags & PATMFL_RECOMPILE_NEXT)
1585 pPatch->flags &= ~PATMFL_RECOMPILE_NEXT;
1586
1587 /* Add lookup record for patch to guest address translation */
1588 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
1589
1590 /* Update lowest and highest instruction address for this patch */
1591 if (pCurInstrGC < pPatch->pInstrGCLowest)
1592 pPatch->pInstrGCLowest = pCurInstrGC;
1593 else
1594 if (pCurInstrGC > pPatch->pInstrGCHighest)
1595 pPatch->pInstrGCHighest = pCurInstrGC + pCpu->opsize;
1596
1597 /* Illegal instruction -> end of recompile phase for this code block. */
1598 if (patmIsIllegalInstr(pPatch, pCurInstrGC))
1599 {
1600 Log(("Illegal instruction at %RRv -> mark with int 3\n", pCurInstrGC));
1601 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1602 goto end;
1603 }
1604
1605 /* For our first attempt, we'll handle only simple relative jumps (immediate offset coded in instruction).
1606 * Indirect calls are handled below.
1607 */
1608 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1609 && (pCpu->pCurInstr->opcode != OP_CALL || (pPatch->flags & PATMFL_SUPPORT_CALLS))
1610 && (OP_PARM_VTYPE(pCpu->pCurInstr->param1) == OP_PARM_J))
1611 {
1612 RCPTRTYPE(uint8_t *) pTargetGC = PATMResolveBranch(pCpu, pCurInstrGC);
1613 if (pTargetGC == 0)
1614 {
1615 Log(("We don't support far jumps here!! (%08X)\n", pCpu->param1.flags));
1616 return VERR_PATCHING_REFUSED;
1617 }
1618
1619 if (pCpu->pCurInstr->opcode == OP_CALL)
1620 {
1621 Assert(!PATMIsPatchGCAddr(pVM, pTargetGC));
1622 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, pTargetGC, false);
1623 if (RT_FAILURE(rc))
1624 goto end;
1625 }
1626 else
1627 rc = patmPatchGenRelJump(pVM, pPatch, pTargetGC, pCpu->pCurInstr->opcode, !!(pCpu->prefix & PREFIX_OPSIZE));
1628
1629 if (RT_SUCCESS(rc))
1630 rc = VWRN_CONTINUE_RECOMPILE;
1631
1632 goto end;
1633 }
1634
1635 switch (pCpu->pCurInstr->opcode)
1636 {
1637 case OP_CLI:
1638 {
1639 /* If a cli is found while duplicating instructions for another patch, then it's of vital importance to continue
1640 * until we've found the proper exit point(s).
1641 */
1642 if ( pCurInstrGC != pInstrGC
1643 && !(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1644 )
1645 {
1646 Log(("cli instruction found in other instruction patch block; force it to continue & find an exit point\n"));
1647 pPatch->flags &= ~(PATMFL_CHECK_SIZE | PATMFL_SINGLE_INSTRUCTION);
1648 }
1649 /* Set by irq inhibition; no longer valid now. */
1650 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1651
1652 rc = patmPatchGenCli(pVM, pPatch);
1653 if (RT_SUCCESS(rc))
1654 rc = VWRN_CONTINUE_RECOMPILE;
1655 break;
1656 }
1657
1658 case OP_MOV:
1659 if (pCpu->pCurInstr->optype & OPTYPE_POTENTIALLY_DANGEROUS)
1660 {
1661 /* mov ss, src? */
1662 if ( (pCpu->param1.flags & USE_REG_SEG)
1663 && (pCpu->param1.base.reg_seg == DIS_SELREG_SS))
1664 {
1665 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1666 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1667 /** @todo this could cause a fault (ring 0 selector being loaded in ring 1) */
1668 }
1669#if 0 /* necessary for Haiku */
1670 else
1671 if ( (pCpu->param2.flags & USE_REG_SEG)
1672 && (pCpu->param2.base.reg_seg == USE_REG_SS)
1673 && (pCpu->param1.flags & (USE_REG_GEN32|USE_REG_GEN16))) /** @todo memory operand must in theory be handled too */
1674 {
1675 /* mov GPR, ss */
1676 rc = patmPatchGenMovFromSS(pVM, pPatch, pCpu, pCurInstrGC);
1677 if (RT_SUCCESS(rc))
1678 rc = VWRN_CONTINUE_RECOMPILE;
1679 break;
1680 }
1681#endif
1682 }
1683 goto duplicate_instr;
1684
1685 case OP_POP:
1686 if (pCpu->pCurInstr->param1 == OP_PARM_REG_SS)
1687 {
1688 Assert(pCpu->pCurInstr->optype & OPTYPE_INHIBIT_IRQS);
1689
1690 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1691 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1692 }
1693 goto duplicate_instr;
1694
1695 case OP_STI:
1696 {
1697 RTRCPTR pNextInstrGC = 0; /* by default no inhibit irq */
1698
1699 /* In a sequence of instructions that inhibit irqs, only the first one actually inhibits irqs. */
1700 if (!(pPatch->flags & PATMFL_INHIBIT_IRQS))
1701 {
1702 pPatch->flags |= PATMFL_INHIBIT_IRQS | PATMFL_GENERATE_JUMPTOGUEST;
1703 fInhibitIRQInstr = true;
1704 pNextInstrGC = pCurInstrGC + pCpu->opsize;
1705 Log(("Inhibit irqs for instruction OP_STI at %RRv\n", pCurInstrGC));
1706 }
1707 rc = patmPatchGenSti(pVM, pPatch, pCurInstrGC, pNextInstrGC);
1708
1709 if (RT_SUCCESS(rc))
1710 {
1711 DISCPUSTATE cpu = *pCpu;
1712 unsigned opsize;
1713 int disret;
1714 RCPTRTYPE(uint8_t *) pReturnInstrGC;
1715 R3PTRTYPE(uint8_t *) pNextInstrHC;
1716
1717 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1718
1719 pNextInstrGC = pCurInstrGC + pCpu->opsize;
1720 pNextInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pNextInstrGC);
1721 if (pNextInstrHC == NULL)
1722 {
1723 AssertFailed();
1724 return VERR_PATCHING_REFUSED;
1725 }
1726
1727 // Disassemble the next instruction
1728 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pNextInstrGC, pNextInstrHC, &opsize, NULL);
1729 if (disret == false)
1730 {
1731 AssertMsgFailed(("STI: Disassembly failed (probably page not present) -> return to caller\n"));
1732 return VERR_PATCHING_REFUSED;
1733 }
1734 pReturnInstrGC = pNextInstrGC + opsize;
1735
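                /* The sti is only an acceptable exit point if the instruction following it (presumably the
                 * ret/iret/sysexit that leaves the handler) ends either at/before the patched instruction or
                 * beyond the 5-byte jump we write over the guest code; otherwise the patch is refused below
                 * ("sti occurred too soon"). Function duplication patches are exempt from this restriction. */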
1736 if ( (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1737 || pReturnInstrGC <= pInstrGC
1738 || pReturnInstrGC - pInstrGC >= SIZEOF_NEARJUMP32
1739 )
1740 {
1741 /* Not an exit point for function duplication patches */
1742 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
1743 && RT_SUCCESS(rc))
1744 {
1745 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST; /* Don't generate a jump back */
1746 rc = VWRN_CONTINUE_RECOMPILE;
1747 }
1748 else
1749 rc = VINF_SUCCESS; //exit point
1750 }
1751 else {
1752 Log(("PATM: sti occurred too soon; refusing patch!\n"));
1753 rc = VERR_PATCHING_REFUSED; //not allowed!!
1754 }
1755 }
1756 break;
1757 }
1758
1759 case OP_POPF:
1760 {
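            /* Only jump back to guest code if the bytes duplicated so far, including this popf, already cover
             * the 5-byte near jump that overwrites the original guest code; otherwise keep recompiling. */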
1761 bool fGenerateJmpBack = (pCurInstrGC + pCpu->opsize - pInstrGC >= SIZEOF_NEARJUMP32);
1762
1763 /* Not an exit point for IDT handler or function replacement patches */
1764 /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
1765 if (pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_DUPLICATE_FUNCTION))
1766 fGenerateJmpBack = false;
1767
1768 rc = patmPatchGenPopf(pVM, pPatch, pCurInstrGC + pCpu->opsize, !!(pCpu->prefix & PREFIX_OPSIZE), fGenerateJmpBack);
1769 if (RT_SUCCESS(rc))
1770 {
1771 if (fGenerateJmpBack == false)
1772 {
1773 /* Not an exit point for IDT handler or function replacement patches */
1774 rc = VWRN_CONTINUE_RECOMPILE;
1775 }
1776 else
1777 {
1778 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1779 rc = VINF_SUCCESS; /* exit point! */
1780 }
1781 }
1782 break;
1783 }
1784
1785 case OP_PUSHF:
1786 rc = patmPatchGenPushf(pVM, pPatch, !!(pCpu->prefix & PREFIX_OPSIZE));
1787 if (RT_SUCCESS(rc))
1788 rc = VWRN_CONTINUE_RECOMPILE;
1789 break;
1790
1791 case OP_PUSH:
1792 if (pCpu->pCurInstr->param1 == OP_PARM_REG_CS)
1793 {
1794 rc = patmPatchGenPushCS(pVM, pPatch);
1795 if (RT_SUCCESS(rc))
1796 rc = VWRN_CONTINUE_RECOMPILE;
1797 break;
1798 }
1799 goto duplicate_instr;
1800
1801 case OP_IRET:
1802 Log(("IRET at %RRv\n", pCurInstrGC));
1803 rc = patmPatchGenIret(pVM, pPatch, pCurInstrGC, !!(pCpu->prefix & PREFIX_OPSIZE));
1804 if (RT_SUCCESS(rc))
1805 {
1806 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1807 rc = VINF_SUCCESS; /* exit point by definition */
1808 }
1809 break;
1810
1811 case OP_ILLUD2:
1812 /* This appears to be some kind of kernel panic in Linux 2.4; no point to continue */
1813 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1814 if (RT_SUCCESS(rc))
1815 rc = VINF_SUCCESS; /* exit point by definition */
1816 Log(("Illegal opcode (0xf 0xb)\n"));
1817 break;
1818
1819 case OP_CPUID:
1820 rc = patmPatchGenCpuid(pVM, pPatch, pCurInstrGC);
1821 if (RT_SUCCESS(rc))
1822 rc = VWRN_CONTINUE_RECOMPILE;
1823 break;
1824
1825 case OP_STR:
1826 case OP_SLDT:
1827 rc = patmPatchGenSldtStr(pVM, pPatch, pCpu, pCurInstrGC);
1828 if (RT_SUCCESS(rc))
1829 rc = VWRN_CONTINUE_RECOMPILE;
1830 break;
1831
1832 case OP_SGDT:
1833 case OP_SIDT:
1834 rc = patmPatchGenSxDT(pVM, pPatch, pCpu, pCurInstrGC);
1835 if (RT_SUCCESS(rc))
1836 rc = VWRN_CONTINUE_RECOMPILE;
1837 break;
1838
1839 case OP_RETN:
1840 /* retn is an exit point for function patches */
1841 rc = patmPatchGenRet(pVM, pPatch, pCpu, pCurInstrGC);
1842 if (RT_SUCCESS(rc))
1843 rc = VINF_SUCCESS; /* exit point by definition */
1844 break;
1845
1846 case OP_SYSEXIT:
1847 /* Duplicate it, so it can be emulated in GC (or fault). */
1848 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
1849 if (RT_SUCCESS(rc))
1850 rc = VINF_SUCCESS; /* exit point by definition */
1851 break;
1852
1853 case OP_CALL:
1854 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
1855 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
1856 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
1857 */
1858 Assert(pCpu->param1.size == 4 || pCpu->param1.size == 6);
1859 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->param1.size == 4 /* no far calls! */)
1860 {
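            /* Indirect call: the real target is unknown at patch time, so a dummy address (0xDEADBEEF) is passed
             * and the generated call code presumably resolves the destination at run time. */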
1861 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, (RTRCPTR)0xDEADBEEF, true);
1862 if (RT_SUCCESS(rc))
1863 {
1864 rc = VWRN_CONTINUE_RECOMPILE;
1865 }
1866 break;
1867 }
1868 goto gen_illegal_instr;
1869
1870 case OP_JMP:
1871 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
1872 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
1873 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
1874 */
1875 Assert(pCpu->param1.size == 4 || pCpu->param1.size == 6);
1876 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->param1.size == 4 /* no far jumps! */)
1877 {
1878 rc = patmPatchGenJump(pVM, pPatch, pCpu, pCurInstrGC);
1879 if (RT_SUCCESS(rc))
1880 rc = VINF_SUCCESS; /* end of branch */
1881 break;
1882 }
1883 goto gen_illegal_instr;
1884
1885 case OP_INT3:
1886 case OP_INT:
1887 case OP_INTO:
1888 goto gen_illegal_instr;
1889
1890 case OP_MOV_DR:
1891 /** @note currently we let DRx writes cause a trap d; our trap handler will decide whether to interpret it or not. */
1892 if (pCpu->pCurInstr->param2 == OP_PARM_Dd)
1893 {
1894 rc = patmPatchGenMovDebug(pVM, pPatch, pCpu);
1895 if (RT_SUCCESS(rc))
1896 rc = VWRN_CONTINUE_RECOMPILE;
1897 break;
1898 }
1899 goto duplicate_instr;
1900
1901 case OP_MOV_CR:
1902 /** @note currently we let CRx writes cause a trap d; our trap handler will decide whether to interpret it or not. */
1903 if (pCpu->pCurInstr->param2 == OP_PARM_Cd)
1904 {
1905 rc = patmPatchGenMovControl(pVM, pPatch, pCpu);
1906 if (RT_SUCCESS(rc))
1907 rc = VWRN_CONTINUE_RECOMPILE;
1908 break;
1909 }
1910 goto duplicate_instr;
1911
1912 default:
1913 if (pCpu->pCurInstr->optype & (OPTYPE_CONTROLFLOW | OPTYPE_PRIVILEGED_NOTRAP))
1914 {
1915gen_illegal_instr:
1916 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1917 if (RT_SUCCESS(rc))
1918 rc = VINF_SUCCESS; /* exit point by definition */
1919 }
1920 else
1921 {
1922duplicate_instr:
1923 Log(("patmPatchGenDuplicate\n"));
1924 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
1925 if (RT_SUCCESS(rc))
1926 rc = VWRN_CONTINUE_RECOMPILE;
1927 }
1928 break;
1929 }
1930
1931end:
1932
1933 if ( !fInhibitIRQInstr
1934 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
1935 {
1936 int rc2;
1937 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->opsize;
1938
1939 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
1940 Log(("Clear inhibit IRQ flag at %RRv\n", pCurInstrGC));
1941 if (pPatch->flags & PATMFL_GENERATE_JUMPTOGUEST)
1942 {
1943 Log(("patmRecompileCallback: generate jump back to guest (%RRv) after fused instruction\n", pNextInstrGC));
1944
1945 rc2 = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
1946 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1947 rc = VINF_SUCCESS; /* end of the line */
1948 }
1949 else
1950 {
1951 rc2 = patmPatchGenClearInhibitIRQ(pVM, pPatch, pNextInstrGC);
1952 }
1953 if (RT_FAILURE(rc2))
1954 rc = rc2;
1955 }
1956
1957 if (RT_SUCCESS(rc))
1958 {
1959 // If this is a single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
1960 if ( (pPatch->flags & PATMFL_CHECK_SIZE)
1961 && pCurInstrGC + pCpu->opsize - pInstrGC >= SIZEOF_NEARJUMP32
1962 && !(pCpu->pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW)
1963 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT) /* do not do this when the next instruction *must* be executed! */
1964 )
1965 {
1966 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->opsize;
1967
1968 // The end marker for this kind of patch is any instruction at a location outside our patch jump
1969 Log(("patmRecompileCallback: end found for single instruction patch at %RRv opsize %d\n", pNextInstrGC, pCpu->opsize));
1970
1971 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC);
1972 AssertRC(rc);
1973 }
1974 }
1975 return rc;
1976}
1977
1978
1979#ifdef LOG_ENABLED
1980
1981/** Adds a disasm jump record (temporary; used to prevent duplicate analysis).
1982 *
1983 * @param pVM The VM to operate on.
1984 * @param pPatch Patch structure ptr
1985 * @param pInstrGC Guest context pointer to privileged instruction
1986 *
1987 */
1988static void patmPatchAddDisasmJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1989{
1990 PAVLPVNODECORE pRec;
1991
1992 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1993 Assert(pRec);
1994 pRec->Key = (AVLPVKEY)pInstrGC;
1995
1996 int ret = RTAvlPVInsert(&pPatch->pTempInfo->DisasmJumpTree, pRec);
1997 Assert(ret);
1998}
1999
2000/**
2001 * Checks if jump target has been analysed before.
2002 *
2003 * @returns true if the jump target was analysed before, false otherwise.
2004 * @param pPatch Patch struct
2005 * @param pInstrGC Jump target
2006 *
2007 */
2008static bool patmIsKnownDisasmJump(PPATCHINFO pPatch, RTRCPTR pInstrGC)
2009{
2010 PAVLPVNODECORE pRec;
2011
2012 pRec = RTAvlPVGet(&pPatch->pTempInfo->DisasmJumpTree, (AVLPVKEY)pInstrGC);
2013 if (pRec)
2014 return true;
2015 return false;
2016}
2017
2018/**
2019 * For proper disassembly of the final patch block
2020 *
2021 * @returns VBox status code.
2022 * @param pVM The VM to operate on.
2023 * @param pCpu CPU disassembly state
2024 * @param pInstrGC Guest context pointer to privileged instruction
2025 * @param pCurInstrGC Guest context pointer to the current instruction
2026 * @param pUserData User pointer (callback specific)
2027 *
2028 */
2029int patmr3DisasmCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, void *pUserData)
2030{
2031 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
2032
2033 if (pCpu->pCurInstr->opcode == OP_INT3)
2034 {
2035 /* Could be an int3 inserted in a call patch. Check to be sure */
2036 DISCPUSTATE cpu;
2037 uint8_t *pOrgJumpHC;
2038 RTRCPTR pOrgJumpGC;
2039 uint32_t dummy;
2040
2041 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2042 pOrgJumpGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2043 pOrgJumpHC = PATMGCVirtToHCVirt(pVM, pPatch, pOrgJumpGC);
2044
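        /* Translate the patch address back to the original guest instruction: if that instruction was a near call,
         * the int3 belongs to a call patch and disassembly continues; otherwise disassembly of this path stops here. */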
2045 bool disret = PATMR3DISInstr(pVM, pPatch, &cpu, pOrgJumpGC, pOrgJumpHC, &dummy, NULL);
2046 if (!disret || cpu.pCurInstr->opcode != OP_CALL || cpu.param1.size != 4 /* only near calls */)
2047 return VINF_SUCCESS;
2048
2049 return VWRN_CONTINUE_ANALYSIS;
2050 }
2051
2052 if ( pCpu->pCurInstr->opcode == OP_ILLUD2
2053 && PATMIsPatchGCAddr(pVM, pCurInstrGC))
2054 {
2055 /* the indirect call patch contains a 0xF 0xB illegal instruction to call for assistance; check for this and continue */
2056 return VWRN_CONTINUE_ANALYSIS;
2057 }
2058
2059 if ( (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
2060 || pCpu->pCurInstr->opcode == OP_INT
2061 || pCpu->pCurInstr->opcode == OP_IRET
2062 || pCpu->pCurInstr->opcode == OP_RETN
2063 || pCpu->pCurInstr->opcode == OP_RETF
2064 )
2065 {
2066 return VINF_SUCCESS;
2067 }
2068
2069 if (pCpu->pCurInstr->opcode == OP_ILLUD2)
2070 return VINF_SUCCESS;
2071
2072 return VWRN_CONTINUE_ANALYSIS;
2073}
2074
2075
2076/**
2077 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2078 *
2079 * @returns VBox status code.
2080 * @param pVM The VM to operate on.
2081 * @param pInstrGC Guest context pointer to the initial privileged instruction
2082 * @param pCurInstrGC Guest context pointer to the current instruction
2083 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2084 * @param pUserData User pointer (callback specific)
2085 *
2086 */
2087int patmr3DisasmCode(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, void *pUserData)
2088{
2089 DISCPUSTATE cpu;
2090 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
2091 int rc = VWRN_CONTINUE_ANALYSIS;
2092 uint32_t opsize, delta;
2093 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2094 bool disret;
2095 char szOutput[256];
2096
2097 Assert(pCurInstrHC != PATCHCODE_PTR_HC(pPatch) || pPatch->pTempInfo->DisasmJumpTree == 0);
2098
2099 /* We need this to determine branch targets (and for disassembling). */
2100 delta = pVM->patm.s.pPatchMemGC - (uintptr_t)pVM->patm.s.pPatchMemHC;
2101
2102 while(rc == VWRN_CONTINUE_ANALYSIS)
2103 {
2104 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2105
2106 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pCurInstrGC);
2107 if (pCurInstrHC == NULL)
2108 {
2109 rc = VERR_PATCHING_REFUSED;
2110 goto end;
2111 }
2112
2113 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, szOutput, PATMREAD_RAWCODE);
2114 if (PATMIsPatchGCAddr(pVM, pCurInstrGC))
2115 {
2116 RTRCPTR pOrgInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2117
2118 if (pOrgInstrGC != pPatch->pTempInfo->pLastDisasmInstrGC)
2119 Log(("DIS %RRv<-%s", pOrgInstrGC, szOutput));
2120 else
2121 Log(("DIS %s", szOutput));
2122
2123 pPatch->pTempInfo->pLastDisasmInstrGC = pOrgInstrGC;
2124 if (patmIsIllegalInstr(pPatch, pOrgInstrGC))
2125 {
2126 rc = VINF_SUCCESS;
2127 goto end;
2128 }
2129 }
2130 else
2131 Log(("DIS: %s", szOutput));
2132
2133 if (disret == false)
2134 {
2135 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2136 rc = VINF_SUCCESS;
2137 goto end;
2138 }
2139
2140 rc = pfnPATMR3Disasm(pVM, &cpu, pInstrGC, pCurInstrGC, pUserData);
2141 if (rc != VWRN_CONTINUE_ANALYSIS) {
2142 break; //done!
2143 }
2144
2145 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction) */
2146 if ( (cpu.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2147 && (OP_PARM_VTYPE(cpu.pCurInstr->param1) == OP_PARM_J)
2148 && cpu.pCurInstr->opcode != OP_CALL /* complete functions are replaced; don't bother here. */
2149 )
2150 {
2151 RTRCPTR pTargetGC = PATMResolveBranch(&cpu, pCurInstrGC);
2152 RTRCPTR pOrgTargetGC;
2153
2154 if (pTargetGC == 0)
2155 {
2156 Log(("We don't support far jumps here!! (%08X)\n", cpu.param1.flags));
2157 rc = VERR_PATCHING_REFUSED;
2158 break;
2159 }
2160
2161 if (!PATMIsPatchGCAddr(pVM, pTargetGC))
2162 {
2163 //jump back to guest code
2164 rc = VINF_SUCCESS;
2165 goto end;
2166 }
2167 pOrgTargetGC = PATMR3PatchToGCPtr(pVM, pTargetGC, 0);
2168
2169 if (patmIsCommonIDTHandlerPatch(pVM, pOrgTargetGC))
2170 {
2171 rc = VINF_SUCCESS;
2172 goto end;
2173 }
2174
2175 if (patmIsKnownDisasmJump(pPatch, pTargetGC) == false)
2176 {
2177 /* New jump, let's check it. */
2178 patmPatchAddDisasmJump(pVM, pPatch, pTargetGC);
2179
2180 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2181 rc = patmr3DisasmCode(pVM, pInstrGC, pTargetGC, pfnPATMR3Disasm, pUserData);
2182 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2183
2184 if (rc != VINF_SUCCESS) {
2185 break; //done!
2186 }
2187 }
2188 if (cpu.pCurInstr->opcode == OP_JMP)
2189 {
2190 /* Unconditional jump; return to caller. */
2191 rc = VINF_SUCCESS;
2192 goto end;
2193 }
2194
2195 rc = VWRN_CONTINUE_ANALYSIS;
2196 }
2197 pCurInstrGC += opsize;
2198 }
2199end:
2200 return rc;
2201}
2202
2203/**
2204 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2205 *
2206 * @returns VBox status code.
2207 * @param pVM The VM to operate on.
2208 * @param pInstrGC Guest context pointer to the initial privileged instruction
2209 * @param pCurInstrGC Guest context pointer to the current instruction
2210 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2211 * @param pUserData User pointer (callback specific)
2212 *
2213 */
2214int patmr3DisasmCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, void *pUserData)
2215{
2216 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
2217
2218 int rc = patmr3DisasmCode(pVM, pInstrGC, pCurInstrGC, pfnPATMR3Disasm, pUserData);
2219 /* Free all disasm jump records. */
2220 patmEmptyTree(pVM, &pPatch->pTempInfo->DisasmJumpTree);
2221 return rc;
2222}
2223
2224#endif /* LOG_ENABLED */
2225
2226/**
2227 * Detects if the specified address falls within a 5-byte jump generated for an active patch.
2228 * If so, this patch is permanently disabled.
2229 *
2230 * @param pVM The VM to operate on.
2231 * @param pInstrGC Guest context pointer to instruction
2232 * @param pConflictGC Guest context pointer to check
2233 *
2234 * @note also checks for patch hints to make sure they can never be enabled if a conflict is present.
2235 *
2236 */
2237VMMR3DECL(int) PATMR3DetectConflict(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictGC)
2238{
2239 PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, pConflictGC, true /* include patch hints */);
2240 if (pTargetPatch)
2241 {
2242 return patmDisableUnusablePatch(pVM, pInstrGC, pConflictGC, pTargetPatch);
2243 }
2244 return VERR_PATCH_NO_CONFLICT;
2245}
2246
2247/**
2248 * Recompile the code stream until the callback function detects a failure or decides everything is acceptable
2249 *
2250 * @returns VBox status code.
2251 * @param pVM The VM to operate on.
2252 * @param pInstrGC Guest context pointer to privileged instruction
2253 * @param pCurInstrGC Guest context pointer to the current instruction
2254 * @param pfnPATMR3Recompile Callback for testing the disassembled instruction
2255 * @param pUserData User pointer (callback specific)
2256 *
2257 */
2258static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Recompile, void *pUserData)
2259{
2260 DISCPUSTATE cpu;
2261 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
2262 int rc = VWRN_CONTINUE_ANALYSIS;
2263 uint32_t opsize;
2264 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2265 bool disret;
2266#ifdef LOG_ENABLED
2267 char szOutput[256];
2268#endif
2269
2270 while (rc == VWRN_CONTINUE_RECOMPILE)
2271 {
2272 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2273
2274 ////Log(("patmRecompileCodeStream %RRv %RRv\n", pInstrGC, pCurInstrGC));
2275
2276 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pCurInstrGC);
2277 if (pCurInstrHC == NULL)
2278 {
2279 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2280 goto end;
2281 }
2282#ifdef LOG_ENABLED
2283 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, szOutput);
2284 Log(("Recompile: %s", szOutput));
2285#else
2286 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2287#endif
2288 if (disret == false)
2289 {
2290 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2291
2292 /* Add lookup record for patch to guest address translation */
2293 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
2294 patmPatchGenIllegalInstr(pVM, pPatch);
2295 rc = VINF_SUCCESS; /* Note: don't fail here; we might refuse an important patch!! */
2296 goto end;
2297 }
2298
2299 rc = pfnPATMR3Recompile(pVM, &cpu, pInstrGC, pCurInstrGC, pUserData);
2300 if (rc != VWRN_CONTINUE_RECOMPILE)
2301 {
2302 /* If irqs are inhibited because of the current instruction, then we must make sure the next one is executed! */
2303 if ( rc == VINF_SUCCESS
2304 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2305 {
2306 DISCPUSTATE cpunext;
2307 uint32_t opsizenext;
2308 uint8_t *pNextInstrHC;
2309 RTRCPTR pNextInstrGC = pCurInstrGC + opsize;
2310
2311 Log(("patmRecompileCodeStream: irqs inhibited by instruction %RRv\n", pNextInstrGC));
2312
2313 /* Certain instructions (e.g. sti) force the next instruction to be executed before any interrupts can occur.
2314 * Recompile the next instruction as well
2315 */
2316 pNextInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pNextInstrGC);
2317 if (pNextInstrHC == NULL)
2318 {
2319 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2320 goto end;
2321 }
2322 cpunext.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2323 disret = PATMR3DISInstr(pVM, pPatch, &cpunext, pNextInstrGC, pNextInstrHC, &opsizenext, NULL);
2324 if (disret == false)
2325 {
2326 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2327 goto end;
2328 }
2329 switch(cpunext.pCurInstr->opcode)
2330 {
2331 case OP_IRET: /* inhibit cleared in generated code */
2332 case OP_SYSEXIT: /* faults; inhibit should be cleared in HC handling */
2333 case OP_HLT:
2334 break; /* recompile these */
2335
2336 default:
2337 if (cpunext.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2338 {
2339 Log(("Unexpected control flow instruction after inhibit irq instruction\n"));
2340
2341 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2342 AssertRC(rc);
2343 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2344 goto end; /** @todo should be ok to ignore instruction fusing in this case */
2345 }
2346 break;
2347 }
2348
2349 /** @note after a cli we must continue to a proper exit point */
2350 if (cpunext.pCurInstr->opcode != OP_CLI)
2351 {
2352 rc = pfnPATMR3Recompile(pVM, &cpunext, pInstrGC, pNextInstrGC, pUserData);
2353 if (RT_SUCCESS(rc))
2354 {
2355 rc = VINF_SUCCESS;
2356 goto end;
2357 }
2358 break;
2359 }
2360 else
2361 rc = VWRN_CONTINUE_RECOMPILE;
2362 }
2363 else
2364 break; /* done! */
2365 }
2366
2367 /** @todo continue with the instructions following the jump and then recompile the jump target code */
2368
2369
2370 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction). */
2371 if ( (cpu.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2372 && (OP_PARM_VTYPE(cpu.pCurInstr->param1) == OP_PARM_J)
2373 && cpu.pCurInstr->opcode != OP_CALL /* complete functions are replaced; don't bother here. */
2374 )
2375 {
2376 RCPTRTYPE(uint8_t *) addr = PATMResolveBranch(&cpu, pCurInstrGC);
2377 if (addr == 0)
2378 {
2379 Log(("We don't support far jumps here!! (%08X)\n", cpu.param1.flags));
2380 rc = VERR_PATCHING_REFUSED;
2381 break;
2382 }
2383
2384 Log(("Jump encountered target %RRv\n", addr));
2385
2386 /* We don't check if the branch target lies in a valid page as we've already done that in the analysis phase. */
2387 if (!(cpu.pCurInstr->optype & OPTYPE_UNCOND_CONTROLFLOW))
2388 {
2389 Log(("patmRecompileCodeStream continue passed conditional jump\n"));
2390 /* First we need to finish this linear code stream until the next exit point. */
2391 rc = patmRecompileCodeStream(pVM, pInstrGC, pCurInstrGC+opsize, pfnPATMR3Recompile, pUserData);
2392 if (RT_FAILURE(rc))
2393 {
2394 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2395 break; //fatal error
2396 }
2397 }
2398
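            /* Now recompile the branch target itself; for an unconditional jump this is the only remaining path,
             * the fall-through stream of a conditional jump has already been handled above. */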
2399 if (patmGuestGCPtrToPatchGCPtr(pVM, pPatch, addr) == 0)
2400 {
2401 /* New code; let's recompile it. */
2402 Log(("patmRecompileCodeStream continue with jump\n"));
2403
2404 /*
2405 * If we are jumping to an existing patch (or within 5 bytes of the entrypoint), then we must temporarily disable
2406 * this patch so we can continue our analysis
2407 *
2408 * We rely on CSAM to detect and resolve conflicts
2409 */
2410 PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, addr);
2411 if(pTargetPatch)
2412 {
2413 Log(("Found active patch at target %RRv (%RRv) -> temporarily disabling it!!\n", addr, pTargetPatch->pPrivInstrGC));
2414 PATMR3DisablePatch(pVM, pTargetPatch->pPrivInstrGC);
2415 }
2416
2417 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2418 rc = patmRecompileCodeStream(pVM, pInstrGC, addr, pfnPATMR3Recompile, pUserData);
2419 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2420
2421 if(pTargetPatch)
2422 {
2423 PATMR3EnablePatch(pVM, pTargetPatch->pPrivInstrGC);
2424 }
2425
2426 if (RT_FAILURE(rc))
2427 {
2428 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2429 break; //done!
2430 }
2431 }
2432 /* Always return to caller here; we're done! */
2433 rc = VINF_SUCCESS;
2434 goto end;
2435 }
2436 else
2437 if (cpu.pCurInstr->optype & OPTYPE_UNCOND_CONTROLFLOW)
2438 {
2439 rc = VINF_SUCCESS;
2440 goto end;
2441 }
2442 pCurInstrGC += opsize;
2443 }
2444end:
2445 Assert(!(pPatch->flags & PATMFL_RECOMPILE_NEXT));
2446 return rc;
2447}
2448
2449
2450/**
2451 * Generate the jump from guest to patch code
2452 *
2453 * @returns VBox status code.
2454 * @param pVM The VM to operate on.
2455 * @param pPatch Patch record
2456 */
2457static int patmGenJumpToPatch(PVM pVM, PPATCHINFO pPatch, bool fAddFixup = true)
2458{
2459 uint8_t temp[8];
2460 uint8_t *pPB;
2461 int rc;
2462
2463 Assert(pPatch->cbPatchJump <= sizeof(temp));
2464 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
2465
2466 pPB = pPatch->pPrivInstrHC;
2467
2468#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
2469 if (pPatch->flags & PATMFL_JUMP_CONFLICT)
2470 {
2471 Assert(pPatch->pPatchJumpDestGC);
2472
2473 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
2474 {
2475 // jmp [PatchCode]
2476 if (fAddFixup)
2477 {
2478 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2479 {
2480 Log(("Relocation failed for the jump in the guest code!!\n"));
2481 return VERR_PATCHING_REFUSED;
2482 }
2483 }
2484
2485 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2486 *(uint32_t *)&temp[1] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
2487 }
2488 else
2489 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
2490 {
2491 // jmp [PatchCode]
2492 if (fAddFixup)
2493 {
2494 if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2495 {
2496 Log(("Relocation failed for the jump in the guest code!!\n"));
2497 return VERR_PATCHING_REFUSED;
2498 }
2499 }
2500
2501 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2502 temp[1] = pPatch->aPrivInstr[1]; //jump opcode copied from original instruction
2503 *(uint32_t *)&temp[2] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
2504 }
2505 else
2506 {
2507 Assert(0);
2508 return VERR_PATCHING_REFUSED;
2509 }
2510 }
2511 else
2512#endif
2513 {
2514 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2515
2516 // jmp [PatchCode]
2517 if (fAddFixup)
2518 {
2519 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
2520 {
2521 Log(("Relocation failed for the jump in the guest code!!\n"));
2522 return VERR_PATCHING_REFUSED;
2523 }
2524 }
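        /* Layout of the 5-byte jump written over the guest instruction:
         *   byte 0    : 0xE9 (near jmp)
         *   bytes 1-4 : rel32 = PATCHCODE_PTR_GC(pPatch) - (pPrivInstrGC + SIZEOF_NEARJUMP32) */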
2525 temp[0] = 0xE9; //jmp
2526 *(uint32_t *)&temp[1] = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
2527 }
2528 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2529 AssertRC(rc);
2530
2531 if (rc == VINF_SUCCESS)
2532 pPatch->flags |= PATMFL_PATCHED_GUEST_CODE;
2533
2534 return rc;
2535}
2536
2537/**
2538 * Remove the jump from guest to patch code
2539 *
2540 * @returns VBox status code.
2541 * @param pVM The VM to operate on.
2542 * @param pPatch Patch record
2543 */
2544static int patmRemoveJumpToPatch(PVM pVM, PPATCHINFO pPatch)
2545{
2546#ifdef DEBUG
2547 DISCPUSTATE cpu;
2548 char szOutput[256];
2549 uint32_t opsize, i = 0;
2550 bool disret;
2551
2552 while(i < pPatch->cbPrivInstr)
2553 {
2554 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2555 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, &pPatch->pPrivInstrHC[i], &opsize, szOutput);
2556 if (disret == false)
2557 break;
2558
2559 Log(("Org patch jump: %s", szOutput));
2560 Assert(opsize);
2561 i += opsize;
2562 }
2563#endif
2564
2565 /* Restore original code (privileged instruction + following instructions that were overwritten because of the 5/6 byte jmp). */
2566 int rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, pPatch->cbPatchJump);
2567#ifdef DEBUG
2568 if (rc == VINF_SUCCESS)
2569 {
2570 i = 0;
2571 while(i < pPatch->cbPrivInstr)
2572 {
2573 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2574 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, &pPatch->pPrivInstrHC[i], &opsize, szOutput);
2575 if (disret == false)
2576 break;
2577
2578 Log(("Org instr: %s", szOutput));
2579 Assert(opsize);
2580 i += opsize;
2581 }
2582 }
2583#endif
2584 pPatch->flags &= ~PATMFL_PATCHED_GUEST_CODE;
2585 return rc;
2586}
2587
2588/**
2589 * Generate the call from guest to patch code
2590 *
2591 * @returns VBox status code.
2592 * @param pVM The VM to operate on.
2593 * @param pPatch Patch record
2594 */
2595static int patmGenCallToPatch(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTargetGC, bool fAddFixup = true)
2596{
2597 uint8_t temp[8];
2598 uint8_t *pPB;
2599 int rc;
2600
2601 Assert(pPatch->cbPatchJump <= sizeof(temp));
2602
2603 pPB = pPatch->pPrivInstrHC;
2604
2605 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2606
2607 // jmp [PatchCode]
2608 if (fAddFixup)
2609 {
2610 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
2611 {
2612 Log(("Relocation failed for the jump in the guest code!!\n"));
2613 return VERR_PATCHING_REFUSED;
2614 }
2615 }
2616
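    /* Reuse the original call/jmp opcode byte and only rewrite its rel32 operand, so the guest ends up doing a
     * near call/jmp straight into the patch code at pTargetGC. */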
2617 Assert(pPatch->aPrivInstr[0] == 0xE8 || pPatch->aPrivInstr[0] == 0xE9); /* call or jmp */
2618 temp[0] = pPatch->aPrivInstr[0];
2619 *(uint32_t *)&temp[1] = (uint32_t)pTargetGC - ((uint32_t)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
2620
2621 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2622 AssertRC(rc);
2623
2624 return rc;
2625}
2626
2627
2628/**
2629 * Patch cli/sti pushf/popf instruction block at specified location
2630 *
2631 * @returns VBox status code.
2632 * @param pVM The VM to operate on.
2633 * @param pInstrGC Guest context pointer to the privileged instruction
2634 * @param pInstrHC Host context pointer to the privileged instruction
2635 * @param uOpcode Instruction opcode
2636 * @param uOpSize Size of starting instruction
2637 * @param pPatchRec Patch record
2638 *
2639 * @note returns failure if patching is not allowed or possible
2640 *
2641 */
2642VMMR3DECL(int) PATMR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2643 uint32_t uOpcode, uint32_t uOpSize, PPATMPATCHREC pPatchRec)
2644{
2645 PPATCHINFO pPatch = &pPatchRec->patch;
2646 int rc = VERR_PATCHING_REFUSED;
2647 DISCPUSTATE cpu;
2648 uint32_t orgOffsetPatchMem = ~0;
2649 RTRCPTR pInstrStart;
2650#ifdef LOG_ENABLED
2651 uint32_t opsize;
2652 char szOutput[256];
2653 bool disret;
2654#endif
2655
2656 /* Save original offset (in case of failures later on) */
2657 /** @todo use the hypervisor heap (that has quite a few consequences for save/restore though) */
2658 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2659
2660 Assert(!(pPatch->flags & (PATMFL_GUEST_SPECIFIC|PATMFL_USER_MODE|PATMFL_TRAPHANDLER)));
2661 switch (uOpcode)
2662 {
2663 case OP_MOV:
2664 break;
2665
2666 case OP_CLI:
2667 case OP_PUSHF:
2668 /* We can 'call' a cli or pushf patch. It will either return to the original guest code when IF is set again, or fault. */
2669 /** @note special precautions are taken when disabling and enabling such patches. */
2670 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
2671 break;
2672
2673 default:
2674 if (!(pPatch->flags & PATMFL_IDTHANDLER))
2675 {
2676 AssertMsg(0, ("PATMR3PatchBlock: Invalid opcode %x\n", uOpcode));
2677 return VERR_INVALID_PARAMETER;
2678 }
2679 }
2680
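    /* IDT handler, sysenter and int3 replacement patches are entered through other means (presumably the IDT,
     * the sysenter MSRs or an int 3 trap), so only the remaining patch types need a jump written into the guest code. */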
2681 if (!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)))
2682 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
2683
2684 /* If we're going to insert a patch jump, then the jump itself is not allowed to cross a page boundary. */
2685 if ( (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2686 && PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + SIZEOF_NEARJUMP32)
2687 )
2688 {
2689 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
2690#ifdef DEBUG_sandervl
2691//// AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
2692#endif
2693 rc = VERR_PATCHING_REFUSED;
2694 goto failure;
2695 }
2696
2697 pPatch->nrPatch2GuestRecs = 0;
2698 pInstrStart = pInstrGC;
2699
2700#ifdef PATM_ENABLE_CALL
2701 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
2702#endif
2703
2704 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2705 pPatch->uCurPatchOffset = 0;
2706
2707 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2708
2709 if ((pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER)) == PATMFL_IDTHANDLER)
2710 {
2711 Assert(pPatch->flags & PATMFL_INTHANDLER);
2712
2713 /* Install fake cli patch (to clear the virtual IF and check int xx parameters) */
2714 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2715 if (RT_FAILURE(rc))
2716 goto failure;
2717 }
2718
2719 /***************************************************************************************************************************/
2720 /** @note We can't insert *any* code before a sysenter handler; some linux guests have an invalid stack at this point!!!!! */
2721 /***************************************************************************************************************************/
2722#ifdef VBOX_WITH_STATISTICS
2723 if (!(pPatch->flags & PATMFL_SYSENTER))
2724 {
2725 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2726 if (RT_FAILURE(rc))
2727 goto failure;
2728 }
2729#endif
2730
2731 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pPatch);
2732 if (rc != VINF_SUCCESS)
2733 {
2734 Log(("PATMR3PatchCli: patmRecompileCodeStream failed with %d\n", rc));
2735 goto failure;
2736 }
2737
2738 /* Calculated during analysis. */
2739 if (pPatch->cbPatchBlockSize < SIZEOF_NEARJUMP32)
2740 {
2741 /* Most likely cause: we encountered an illegal instruction very early on. */
2742 /** @todo could turn it into an int3 callable patch. */
2743 Log(("PATMR3PatchBlock: patch block too small -> refuse\n"));
2744 rc = VERR_PATCHING_REFUSED;
2745 goto failure;
2746 }
2747
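    /* Note: up to this point cbPatchBlockSize counted guest instruction bytes (see the analysis callbacks);
     * from here on it holds the actual number of bytes emitted into patch memory. */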
2748 /* size of patch block */
2749 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2750
2751
2752 /* Update free pointer in patch memory. */
2753 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2754 /* Round to next 8 byte boundary. */
2755 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2756
2757 /*
2758 * Insert into patch to guest lookup tree
2759 */
2760 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2761 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2762 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2763 AssertMsg(rc, ("RTAvlULInsert failed for %x\n", pPatchRec->CoreOffset.Key));
2764 if (!rc)
2765 {
2766 rc = VERR_PATCHING_REFUSED;
2767 goto failure;
2768 }
2769
2770 /* Note that patmr3SetBranchTargets can install additional patches!! */
2771 rc = patmr3SetBranchTargets(pVM, pPatch);
2772 if (rc != VINF_SUCCESS)
2773 {
2774 Log(("PATMR3PatchCli: patmr3SetBranchTargets failed with %d\n", rc));
2775 goto failure;
2776 }
2777
2778#ifdef LOG_ENABLED
2779 Log(("Patch code ----------------------------------------------------------\n"));
2780 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pPatch);
2781 Log(("Patch code ends -----------------------------------------------------\n"));
2782#endif
2783
2784 /* make a copy of the guest code bytes that will be overwritten */
2785 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
2786
2787 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
2788 AssertRC(rc);
2789
2790 if (pPatch->flags & PATMFL_INT3_REPLACEMENT_BLOCK)
2791 {
2792 /*uint8_t ASMInt3 = 0xCC; - unused */
2793
2794 Log(("PATMR3PatchBlock %RRv -> int 3 callable patch.\n", pPatch->pPrivInstrGC));
2795 /* Replace first opcode byte with 'int 3'. */
2796 rc = patmActivateInt3Patch(pVM, pPatch);
2797 if (RT_FAILURE(rc))
2798 goto failure;
2799
2800 /* normal patch can be turned into an int3 patch -> clear patch jump installation flag. */
2801 pPatch->flags &= ~PATMFL_MUST_INSTALL_PATCHJMP;
2802
2803 pPatch->flags &= ~PATMFL_INSTR_HINT;
2804 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
2805 }
2806 else
2807 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2808 {
2809 Assert(!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)));
2810 /* now insert a jump in the guest code */
2811 rc = patmGenJumpToPatch(pVM, pPatch, true);
2812 AssertRC(rc);
2813 if (RT_FAILURE(rc))
2814 goto failure;
2815
2816 }
2817
2818#ifdef LOG_ENABLED
2819 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2820 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput, PATMREAD_RAWCODE);
2821 Log(("%s patch: %s", patmGetInstructionString(pPatch->opcode, pPatch->flags), szOutput));
2822#endif
2823
2824 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2825 pPatch->pTempInfo->nrIllegalInstr = 0;
2826
2827 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
2828
2829 pPatch->uState = PATCH_ENABLED;
2830 return VINF_SUCCESS;
2831
2832failure:
2833 if (pPatchRec->CoreOffset.Key)
2834 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
2835
2836 patmEmptyTree(pVM, &pPatch->FixupTree);
2837 pPatch->nrFixups = 0;
2838
2839 patmEmptyTree(pVM, &pPatch->JumpTree);
2840 pPatch->nrJumpRecs = 0;
2841
2842 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2843 pPatch->pTempInfo->nrIllegalInstr = 0;
2844
2845 /* Turn this cli patch into a dummy. */
2846 pPatch->uState = PATCH_REFUSED;
2847 pPatch->pPatchBlockOffset = 0;
2848
2849 // Give back the patch memory we no longer need
2850 Assert(orgOffsetPatchMem != (uint32_t)~0);
2851 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
2852
2853 return rc;
2854}
2855
2856/**
2857 * Patch IDT handler
2858 *
2859 * @returns VBox status code.
2860 * @param pVM The VM to operate on.
2861 * @param pInstrGC Guest context pointer to the privileged instruction
2862 * @param pInstrHC Host context pointer to the privileged instruction
2863 * @param uOpSize Size of starting instruction
2864 * @param pPatchRec Patch record
2865 *
2866 * @note returns failure if patching is not allowed or possible
2867 *
2868 */
2869static int patmIdtHandler(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2870 uint32_t uOpSize, PPATMPATCHREC pPatchRec)
2871{
2872 PPATCHINFO pPatch = &pPatchRec->patch;
2873 bool disret;
2874 DISCPUSTATE cpuPush, cpuJmp;
2875 uint32_t opsize;
2876 RTRCPTR pCurInstrGC = pInstrGC;
2877 uint8_t *pCurInstrHC = pInstrHC;
2878 uint32_t orgOffsetPatchMem = ~0;
2879
2880 /*
2881 * In Linux it's often the case that many interrupt handlers push a predefined value onto the stack
2882 * and then jump to a common entrypoint. In order not to waste a lot of memory, we will check for this
2883 * condition here and only patch the common entrypoint once.
2884 */
2885 cpuPush.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2886 disret = PATMR3DISInstr(pVM, pPatch, &cpuPush, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2887 Assert(disret);
2888 if (disret && cpuPush.pCurInstr->opcode == OP_PUSH)
2889 {
2890 RTRCPTR pJmpInstrGC;
2891 int rc;
2892
2893 pCurInstrGC += opsize;
2894 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pCurInstrGC);
2895
2896 cpuJmp.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2897 disret = PATMR3DISInstr(pVM, pPatch, &cpuJmp, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2898 if ( disret
2899 && cpuJmp.pCurInstr->opcode == OP_JMP
2900 && (pJmpInstrGC = PATMResolveBranch(&cpuJmp, pCurInstrGC))
2901 )
2902 {
2903 PPATMPATCHREC pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
2904 if (pJmpPatch == 0)
2905 {
2906 /* Patch it first! */
2907 rc = PATMR3InstallPatch(pVM, pJmpInstrGC, pPatch->flags | PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT);
2908 if (rc != VINF_SUCCESS)
2909 goto failure;
2910 pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
2911 Assert(pJmpPatch);
2912 }
2913 if (pJmpPatch->patch.uState != PATCH_ENABLED)
2914 goto failure;
2915
2916 /* save original offset (in case of failures later on) */
2917 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2918
2919 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2920 pPatch->uCurPatchOffset = 0;
2921 pPatch->nrPatch2GuestRecs = 0;
2922
2923#ifdef VBOX_WITH_STATISTICS
2924 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2925 if (RT_FAILURE(rc))
2926 goto failure;
2927#endif
2928
2929 /* Install fake cli patch (to clear the virtual IF) */
2930 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2931 if (RT_FAILURE(rc))
2932 goto failure;
2933
2934 /* Add lookup record for patch to guest address translation (for the push) */
2935 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pInstrGC, PATM_LOOKUP_BOTHDIR);
2936
2937 /* Duplicate push. */
2938 rc = patmPatchGenDuplicate(pVM, pPatch, &cpuPush, pInstrGC);
2939 if (RT_FAILURE(rc))
2940 goto failure;
2941
2942 /* Generate jump to common entrypoint. */
2943 rc = patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, PATCHCODE_PTR_GC(&pJmpPatch->patch));
2944 if (RT_FAILURE(rc))
2945 goto failure;
2946
2947 /* size of patch block */
2948 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2949
2950 /* Update free pointer in patch memory. */
2951 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2952 /* Round to next 8 byte boundary */
2953 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2954
2955 /* There's no jump from guest to patch code. */
2956 pPatch->cbPatchJump = 0;
2957
2958
2959#ifdef LOG_ENABLED
2960 Log(("Patch code ----------------------------------------------------------\n"));
2961 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pPatch);
2962 Log(("Patch code ends -----------------------------------------------------\n"));
2963#endif
2964 Log(("Successfully installed IDT handler patch at %RRv\n", pInstrGC));
2965
2966 /*
2967 * Insert into patch to guest lookup tree
2968 */
2969 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2970 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2971 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2972 AssertMsg(rc, ("RTAvlULInsert failed for %x\n", pPatchRec->CoreOffset.Key));
2973
2974 pPatch->uState = PATCH_ENABLED;
2975
2976 return VINF_SUCCESS;
2977 }
2978 }
2979failure:
2980 /* Give back the patch memory we no longer need */
2981 if (orgOffsetPatchMem != (uint32_t)~0)
2982 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
2983
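    /* Not a push + jmp to a common entrypoint (or patching that entrypoint failed); fall back to patching this
     * handler as an ordinary patch block. */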
2984 return PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, OP_CLI, uOpSize, pPatchRec);
2985}
2986
2987/**
2988 * Install a trampoline to call a guest trap handler directly
2989 *
2990 * @returns VBox status code.
2991 * @param pVM The VM to operate on.
2992 * @param pInstrGC Guest context pointer to the privileged instruction
2993 * @param pPatchRec Patch record
2994 *
2995 */
2996static int patmInstallTrapTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
2997{
2998 PPATCHINFO pPatch = &pPatchRec->patch;
2999 int rc = VERR_PATCHING_REFUSED;
3000 uint32_t orgOffsetPatchMem = ~0;
3001#ifdef LOG_ENABLED
3002 bool disret;
3003 DISCPUSTATE cpu;
3004 uint32_t opsize;
3005 char szOutput[256];
3006#endif
3007
3008 // save original offset (in case of failures later on)
3009 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3010
3011 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3012 pPatch->uCurPatchOffset = 0;
3013 pPatch->nrPatch2GuestRecs = 0;
3014
3015#ifdef VBOX_WITH_STATISTICS
3016 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3017 if (RT_FAILURE(rc))
3018 goto failure;
3019#endif
3020
3021 rc = patmPatchGenTrapEntry(pVM, pPatch, pInstrGC);
3022 if (RT_FAILURE(rc))
3023 goto failure;
3024
3025 /* size of patch block */
3026 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3027
3028 /* Update free pointer in patch memory. */
3029 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3030 /* Round to next 8 byte boundary */
3031 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3032
3033 /* There's no jump from guest to patch code. */
3034 pPatch->cbPatchJump = 0;
3035
3036#ifdef LOG_ENABLED
3037 Log(("Patch code ----------------------------------------------------------\n"));
3038 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pPatch);
3039 Log(("Patch code ends -----------------------------------------------------\n"));
3040#endif
3041
3042#ifdef LOG_ENABLED
3043 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3044 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput);
3045 Log(("TRAP handler patch: %s", szOutput));
3046#endif
3047 Log(("Successfully installed Trap Trampoline patch at %RRv\n", pInstrGC));
3048
3049 /*
3050 * Insert into patch to guest lookup tree
3051 */
3052 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3053 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3054 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3055 AssertMsg(rc, ("RTAvlULInsert failed for %x\n", pPatchRec->CoreOffset.Key));
3056
3057 pPatch->uState = PATCH_ENABLED;
3058 return VINF_SUCCESS;
3059
3060failure:
3061 AssertMsgFailed(("Failed to install trap handler trampoline!!\n"));
3062
3063 /* Turn this cli patch into a dummy. */
3064 pPatch->uState = PATCH_REFUSED;
3065 pPatch->pPatchBlockOffset = 0;
3066
3067 /* Give back the patch memory we no longer need */
3068 Assert(orgOffsetPatchMem != (uint32_t)~0);
3069 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3070
3071 return rc;
3072}
3073
3074
3075#ifdef LOG_ENABLED
3076/**
3077 * Checks if the instruction is patched as a common IDT handler
3078 *
3079 * @returns true or false
3080 * @param pVM The VM to operate on.
3081 * @param pInstrGC Guest context pointer to the instruction
3082 *
3083 */
3084static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC)
3085{
3086 PPATMPATCHREC pRec;
3087
3088 pRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
3089 if (pRec && pRec->patch.flags & PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT)
3090 return true;
3091 return false;
3092}
3093#endif /* LOG_ENABLED */
3094
3095
3096/**
3097 * Duplicates a complete function
3098 *
3099 * @returns VBox status code.
3100 * @param pVM The VM to operate on.
3101 * @param pInstrGC    Guest context pointer to the privileged instruction
3102 * @param pPatchRec Patch record
3103 *
3104 */
3105static int patmDuplicateFunction(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3106{
3107 PPATCHINFO pPatch = &pPatchRec->patch;
3108 int rc = VERR_PATCHING_REFUSED;
3109 DISCPUSTATE cpu;
3110 uint32_t orgOffsetPatchMem = ~0;
3111
3112 Log(("patmDuplicateFunction %RRv\n", pInstrGC));
3113 /* Save original offset (in case of failures later on). */
3114 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3115
3116 /* We will not go on indefinitely with call instruction handling. */
3117 if (pVM->patm.s.ulCallDepth > PATM_MAX_CALL_DEPTH)
3118 {
3119        Log(("patmDuplicateFunction: maximum call depth reached!!\n"));
3120 return VERR_PATCHING_REFUSED;
3121 }
3122
3123 pVM->patm.s.ulCallDepth++;
3124
3125#ifdef PATM_ENABLE_CALL
3126 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
3127#endif
3128
3129 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
3130
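    /* Claim the current free spot in patch memory for this block; offPatchMem is only advanced
     * further down after successful recompilation and is restored from orgOffsetPatchMem on failure. */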
3131 pPatch->nrPatch2GuestRecs = 0;
3132 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3133 pPatch->uCurPatchOffset = 0;
3134
3135 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3136
3137 /** @note Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3138 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3139 if (RT_FAILURE(rc))
3140 goto failure;
3141
3142#ifdef VBOX_WITH_STATISTICS
3143 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3144 if (RT_FAILURE(rc))
3145 goto failure;
3146#endif
3147 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pPatch);
3148 if (rc != VINF_SUCCESS)
3149 {
3150        Log(("patmDuplicateFunction: patmRecompileCodeStream failed with %d\n", rc));
3151 goto failure;
3152 }
3153
3154 //size of patch block
3155 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3156
3157 //update free pointer in patch memory
3158 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3159 /* Round to next 8 byte boundary. */
3160 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3161
3162 pPatch->uState = PATCH_ENABLED;
3163
3164 /*
3165 * Insert into patch to guest lookup tree
3166 */
3167 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3168 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3169 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3170 AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3171 if (!rc)
3172 {
3173 rc = VERR_PATCHING_REFUSED;
3174 goto failure;
3175 }
3176
3177 /* Note that patmr3SetBranchTargets can install additional patches!! */
3178 rc = patmr3SetBranchTargets(pVM, pPatch);
3179 if (rc != VINF_SUCCESS)
3180 {
3181        Log(("patmDuplicateFunction: patmr3SetBranchTargets failed with %d\n", rc));
3182 goto failure;
3183 }
3184
3185#ifdef LOG_ENABLED
3186 Log(("Patch code ----------------------------------------------------------\n"));
3187 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pPatch);
3188 Log(("Patch code ends -----------------------------------------------------\n"));
3189#endif
3190
3191 Log(("Successfully installed function duplication patch at %RRv\n", pInstrGC));
3192
3193 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3194 pPatch->pTempInfo->nrIllegalInstr = 0;
3195
3196 pVM->patm.s.ulCallDepth--;
3197 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledFunctionPatches);
3198 return VINF_SUCCESS;
3199
3200failure:
3201 if (pPatchRec->CoreOffset.Key)
3202 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3203
3204 patmEmptyTree(pVM, &pPatch->FixupTree);
3205 pPatch->nrFixups = 0;
3206
3207 patmEmptyTree(pVM, &pPatch->JumpTree);
3208 pPatch->nrJumpRecs = 0;
3209
3210 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3211 pPatch->pTempInfo->nrIllegalInstr = 0;
3212
3213    /* Turn this patch into a dummy. */
3214 pPatch->uState = PATCH_REFUSED;
3215 pPatch->pPatchBlockOffset = 0;
3216
3217 // Give back the patch memory we no longer need
3218 Assert(orgOffsetPatchMem != (uint32_t)~0);
3219 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3220
3221 pVM->patm.s.ulCallDepth--;
3222    Log(("patmDuplicateFunction %RRv failed!!\n", pInstrGC));
3223 return rc;
3224}
3225
3226/**
3227 * Creates trampoline code to jump inside an existing patch
3228 *
3229 * @returns VBox status code.
3230 * @param pVM The VM to operate on.
3231 * @param pInstrGC    Guest context pointer to the privileged instruction
3232 * @param pPatchRec Patch record
3233 *
3234 */
3235static int patmCreateTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3236{
3237 PPATCHINFO pPatch = &pPatchRec->patch;
3238 RTRCPTR pPage, pPatchTargetGC = 0;
3239 uint32_t orgOffsetPatchMem = ~0;
3240 int rc = VERR_PATCHING_REFUSED;
3241
3242 Log(("patmCreateTrampoline %RRv\n", pInstrGC));
3243 /* Save original offset (in case of failures later on). */
3244 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3245
3246 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3247 /** @todo we already checked this before */
3248 pPage = pInstrGC & PAGE_BASE_GC_MASK;
3249
3250 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3251 if (pPatchPage)
3252 {
3253 uint32_t i;
3254
3255 for (i=0;i<pPatchPage->cCount;i++)
3256 {
3257 if (pPatchPage->aPatch[i])
3258 {
3259 PPATCHINFO pPatch2 = pPatchPage->aPatch[i];
3260
3261 if ( (pPatch2->flags & PATMFL_DUPLICATE_FUNCTION)
3262 && pPatch2->uState == PATCH_ENABLED)
3263 {
3264 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch2, pInstrGC);
3265 if (pPatchTargetGC)
3266 {
3267 uint32_t offsetPatch = pPatchTargetGC - pVM->patm.s.pPatchMemGC;
3268 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch2->Patch2GuestAddrTree, offsetPatch, false);
3269 Assert(pPatchToGuestRec);
3270
3271 pPatchToGuestRec->fJumpTarget = true;
3272 Assert(pPatchTargetGC != pPatch2->pPrivInstrGC);
3273 Log(("patmCreateTrampoline: generating jump to code inside patch at %RRv\n", pPatch2->pPrivInstrGC));
3274 pPatch2->flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
3275 break;
3276 }
3277 }
3278 }
3279 }
3280 }
3281 AssertReturn(pPatchPage && pPatchTargetGC, VERR_PATCHING_REFUSED);
3282
3283 pPatch->nrPatch2GuestRecs = 0;
3284 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3285 pPatch->uCurPatchOffset = 0;
3286
3287 /** @note Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3288 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3289 if (RT_FAILURE(rc))
3290 goto failure;
3291
3292#ifdef VBOX_WITH_STATISTICS
3293 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3294 if (RT_FAILURE(rc))
3295 goto failure;
3296#endif
3297
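    /* The actual trampoline body: a jump from this new patch block into the existing patch code
     * at pPatchTargetGC, which was located by the loop above. */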
3298 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC);
3299 if (RT_FAILURE(rc))
3300 goto failure;
3301
3302 /*
3303 * Insert into patch to guest lookup tree
3304 */
3305 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3306 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3307 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3308 AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3309 if (!rc)
3310 {
3311 rc = VERR_PATCHING_REFUSED;
3312 goto failure;
3313 }
3314
3315 /* size of patch block */
3316 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3317
3318 /* Update free pointer in patch memory. */
3319 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3320 /* Round to next 8 byte boundary */
3321 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3322
3323 /* There's no jump from guest to patch code. */
3324 pPatch->cbPatchJump = 0;
3325
3326 /* Enable the patch. */
3327 pPatch->uState = PATCH_ENABLED;
3328 /* We allow this patch to be called as a function. */
3329 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
3330 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledTrampoline);
3331 return VINF_SUCCESS;
3332
3333failure:
3334 if (pPatchRec->CoreOffset.Key)
3335 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3336
3337 patmEmptyTree(pVM, &pPatch->FixupTree);
3338 pPatch->nrFixups = 0;
3339
3340 patmEmptyTree(pVM, &pPatch->JumpTree);
3341 pPatch->nrJumpRecs = 0;
3342
3343 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3344 pPatch->pTempInfo->nrIllegalInstr = 0;
3345
3346    /* Turn this patch into a dummy. */
3347 pPatch->uState = PATCH_REFUSED;
3348 pPatch->pPatchBlockOffset = 0;
3349
3350 // Give back the patch memory we no longer need
3351 Assert(orgOffsetPatchMem != (uint32_t)~0);
3352 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3353
3354 return rc;
3355}
3356
3357
3358/**
3359 * Patch branch target function for call/jump at specified location.
3360 * (in response to a VINF_PATM_DUPLICATE_FUNCTION GC exit reason)
3361 *
3362 * @returns VBox status code.
3363 * @param pVM The VM to operate on.
3364 * @param pCtx Guest context
3365 *
3366 */
3367VMMR3DECL(int) PATMR3DuplicateFunctionRequest(PVM pVM, PCPUMCTX pCtx)
3368{
3369 RTRCPTR pBranchTarget, pPage;
3370 int rc;
3371 RTRCPTR pPatchTargetGC = 0;
3372
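    /* Register interface used by the patch code for this request, as implied by the code below:
     *   edx = flat guest address of the call/jump target to duplicate,
     *   edi = patch address of the lookup cache slot to update,
     *   eax = patch-relative address of the duplicate on return (0 on failure). */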
3373 pBranchTarget = pCtx->edx;
3374 pBranchTarget = SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pBranchTarget);
3375
3376 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3377 pPage = pBranchTarget & PAGE_BASE_GC_MASK;
3378
3379 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3380 if (pPatchPage)
3381 {
3382 uint32_t i;
3383
3384 for (i=0;i<pPatchPage->cCount;i++)
3385 {
3386 if (pPatchPage->aPatch[i])
3387 {
3388 PPATCHINFO pPatch = pPatchPage->aPatch[i];
3389
3390 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
3391 && pPatch->uState == PATCH_ENABLED)
3392 {
3393 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pBranchTarget);
3394 if (pPatchTargetGC)
3395 {
3396 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateUseExisting);
3397 break;
3398 }
3399 }
3400 }
3401 }
3402 }
3403
3404 if (pPatchTargetGC)
3405 {
3406 /* Create a trampoline that also sets PATM_INTERRUPTFLAG. */
3407 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_TRAMPOLINE);
3408 }
3409 else
3410 {
3411 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
3412 }
3413
3414 if (rc == VINF_SUCCESS)
3415 {
3416 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pBranchTarget);
3417 Assert(pPatchTargetGC);
3418 }
3419
3420 if (pPatchTargetGC)
3421 {
3422 pCtx->eax = pPatchTargetGC;
3423 pCtx->eax = pCtx->eax - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC; /* make it relative */
3424 }
3425 else
3426 {
3427 /* We add a dummy entry into the lookup cache so we won't get bombarded with the same requests over and over again. */
3428 pCtx->eax = 0;
3429 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQFailed);
3430 }
3431 Assert(PATMIsPatchGCAddr(pVM, pCtx->edi));
3432 rc = PATMAddBranchToLookupCache(pVM, pCtx->edi, pBranchTarget, pCtx->eax);
3433 AssertRC(rc);
3434
3435 pCtx->eip += PATM_ILLEGAL_INSTR_SIZE;
3436 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQSuccess);
3437 return VINF_SUCCESS;
3438}
3439
3440/**
3441 * Replaces a function call by a call to an existing function duplicate (or jmp -> jmp)
3442 *
3443 * @returns VBox status code.
3444 * @param pVM The VM to operate on.
3445 * @param pCpu Disassembly CPU structure ptr
3446 * @param pInstrGC    Guest context pointer to the privileged instruction
3447 * @param pPatch Patch record
3448 *
3449 */
3450static int patmReplaceFunctionCall(PVM pVM, DISCPUSTATE *pCpu, RTRCPTR pInstrGC, PPATCHINFO pPatch)
3451{
3452 int rc = VERR_PATCHING_REFUSED;
3453 DISCPUSTATE cpu;
3454 RTRCPTR pTargetGC;
3455 PPATMPATCHREC pPatchFunction;
3456 uint32_t opsize;
3457 bool disret;
3458#ifdef LOG_ENABLED
3459 char szOutput[256];
3460#endif
3461
3462 Assert(pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL);
3463 Assert((pCpu->pCurInstr->opcode == OP_CALL || pCpu->pCurInstr->opcode == OP_JMP) && pCpu->opsize == SIZEOF_NEARJUMP32);
3464
3465 if ((pCpu->pCurInstr->opcode != OP_CALL && pCpu->pCurInstr->opcode != OP_JMP) || pCpu->opsize != SIZEOF_NEARJUMP32)
3466 {
3467 rc = VERR_PATCHING_REFUSED;
3468 goto failure;
3469 }
3470
3471 pTargetGC = PATMResolveBranch(pCpu, pInstrGC);
3472 if (pTargetGC == 0)
3473 {
3474 Log(("We don't support far jumps here!! (%08X)\n", pCpu->param1.flags));
3475 rc = VERR_PATCHING_REFUSED;
3476 goto failure;
3477 }
3478
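    /* Look up an existing function duplicate patch for the branch target. */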
3479 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3480 if (pPatchFunction == NULL)
3481 {
3482 for(;;)
3483 {
3484 /* It could be an indirect call (call -> jmp dest).
3485 * Note that it's dangerous to assume the jump will never change...
3486 */
3487 uint8_t *pTmpInstrHC;
3488
3489 pTmpInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pTargetGC);
3490 Assert(pTmpInstrHC);
3491 if (pTmpInstrHC == 0)
3492 break;
3493
3494 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3495 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pTargetGC, pTmpInstrHC, &opsize, NULL);
3496 if (disret == false || cpu.pCurInstr->opcode != OP_JMP)
3497 break;
3498
3499 pTargetGC = PATMResolveBranch(&cpu, pTargetGC);
3500 if (pTargetGC == 0)
3501 {
3502 break;
3503 }
3504
3505 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3506 break;
3507 }
3508 if (pPatchFunction == 0)
3509 {
3510 AssertMsgFailed(("Unable to find duplicate function %RRv\n", pTargetGC));
3511 rc = VERR_PATCHING_REFUSED;
3512 goto failure;
3513 }
3514 }
3515
3516 // make a copy of the guest code bytes that will be overwritten
3517 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
3518
3519 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3520 AssertRC(rc);
3521
3522 /* Now replace the original call in the guest code */
3523 rc = patmGenCallToPatch(pVM, pPatch, PATCHCODE_PTR_GC(&pPatchFunction->patch), true);
3524 AssertRC(rc);
3525 if (RT_FAILURE(rc))
3526 goto failure;
3527
3528 /* Lowest and highest address for write monitoring. */
3529 pPatch->pInstrGCLowest = pInstrGC;
3530 pPatch->pInstrGCHighest = pInstrGC + pCpu->opsize;
3531
3532#ifdef LOG_ENABLED
3533 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3534 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput);
3535 Log(("Call patch: %s", szOutput));
3536#endif
3537
3538 Log(("Successfully installed function replacement patch at %RRv\n", pInstrGC));
3539
3540 pPatch->uState = PATCH_ENABLED;
3541 return VINF_SUCCESS;
3542
3543failure:
3544 /* Turn this patch into a dummy. */
3545 pPatch->uState = PATCH_REFUSED;
3546
3547 return rc;
3548}
3549
3550/**
3551 * Replace the address in an MMIO instruction with the cached version.
3552 *
3553 * @returns VBox status code.
3554 * @param pVM The VM to operate on.
3555 * @param pInstrGC    Guest context pointer to the privileged instruction
3556 * @param pCpu Disassembly CPU structure ptr
3557 * @param pPatch Patch record
3558 *
3559 * @note returns failure if patching is not allowed or possible
3560 *
3561 */
3562static int patmPatchMMIOInstr(PVM pVM, RTRCPTR pInstrGC, DISCPUSTATE *pCpu, PPATCHINFO pPatch)
3563{
3564 uint8_t *pPB;
3565 int rc = VERR_PATCHING_REFUSED;
3566#ifdef LOG_ENABLED
3567 DISCPUSTATE cpu;
3568 uint32_t opsize;
3569 bool disret;
3570 char szOutput[256];
3571#endif
3572
3573 Assert(pVM->patm.s.mmio.pCachedData);
3574 if (!pVM->patm.s.mmio.pCachedData)
3575 goto failure;
3576
3577 if (pCpu->param2.flags != USE_DISPLACEMENT32)
3578 goto failure;
3579
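    /* Only instructions whose second operand is a 32-bit displacement (the MMIO address) qualify;
     * the code below assumes that displacement occupies the last 4 bytes of the instruction. */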
3580 pPB = pPatch->pPrivInstrHC;
3581
3582 /* Add relocation record for cached data access. */
3583 if (patmPatchAddReloc32(pVM, pPatch, &pPB[pCpu->opsize - sizeof(RTRCPTR)], FIXUP_ABSOLUTE, pPatch->pPrivInstrGC, pVM->patm.s.mmio.pCachedData) != VINF_SUCCESS)
3584 {
3585 Log(("Relocation failed for cached mmio address!!\n"));
3586 return VERR_PATCHING_REFUSED;
3587 }
3588#ifdef LOG_ENABLED
3589 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3590 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput);
3591 Log(("MMIO patch old instruction: %s", szOutput));
3592#endif
3593
3594 /* Save original instruction. */
3595 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3596 AssertRC(rc);
3597
3598 pPatch->cbPatchJump = pPatch->cbPrivInstr; /* bit of a misnomer in this case; size of replacement instruction. */
3599
3600 /* Replace address with that of the cached item. */
3601 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pInstrGC + pCpu->opsize - sizeof(RTRCPTR), &pVM->patm.s.mmio.pCachedData, sizeof(RTRCPTR));
3602 AssertRC(rc);
3603 if (RT_FAILURE(rc))
3604 {
3605 goto failure;
3606 }
3607
3608#ifdef LOG_ENABLED
3609 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3610 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput);
3611 Log(("MMIO patch: %s", szOutput));
3612#endif
3613 pVM->patm.s.mmio.pCachedData = 0;
3614 pVM->patm.s.mmio.GCPhys = 0;
3615 pPatch->uState = PATCH_ENABLED;
3616 return VINF_SUCCESS;
3617
3618failure:
3619 /* Turn this patch into a dummy. */
3620 pPatch->uState = PATCH_REFUSED;
3621
3622 return rc;
3623}
3624
3625
3626/**
3627 * Replace the address in an MMIO instruction with the cached version. (instruction is part of an existing patch)
3628 *
3629 * @returns VBox status code.
3630 * @param pVM The VM to operate on.
3631 * @param pInstrGC    Guest context pointer to the privileged instruction
3632 * @param pPatch Patch record
3633 *
3634 * @note returns failure if patching is not allowed or possible
3635 *
3636 */
3637static int patmPatchPATMMMIOInstr(PVM pVM, RTRCPTR pInstrGC, PPATCHINFO pPatch)
3638{
3639 DISCPUSTATE cpu;
3640 uint32_t opsize;
3641 bool disret;
3642 uint8_t *pInstrHC;
3643#ifdef LOG_ENABLED
3644 char szOutput[256];
3645#endif
3646
3647 AssertReturn(pVM->patm.s.mmio.pCachedData, VERR_INVALID_PARAMETER);
3648
3649 /* Convert GC to HC address. */
3650 pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pInstrGC);
3651 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
3652
3653 /* Disassemble mmio instruction. */
3654 cpu.mode = pPatch->uOpMode;
3655 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, NULL);
3656 if (disret == false)
3657 {
3658 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
3659 return VERR_PATCHING_REFUSED;
3660 }
3661
3662 AssertMsg(opsize <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", opsize));
3663 if (opsize > MAX_INSTR_SIZE)
3664 return VERR_PATCHING_REFUSED;
3665 if (cpu.param2.flags != USE_DISPLACEMENT32)
3666 return VERR_PATCHING_REFUSED;
3667
3668 /* Add relocation record for cached data access. */
3669 if (patmPatchAddReloc32(pVM, pPatch, &pInstrHC[cpu.opsize - sizeof(RTRCPTR)], FIXUP_ABSOLUTE) != VINF_SUCCESS)
3670 {
3671 Log(("Relocation failed for cached mmio address!!\n"));
3672 return VERR_PATCHING_REFUSED;
3673 }
3674 /* Replace address with that of the cached item. */
3675 *(RTRCPTR *)&pInstrHC[cpu.opsize - sizeof(RTRCPTR)] = pVM->patm.s.mmio.pCachedData;
3676
3677 /* Lowest and highest address for write monitoring. */
3678 pPatch->pInstrGCLowest = pInstrGC;
3679 pPatch->pInstrGCHighest = pInstrGC + cpu.opsize;
3680
3681#ifdef LOG_ENABLED
3682 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3683 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, szOutput);
3684 Log(("MMIO patch: %s", szOutput));
3685#endif
3686
3687 pVM->patm.s.mmio.pCachedData = 0;
3688 pVM->patm.s.mmio.GCPhys = 0;
3689 return VINF_SUCCESS;
3690}
3691
3692/**
3693 * Activates an int3 patch
3694 *
3695 * @returns VBox status code.
3696 * @param pVM The VM to operate on.
3697 * @param pPatch Patch record
3698 */
3699static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3700{
3701 uint8_t ASMInt3 = 0xCC;
3702 int rc;
3703
3704 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3705 Assert(pPatch->uState != PATCH_ENABLED);
3706
3707 /* Replace first opcode byte with 'int 3'. */
3708 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, &ASMInt3, sizeof(ASMInt3));
3709 AssertRC(rc);
3710
3711 pPatch->cbPatchJump = sizeof(ASMInt3);
3712
3713 return rc;
3714}
3715
3716/**
3717 * Deactivates an int3 patch
3718 *
3719 * @returns VBox status code.
3720 * @param pVM The VM to operate on.
3721 * @param pPatch Patch record
3722 */
3723static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3724{
3725 uint8_t ASMInt3 = 0xCC;
3726 int rc;
3727
3728 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3729 Assert(pPatch->uState == PATCH_ENABLED || pPatch->uState == PATCH_DIRTY);
3730
3731 /* Restore first opcode byte. */
3732 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, sizeof(ASMInt3));
3733 AssertRC(rc);
3734 return rc;
3735}
3736
3737/**
3738 * Replace an instruction with a breakpoint (0xCC), that is handled dynamically in the guest context.
3739 *
3740 * @returns VBox status code.
3741 * @param pVM The VM to operate on.
3742 * @param pInstrGC    Guest context pointer to the privileged instruction
3743 * @param pInstrHC    Host context pointer to the privileged instruction
3744 * @param pCpu Disassembly CPU structure ptr
3745 * @param pPatch Patch record
3746 *
3747 * @note returns failure if patching is not allowed or possible
3748 *
3749 */
3750VMMR3DECL(int) PATMR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATCHINFO pPatch)
3751{
3752 uint8_t ASMInt3 = 0xCC;
3753 int rc;
3754
3755    /** @note Do not use patch memory here! It might be called during patch installation too. */
3756
3757#ifdef LOG_ENABLED
3758 DISCPUSTATE cpu;
3759 char szOutput[256];
3760 uint32_t opsize;
3761
3762 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3763 PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, szOutput);
3764 Log(("PATMR3PatchInstrInt3: %s", szOutput));
3765#endif
3766
3767 /* Save the original instruction. */
3768 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3769 AssertRC(rc);
3770 pPatch->cbPatchJump = sizeof(ASMInt3); /* bit of a misnomer in this case; size of replacement instruction. */
3771
3772 pPatch->flags |= PATMFL_INT3_REPLACEMENT;
3773
3774 /* Replace first opcode byte with 'int 3'. */
3775 rc = patmActivateInt3Patch(pVM, pPatch);
3776 if (RT_FAILURE(rc))
3777 goto failure;
3778
3779 /* Lowest and highest address for write monitoring. */
3780 pPatch->pInstrGCLowest = pInstrGC;
3781 pPatch->pInstrGCHighest = pInstrGC + pCpu->opsize;
3782
3783 pPatch->uState = PATCH_ENABLED;
3784 return VINF_SUCCESS;
3785
3786failure:
3787 /* Turn this patch into a dummy. */
3788 return VERR_PATCHING_REFUSED;
3789}
3790
3791#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
3792/**
3793 * Patch a jump instruction at specified location
3794 *
3795 * @returns VBox status code.
3796 * @param pVM The VM to operate on.
3797 * @param pInstrGC    Guest context pointer to the privileged instruction
3798 * @param pInstrHC    Host context pointer to the privileged instruction
3799 * @param pCpu Disassembly CPU structure ptr
3800 * @param pPatchRec Patch record
3801 *
3802 * @note returns failure if patching is not allowed or possible
3803 *
3804 */
3805int patmPatchJump(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATMPATCHREC pPatchRec)
3806{
3807 PPATCHINFO pPatch = &pPatchRec->patch;
3808 int rc = VERR_PATCHING_REFUSED;
3809#ifdef LOG_ENABLED
3810 bool disret;
3811 DISCPUSTATE cpu;
3812 uint32_t opsize;
3813 char szOutput[256];
3814#endif
3815
3816 pPatch->pPatchBlockOffset = 0; /* doesn't use patch memory */
3817 pPatch->uCurPatchOffset = 0;
3818 pPatch->cbPatchBlockSize = 0;
3819 pPatch->flags |= PATMFL_SINGLE_INSTRUCTION;
3820
3821 /*
3822 * Instruction replacements such as these should never be interrupted. I've added code to EM.cpp to
3823 * make sure this never happens. (unless a trap is triggered (intentionally or not))
3824 */
3825 switch (pCpu->pCurInstr->opcode)
3826 {
3827 case OP_JO:
3828 case OP_JNO:
3829 case OP_JC:
3830 case OP_JNC:
3831 case OP_JE:
3832 case OP_JNE:
3833 case OP_JBE:
3834 case OP_JNBE:
3835 case OP_JS:
3836 case OP_JNS:
3837 case OP_JP:
3838 case OP_JNP:
3839 case OP_JL:
3840 case OP_JNL:
3841 case OP_JLE:
3842 case OP_JNLE:
3843 case OP_JMP:
3844 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
3845 Assert(pCpu->param1.flags & USE_IMMEDIATE32_REL);
3846 if (!(pCpu->param1.flags & USE_IMMEDIATE32_REL))
3847 goto failure;
3848
3849 Assert(pCpu->opsize == SIZEOF_NEARJUMP32 || pCpu->opsize == SIZEOF_NEAR_COND_JUMP32);
3850 if (pCpu->opsize != SIZEOF_NEARJUMP32 && pCpu->opsize != SIZEOF_NEAR_COND_JUMP32)
3851 goto failure;
3852
3853 if (PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + pCpu->opsize))
3854 {
3855 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
3856 AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
3857 rc = VERR_PATCHING_REFUSED;
3858 goto failure;
3859 }
3860
3861 break;
3862
3863 default:
3864 goto failure;
3865 }
3866
3867 // make a copy of the guest code bytes that will be overwritten
3868 Assert(pCpu->opsize <= sizeof(pPatch->aPrivInstr));
3869 Assert(pCpu->opsize >= SIZEOF_NEARJUMP32);
3870 pPatch->cbPatchJump = pCpu->opsize;
3871
3872 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3873 AssertRC(rc);
3874
3875 /* Now insert a jump in the guest code. */
3876 /*
3877 * A conflict jump patch needs to be treated differently; we'll just replace the relative jump address with one that
3878 * references the target instruction in the conflict patch.
3879 */
3880 RTRCPTR pJmpDest = PATMR3GuestGCPtrToPatchGCPtr(pVM, pInstrGC + pCpu->opsize + (int32_t)pCpu->param1.parval);
3881
3882 AssertMsg(pJmpDest, ("PATMR3GuestGCPtrToPatchGCPtr failed for %RRv\n", pInstrGC + pCpu->opsize + (int32_t)pCpu->param1.parval));
3883 pPatch->pPatchJumpDestGC = pJmpDest;
3884
3885 rc = patmGenJumpToPatch(pVM, pPatch, true);
3886 AssertRC(rc);
3887 if (RT_FAILURE(rc))
3888 goto failure;
3889
3890 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
3891
3892#ifdef LOG_ENABLED
3893 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3894 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput);
3895 Log(("%s patch: %s", patmGetInstructionString(pPatch->opcode, pPatch->flags), szOutput));
3896#endif
3897
3898 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
3899
3900 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledJump);
3901
3902 /* Lowest and highest address for write monitoring. */
3903 pPatch->pInstrGCLowest = pInstrGC;
3904 pPatch->pInstrGCHighest = pInstrGC + pPatch->cbPatchJump;
3905
3906 pPatch->uState = PATCH_ENABLED;
3907 return VINF_SUCCESS;
3908
3909failure:
3910    /* Turn this patch into a dummy. */
3911 pPatch->uState = PATCH_REFUSED;
3912
3913 return rc;
3914}
3915#endif /* PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES */
3916
3917
3918/**
3919 * Gives a hint to PATM about supervisor guest instructions
3920 *
3921 * @returns VBox status code.
3922 * @param pVM The VM to operate on.
3923 * @param pInstrGC    Guest context pointer to the privileged instruction
3924 * @param flags Patch flags
3925 */
3926VMMR3DECL(int) PATMR3AddHint(PVM pVM, RTRCPTR pInstrGC, uint32_t flags)
3927{
3928 Assert(pInstrGC);
3929 Assert(flags == PATMFL_CODE32);
3930
3931 Log(("PATMR3AddHint %RRv\n", pInstrGC));
3932 return PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_INSTR_HINT);
3933}
3934
3935/**
3936 * Patch privileged instruction at specified location
3937 *
3938 * @returns VBox status code.
3939 * @param pVM The VM to operate on.
3940 * @param pInstrGC    Guest context pointer to the privileged instruction (0:32 flat address)
3941 * @param flags Patch flags
3942 *
3943 * @note returns failure if patching is not allowed or possible
3944 */
3945VMMR3DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
3946{
3947 DISCPUSTATE cpu;
3948 R3PTRTYPE(uint8_t *) pInstrHC;
3949 uint32_t opsize;
3950 PPATMPATCHREC pPatchRec;
3951 PCPUMCTX pCtx = 0;
3952 bool disret;
3953 int rc;
3954 PVMCPU pVCpu = VMMGetCpu0(pVM);
3955
3956 if (!pVM || pInstrGC == 0 || (flags & ~(PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_SYSENTER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_REPLACE_FUNCTION_CALL|PATMFL_GUEST_SPECIFIC|PATMFL_INT3_REPLACEMENT|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_MMIO_ACCESS|PATMFL_TRAMPOLINE|PATMFL_INSTR_HINT|PATMFL_JUMP_CONFLICT)))
3957 {
3958 AssertFailed();
3959 return VERR_INVALID_PARAMETER;
3960 }
3961
3962 if (PATMIsEnabled(pVM) == false)
3963 return VERR_PATCHING_REFUSED;
3964
3965 /* Test for patch conflict only with patches that actually change guest code. */
3966 if (!(flags & (PATMFL_GUEST_SPECIFIC|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAMPOLINE)))
3967 {
3968 PPATCHINFO pConflictPatch = PATMFindActivePatchByEntrypoint(pVM, pInstrGC);
3969 AssertReleaseMsg(pConflictPatch == 0, ("Unable to patch overwritten instruction at %RRv (%RRv)\n", pInstrGC, pConflictPatch->pPrivInstrGC));
3970 if (pConflictPatch != 0)
3971 return VERR_PATCHING_REFUSED;
3972 }
3973
3974 if (!(flags & PATMFL_CODE32))
3975 {
3976        /** @todo Only 32-bit code right now */
3977        AssertMsgFailed(("PATMR3InstallPatch: We don't support 16-bit code at this moment!!\n"));
3978 return VERR_NOT_IMPLEMENTED;
3979 }
3980
3981 /* We ran out of patch memory; don't bother anymore. */
3982 if (pVM->patm.s.fOutOfMemory == true)
3983 return VERR_PATCHING_REFUSED;
3984
3985 /* Make sure the code selector is wide open; otherwise refuse. */
3986 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
3987 if (CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)) == 0)
3988 {
3989 RTRCPTR pInstrGCFlat = SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pInstrGC);
3990 if (pInstrGCFlat != pInstrGC)
3991 {
3992 Log(("PATMR3InstallPatch: code selector not wide open: %04x:%RRv != %RRv eflags=%08x\n", pCtx->cs, pInstrGCFlat, pInstrGC, pCtx->eflags.u32));
3993 return VERR_PATCHING_REFUSED;
3994 }
3995 }
3996
3997    /** @note the OpenBSD specific check will break if we allow additional patches to be installed (int 3) */
3998 if (!(flags & PATMFL_GUEST_SPECIFIC))
3999 {
4000 /* New code. Make sure CSAM has a go at it first. */
4001 CSAMR3CheckCode(pVM, pInstrGC);
4002 }
4003
4004 /** @note obsolete */
4005 if ( PATMIsPatchGCAddr(pVM, pInstrGC)
4006 && (flags & PATMFL_MMIO_ACCESS))
4007 {
4008 RTRCUINTPTR offset;
4009 void *pvPatchCoreOffset;
4010
4011 /* Find the patch record. */
4012 offset = pInstrGC - pVM->patm.s.pPatchMemGC;
4013 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
4014 if (pvPatchCoreOffset == NULL)
4015 {
4016 AssertMsgFailed(("PATMR3InstallPatch: patch not found at address %RRv!!\n", pInstrGC));
4017 return VERR_PATCH_NOT_FOUND; //fatal error
4018 }
4019 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
4020
4021 return patmPatchPATMMMIOInstr(pVM, pInstrGC, &pPatchRec->patch);
4022 }
4023
4024 AssertReturn(!PATMIsPatchGCAddr(pVM, pInstrGC), VERR_PATCHING_REFUSED);
4025
4026 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4027 if (pPatchRec)
4028 {
4029 Assert(!(flags & PATMFL_TRAMPOLINE));
4030
4031 /* Hints about existing patches are ignored. */
4032 if (flags & PATMFL_INSTR_HINT)
4033 return VERR_PATCHING_REFUSED;
4034
4035 if (pPatchRec->patch.uState == PATCH_DISABLE_PENDING)
4036 {
4037 Log(("PATMR3InstallPatch: disable operation is pending for patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
4038 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
4039 Assert(pPatchRec->patch.uState == PATCH_DISABLED);
4040 }
4041
4042 if (pPatchRec->patch.uState == PATCH_DISABLED)
4043 {
4044 /* A patch, for which we previously received a hint, will be enabled and turned into a normal patch. */
4045 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4046 {
4047 Log(("Enabling HINTED patch %RRv\n", pInstrGC));
4048 pPatchRec->patch.flags &= ~PATMFL_INSTR_HINT;
4049 }
4050 else
4051 Log(("Enabling patch %RRv again\n", pInstrGC));
4052
4053 /** @todo we shouldn't disable and enable patches too often (it's relatively cheap, but pointless if it always happens) */
4054 rc = PATMR3EnablePatch(pVM, pInstrGC);
4055 if (RT_SUCCESS(rc))
4056 return VWRN_PATCH_ENABLED;
4057
4058 return rc;
4059 }
4060 if ( pPatchRec->patch.uState == PATCH_ENABLED
4061 || pPatchRec->patch.uState == PATCH_DIRTY)
4062 {
4063 /*
4064 * The patch might have been overwritten.
4065 */
4066 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
4067 if (pPatchRec->patch.uState != PATCH_REFUSED && pPatchRec->patch.uState != PATCH_UNUSABLE)
4068 {
4069 /* Patch must have been overwritten; remove it and pretend nothing happened. */
4070 Log(("Patch an existing patched instruction?!? (%RRv)\n", pInstrGC));
4071 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_MMIO_ACCESS|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
4072 {
4073 if (flags & PATMFL_IDTHANDLER)
4074 pPatchRec->patch.flags |= (flags & (PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER|PATMFL_INTHANDLER)); /* update the type */
4075
4076 return VERR_PATM_ALREADY_PATCHED; /* already done once */
4077 }
4078 }
4079 rc = PATMR3RemovePatch(pVM, pInstrGC);
4080 if (RT_FAILURE(rc))
4081 return VERR_PATCHING_REFUSED;
4082 }
4083 else
4084 {
4085 AssertMsg(pPatchRec->patch.uState == PATCH_REFUSED || pPatchRec->patch.uState == PATCH_UNUSABLE, ("Patch an existing patched instruction?!? (%RRv, state=%d)\n", pInstrGC, pPatchRec->patch.uState));
4086 /* already tried it once! */
4087 return VERR_PATCHING_REFUSED;
4088 }
4089 }
4090
4091 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
4092 if (RT_FAILURE(rc))
4093 {
4094 Log(("Out of memory!!!!\n"));
4095 return VERR_NO_MEMORY;
4096 }
4097 pPatchRec->Core.Key = pInstrGC;
4098 pPatchRec->patch.uState = PATCH_REFUSED; //default
4099 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
4100 Assert(rc);
4101
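    /* Resolve the guest physical address of the instruction so ROM/MMIO/invalid pages can be
     * rejected and a host (R3) mapping of the instruction bytes can be obtained. */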
4102 RTGCPHYS GCPhys;
4103 rc = PGMGstGetPage(pVCpu, pInstrGC, NULL, &GCPhys);
4104 if (rc != VINF_SUCCESS)
4105 {
4106 Log(("PGMGstGetPage failed with %Rrc\n", rc));
4107 return rc;
4108 }
4109 /* Disallow patching instructions inside ROM code; complete function duplication is allowed though. */
4110 if ( !(flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAMPOLINE))
4111 && !PGMPhysIsGCPhysNormal(pVM, GCPhys))
4112 {
4113 Log(("Code at %RGv (phys %RGp) is in a ROM, MMIO or invalid page - refused\n", pInstrGC, GCPhys));
4114 return VERR_PATCHING_REFUSED;
4115 }
4116 GCPhys = GCPhys + (pInstrGC & PAGE_OFFSET_MASK);
4117 rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys, MAX_INSTR_SIZE, (void **)&pInstrHC);
4118 AssertRCReturn(rc, rc);
4119
4120 pPatchRec->patch.pPrivInstrHC = pInstrHC;
4121 pPatchRec->patch.pPrivInstrGC = pInstrGC;
4122 pPatchRec->patch.flags = flags;
4123 pPatchRec->patch.uOpMode = (flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
4124
4125 pPatchRec->patch.pInstrGCLowest = pInstrGC;
4126 pPatchRec->patch.pInstrGCHighest = pInstrGC;
4127
4128 if (!(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION | PATMFL_IDTHANDLER | PATMFL_SYSENTER | PATMFL_TRAMPOLINE)))
4129 {
4130 /*
4131 * Close proximity to an unusable patch is a possible hint that this patch would turn out to be dangerous too!
4132 */
4133 PPATMPATCHREC pPatchNear = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, (pInstrGC + SIZEOF_NEARJUMP32 - 1), false);
4134 if (pPatchNear)
4135 {
4136 if (pPatchNear->patch.uState == PATCH_UNUSABLE && pInstrGC < pPatchNear->patch.pPrivInstrGC && pInstrGC + SIZEOF_NEARJUMP32 > pPatchNear->patch.pPrivInstrGC)
4137 {
4138            Log(("Dangerous patch; would overwrite the unusable patch at %RRv\n", pPatchNear->patch.pPrivInstrGC));
4139
4140 pPatchRec->patch.uState = PATCH_UNUSABLE;
4141 /*
4142 * Leave the new patch active as it's marked unusable; to prevent us from checking it over and over again
4143 */
4144 return VERR_PATCHING_REFUSED;
4145 }
4146 }
4147 }
4148
4149 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
4150 if (pPatchRec->patch.pTempInfo == 0)
4151 {
4152 Log(("Out of memory!!!!\n"));
4153 return VERR_NO_MEMORY;
4154 }
4155
4156 cpu.mode = pPatchRec->patch.uOpMode;
4157 disret = PATMR3DISInstr(pVM, &pPatchRec->patch, &cpu, pInstrGC, pInstrHC, &opsize, NULL);
4158 if (disret == false)
4159 {
4160 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
4161 return VERR_PATCHING_REFUSED;
4162 }
4163
4164 AssertMsg(opsize <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", opsize));
4165 if (opsize > MAX_INSTR_SIZE)
4166 {
4167 return VERR_PATCHING_REFUSED;
4168 }
4169
4170 pPatchRec->patch.cbPrivInstr = opsize;
4171 pPatchRec->patch.opcode = cpu.pCurInstr->opcode;
4172
4173 /* Restricted hinting for now. */
4174 Assert(!(flags & PATMFL_INSTR_HINT) || cpu.pCurInstr->opcode == OP_CLI);
4175
4176 /* Allocate statistics slot */
4177 if (pVM->patm.s.uCurrentPatchIdx < PATM_STAT_MAX_COUNTERS)
4178 {
4179 pPatchRec->patch.uPatchIdx = pVM->patm.s.uCurrentPatchIdx++;
4180 }
4181 else
4182 {
4183 Log(("WARNING: Patch index wrap around!!\n"));
4184 pPatchRec->patch.uPatchIdx = PATM_STAT_INDEX_DUMMY;
4185 }
4186
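    /* Dispatch to the installer that matches the requested patch type. */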
4187 if (pPatchRec->patch.flags & PATMFL_TRAPHANDLER)
4188 {
4189 rc = patmInstallTrapTrampoline(pVM, pInstrGC, pPatchRec);
4190 }
4191 else
4192 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION ))
4193    if (pPatchRec->patch.flags & PATMFL_DUPLICATE_FUNCTION)
4194 rc = patmDuplicateFunction(pVM, pInstrGC, pPatchRec);
4195 }
4196 else
4197 if (pPatchRec->patch.flags & PATMFL_TRAMPOLINE)
4198 {
4199 rc = patmCreateTrampoline(pVM, pInstrGC, pPatchRec);
4200 }
4201 else
4202 if (pPatchRec->patch.flags & PATMFL_REPLACE_FUNCTION_CALL)
4203 {
4204 rc = patmReplaceFunctionCall(pVM, &cpu, pInstrGC, &pPatchRec->patch);
4205 }
4206 else
4207 if (pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT)
4208 {
4209 rc = PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4210 }
4211 else
4212 if (pPatchRec->patch.flags & PATMFL_MMIO_ACCESS)
4213 {
4214 rc = patmPatchMMIOInstr(pVM, pInstrGC, &cpu, &pPatchRec->patch);
4215 }
4216 else
4217 if (pPatchRec->patch.flags & (PATMFL_IDTHANDLER|PATMFL_SYSENTER))
4218 {
4219 if (pPatchRec->patch.flags & PATMFL_SYSENTER)
4220 pPatchRec->patch.flags |= PATMFL_IDTHANDLER; /* we treat a sysenter handler as an IDT handler */
4221
4222 rc = patmIdtHandler(pVM, pInstrGC, pInstrHC, opsize, pPatchRec);
4223#ifdef VBOX_WITH_STATISTICS
4224 if ( rc == VINF_SUCCESS
4225 && (pPatchRec->patch.flags & PATMFL_SYSENTER))
4226 {
4227 pVM->patm.s.uSysEnterPatchIdx = pPatchRec->patch.uPatchIdx;
4228 }
4229#endif
4230 }
4231 else
4232 if (pPatchRec->patch.flags & PATMFL_GUEST_SPECIFIC)
4233 {
4234 switch (cpu.pCurInstr->opcode)
4235 {
4236 case OP_SYSENTER:
4237 case OP_PUSH:
4238 rc = PATMInstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4239 if (rc == VINF_SUCCESS)
4240 {
4242                    Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4243 return rc;
4244 }
4245 break;
4246
4247 default:
4248 rc = VERR_NOT_IMPLEMENTED;
4249 break;
4250 }
4251 }
4252 else
4253 {
4254 switch (cpu.pCurInstr->opcode)
4255 {
4256 case OP_SYSENTER:
4257 rc = PATMInstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4258 if (rc == VINF_SUCCESS)
4259 {
4260 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4261 return VINF_SUCCESS;
4262 }
4263 break;
4264
4265#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
4266 case OP_JO:
4267 case OP_JNO:
4268 case OP_JC:
4269 case OP_JNC:
4270 case OP_JE:
4271 case OP_JNE:
4272 case OP_JBE:
4273 case OP_JNBE:
4274 case OP_JS:
4275 case OP_JNS:
4276 case OP_JP:
4277 case OP_JNP:
4278 case OP_JL:
4279 case OP_JNL:
4280 case OP_JLE:
4281 case OP_JNLE:
4282 case OP_JECXZ:
4283 case OP_LOOP:
4284 case OP_LOOPNE:
4285 case OP_LOOPE:
4286 case OP_JMP:
4287 if (pPatchRec->patch.flags & PATMFL_JUMP_CONFLICT)
4288 {
4289 rc = patmPatchJump(pVM, pInstrGC, pInstrHC, &cpu, pPatchRec);
4290 break;
4291 }
4292 return VERR_NOT_IMPLEMENTED;
4293#endif
4294
4295 case OP_PUSHF:
4296 case OP_CLI:
4297 Log(("PATMR3InstallPatch %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4298 rc = PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, cpu.pCurInstr->opcode, opsize, pPatchRec);
4299 break;
4300
4301 case OP_STR:
4302 case OP_SGDT:
4303 case OP_SLDT:
4304 case OP_SIDT:
4305 case OP_CPUID:
4306 case OP_LSL:
4307 case OP_LAR:
4308 case OP_SMSW:
4309 case OP_VERW:
4310 case OP_VERR:
4311 case OP_IRET:
4312 rc = PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4313 break;
4314
4315 default:
4316 return VERR_NOT_IMPLEMENTED;
4317 }
4318 }
4319
4320 if (rc != VINF_SUCCESS)
4321 {
4322 if (pPatchRec && pPatchRec->patch.nrPatch2GuestRecs)
4323 {
4324 patmEmptyTreeU32(pVM, &pPatchRec->patch.Patch2GuestAddrTree);
4325 pPatchRec->patch.nrPatch2GuestRecs = 0;
4326 }
4327 pVM->patm.s.uCurrentPatchIdx--;
4328 }
4329 else
4330 {
4331 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
4332 AssertRCReturn(rc, rc);
4333
4334 /* Keep track upper and lower boundaries of patched instructions */
4335 if (pPatchRec->patch.pInstrGCLowest < pVM->patm.s.pPatchedInstrGCLowest)
4336 pVM->patm.s.pPatchedInstrGCLowest = pPatchRec->patch.pInstrGCLowest;
4337 if (pPatchRec->patch.pInstrGCHighest > pVM->patm.s.pPatchedInstrGCHighest)
4338 pVM->patm.s.pPatchedInstrGCHighest = pPatchRec->patch.pInstrGCHighest;
4339
4340 Log(("Patch lowest %RRv highest %RRv\n", pPatchRec->patch.pInstrGCLowest, pPatchRec->patch.pInstrGCHighest));
4341 Log(("Global lowest %RRv highest %RRv\n", pVM->patm.s.pPatchedInstrGCLowest, pVM->patm.s.pPatchedInstrGCHighest));
4342
4343 STAM_COUNTER_ADD(&pVM->patm.s.StatInstalled, 1);
4344 STAM_COUNTER_ADD(&pVM->patm.s.StatPATMMemoryUsed, pPatchRec->patch.cbPatchBlockSize);
4345
4346 rc = VINF_SUCCESS;
4347
4348        /* Patch hints are not enabled by default. Only when they are actually encountered. */
4349 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4350 {
4351 rc = PATMR3DisablePatch(pVM, pInstrGC);
4352 AssertRCReturn(rc, rc);
4353 }
4354
4355#ifdef VBOX_WITH_STATISTICS
4356 /* Register statistics counter */
4357 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
4358 {
4359 STAMR3RegisterCallback(pVM, &pPatchRec->patch, STAMVISIBILITY_NOT_GUI, STAMUNIT_GOOD_BAD, patmResetStat, patmPrintStat, "Patch statistics",
4360 "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4361#ifndef DEBUG_sandervl
4362 /* Full breakdown for the GUI. */
4363 STAMR3RegisterF(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx], STAMTYPE_RATIO_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_GOOD_BAD, PATMPatchType(pVM, &pPatchRec->patch),
4364 "/PATM/Stats/PatchBD/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4365 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchBlockSize,STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchBlockSize", pPatchRec->patch.pPrivInstrGC);
4366 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchJump, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchJump", pPatchRec->patch.pPrivInstrGC);
4367 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPrivInstr, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPrivInstr", pPatchRec->patch.pPrivInstrGC);
4368 STAMR3RegisterF(pVM, &pPatchRec->patch.cCodeWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cCodeWrites", pPatchRec->patch.pPrivInstrGC);
4369 STAMR3RegisterF(pVM, &pPatchRec->patch.cInvalidWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cInvalidWrites", pPatchRec->patch.pPrivInstrGC);
4370 STAMR3RegisterF(pVM, &pPatchRec->patch.cTraps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cTraps", pPatchRec->patch.pPrivInstrGC);
4371 STAMR3RegisterF(pVM, &pPatchRec->patch.flags, STAMTYPE_X32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/flags", pPatchRec->patch.pPrivInstrGC);
4372 STAMR3RegisterF(pVM, &pPatchRec->patch.nrJumpRecs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrJumpRecs", pPatchRec->patch.pPrivInstrGC);
4373 STAMR3RegisterF(pVM, &pPatchRec->patch.nrFixups, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrFixups", pPatchRec->patch.pPrivInstrGC);
4374 STAMR3RegisterF(pVM, &pPatchRec->patch.opcode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/opcode", pPatchRec->patch.pPrivInstrGC);
4375 STAMR3RegisterF(pVM, &pPatchRec->patch.uOldState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOldState", pPatchRec->patch.pPrivInstrGC);
4376 STAMR3RegisterF(pVM, &pPatchRec->patch.uOpMode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOpMode", pPatchRec->patch.pPrivInstrGC);
4377 /// @todo change the state to be a callback so we can get a state mnemonic instead.
4378 STAMR3RegisterF(pVM, &pPatchRec->patch.uState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uState", pPatchRec->patch.pPrivInstrGC);
4379#endif
4380 }
4381#endif
4382 }
4383 return rc;
4384}
4385
4386/**
4387 * Query instruction size
4388 *
4389 * @returns VBox status code.
4390 * @param pVM The VM to operate on.
4391 * @param pPatch Patch record
4392 * @param pInstrGC Instruction address
4393 */
4394static uint32_t patmGetInstrSize(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
4395{
4396 uint8_t *pInstrHC;
4397
4398 int rc = PGMPhysGCPtr2R3Ptr(VMMGetCpu0(pVM), pInstrGC, (PRTR3PTR)&pInstrHC);
4399 if (rc == VINF_SUCCESS)
4400 {
4401 DISCPUSTATE cpu;
4402 bool disret;
4403 uint32_t opsize;
4404
4405 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
4406 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, NULL, PATMREAD_ORGCODE | PATMREAD_NOCHECK);
4407 if (disret)
4408 return opsize;
4409 }
4410 return 0;
4411}
4412
4413/**
4414 * Add patch to page record
4415 *
4416 * @returns VBox status code.
4417 * @param pVM The VM to operate on.
4418 * @param pPage Page address
4419 * @param pPatch Patch record
4420 */
4421int patmAddPatchToPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4422{
4423 PPATMPATCHPAGE pPatchPage;
4424 int rc;
4425
4426 Log(("patmAddPatchToPage: insert patch %RHv to page %RRv\n", pPatch, pPage));
4427
4428 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4429 if (pPatchPage)
4430 {
4431 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4432 if (pPatchPage->cCount == pPatchPage->cMaxPatches)
4433 {
4434 uint32_t cMaxPatchesOld = pPatchPage->cMaxPatches;
4435 PPATCHINFO *paPatchOld = pPatchPage->aPatch;
4436
4437 pPatchPage->cMaxPatches += PATMPATCHPAGE_PREALLOC_INCREMENT;
4438 rc = MMHyperAlloc(pVM, sizeof(PPATCHINFO)*pPatchPage->cMaxPatches, 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage->aPatch);
4439 if (RT_FAILURE(rc))
4440 {
4441 Log(("Out of memory!!!!\n"));
4442 return VERR_NO_MEMORY;
4443 }
4444 memcpy(pPatchPage->aPatch, paPatchOld, cMaxPatchesOld*sizeof(PPATCHINFO));
4445 MMHyperFree(pVM, paPatchOld);
4446 }
4447 pPatchPage->aPatch[pPatchPage->cCount] = pPatch;
4448 pPatchPage->cCount++;
4449 }
4450 else
4451 {
4452 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHPAGE), 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage);
4453 if (RT_FAILURE(rc))
4454 {
4455 Log(("Out of memory!!!!\n"));
4456 return VERR_NO_MEMORY;
4457 }
4458 pPatchPage->Core.Key = pPage;
4459 pPatchPage->cCount = 1;
4460 pPatchPage->cMaxPatches = PATMPATCHPAGE_PREALLOC_INCREMENT;
4461
4462 rc = MMHyperAlloc(pVM, sizeof(PPATCHINFO)*PATMPATCHPAGE_PREALLOC_INCREMENT, 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage->aPatch);
4463 if (RT_FAILURE(rc))
4464 {
4465 Log(("Out of memory!!!!\n"));
4466 MMHyperFree(pVM, pPatchPage);
4467 return VERR_NO_MEMORY;
4468 }
4469 pPatchPage->aPatch[0] = pPatch;
4470
4471 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, &pPatchPage->Core);
4472 Assert(rc);
4473 pVM->patm.s.cPageRecords++;
4474
4475 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageInserted);
4476 }
4477 CSAMR3MonitorPage(pVM, pPage, CSAM_TAG_PATM);
4478
4479 /* Get the closest guest instruction (from below) */
4480 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4481 Assert(pGuestToPatchRec);
4482 if (pGuestToPatchRec)
4483 {
4484 LogFlow(("patmAddPatchToPage: lowest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pLowestAddrGC));
4485 if ( pPatchPage->pLowestAddrGC == 0
4486 || pPatchPage->pLowestAddrGC > (RTRCPTR)pGuestToPatchRec->Core.Key)
4487 {
4488 RTRCUINTPTR offset;
4489
4490 pPatchPage->pLowestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4491
4492 offset = pPatchPage->pLowestAddrGC & PAGE_OFFSET_MASK;
4493 /* If we're too close to the page boundary, then make sure an instruction from the previous page doesn't cross the boundary itself. */
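            /* E.g. (hypothetical figures): a 6-byte patched instruction starting 3 bytes before this page
             * spills 3 bytes into it, so the code below pulls the lower bound down to the page start. */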
4494 if (offset && offset < MAX_INSTR_SIZE)
4495 {
4496 /* Get the closest guest instruction (from above) */
4497 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage-1, false);
4498
4499 if (pGuestToPatchRec)
4500 {
4501 uint32_t size = patmGetInstrSize(pVM, pPatch, (RTRCPTR)pGuestToPatchRec->Core.Key);
4502 if ((RTRCUINTPTR)pGuestToPatchRec->Core.Key + size > pPage)
4503 {
4504 pPatchPage->pLowestAddrGC = pPage;
4505 LogFlow(("patmAddPatchToPage: new lowest %RRv\n", pPatchPage->pLowestAddrGC));
4506 }
4507 }
4508 }
4509 }
4510 }
4511
4512 /* Get the closest guest instruction (from above) */
4513 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage+PAGE_SIZE-1, false);
4514 Assert(pGuestToPatchRec);
4515 if (pGuestToPatchRec)
4516 {
4517        LogFlow(("patmAddPatchToPage: highest patch page address %RRv current highest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pHighestAddrGC));
4518 if ( pPatchPage->pHighestAddrGC == 0
4519 || pPatchPage->pHighestAddrGC <= (RTRCPTR)pGuestToPatchRec->Core.Key)
4520 {
4521 pPatchPage->pHighestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4522 /* Increase by instruction size. */
4523 uint32_t size = patmGetInstrSize(pVM, pPatch, pPatchPage->pHighestAddrGC);
4524//// Assert(size);
4525 pPatchPage->pHighestAddrGC += size;
4526 LogFlow(("patmAddPatchToPage: new highest %RRv\n", pPatchPage->pHighestAddrGC));
4527 }
4528 }
4529
4530 return VINF_SUCCESS;
4531}
4532
4533/**
4534 * Remove patch from page record
4535 *
4536 * @returns VBox status code.
4537 * @param pVM The VM to operate on.
4538 * @param pPage Page address
4539 * @param pPatch Patch record
4540 */
4541int patmRemovePatchFromPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4542{
4543 PPATMPATCHPAGE pPatchPage;
4544 int rc;
4545
4546 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4547 Assert(pPatchPage);
4548
4549 if (!pPatchPage)
4550 return VERR_INVALID_PARAMETER;
4551
4552 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4553
4554    Log(("patmRemovePatchFromPage: remove patch %RHv from page %RRv\n", pPatch, pPage));
4555 if (pPatchPage->cCount > 1)
4556 {
4557 uint32_t i;
4558
4559 /* Used by multiple patches */
4560 for (i=0;i<pPatchPage->cCount;i++)
4561 {
4562 if (pPatchPage->aPatch[i] == pPatch)
4563 {
4564 pPatchPage->aPatch[i] = 0;
4565 break;
4566 }
4567 }
4568 /* close the gap between the remaining pointers. */
4569 if (i < pPatchPage->cCount - 1)
4570 {
4571 memcpy(&pPatchPage->aPatch[i], &pPatchPage->aPatch[i+1], sizeof(PPATCHINFO)*(pPatchPage->cCount - (i+1)));
4572 }
4573 AssertMsg(i < pPatchPage->cCount, ("Unable to find patch %RHv in page %RRv\n", pPatch, pPage));
4574
4575 pPatchPage->cCount--;
4576 }
4577 else
4578 {
4579 PPATMPATCHPAGE pPatchNode;
4580
4581 Log(("patmRemovePatchFromPage %RRv\n", pPage));
4582
4583 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageRemoved);
4584 pPatchNode = (PPATMPATCHPAGE)RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4585 Assert(pPatchNode && pPatchNode == pPatchPage);
4586
4587 Assert(pPatchPage->aPatch);
4588 rc = MMHyperFree(pVM, pPatchPage->aPatch);
4589 AssertRC(rc);
4590 rc = MMHyperFree(pVM, pPatchPage);
4591 AssertRC(rc);
4592 pVM->patm.s.cPageRecords--;
4593 }
4594 return VINF_SUCCESS;
4595}
4596
4597/**
4598 * Insert page records for all guest pages that contain instructions that were recompiled for this patch
4599 *
4600 * @returns VBox status code.
4601 * @param pVM The VM to operate on.
4602 * @param pPatch Patch record
4603 */
4604int patmInsertPatchPages(PVM pVM, PPATCHINFO pPatch)
4605{
4606 int rc;
4607 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4608
4609 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4610 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4611 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4612
4613 /** @todo optimize better (large gaps between current and next used page) */
4614 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4615 {
4616 /* Get the closest guest instruction (from above) */
4617 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4618 if ( pGuestToPatchRec
4619 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage)
4620 )
4621 {
4622 /* Code in page really patched -> add record */
4623 rc = patmAddPatchToPage(pVM, pPage, pPatch);
4624 AssertRC(rc);
4625 }
4626 }
4627 pPatch->flags |= PATMFL_CODE_MONITORED;
4628 return VINF_SUCCESS;
4629}
4630
4631/**
4632 * Remove page records for all guest pages that contain instructions that were recompiled for this patch
4633 *
4634 * @returns VBox status code.
4635 * @param pVM The VM to operate on.
4636 * @param pPatch Patch record
4637 */
4638int patmRemovePatchPages(PVM pVM, PPATCHINFO pPatch)
4639{
4640 int rc;
4641 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4642
4643    /* Walk the range of guest pages that contain patched instructions and remove their page records. */
4644 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4645 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4646
4647 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4648 {
4649 /* Get the closest guest instruction (from above) */
4650 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4651 if ( pGuestToPatchRec
4652 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage) /** @todo bird: PAGE_ADDRESS is for the current context really. check out these. */
4653 )
4654 {
4655 /* Code in page really patched -> remove record */
4656 rc = patmRemovePatchFromPage(pVM, pPage, pPatch);
4657 AssertRC(rc);
4658 }
4659 }
4660 pPatch->flags &= ~PATMFL_CODE_MONITORED;
4661 return VINF_SUCCESS;
4662}
4663
4664/**
4665 * Notifies PATM about a (potential) write to code that has been patched.
4666 *
4667 * @returns VBox status code.
4668 * @param pVM The VM to operate on.
4669 * @param GCPtr GC pointer to write address
4670 * @param cbWrite Number of bytes to write
4671 *
4672 */
4673VMMR3DECL(int) PATMR3PatchWrite(PVM pVM, RTRCPTR GCPtr, uint32_t cbWrite)
4674{
4675 RTRCUINTPTR pWritePageStart, pWritePageEnd, pPage;
4676
4677 Log(("PATMR3PatchWrite %RRv %x\n", GCPtr, cbWrite));
4678
4679 Assert(VM_IS_EMT(pVM));
4680
4681 /* Quick boundary check */
4682 if ( GCPtr < pVM->patm.s.pPatchedInstrGCLowest
4683 || GCPtr > pVM->patm.s.pPatchedInstrGCHighest
4684 )
4685 return VINF_SUCCESS;
4686
4687 STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWrite, a);
4688
4689 pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
4690 pWritePageEnd = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;
4691
4692 for (pPage = pWritePageStart; pPage <= pWritePageEnd; pPage += PAGE_SIZE)
4693 {
4694loop_start:
4695 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4696 if (pPatchPage)
4697 {
4698 uint32_t i;
4699 bool fValidPatchWrite = false;
4700
4701 /* Quick check to see if the write is in the patched part of the page */
4702 if ( pPatchPage->pLowestAddrGC > (RTRCPTR)((RTRCUINTPTR)GCPtr + cbWrite - 1)
4703 || pPatchPage->pHighestAddrGC < GCPtr)
4704 {
4705 break;
4706 }
4707
4708 for (i=0;i<pPatchPage->cCount;i++)
4709 {
4710 if (pPatchPage->aPatch[i])
4711 {
4712 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4713 RTRCPTR pPatchInstrGC;
4714 //unused: bool fForceBreak = false;
4715
4716 Assert(pPatchPage->aPatch[i]->flags & PATMFL_CODE_MONITORED);
4717 /** @todo inefficient and includes redundant checks for multiple pages. */
4718 for (uint32_t j=0; j<cbWrite; j++)
4719 {
4720 RTRCPTR pGuestPtrGC = (RTRCPTR)((RTRCUINTPTR)GCPtr + j);
4721
4722 if ( pPatch->cbPatchJump
4723 && pGuestPtrGC >= pPatch->pPrivInstrGC
4724 && pGuestPtrGC < pPatch->pPrivInstrGC + pPatch->cbPatchJump)
4725 {
4726 /* The guest is about to overwrite the 5 byte jump to patch code. Remove the patch. */
4727 Log(("PATMR3PatchWrite: overwriting jump to patch code -> remove patch.\n"));
4728 int rc = PATMR3RemovePatch(pVM, pPatch->pPrivInstrGC);
4729 if (rc == VINF_SUCCESS)
4730 /** @note jump back to the start as the pPatchPage has been deleted or changed */
4731 goto loop_start;
4732
4733 continue;
4734 }
4735
4736 /* Find the closest instruction from below; the above quick check ensured that we are indeed in patched code */
4737 pPatchInstrGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4738 if (!pPatchInstrGC)
4739 {
4740 RTRCPTR pClosestInstrGC;
4741 uint32_t size;
4742
4743 pPatchInstrGC = patmGuestGCPtrToClosestPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4744 if (pPatchInstrGC)
4745 {
4746 pClosestInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchInstrGC);
4747 Assert(pClosestInstrGC <= pGuestPtrGC);
4748 size = patmGetInstrSize(pVM, pPatch, pClosestInstrGC);
4749 /* Check if this is not a write into a gap between two patches */
4750 if (pClosestInstrGC + size - 1 < pGuestPtrGC)
4751 pPatchInstrGC = 0;
4752 }
4753 }
4754 if (pPatchInstrGC)
4755 {
4756 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
4757
4758 fValidPatchWrite = true;
4759
4760 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
4761 Assert(pPatchToGuestRec);
4762 if (pPatchToGuestRec && !pPatchToGuestRec->fDirty)
4763 {
4764 Log(("PATMR3PatchWrite: Found patched instruction %RRv -> %RRv\n", pGuestPtrGC, pPatchInstrGC));
4765
4766 if (++pPatch->cCodeWrites > PATM_MAX_CODE_WRITES)
4767 {
4768 LogRel(("PATM: Disable block at %RRv - write %RRv-%RRv\n", pPatch->pPrivInstrGC, pGuestPtrGC, pGuestPtrGC+cbWrite));
4769
4770 PATMR3MarkDirtyPatch(pVM, pPatch);
4771
4772 /** @note jump back to the start as the pPatchPage has been deleted or changed */
4773 goto loop_start;
4774 }
4775 else
4776 {
4777 /* Replace the patch instruction with a breakpoint; when it's hit, then we'll attempt to recompile the instruction again. */
4778 uint8_t *pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pPatchInstrGC);
4779
4780 pPatchToGuestRec->u8DirtyOpcode = *pInstrHC;
4781 pPatchToGuestRec->fDirty = true;
4782
4783 *pInstrHC = 0xCC;
4784
4785 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirty);
4786 }
4787 }
4788 /* else already marked dirty */
4789 }
4790 }
4791 }
4792 } /* for each patch */
4793
4794 if (fValidPatchWrite == false)
4795 {
4796 /* Write to a part of the page that either:
4797 * - doesn't contain any code (shared code/data); rather unlikely
4798 * - old code page that's no longer in active use.
4799 */
4800invalid_write_loop_start:
4801 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4802
4803 if (pPatchPage)
4804 {
4805 for (i=0;i<pPatchPage->cCount;i++)
4806 {
4807 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4808
4809 if (pPatch->cInvalidWrites > PATM_MAX_INVALID_WRITES)
4810 {
4811 /** @note possibly dangerous assumption that all future writes will be harmless. */
4812 if (pPatch->flags & PATMFL_IDTHANDLER)
4813 {
4814 LogRel(("PATM: Stop monitoring IDT handler pages at %RRv - invalid write %RRv-%RRv (this is not a fatal error)\n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
4815
4816 Assert(pPatch->flags & PATMFL_CODE_MONITORED);
4817 int rc = patmRemovePatchPages(pVM, pPatch);
4818 AssertRC(rc);
4819 }
4820 else
4821 {
4822 LogRel(("PATM: Disable block at %RRv - invalid write %RRv-%RRv \n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
4823 PATMR3MarkDirtyPatch(pVM, pPatch);
4824 }
4825 /** @note jump back to the start as the pPatchPage has been deleted or changed */
4826 goto invalid_write_loop_start;
4827 }
4828 } /* for */
4829 }
4830 }
4831 }
4832 }
4833 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWrite, a);
4834 return VINF_SUCCESS;
4835
4836}
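
/*
 * Illustrative sketch (not part of the original source): how a write handler
 * running on the EMT might report a guest write so that PATMR3PatchWrite above
 * can react to self-modifying code. The helper name and caller are hypothetical.
 */
#if 0 /* example only, never compiled */
static int exampleNotifyPatchWrite(PVM pVM, RTRCPTR GCPtrWrite, uint32_t cbWrite)
{
    /* Cheap to call: PATMR3PatchWrite returns immediately when the write lies
       outside the [lowest, highest] range of patched instructions. */
    return PATMR3PatchWrite(pVM, GCPtrWrite, cbWrite);
}
#endif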
4837
4838/**
4839 * Disable all patches in a flushed page
4840 *
4841 * @returns VBox status code
4842 * @param pVM The VM to operate on.
4843 * @param addr GC address of the page to flush
4844 */
4845/** @note Currently only called by CSAMR3FlushPage; optimization to avoid having to double check if the physical address has changed
4846 */
4847VMMR3DECL(int) PATMR3FlushPage(PVM pVM, RTRCPTR addr)
4848{
4849 addr &= PAGE_BASE_GC_MASK;
4850
4851 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, addr);
4852 if (pPatchPage)
4853 {
4854 int i;
4855
4856 /* From top to bottom as the array is modified by PATMR3MarkDirtyPatch. */
4857 for (i=(int)pPatchPage->cCount-1;i>=0;i--)
4858 {
4859 if (pPatchPage->aPatch[i])
4860 {
4861 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4862
4863 Log(("PATMR3FlushPage %RRv remove patch at %RRv\n", addr, pPatch->pPrivInstrGC));
4864 PATMR3MarkDirtyPatch(pVM, pPatch);
4865 }
4866 }
4867 STAM_COUNTER_INC(&pVM->patm.s.StatFlushed);
4868 }
4869 return VINF_SUCCESS;
4870}
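
/*
 * Illustrative sketch (not part of the original source): per the note above, a
 * page flush (e.g. from CSAM) simply marks every patch on that page as dirty.
 * The helper name is hypothetical; PATMR3FlushPage is the function above.
 */
#if 0 /* example only, never compiled */
static void exampleFlushGuestCodePage(PVM pVM, RTRCPTR GCPtrPage)
{
    int rc = PATMR3FlushPage(pVM, GCPtrPage); /* the page offset is masked off inside */
    AssertRC(rc);
}
#endif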
4871
4872/**
4873 * Checks if the instruction at the specified address has already been patched.
4874 *
4875 * @returns boolean, patched or not
4876 * @param pVM The VM to operate on.
4877 * @param pInstrGC Guest context pointer to instruction
4878 */
4879VMMR3DECL(bool) PATMR3HasBeenPatched(PVM pVM, RTRCPTR pInstrGC)
4880{
4881 PPATMPATCHREC pPatchRec;
4882 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4883 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
4884 return true;
4885 return false;
4886}
4887
4888/**
4889 * Query the opcode of the original code that was overwritten by the 5-byte patch jump.
4890 *
4891 * @returns VBox status code.
4892 * @param pVM The VM to operate on.
4893 * @param pInstrGC GC address of the instruction
4894 * @param pByte opcode byte pointer (OUT)
4895 *
4896 */
4897VMMR3DECL(int) PATMR3QueryOpcode(PVM pVM, RTRCPTR pInstrGC, uint8_t *pByte)
4898{
4899 PPATMPATCHREC pPatchRec;
4900
4901 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
4902
4903 /* Shortcut. */
4904 if ( !PATMIsEnabled(pVM)
4905 || pInstrGC < pVM->patm.s.pPatchedInstrGCLowest
4906 || pInstrGC > pVM->patm.s.pPatchedInstrGCHighest)
4907 {
4908 return VERR_PATCH_NOT_FOUND;
4909 }
4910
4911 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
4912 // if the patch is enabled and the pointer lies within 5 bytes of this priv instr ptr, then we've got a hit!
4913 if ( pPatchRec
4914 && pPatchRec->patch.uState == PATCH_ENABLED
4915 && pInstrGC >= pPatchRec->patch.pPrivInstrGC
4916 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
4917 {
4918 RTRCPTR offset = pInstrGC - pPatchRec->patch.pPrivInstrGC;
4919 *pByte = pPatchRec->patch.aPrivInstr[offset];
4920
4921 if (pPatchRec->patch.cbPatchJump == 1)
4922 {
4923 Log(("PATMR3QueryOpcode: returning opcode %2X for instruction at %RRv\n", *pByte, pInstrGC));
4924 }
4925 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
4926 return VINF_SUCCESS;
4927 }
4928 return VERR_PATCH_NOT_FOUND;
4929}
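
/*
 * Illustrative sketch (not part of the original source): reading back the
 * original opcode byte that the 5-byte patch jump replaced, e.g. for a
 * disassembler that wants to show unpatched guest code. The helper name is
 * hypothetical; the two PATM APIs are the ones defined above.
 */
#if 0 /* example only, never compiled */
static uint8_t examplePeekOriginalByte(PVM pVM, RTRCPTR pInstrGC, uint8_t bRaw)
{
    uint8_t bOrg;
    if (    PATMR3HasBeenPatched(pVM, pInstrGC)
        &&  RT_SUCCESS(PATMR3QueryOpcode(pVM, pInstrGC, &bOrg)))
        return bOrg;    /* original guest byte hidden by the patch jump */
    return bRaw;        /* address not covered by a patch jump */
}
#endif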
4930
4931/**
4932 * Disable the patch for the privileged instruction at the specified location.
4933 *
4934 * @returns VBox status code.
4935 * @param pVM The VM to operate on.
4936 * @param pInstrGC Guest context pointer to the privileged instruction
4937 *
4938 * @note Returns failure if the patch could not be found or disabled.
4939 *
4940 */
4941VMMR3DECL(int) PATMR3DisablePatch(PVM pVM, RTRCPTR pInstrGC)
4942{
4943 PPATMPATCHREC pPatchRec;
4944 PPATCHINFO pPatch;
4945
4946 Log(("PATMR3DisablePatch: %RRv\n", pInstrGC));
4947 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4948 if (pPatchRec)
4949 {
4950 int rc = VINF_SUCCESS;
4951
4952 pPatch = &pPatchRec->patch;
4953
4954 /* Already disabled? */
4955 if (pPatch->uState == PATCH_DISABLED)
4956 return VINF_SUCCESS;
4957
4958 /* Clear the IDT entries for the patch we're disabling. */
4959 /** @note very important as we clear IF in the patch itself */
4960 /** @todo this needs to be changed */
4961 if (pPatch->flags & PATMFL_IDTHANDLER)
4962 {
4963 uint32_t iGate;
4964
4965 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
4966 if (iGate != (uint32_t)~0)
4967 {
4968 TRPMR3SetGuestTrapHandler(pVM, iGate, TRPM_INVALID_HANDLER);
4969 if (++cIDTHandlersDisabled < 256)
4970 LogRel(("PATM: Disabling IDT %x patch handler %RRv\n", iGate, pInstrGC));
4971 }
4972 }
4973
4974        /* Mark the entry with a breakpoint in case somebody else calls it later on (cli patch used as a function; function, trampoline or IDT patches). */
4975 if ( pPatch->pPatchBlockOffset
4976 && pPatch->uState == PATCH_ENABLED)
4977 {
4978 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
4979 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
4980 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
4981 }
4982
4983 /* IDT or function patches haven't changed any guest code. */
4984 if (pPatch->flags & PATMFL_PATCHED_GUEST_CODE)
4985 {
4986 Assert(pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP);
4987 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK)));
4988
4989 if (pPatch->uState != PATCH_REFUSED)
4990 {
4991 AssertMsg(pPatch->pPrivInstrHC, ("Invalid HC pointer?!? (%RRv)\n", pInstrGC));
4992 Assert(pPatch->cbPatchJump);
4993
4994 /** pPrivInstrHC is probably not valid anymore */
4995 rc = PGMPhysGCPtr2R3Ptr(VMMGetCpu0(pVM), pPatchRec->patch.pPrivInstrGC, (PRTR3PTR)&pPatchRec->patch.pPrivInstrHC);
4996 if (rc == VINF_SUCCESS)
4997 {
4998 uint8_t temp[16];
4999
5000 Assert(pPatch->cbPatchJump < sizeof(temp));
5001
5002 /* Let's first check if the guest code is still the same. */
5003 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5004 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5005 if (rc == VINF_SUCCESS)
5006 {
5007 RTRCINTPTR displ = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);
5008
5009 if ( temp[0] != 0xE9 /* jmp opcode */
5010 || *(RTRCINTPTR *)(&temp[1]) != displ
5011 )
5012 {
5013                        Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5014 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5015 /* Remove it completely */
5016 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5017 rc = PATMR3RemovePatch(pVM, pInstrGC);
5018 AssertRC(rc);
5019 return VWRN_PATCH_REMOVED;
5020 }
5021 }
5022 patmRemoveJumpToPatch(pVM, pPatch);
5023
5024 }
5025 else
5026 {
5027 Log(("PATMR3DisablePatch: unable to disable patch -> mark PATCH_DISABLE_PENDING\n"));
5028 pPatch->uState = PATCH_DISABLE_PENDING;
5029 }
5030 }
5031 else
5032 {
5033 AssertMsgFailed(("Patch was refused!\n"));
5034 return VERR_PATCH_ALREADY_DISABLED;
5035 }
5036 }
5037 else
5038 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5039 {
5040 uint8_t temp[16];
5041
5042 Assert(pPatch->cbPatchJump < sizeof(temp));
5043
5044 /* Let's first check if the guest code is still the same. */
5045 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5046 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5047 if (rc == VINF_SUCCESS)
5048 {
5049 if (temp[0] != 0xCC)
5050 {
5051                    Log(("PATMR3DisablePatch: Can't disable a patch whose guest code has changed!!\n"));
5052 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5053 /* Remove it completely */
5054 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5055 rc = PATMR3RemovePatch(pVM, pInstrGC);
5056 AssertRC(rc);
5057 return VWRN_PATCH_REMOVED;
5058 }
5059 patmDeactivateInt3Patch(pVM, pPatch);
5060 }
5061 }
5062
5063 if (rc == VINF_SUCCESS)
5064 {
5065 /* Save old state and mark this one as disabled (so it can be enabled later on). */
5066 if (pPatch->uState == PATCH_DISABLE_PENDING)
5067 {
5068 /* Just to be safe, let's make sure this one can never be reused; the patch might be marked dirty already (int3 at start) */
5069 pPatch->uState = PATCH_UNUSABLE;
5070 }
5071 else
5072 if (pPatch->uState != PATCH_DIRTY)
5073 {
5074 pPatch->uOldState = pPatch->uState;
5075 pPatch->uState = PATCH_DISABLED;
5076 }
5077 STAM_COUNTER_ADD(&pVM->patm.s.StatDisabled, 1);
5078 }
5079
5080 Log(("PATMR3DisablePatch: disabled patch at %RRv\n", pInstrGC));
5081 return VINF_SUCCESS;
5082 }
5083 Log(("Patch not found!\n"));
5084 return VERR_PATCH_NOT_FOUND;
5085}
5086
5087/**
5088 * Permanently disable patch for privileged instruction at specified location
5089 *
5090 * @returns VBox status code.
5091 * @param pVM The VM to operate on.
5092 * @param pInstrGC Guest context instruction pointer
5093 * @param pConflictAddr Guest context pointer which conflicts with specified patch
5094 * @param pConflictPatch Conflicting patch
5095 *
5096 */
5097static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pConflictPatch)
5098{
5099#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
5100 PATCHINFO patch;
5101 DISCPUSTATE cpu;
5102 R3PTRTYPE(uint8_t *) pInstrHC;
5103 uint32_t opsize;
5104 bool disret;
5105 int rc;
5106
5107 RT_ZERO(patch);
5108 pInstrHC = PATMGCVirtToHCVirt(pVM, &patch, pInstrGC);
5109 cpu.mode = (pConflictPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
5110 disret = PATMR3DISInstr(pVM, &patch, &cpu, pInstrGC, pInstrHC, &opsize, NULL);
5111 /*
5112 * If it's a 5 byte relative jump, then we can work around the problem by replacing the 32 bits relative offset
5113 * with one that jumps right into the conflict patch.
5114 * Otherwise we must disable the conflicting patch to avoid serious problems.
5115 */
5116 if ( disret == true
5117 && (pConflictPatch->flags & PATMFL_CODE32)
5118 && (cpu.pCurInstr->opcode == OP_JMP || (cpu.pCurInstr->optype & OPTYPE_COND_CONTROLFLOW))
5119 && (cpu.param1.flags & USE_IMMEDIATE32_REL))
5120 {
5121 /* Hint patches must be enabled first. */
5122 if (pConflictPatch->flags & PATMFL_INSTR_HINT)
5123 {
5124 Log(("Enabling HINTED patch %RRv\n", pConflictPatch->pPrivInstrGC));
5125 pConflictPatch->flags &= ~PATMFL_INSTR_HINT;
5126 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5127 Assert(rc == VINF_SUCCESS || rc == VERR_PATCH_NOT_FOUND);
5128 /* Enabling might fail if the patched code has changed in the meantime. */
5129 if (rc != VINF_SUCCESS)
5130 return rc;
5131 }
5132
5133 rc = PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_JUMP_CONFLICT);
5134 if (RT_SUCCESS(rc))
5135 {
5136 Log(("PATM -> CONFLICT: Installed JMP patch for patch conflict at %RRv\n", pInstrGC));
5137 STAM_COUNTER_INC(&pVM->patm.s.StatFixedConflicts);
5138 return VINF_SUCCESS;
5139 }
5140 }
5141#endif
5142
5143 if (pConflictPatch->opcode == OP_CLI)
5144 {
5145 /* Turn it into an int3 patch; our GC trap handler will call the generated code manually. */
5146 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> turn into int 3 patch!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5147 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5148 if (rc == VWRN_PATCH_REMOVED)
5149 return VINF_SUCCESS;
5150 if (RT_SUCCESS(rc))
5151 {
5152 pConflictPatch->flags &= ~(PATMFL_MUST_INSTALL_PATCHJMP|PATMFL_INSTR_HINT);
5153 pConflictPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
5154 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5155 if (rc == VERR_PATCH_NOT_FOUND)
5156 return VINF_SUCCESS; /* removed already */
5157
5158 AssertRC(rc);
5159 if (RT_SUCCESS(rc))
5160 {
5161 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
5162 return VINF_SUCCESS;
5163 }
5164 }
5165 /* else turned into unusable patch (see below) */
5166 }
5167 else
5168 {
5169 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> DISABLING it!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5170 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5171 if (rc == VWRN_PATCH_REMOVED)
5172 return VINF_SUCCESS;
5173 }
5174
5175 /* No need to monitor the code anymore. */
5176 if (pConflictPatch->flags & PATMFL_CODE_MONITORED)
5177 {
5178 int rc = patmRemovePatchPages(pVM, pConflictPatch);
5179 AssertRC(rc);
5180 }
5181 pConflictPatch->uState = PATCH_UNUSABLE;
5182 STAM_COUNTER_INC(&pVM->patm.s.StatUnusable);
5183 return VERR_PATCH_DISABLED;
5184}
5185
5186/**
5187 * Enable the patch for the privileged instruction at the specified location.
5188 *
5189 * @returns VBox status code.
5190 * @param pVM The VM to operate on.
5191 * @param pInstrGC Guest context pointer to the privileged instruction
5192 *
5193 * @note returns failure if patching is not allowed or possible
5194 *
5195 */
5196VMMR3DECL(int) PATMR3EnablePatch(PVM pVM, RTRCPTR pInstrGC)
5197{
5198 PPATMPATCHREC pPatchRec;
5199 PPATCHINFO pPatch;
5200
5201 Log(("PATMR3EnablePatch %RRv\n", pInstrGC));
5202 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5203 if (pPatchRec)
5204 {
5205 int rc = VINF_SUCCESS;
5206
5207 pPatch = &pPatchRec->patch;
5208
5209 if (pPatch->uState == PATCH_DISABLED)
5210 {
5211 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
5212 {
5213 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
5214 /** @todo -> pPrivInstrHC is probably not valid anymore */
5215 rc = PGMPhysGCPtr2R3Ptr(VMMGetCpu0(pVM), pPatchRec->patch.pPrivInstrGC, (PRTR3PTR)&pPatchRec->patch.pPrivInstrHC);
5216 if (rc == VINF_SUCCESS)
5217 {
5218#ifdef DEBUG
5219 DISCPUSTATE cpu;
5220 char szOutput[256];
5221 uint32_t opsize, i = 0;
5222#endif
5223 uint8_t temp[16];
5224
5225 Assert(pPatch->cbPatchJump < sizeof(temp));
5226
5227 // let's first check if the guest code is still the same
5228 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5229 AssertRC(rc2);
5230
5231 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5232 {
5233                        Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5234 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5235 /* Remove it completely */
5236 rc = PATMR3RemovePatch(pVM, pInstrGC);
5237 AssertRC(rc);
5238 return VERR_PATCH_NOT_FOUND;
5239 }
5240
5241 rc2 = patmGenJumpToPatch(pVM, pPatch, false);
5242 AssertRC(rc2);
5243 if (RT_FAILURE(rc2))
5244 return rc2;
5245
5246#ifdef DEBUG
5247 bool disret;
5248 i = 0;
5249 while(i < pPatch->cbPatchJump)
5250 {
5251 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
5252 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, &pPatch->pPrivInstrHC[i], &opsize, szOutput);
5253 Log(("Renewed patch instr: %s", szOutput));
5254 i += opsize;
5255 }
5256#endif
5257 }
5258 }
5259 else
5260 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5261 {
5262 uint8_t temp[16];
5263
5264 Assert(pPatch->cbPatchJump < sizeof(temp));
5265
5266 /* Let's first check if the guest code is still the same. */
5267 int rc2 = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5268 AssertRC(rc2);
5269
5270 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5271 {
5272                    Log(("PATMR3EnablePatch: Can't enable a patch whose guest code has changed!!\n"));
5273 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5274 rc = PATMR3RemovePatch(pVM, pInstrGC);
5275 AssertRC(rc);
5276 return VERR_PATCH_NOT_FOUND;
5277 }
5278
5279 rc2 = patmActivateInt3Patch(pVM, pPatch);
5280 if (RT_FAILURE(rc2))
5281 return rc2;
5282 }
5283
5284 pPatch->uState = pPatch->uOldState; //restore state
5285
5286 /* Restore the entry breakpoint with the original opcode (see PATMR3DisablePatch). */
5287 if (pPatch->pPatchBlockOffset)
5288 {
5289 *PATCHCODE_PTR_HC(pPatch) = pPatch->bDirtyOpcode;
5290 }
5291
5292 STAM_COUNTER_ADD(&pVM->patm.s.StatEnabled, 1);
5293 }
5294 else
5295 Log(("PATMR3EnablePatch: Unable to enable patch %RRv with state %d\n", pInstrGC, pPatch->uState));
5296
5297 return rc;
5298 }
5299 return VERR_PATCH_NOT_FOUND;
5300}
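
/*
 * Illustrative sketch (not part of the original source): a disable/enable round
 * trip using the two APIs above, including the VWRN_PATCH_REMOVED case where
 * disabling had to remove the patch because the guest code changed underneath
 * it. The helper name is hypothetical.
 */
#if 0 /* example only, never compiled */
static int exampleToggleWhileEditing(PVM pVM, RTRCPTR pInstrGC)
{
    int rc = PATMR3DisablePatch(pVM, pInstrGC);
    if (rc == VWRN_PATCH_REMOVED)
        return VINF_SUCCESS;        /* nothing left to re-enable */
    AssertRCReturn(rc, rc);

    /* ... temporary work on the guest page would go here ... */

    return PATMR3EnablePatch(pVM, pInstrGC);
}
#endif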
5301
5302/**
5303 * Remove patch for privileged instruction at specified location
5304 *
5305 * @returns VBox status code.
5306 * @param pVM The VM to operate on.
5307 * @param pPatchRec Patch record
5308 * @param fForceRemove Force removal of patches that are otherwise protected (duplicated/referenced functions)
5309 */
5310int PATMRemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove)
5311{
5312 PPATCHINFO pPatch;
5313
5314 pPatch = &pPatchRec->patch;
5315
5316 /* Strictly forbidden to remove such patches. There can be dependencies!! */
5317 if (!fForceRemove && (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED)))
5318 {
5319 Log(("PATMRemovePatch %RRv REFUSED!\n", pPatch->pPrivInstrGC));
5320 return VERR_ACCESS_DENIED;
5321 }
5322 Log(("PATMRemovePatch %RRv\n", pPatch->pPrivInstrGC));
5323
5324 /** @note NEVER EVER REUSE PATCH MEMORY */
5325 /** @note PATMR3DisablePatch put a breakpoint (0xCC) at the entry of this patch */
5326
5327 if (pPatchRec->patch.pPatchBlockOffset)
5328 {
5329 PAVLOU32NODECORE pNode;
5330
5331 pNode = RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->patch.pPatchBlockOffset);
5332 Assert(pNode);
5333 }
5334
5335 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
5336 {
5337 int rc = patmRemovePatchPages(pVM, &pPatchRec->patch);
5338 AssertRC(rc);
5339 }
5340
5341#ifdef VBOX_WITH_STATISTICS
5342 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5343 {
5344 STAMR3Deregister(pVM, &pPatchRec->patch);
5345#ifndef DEBUG_sandervl
5346 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5347 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5348 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5349 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5350 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5351 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5352 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5353 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5354 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5355 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5356 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5357 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5358 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5359 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5360#endif
5361 }
5362#endif
5363
5364 /** @note no need to free Guest2PatchAddrTree as those records share memory with Patch2GuestAddrTree records. */
5365 patmEmptyTreeU32(pVM, &pPatch->Patch2GuestAddrTree);
5366 pPatch->nrPatch2GuestRecs = 0;
5367 Assert(pPatch->Patch2GuestAddrTree == 0);
5368
5369 patmEmptyTree(pVM, &pPatch->FixupTree);
5370 pPatch->nrFixups = 0;
5371 Assert(pPatch->FixupTree == 0);
5372
5373 if (pPatchRec->patch.pTempInfo)
5374 MMR3HeapFree(pPatchRec->patch.pTempInfo);
5375
5376 /** @note might fail, because it has already been removed (e.g. during reset). */
5377 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5378
5379 /* Free the patch record */
5380 MMHyperFree(pVM, pPatchRec);
5381 return VINF_SUCCESS;
5382}
5383
5384/**
5385 * Attempt to refresh the patch by recompiling its entire code block
5386 *
5387 * @returns VBox status code.
5388 * @param pVM The VM to operate on.
5389 * @param pPatchRec Patch record
5390 */
5391int patmR3RefreshPatch(PVM pVM, PPATMPATCHREC pPatchRec)
5392{
5393 PPATCHINFO pPatch;
5394 int rc;
5395 RTRCPTR pInstrGC = pPatchRec->patch.pPrivInstrGC;
5396
5397 Log(("patmR3RefreshPatch: attempt to refresh patch at %RRv\n", pInstrGC));
5398
5399 pPatch = &pPatchRec->patch;
5400 AssertReturn(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER), VERR_PATCHING_REFUSED);
5401 if (pPatch->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
5402 {
5403 Log(("patmR3RefreshPatch: refused because external jumps to this patch exist\n"));
5404 return VERR_PATCHING_REFUSED;
5405 }
5406
5407 /** Note: quite ugly to enable/disable/remove/insert old and new patches, but there's no easy way around it. */
5408
5409 rc = PATMR3DisablePatch(pVM, pInstrGC);
5410 AssertRC(rc);
5411
5412 /** Kick it out of the lookup tree to make sure PATMR3InstallPatch doesn't fail (hack alert) */
5413 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5414#ifdef VBOX_WITH_STATISTICS
5415 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5416 {
5417 STAMR3Deregister(pVM, &pPatchRec->patch);
5418#ifndef DEBUG_sandervl
5419 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5420 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5421 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5422 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5423 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5424 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5425 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5426 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5427 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5428 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5429 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5430 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5431 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5432 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5433#endif
5434 }
5435#endif
5436
5437 /** Note: We don't attempt to reuse patch memory here as it's quite common that the new code block requires more memory. */
5438
5439 /* Attempt to install a new patch. */
5440 rc = PATMR3InstallPatch(pVM, pInstrGC, pPatch->flags & (PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT));
5441 if (RT_SUCCESS(rc))
5442 {
5443 RTRCPTR pPatchTargetGC;
5444 PPATMPATCHREC pNewPatchRec;
5445
5446 /* Determine target address in new patch */
5447 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pInstrGC);
5448 Assert(pPatchTargetGC);
5449 if (!pPatchTargetGC)
5450 {
5451 rc = VERR_PATCHING_REFUSED;
5452 goto failure;
5453 }
5454
5455 /* Reset offset into patch memory to put the next code blocks right at the beginning. */
5456 pPatch->uCurPatchOffset = 0;
5457
5458 /* insert jump to new patch in old patch block */
5459 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC, false /* no lookup record */);
5460 if (RT_FAILURE(rc))
5461 goto failure;
5462
5463 pNewPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5464 Assert(pNewPatchRec); /* can't fail */
5465
5466 /* Remove old patch (only do that when everything is finished) */
5467 int rc2 = PATMRemovePatch(pVM, pPatchRec, true /* force removal */);
5468 AssertRC(rc2);
5469
5470 /* Put the new patch back into the tree, because removing the old one kicked this one out. (hack alert) */
5471 RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pNewPatchRec->Core);
5472
5473        LogRel(("PATM: patmR3RefreshPatch: successfully refreshed patch at %RRv\n", pInstrGC));
5474 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshSuccess);
5475
5476 /* Used by another patch, so don't remove it! */
5477 pNewPatchRec->patch.flags |= PATMFL_CODE_REFERENCED;
5478 }
5479
5480failure:
5481 if (RT_FAILURE(rc))
5482 {
5483        LogRel(("PATM: patmR3RefreshPatch: failed to refresh patch at %RRv. Reactivating the old one.\n", pInstrGC));
5484
5485 /* Remove the new inactive patch */
5486 rc = PATMR3RemovePatch(pVM, pInstrGC);
5487 AssertRC(rc);
5488
5489 /* Put the old patch back into the tree (or else it won't be saved) (hack alert) */
5490 RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
5491
5492 /* Enable again in case the dirty instruction is near the end and there are safe code paths. */
5493 int rc2 = PATMR3EnablePatch(pVM, pInstrGC);
5494 AssertRC(rc2);
5495
5496 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshFailed);
5497 }
5498 return rc;
5499}
5500
5501/**
5502 * Find the patch for the privileged instruction at the specified location.
5503 *
5504 * @returns Patch structure pointer if found; else NULL
5505 * @param pVM The VM to operate on.
5506 * @param pInstrGC Guest context pointer to an instruction that might lie within 5 bytes of an existing patch jump
5507 * @param fIncludeHints Include hinted patches or not
5508 *
5509 */
5510PPATCHINFO PATMFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncludeHints)
5511{
5512 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5513    /* If the patch is enabled, the pointer is not identical to the privileged instruction ptr and it lies within 5 bytes of this priv instr ptr, then we've got a hit! */
5514 if (pPatchRec)
5515 {
5516 if ( pPatchRec->patch.uState == PATCH_ENABLED
5517 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE)
5518 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5519 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5520 {
5521 Log(("Found active patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5522 return &pPatchRec->patch;
5523 }
5524 else
5525 if ( fIncludeHints
5526 && pPatchRec->patch.uState == PATCH_DISABLED
5527 && (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
5528 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5529 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5530 {
5531 Log(("Found HINT patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5532 return &pPatchRec->patch;
5533 }
5534 }
5535 return NULL;
5536}
5537
5538/**
5539 * Checks whether the GC address is inside a generated patch jump
5540 *
5541 * @returns true -> yes, false -> no
5542 * @param pVM The VM to operate on.
5543 * @param pAddr Guest context address
5544 * @param pPatchAddr Guest context patch address (if true)
5545 */
5546VMMR3DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatchAddr)
5547{
5548 RTRCPTR addr;
5549 PPATCHINFO pPatch;
5550
5551 if (PATMIsEnabled(pVM) == false)
5552 return false;
5553
5554 if (pPatchAddr == NULL)
5555 pPatchAddr = &addr;
5556
5557 *pPatchAddr = 0;
5558
5559 pPatch = PATMFindActivePatchByEntrypoint(pVM, pAddr);
5560 if (pPatch)
5561 {
5562 *pPatchAddr = pPatch->pPrivInstrGC;
5563 }
5564 return *pPatchAddr == 0 ? false : true;
5565}
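
/*
 * Illustrative sketch (not part of the original source): checking whether a
 * faulting EIP lies inside a generated 5-byte patch jump and, if so, rewinding
 * to the start of the overwritten privileged instruction. The helper name is
 * hypothetical; PATMR3IsInsidePatchJump is the function above.
 */
#if 0 /* example only, never compiled */
static RTRCPTR exampleRewindIntoPatchJump(PVM pVM, RTRCPTR pFaultGC)
{
    RTGCPTR32 GCPtrPatchedInstr;
    if (PATMR3IsInsidePatchJump(pVM, pFaultGC, &GCPtrPatchedInstr))
        return (RTRCPTR)GCPtrPatchedInstr;  /* start of the patched privileged instruction */
    return pFaultGC;                        /* not inside a patch jump */
}
#endif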
5566
5567/**
5568 * Remove patch for privileged instruction at specified location
5569 *
5570 * @returns VBox status code.
5571 * @param pVM The VM to operate on.
5572 * @param pInstrGC Guest context pointer to the privileged instruction
5573 *
5574 * @note returns failure if patching is not allowed or possible
5575 *
5576 */
5577VMMR3DECL(int) PATMR3RemovePatch(PVM pVM, RTRCPTR pInstrGC)
5578{
5579 PPATMPATCHREC pPatchRec;
5580
5581 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5582 if (pPatchRec)
5583 {
5584 int rc = PATMR3DisablePatch(pVM, pInstrGC);
5585 if (rc == VWRN_PATCH_REMOVED)
5586 return VINF_SUCCESS;
5587 return PATMRemovePatch(pVM, pPatchRec, false);
5588 }
5589 AssertFailed();
5590 return VERR_PATCH_NOT_FOUND;
5591}
5592
5593/**
5594 * Mark patch as dirty
5595 *
5596 * @returns VBox status code.
5597 * @param pVM The VM to operate on.
5598 * @param pPatch Patch record
5599 *
5600 * @note returns failure if patching is not allowed or possible
5601 *
5602 */
5603VMMR3DECL(int) PATMR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch)
5604{
5605 if (pPatch->pPatchBlockOffset)
5606 {
5607 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5608 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5609 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5610 }
5611
5612 STAM_COUNTER_INC(&pVM->patm.s.StatDirty);
5613 /* Put back the replaced instruction. */
5614 int rc = PATMR3DisablePatch(pVM, pPatch->pPrivInstrGC);
5615 if (rc == VWRN_PATCH_REMOVED)
5616 return VINF_SUCCESS;
5617
5618 /** @note we don't restore patch pages for patches that are not enabled! */
5619 /** @note be careful when changing this behaviour!! */
5620
5621 /* The patch pages are no longer marked for self-modifying code detection */
5622 if (pPatch->flags & PATMFL_CODE_MONITORED)
5623 {
5624 rc = patmRemovePatchPages(pVM, pPatch);
5625 AssertRCReturn(rc, rc);
5626 }
5627 pPatch->uState = PATCH_DIRTY;
5628
5629 /* Paranoia; make sure this patch is not somewhere in the callchain, so prevent ret instructions from succeeding. */
5630 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
5631
5632 return VINF_SUCCESS;
5633}
5634
5635/**
5636 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5637 *
5638 * @returns Corresponding guest context instruction pointer, or 0 if not found.
5639 * @param pVM The VM to operate on.
5640 * @param pPatch Patch block structure pointer
5641 * @param pPatchGC GC address in patch block
5642 */
5643RTRCPTR patmPatchGCPtr2GuestGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pPatchGC)
5644{
5645 Assert(pPatch->Patch2GuestAddrTree);
5646 /* Get the closest record from below. */
5647 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->Patch2GuestAddrTree, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5648 if (pPatchToGuestRec)
5649 return pPatchToGuestRec->pOrgInstrGC;
5650
5651 return 0;
5652}
5653
5654/** Converts a guest code GC pointer to a patch code GC pointer (if found).
5655 *
5656 * @returns corresponding GC pointer in patch block
5657 * @param pVM The VM to operate on.
5658 * @param pPatch Current patch block pointer
5659 * @param pInstrGC Guest context pointer to privileged instruction
5660 *
5661 */
5662RTRCPTR patmGuestGCPtrToPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5663{
5664 if (pPatch->Guest2PatchAddrTree)
5665 {
5666 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
5667 if (pGuestToPatchRec)
5668 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
5669 }
5670
5671 return 0;
5672}
5673
5674/** Converts a guest code GC pointer to a patch code GC pointer (or the nearest one from below if there is no identical match).
5675 *
5676 * @returns corresponding GC pointer in patch block
5677 * @param pVM The VM to operate on.
5678 * @param pPatch Current patch block pointer
5679 * @param pInstrGC Guest context pointer to privileged instruction
5680 *
5681 */
5682RTRCPTR patmGuestGCPtrToClosestPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5683{
5684 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pInstrGC, false);
5685 if (pGuestToPatchRec)
5686 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
5687
5688 return 0;
5689}
5690
5691/** Converts a guest code GC pointer to a patch code GC pointer (if found).
5692 *
5693 * @returns corresponding GC pointer in patch block
5694 * @param pVM The VM to operate on.
5695 * @param pInstrGC Guest context pointer to privileged instruction
5696 *
5697 */
5698VMMR3DECL(RTRCPTR) PATMR3GuestGCPtrToPatchGCPtr(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC)
5699{
5700 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5701 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && pInstrGC >= pPatchRec->patch.pPrivInstrGC)
5702 {
5703 return patmGuestGCPtrToPatchGCPtr(pVM, &pPatchRec->patch, pInstrGC);
5704 }
5705 return 0;
5706}
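
/*
 * Illustrative sketch (not part of the original source): resuming execution in
 * the patch block when the guest EIP points at code with an enabled patch. The
 * helper name is hypothetical; the translation API is the function above.
 */
#if 0 /* example only, never compiled */
static RTRCPTR exampleResolveResumeAddress(PVM pVM, RTRCPTR pGuestEipGC)
{
    RTRCPTR pPatchGC = PATMR3GuestGCPtrToPatchGCPtr(pVM, pGuestEipGC);
    if (pPatchGC)
        return pPatchGC;    /* continue in the recompiled patch code */
    return pGuestEipGC;     /* no patch; continue in the original guest code */
}
#endif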
5707
5708/**
5709 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5710 *
5711 * @returns original GC instruction pointer or 0 if not found
5712 * @param pVM The VM to operate on.
5713 * @param pPatchGC GC address in patch block
5714 * @param pEnmState State of the translated address (out)
5715 *
5716 */
5717VMMR3DECL(RTRCPTR) PATMR3PatchToGCPtr(PVM pVM, RTRCPTR pPatchGC, PATMTRANSSTATE *pEnmState)
5718{
5719 PPATMPATCHREC pPatchRec;
5720 void *pvPatchCoreOffset;
5721 RTRCPTR pPrivInstrGC;
5722
5723 Assert(PATMIsPatchGCAddr(pVM, pPatchGC));
5724 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5725 if (pvPatchCoreOffset == 0)
5726 {
5727 Log(("PATMR3PatchToGCPtr failed for %RRv offset %x\n", pPatchGC, pPatchGC - pVM->patm.s.pPatchMemGC));
5728 return 0;
5729 }
5730 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
5731 pPrivInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, &pPatchRec->patch, pPatchGC);
5732 if (pEnmState)
5733 {
5734 AssertMsg(pPrivInstrGC && ( pPatchRec->patch.uState == PATCH_ENABLED
5735 || pPatchRec->patch.uState == PATCH_DIRTY
5736 || pPatchRec->patch.uState == PATCH_DISABLE_PENDING
5737 || pPatchRec->patch.uState == PATCH_UNUSABLE),
5738 ("pPrivInstrGC=%RRv uState=%d\n", pPrivInstrGC, pPatchRec->patch.uState));
5739
5740 if ( !pPrivInstrGC
5741 || pPatchRec->patch.uState == PATCH_UNUSABLE
5742 || pPatchRec->patch.uState == PATCH_REFUSED)
5743 {
5744 pPrivInstrGC = 0;
5745 *pEnmState = PATMTRANS_FAILED;
5746 }
5747 else
5748 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pPrivInstrGC)
5749 {
5750 *pEnmState = PATMTRANS_INHIBITIRQ;
5751 }
5752 else
5753 if ( pPatchRec->patch.uState == PATCH_ENABLED
5754 && !(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE))
5755 && pPrivInstrGC > pPatchRec->patch.pPrivInstrGC
5756 && pPrivInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5757 {
5758 *pEnmState = PATMTRANS_OVERWRITTEN;
5759 }
5760 else
5761 if (PATMFindActivePatchByEntrypoint(pVM, pPrivInstrGC))
5762 {
5763 *pEnmState = PATMTRANS_OVERWRITTEN;
5764 }
5765 else
5766 if (pPrivInstrGC == pPatchRec->patch.pPrivInstrGC)
5767 {
5768 *pEnmState = PATMTRANS_PATCHSTART;
5769 }
5770 else
5771 *pEnmState = PATMTRANS_SAFE;
5772 }
5773 return pPrivInstrGC;
5774}
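
/*
 * Illustrative sketch (not part of the original source): translating a patch
 * code address back to guest code and acting on the PATMTRANSSTATE reported by
 * PATMR3PatchToGCPtr above. The helper name is hypothetical.
 */
#if 0 /* example only, never compiled */
static RTRCPTR exampleTranslatePatchEip(PVM pVM, RTRCPTR pPatchEipGC)
{
    if (!PATMIsPatchGCAddr(pVM, pPatchEipGC))
        return pPatchEipGC;             /* already a guest address */

    PATMTRANSSTATE enmState;
    RTRCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pPatchEipGC, &enmState);
    switch (enmState)
    {
        case PATMTRANS_SAFE:
        case PATMTRANS_PATCHSTART:
            return pOrgInstrGC;         /* safe to continue at the original instruction */
        case PATMTRANS_OVERWRITTEN:
        case PATMTRANS_INHIBITIRQ:
        case PATMTRANS_FAILED:
        default:
            return 0;                   /* the caller must handle these cases specially */
    }
}
#endif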
5775
5776/**
5777 * Returns the GC pointer of the patch for the specified GC address
5778 *
5779 * @returns Patch code GC pointer, or 0 if no suitable patch is found.
5780 * @param pVM The VM to operate on.
5781 * @param pAddrGC Guest context address
5782 */
5783VMMR3DECL(RTRCPTR) PATMR3QueryPatchGCPtr(PVM pVM, RTRCPTR pAddrGC)
5784{
5785 PPATMPATCHREC pPatchRec;
5786
5787 // Find the patch record
5788 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pAddrGC);
5789 /** @todo we should only use patches that are enabled! always did this, but it's incorrect! */
5790 if (pPatchRec && (pPatchRec->patch.uState == PATCH_ENABLED || pPatchRec->patch.uState == PATCH_DIRTY))
5791 return PATCHCODE_PTR_GC(&pPatchRec->patch);
5792
5793 return 0;
5794}
5795
5796/**
5797 * Attempt to recover dirty instructions
5798 *
5799 * @returns VBox status code.
5800 * @param pVM The VM to operate on.
5801 * @param pCtx CPU context
5802 * @param pPatch Patch record
5803 * @param pPatchToGuestRec Patch to guest address record
5804 * @param pEip GC pointer of trapping instruction
5805 */
5806static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch, PRECPATCHTOGUEST pPatchToGuestRec, RTRCPTR pEip)
5807{
5808 DISCPUSTATE CpuOld, CpuNew;
5809 uint8_t *pPatchInstrHC, *pCurPatchInstrHC;
5810 int rc;
5811 RTRCPTR pCurInstrGC, pCurPatchInstrGC;
5812 uint32_t cbDirty;
5813 PRECPATCHTOGUEST pRec;
5814 PVMCPU pVCpu = VMMGetCpu0(pVM);
5815
5816 Log(("patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv)\n", pEip, pPatchToGuestRec->pOrgInstrGC));
5817
5818 pRec = pPatchToGuestRec;
5819 pCurInstrGC = pPatchToGuestRec->pOrgInstrGC;
5820 pCurPatchInstrGC = pEip;
5821 cbDirty = 0;
5822 pPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
5823
5824 /* Find all adjacent dirty instructions */
5825 while (true)
5826 {
5827 if (pRec->fJumpTarget)
5828 {
5829 LogRel(("PATM: patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv) ignored, because instruction in function was reused as target of jump\n", pEip, pPatchToGuestRec->pOrgInstrGC));
5830 pRec->fDirty = false;
5831 return VERR_PATCHING_REFUSED;
5832 }
5833
5834 /* Restore original instruction opcode byte so we can check if the write was indeed safe. */
5835 pCurPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
5836 *pCurPatchInstrHC = pRec->u8DirtyOpcode;
5837
5838 /* Only harmless instructions are acceptable. */
5839 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurPatchInstrGC, &CpuOld, 0);
5840 if ( RT_FAILURE(rc)
5841 || !(CpuOld.pCurInstr->optype & OPTYPE_HARMLESS))
5842 {
5843 if (RT_SUCCESS(rc))
5844 cbDirty += CpuOld.opsize;
5845 else
5846 if (!cbDirty)
5847 cbDirty = 1;
5848 break;
5849 }
5850
5851#ifdef DEBUG
5852 char szBuf[256];
5853 szBuf[0] = '\0';
5854 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC, 0, szBuf, sizeof(szBuf), NULL);
5855 Log(("DIRTY: %s\n", szBuf));
5856#endif
5857 /* Mark as clean; if we fail we'll let it always fault. */
5858 pRec->fDirty = false;
5859
5860 /** Remove old lookup record. */
5861 patmr3RemoveP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrGC);
5862
5863 pCurPatchInstrGC += CpuOld.opsize;
5864 cbDirty += CpuOld.opsize;
5865
5866 /* Let's see if there's another dirty instruction right after. */
5867 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
5868 if (!pRec || !pRec->fDirty)
5869 break; /* no more dirty instructions */
5870
5871 /* In case of complex instructions the next guest instruction could be quite far off. */
5872 pCurPatchInstrGC = pRec->Core.Key + pVM->patm.s.pPatchMemGC;
5873 }
5874
5875 if ( RT_SUCCESS(rc)
5876 && (CpuOld.pCurInstr->optype & OPTYPE_HARMLESS)
5877 )
5878 {
5879 uint32_t cbLeft;
5880
5881 pCurPatchInstrHC = pPatchInstrHC;
5882 pCurPatchInstrGC = pEip;
5883 cbLeft = cbDirty;
5884
5885 while (cbLeft && RT_SUCCESS(rc))
5886 {
5887 bool fValidInstr;
5888
5889 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurInstrGC, &CpuNew, 0);
5890
5891 fValidInstr = !!(CpuNew.pCurInstr->optype & OPTYPE_HARMLESS);
5892 if ( !fValidInstr
5893 && (CpuNew.pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW)
5894 )
5895 {
5896 RTRCPTR pTargetGC = PATMResolveBranch(&CpuNew, pCurInstrGC);
5897
5898 if ( pTargetGC >= pPatchToGuestRec->pOrgInstrGC
5899 && pTargetGC <= pPatchToGuestRec->pOrgInstrGC + cbDirty
5900 )
5901 {
5902 /* A relative jump to an instruction inside or to the end of the dirty block is acceptable. */
5903 fValidInstr = true;
5904 }
5905 }
5906
5907 /* If the instruction is completely harmless (which implies a 1:1 patch copy). */
5908 if ( rc == VINF_SUCCESS
5909 && CpuNew.opsize <= cbLeft /* must still fit */
5910 && fValidInstr
5911 )
5912 {
5913#ifdef DEBUG
5914 char szBuf[256];
5915 szBuf[0] = '\0';
5916 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurInstrGC, 0, szBuf, sizeof(szBuf), NULL);
5917 Log(("NEW: %s\n", szBuf));
5918#endif
5919
5920 /* Copy the new instruction. */
5921 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pCurPatchInstrHC, pCurInstrGC, CpuNew.opsize);
5922 AssertRC(rc);
5923
5924 /* Add a new lookup record for the duplicated instruction. */
5925 patmr3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
5926 }
5927 else
5928 {
5929#ifdef DEBUG
5930 char szBuf[256];
5931 szBuf[0] = '\0';
5932 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurInstrGC, 0, szBuf, sizeof(szBuf), NULL);
5933 Log(("NEW: %s (FAILED)\n", szBuf));
5934#endif
5935 /* Restore the old lookup record for the duplicated instruction. */
5936 patmr3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
5937
5938 /** @todo in theory we need to restore the lookup records for the remaining dirty instructions too! */
5939 rc = VERR_PATCHING_REFUSED;
5940 break;
5941 }
5942 pCurInstrGC += CpuNew.opsize;
5943 pCurPatchInstrHC += CpuNew.opsize;
5944 pCurPatchInstrGC += CpuNew.opsize;
5945 cbLeft -= CpuNew.opsize;
5946
5947 /* Check if we expanded a complex guest instruction into a patch stream (e.g. call) */
5948 if (!cbLeft)
5949 {
5950 /* If the next patch instruction doesn't correspond to the next guest instruction, then we have some extra room to fill. */
5951 if (RTAvlU32Get(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC) == NULL)
5952 {
5953 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
5954 if (pRec)
5955 {
5956 unsigned cbFiller = pRec->Core.Key + pVM->patm.s.pPatchMemGC - pCurPatchInstrGC;
5957 uint8_t *pPatchFillHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
5958
5959 Assert(!pRec->fDirty);
5960
5961 Log(("Room left in patched instruction stream (%d bytes)\n", cbFiller));
5962 if (cbFiller >= SIZEOF_NEARJUMP32)
5963 {
5964 pPatchFillHC[0] = 0xE9;
5965 *(uint32_t *)&pPatchFillHC[1] = cbFiller - SIZEOF_NEARJUMP32;
5966#ifdef DEBUG
5967 char szBuf[256];
5968 szBuf[0] = '\0';
5969 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC, 0, szBuf, sizeof(szBuf), NULL);
5970 Log(("FILL: %s\n", szBuf));
5971#endif
5972 }
5973 else
5974 {
5975 for (unsigned i = 0; i < cbFiller; i++)
5976 {
5977 pPatchFillHC[i] = 0x90; /* NOP */
5978#ifdef DEBUG
5979 char szBuf[256];
5980 szBuf[0] = '\0';
5981 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC + i, 0, szBuf, sizeof(szBuf), NULL);
5982 Log(("FILL: %s\n", szBuf));
5983#endif
5984 }
5985 }
5986 }
5987 }
5988 }
5989 }
5990 }
5991 else
5992 rc = VERR_PATCHING_REFUSED;
5993
5994 if (RT_SUCCESS(rc))
5995 {
5996 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyGood);
5997 }
5998 else
5999 {
6000 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyBad);
6001 Assert(cbDirty);
6002
6003 /* Mark the whole instruction stream with breakpoints. */
6004 if (cbDirty)
6005 memset(pPatchInstrHC, 0xCC, cbDirty);
6006
6007 if ( pVM->patm.s.fOutOfMemory == false
6008 && (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER)))
6009 {
6010 rc = patmR3RefreshPatch(pVM, pPatch);
6011 if (RT_FAILURE(rc))
6012 {
6013 LogRel(("PATM: Failed to refresh dirty patch at %RRv. Disabling it.\n", pPatch->patch.pPrivInstrGC));
6014 }
6015 /* Even if we succeed, we must go back to the original instruction as the patched one could be invalid. */
6016 rc = VERR_PATCHING_REFUSED;
6017 }
6018 }
6019 return rc;
6020}
6021
6022/**
6023 * Handle trap inside patch code
6024 *
6025 * @returns VBox status code.
6026 * @param pVM The VM to operate on.
6027 * @param pCtx CPU context
6028 * @param pEip GC pointer of trapping instruction
6029 * @param ppNewEip GC pointer to new instruction
6030 */
6031VMMR3DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *ppNewEip)
6032{
6033 PPATMPATCHREC pPatch = 0;
6034 void *pvPatchCoreOffset;
6035 RTRCUINTPTR offset;
6036 RTRCPTR pNewEip;
6037    int rc;
6038 PRECPATCHTOGUEST pPatchToGuestRec = 0;
6039 PVMCPU pVCpu = VMMGetCpu0(pVM);
6040
6041 Assert(pVM->cCpus == 1);
6042
6043 pNewEip = 0;
6044 *ppNewEip = 0;
6045
6046 STAM_PROFILE_ADV_START(&pVM->patm.s.StatHandleTrap, a);
6047
6048 /* Find the patch record. */
6049 /** @note there might not be a patch to guest translation record (global function) */
6050 offset = pEip - pVM->patm.s.pPatchMemGC;
6051 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
6052 if (pvPatchCoreOffset)
6053 {
6054 pPatch = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6055
6056 Assert(offset >= pPatch->patch.pPatchBlockOffset && offset < pPatch->patch.pPatchBlockOffset + pPatch->patch.cbPatchBlockSize);
6057
6058 if (pPatch->patch.uState == PATCH_DIRTY)
6059 {
6060 Log(("PATMR3HandleTrap: trap in dirty patch at %RRv\n", pEip));
6061 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6062 {
6063 /* Function duplication patches set fPIF to 1 on entry */
6064 pVM->patm.s.pGCStateHC->fPIF = 1;
6065 }
6066 }
6067 else
6068 if (pPatch->patch.uState == PATCH_DISABLED)
6069 {
6070 Log(("PATMR3HandleTrap: trap in disabled patch at %RRv\n", pEip));
6071 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6072 {
6073 /* Function duplication patches set fPIF to 1 on entry */
6074 pVM->patm.s.pGCStateHC->fPIF = 1;
6075 }
6076 }
6077 else
6078 if (pPatch->patch.uState == PATCH_DISABLE_PENDING)
6079 {
6080 RTRCPTR pPrivInstrGC = pPatch->patch.pPrivInstrGC;
6081
6082 Log(("PATMR3HandleTrap: disable operation is pending for patch at %RRv\n", pPatch->patch.pPrivInstrGC));
6083 rc = PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6084 AssertReleaseMsg(rc != VWRN_PATCH_REMOVED, ("PATMR3DisablePatch removed patch at %RRv\n", pPrivInstrGC));
6085 AssertMsg(pPatch->patch.uState == PATCH_DISABLED || pPatch->patch.uState == PATCH_UNUSABLE, ("Unexpected failure to disable patch state=%d rc=%Rrc\n", pPatch->patch.uState, rc));
6086 }
6087
6088 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, offset, false);
6089 AssertReleaseMsg(pPatchToGuestRec, ("PATMR3HandleTrap: Unable to find corresponding guest address for %RRv (offset %x)\n", pEip, offset));
6090
6091 pNewEip = pPatchToGuestRec->pOrgInstrGC;
6092 pPatch->patch.cTraps++;
6093 PATM_STAT_FAULT_INC(&pPatch->patch);
6094 }
6095 else
6096 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 0, ("PATMR3HandleTrap: Unable to find translation record for %RRv (PIF=0)\n", pEip));
6097
6098 /* Check if we were interrupted in PATM generated instruction code. */
6099 if (pVM->patm.s.pGCStateHC->fPIF == 0)
6100 {
6101 DISCPUSTATE Cpu;
6102 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pEip, &Cpu, "PIF Trap: ");
6103 AssertRC(rc);
6104
6105 if ( rc == VINF_SUCCESS
6106 && ( Cpu.pCurInstr->opcode == OP_PUSHF
6107 || Cpu.pCurInstr->opcode == OP_PUSH
6108 || Cpu.pCurInstr->opcode == OP_CALL)
6109 )
6110 {
6111 uint64_t fFlags;
6112
6113 STAM_COUNTER_INC(&pVM->patm.s.StatPushTrap);
6114
6115 if (Cpu.pCurInstr->opcode == OP_PUSH)
6116 {
6117 rc = PGMShwGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6118 if ( rc == VINF_SUCCESS
6119 && ((fFlags & (X86_PTE_P|X86_PTE_RW)) == (X86_PTE_P|X86_PTE_RW)) )
6120 {
6121 /* The stack address is fine, so the push argument is a pointer -> emulate this instruction */
6122
6123 /* Reset the PATM stack. */
6124 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6125
6126 pVM->patm.s.pGCStateHC->fPIF = 1;
6127
6128 Log(("Faulting push -> go back to the original instruction\n"));
6129
6130 /* continue at the original instruction */
6131 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6132 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6133 return VINF_SUCCESS;
6134 }
6135 }
6136
6137 /* Typical pushf (most patches)/push (call patch) trap because of a monitored page. */
6138 rc = PGMShwModifyPage(pVCpu, pCtx->esp, 1, X86_PTE_RW, ~(uint64_t)X86_PTE_RW);
6139 AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
6140 if (rc == VINF_SUCCESS)
6141 {
6142
6143 /* The guest page *must* be present. */
6144 rc = PGMGstGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6145 if (rc == VINF_SUCCESS && (fFlags & X86_PTE_P))
6146 {
6147 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6148 return VINF_PATCH_CONTINUE;
6149 }
6150 }
6151 }
6152 else
6153 if (pPatch->patch.pPrivInstrGC == pNewEip)
6154 {
6155 /* Invalidated patch or first instruction overwritten.
6156 * We can ignore the fPIF state in this case.
6157 */
6158 /* Reset the PATM stack. */
6159 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6160
6161 Log(("Call to invalidated patch -> go back to the original instruction\n"));
6162
6163 pVM->patm.s.pGCStateHC->fPIF = 1;
6164
6165 /* continue at the original instruction */
6166 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6167 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6168 return VINF_SUCCESS;
6169 }
6170
6171 char szBuf[256];
6172 szBuf[0] = '\0';
6173 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pEip, 0, szBuf, sizeof(szBuf), NULL);
6174
6175 /* Very bad. We crashed in emitted code. Probably stack? */
6176 if (pPatch)
6177 {
6178 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6179 ("Crash in patch code %RRv (%RRv) esp=%RX32\nPatch state=%x flags=%RX64 fDirty=%d\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), pPatch->patch.uState, pPatch->patch.flags, pPatchToGuestRec->fDirty, szBuf));
6180 }
6181 else
6182 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6183 ("Crash in patch code %RRv (%RRv) esp=%RX32\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), szBuf));
6184 EMR3FatalError(pVCpu, VERR_INTERNAL_ERROR);
6185 }
6186
6187 /* From here on, we must have a valid patch to guest translation. */
6188 if (pvPatchCoreOffset == 0)
6189 {
6190 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6191 AssertMsgFailed(("PATMR3HandleTrap: patch not found at address %RRv!!\n", pEip));
6192 return VERR_PATCH_NOT_FOUND; //fatal error
6193 }
6194
6195 /* Take care of dirty/changed instructions. */
6196 if (pPatchToGuestRec->fDirty)
6197 {
6198 Assert(pPatchToGuestRec->Core.Key == offset);
6199 Assert(pVM->patm.s.pGCStateHC->fPIF == 1);
6200
6201 rc = patmR3HandleDirtyInstr(pVM, pCtx, pPatch, pPatchToGuestRec, pEip);
6202 if (RT_SUCCESS(rc))
6203 {
6204 /* Retry the current instruction. */
6205 pNewEip = pEip;
6206 rc = VINF_PATCH_CONTINUE; /* Continue at current patch instruction. */
6207 }
6208 else
6209 {
6210 /* Reset the PATM stack. */
6211 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6212
6213 rc = VINF_SUCCESS; /* Continue at original instruction. */
6214 }
6215
6216 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6217 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6218 return rc;
6219 }
6220
6221#ifdef VBOX_STRICT
6222 if (pPatch->patch.flags & PATMFL_DUPLICATE_FUNCTION)
6223 {
6224 DISCPUSTATE cpu;
6225 bool disret;
6226 uint32_t opsize;
6227
6228 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6229 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &pPatch->patch, pNewEip), &opsize, NULL, PATMREAD_RAWCODE);
6230 if (disret && cpu.pCurInstr->opcode == OP_RETN)
6231 {
6232 RTRCPTR retaddr;
6233 PCPUMCTX pCtx2;
6234
6235 pCtx2 = CPUMQueryGuestCtxPtr(pVCpu);
6236
6237 rc = PGMPhysSimpleReadGCPtr(pVCpu, &retaddr, pCtx2->esp, sizeof(retaddr));
6238 AssertRC(rc);
6239
6240 Log(("Return failed at %RRv (%RRv)\n", pEip, pNewEip));
6241 Log(("Expected return address %RRv found address %RRv Psp=%x\n", pVM->patm.s.pGCStackHC[(pVM->patm.s.pGCStateHC->Psp+PATM_STACK_SIZE)/sizeof(RTRCPTR)], retaddr, pVM->patm.s.pGCStateHC->Psp));
6242 }
6243 }
6244#endif
6245
6246 /* Return original address, correct by subtracting the CS base address. */
6247 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6248
6249 /* Reset the PATM stack. */
6250 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6251
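 /* If the instruction we resume at was recorded by the patch as directly following an STI,
    re-arm the interrupt inhibition at that EIP for the guest and clear the flag. */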
6252 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pNewEip)
6253 {
6254 /* Must be a faulting instruction after sti; currently only sysexit, hlt or iret */
6255 Log(("PATMR3HandleTrap %RRv -> inhibit irqs set!\n", pEip));
6256#ifdef VBOX_STRICT
6257 DISCPUSTATE cpu;
6258 bool disret;
6259 uint32_t opsize;
6260
6261 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6262 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &pPatch->patch, pNewEip), &opsize, NULL, PATMREAD_ORGCODE);
6263
6264 if (disret && (cpu.pCurInstr->opcode == OP_SYSEXIT || cpu.pCurInstr->opcode == OP_HLT || cpu.pCurInstr->opcode == OP_INT3))
6265 {
6266 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6267 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &pPatch->patch, pNewEip), &opsize, NULL, PATMREAD_RAWCODE);
6268
6269 Assert(cpu.pCurInstr->opcode == OP_SYSEXIT || cpu.pCurInstr->opcode == OP_HLT || cpu.pCurInstr->opcode == OP_IRET);
6270 }
6271#endif
6272 EMSetInhibitInterruptsPC(pVCpu, pNewEip);
6273 pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts = 0;
6274 }
6275
6276 Log2(("pPatchBlockGC %RRv - pEip %RRv corresponding GC address %RRv\n", PATCHCODE_PTR_GC(&pPatch->patch), pEip, pNewEip));
6277#ifdef LOG_ENABLED
6278 CPUMR3DisasmInstr(pVM, pVCpu, pCtx, pNewEip, "PATCHRET: ");
6279#endif
6280 if (pNewEip >= pPatch->patch.pPrivInstrGC && pNewEip < pPatch->patch.pPrivInstrGC + pPatch->patch.cbPatchJump)
6281 {
6282 /* We can't jump back to code that we've overwritten with a 5 byte jump! */
6283 Log(("Disabling patch at location %RRv due to trap too close to the privileged instruction \n", pPatch->patch.pPrivInstrGC));
6284 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6285 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6286 return VERR_PATCH_DISABLED;
6287 }
6288
6289#ifdef PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
6290 /** @todo Compare to the number of successful runs, add some aging algorithm and determine the best time to disable the patch. */
6291 if (pPatch->patch.cTraps > MAX_PATCH_TRAPS)
6292 {
6293 Log(("Disabling patch at location %RRv due to too many traps inside patch code\n", pPatch->patch.pPrivInstrGC));
6294 // We are only wasting time; back out the patch.
6295 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6296 pTrapRec->pNextPatchInstr = 0;
6297 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6298 return VERR_PATCH_DISABLED;
6299 }
6300#endif
6301
6302 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6303 return VINF_SUCCESS;
6304}
6305
6306
6307/**
6308 * Handles a page fault in a monitored page.
6309 *
6310 * @returns VBox status code.
6311 * @param pVM The VM to operate on.
6312 */
6313VMMR3DECL(int) PATMR3HandleMonitoredPage(PVM pVM)
6314{
6315 RTRCPTR addr = pVM->patm.s.pvFaultMonitor;
6316
6317 addr &= PAGE_BASE_GC_MASK;
6318
6319 int rc = PGMHandlerVirtualDeregister(pVM, addr);
6320 AssertRC(rc); NOREF(rc);
6321
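 /* Renew (disable + re-enable) the first enabled patch found at or below the page start
    whose privileged instruction lies in the faulting page. */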
6322 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, false);
6323 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) == PAGE_ADDRESS(addr))
6324 {
6325 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6326 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6327 rc = PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6328 if (rc == VWRN_PATCH_REMOVED)
6329 return VINF_SUCCESS;
6330
6331 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6332
6333 if (addr == pPatchRec->patch.pPrivInstrGC)
6334 addr++;
6335 }
6336
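 /* Walk all remaining patches whose privileged instruction lies in this page and renew each enabled one as well. */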
6337 for(;;)
6338 {
6339 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, true);
6340
6341 if (!pPatchRec || PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) != PAGE_ADDRESS(addr))
6342 break;
6343
6344 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
6345 {
6346 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6347 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6348 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6349 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6350 }
6351 addr = pPatchRec->patch.pPrivInstrGC + 1;
6352 }
6353
6354 pVM->patm.s.pvFaultMonitor = 0;
6355 return VINF_SUCCESS;
6356}
6357
6358
6359#ifdef VBOX_WITH_STATISTICS
6360
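/**
 * Returns a short string describing the type of the given patch (for the statistics display).
 *
 * @returns Read-only type name.
 * @param pVM The VM to operate on.
 * @param pPatch Patch structure.
 * @remark Uses a static buffer for trap/interrupt handler patches, so the returned string is only valid until the next call.
 */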
6361static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch)
6362{
6363 if (pPatch->flags & PATMFL_SYSENTER)
6364 {
6365 return "SYSENT";
6366 }
6367 else
6368 if (pPatch->flags & (PATMFL_TRAPHANDLER|PATMFL_INTHANDLER))
6369 {
6370 static char szTrap[16];
6371 uint32_t iGate;
6372
6373 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
6374 if (iGate < 256)
6375 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-%2X" : "TRAP-%2X", iGate);
6376 else
6377 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-??" : "TRAP-??");
6378 return szTrap;
6379 }
6380 else
6381 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
6382 return "DUPFUNC";
6383 else
6384 if (pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL)
6385 return "FUNCCALL";
6386 else
6387 if (pPatch->flags & PATMFL_TRAMPOLINE)
6388 return "TRAMP";
6389 else
6390 return patmGetInstructionString(pPatch->opcode, pPatch->flags);
6391}
6392
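/**
 * Returns a short string describing the state of the given patch (for the statistics display).
 *
 * @returns Read-only state name.
 * @param pVM The VM to operate on.
 * @param pPatch Patch structure.
 */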
6393static const char *PATMPatchState(PVM pVM, PPATCHINFO pPatch)
6394{
6395 switch(pPatch->uState)
6396 {
6397 case PATCH_ENABLED:
6398 return "ENA";
6399 case PATCH_DISABLED:
6400 return "DIS";
6401 case PATCH_DIRTY:
6402 return "DIR";
6403 case PATCH_UNUSABLE:
6404 return "UNU";
6405 case PATCH_REFUSED:
6406 return "REF";
6407 case PATCH_DISABLE_PENDING:
6408 return "DIP";
6409 default:
6410 AssertFailed();
6411 return " ";
6412 }
6413}
6414
6415/**
6416 * Resets the sample.
6417 * @param pVM The VM handle.
6418 * @param pvSample The sample registered using STAMR3RegisterCallback.
6419 */
6420static void patmResetStat(PVM pVM, void *pvSample)
6421{
6422 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6423 Assert(pPatch);
6424
6425 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A = 0;
6426 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B = 0;
6427}
6428
6429/**
6430 * Prints the sample into the buffer.
6431 *
6432 * @param pVM The VM handle.
6433 * @param pvSample The sample registered using STAMR3RegisterCallback.
6434 * @param pszBuf The buffer to print into.
6435 * @param cchBuf The size of the buffer.
6436 */
6437static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf)
6438{
6439 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6440 Assert(pPatch);
6441
6442 Assert(pPatch->uState != PATCH_REFUSED);
6443 Assert(!(pPatch->flags & (PATMFL_REPLACE_FUNCTION_CALL|PATMFL_MMIO_ACCESS)));
6444
6445 RTStrPrintf(pszBuf, cchBuf, "size %04x ->%3s %8s - %08d - %08d",
6446 pPatch->cbPatchBlockSize, PATMPatchState(pVM, pPatch), PATMPatchType(pVM, pPatch),
6447 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A, pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B);
6448}
6449
6450/**
6451 * Returns the GC address of the corresponding patch statistics counter.
6452 *
6453 * @returns Stat address.
6454 * @param pVM The VM to operate on.
6455 * @param pPatch Patch structure.
6456 */
6457RTRCPTR patmPatchQueryStatAddress(PVM pVM, PPATCHINFO pPatch)
6458{
6459 Assert(pPatch->uPatchIdx != PATM_STAT_INDEX_NONE);
6460 return pVM->patm.s.pStatsGC + sizeof(STAMRATIOU32) * pPatch->uPatchIdx + RT_OFFSETOF(STAMRATIOU32, u32A);
6461}
6462
6463#endif /* VBOX_WITH_STATISTICS */
6464
6465#ifdef VBOX_WITH_DEBUGGER
6466/**
6467 * The '.patmoff' command.
6468 *
6469 * @returns VBox status code.
6470 * @param pCmd Pointer to the command descriptor (as registered).
6471 * @param pCmdHlp Pointer to command helper functions.
6472 * @param pVM Pointer to the current VM (if any).
6473 * @param paArgs Pointer to (readonly) array of arguments.
6474 * @param cArgs Number of arguments in the array.
 * @param pResult Where to store the result (not used by this command).
6475 */
6476static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
6477{
6478 /*
6479 * Validate input.
6480 */
6481 if (!pVM)
6482 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires VM to be selected.\n");
6483
6484 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, DisableAllPatches, pVM);
6485 PATMR3AllowPatching(pVM, false);
6486 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching disabled\n");
6487}
6488
6489/**
6490 * The '.patmon' command.
6491 *
6492 * @returns VBox status code.
6493 * @param pCmd Pointer to the command descriptor (as registered).
6494 * @param pCmdHlp Pointer to command helper functions.
6495 * @param pVM Pointer to the current VM (if any).
6496 * @param paArgs Pointer to (readonly) array of arguments.
6497 * @param cArgs Number of arguments in the array.
 * @param pResult Where to store the result (not used by this command).
6498 */
6499static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
6500{
6501 /*
6502 * Validate input.
6503 */
6504 if (!pVM)
6505 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires VM to be selected.\n");
6506
6507 PATMR3AllowPatching(pVM, true);
6508 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, EnableAllPatches, pVM);
6509 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching enabled\n");
6510}
6511#endif